diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties
index c902a69207108..c0682da029a61 100644
--- a/.ci/java-versions.properties
+++ b/.ci/java-versions.properties
@@ -8,3 +8,4 @@
 ES_BUILD_JAVA=java11
 ES_RUNTIME_JAVA=java8
 GRADLE_TASK=build
+GRADLE_EXTRA_ARGS=-Dtests.bwc.refspec=elastic/index-lifecycle-6.x
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 9c8ab20a3a61c..f0e5b87c6e08b 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -229,34 +229,6 @@ Pass arbitrary jvm arguments.
 ------------------------------
 ./gradlew test -Dtests.jvm.argline="-Djava.security.debug=access,failure"
 ------------------------------
-== Backwards Compatibility Tests
-
-Running backwards compatibility tests is disabled by default since it
-requires a release version of elasticsearch to be present on the test system.
-To run backwards compatibility tests untar or unzip a release and run the tests
-with the following command:
-
----------------------------------------------------------------------------
-./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch -Dtests.security.manager=false
----------------------------------------------------------------------------
-
-Note that backwards tests must be run with security manager disabled.
-If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z` the path
-can be omitted:
-
----------------------------------------------------------------------------
-./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.security.manager=false
----------------------------------------------------------------------------
-
-To setup the bwc test environment execute the following steps (provided you are
-already in your elasticsearch clone):
-
----------------------------------------------------------------------------
-$ mkdir backwards && cd backwards
-$ curl -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.2.1.tar.gz
-$ tar -xzf elasticsearch-1.2.1.tar.gz
----------------------------------------------------------------------------
-
 == Running verification tasks

 To run all verification tasks, including static checks, unit tests, and integration tests:
@@ -554,25 +526,28 @@ environment variable.

 == Testing backwards compatibility

 Backwards compatibility tests exist to test upgrading from each supported version
-to the current version. To run all backcompat tests use:
+to the current version. To run them all use:

 -------------------------------------------------
 ./gradlew bwcTest
 -------------------------------------------------

-A specific version can be tested as well. For example, to test backcompat with
+A specific version can be tested as well. For example, to test bwc with
 version 5.3.2 run:

 -------------------------------------------------
 ./gradlew v5.3.2#bwcTest
 -------------------------------------------------

-When running `./gradlew check`, some minimal backcompat checks are run. Which version
-is tested depends on the branch. On master, this will test against the current
-stable branch. On the stable branch, it will test against the latest release
-branch. Finally, on a release branch, it will test against the most recent release.
+Tests are run for versions that are not yet released but with which the current version will be compatible.
+These are automatically checked out and built from source.
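+For example, to list the `distribution/bwc` projects that check out and build these
+unreleased versions (the versions they resolve to depend on the branch you are on), run:
+
+-------------------------------------------------
+./gradlew projects | grep bwc
+-------------------------------------------------
+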
+See link:./buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java[VersionCollection] +and link:./distribution/bwc/build.gradle[distribution/bwc/build.gradle] +for more information. + +When running `./gradlew check`, minimal bwc checks are also run against compatible versions that are not yet released. -=== BWC Testing against a specific remote/branch +==== BWC Testing against a specific remote/branch Sometimes a backward compatibility change spans two versions. A common case is a new functionality that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x). @@ -597,7 +572,7 @@ will contain your change. . Push both branches to your remote repository. . Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`. -== Skip fetching latest +==== Skip fetching latest For some BWC testing scenarios, you want to use the local clone of the repository without fetching latest. For these use cases, you can set the system diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index 591fa400d18da..39a2bdfca0953 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -46,10 +46,6 @@ public final class Allocators { private static class NoopGatewayAllocator extends GatewayAllocator { public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); - protected NoopGatewayAllocator() { - super(Settings.EMPTY); - } - @Override public void applyStartedShards(RoutingAllocation allocation, List startedShards) { // noop @@ -79,7 +75,7 @@ public static AllocationService createAllocationService(Settings settings) throw public static AllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings) throws InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException { - return new AllocationService(settings, + return new AllocationService( defaultAllocationDeciders(settings, clusterSettings), NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); } @@ -88,7 +84,7 @@ public static AllocationDeciders defaultAllocationDeciders(Settings settings, Cl IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException { Collection deciders = ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()); - return new AllocationDeciders(settings, deciders); + return new AllocationDeciders(deciders); } diff --git a/build.gradle b/build.gradle index a2b79d31bad7e..9857d5d7a21cf 100644 --- a/build.gradle +++ b/build.gradle @@ -39,7 +39,7 @@ if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { // common maven publishing configuration subprojects { group = 'org.elasticsearch' - version = VersionProperties.elasticsearch.toString() + version = VersionProperties.elasticsearch description = "Elasticsearch subproject ${project.path}" } @@ -103,10 +103,6 @@ subprojects { * in a branch if there are only betas and rcs in the branch so we have * *something* to test against. 
 */
 VersionCollection versions = new VersionCollection(file('server/src/main/java/org/elasticsearch/Version.java').readLines('UTF-8'))
-if (versions.currentVersion != VersionProperties.elasticsearch) {
-  throw new GradleException("The last version in Versions.java [${versions.currentVersion}] does not match " +
-      "VersionProperties.elasticsearch [${VersionProperties.elasticsearch}]")
-}

 // build metadata from previous build, contains eg hashes for bwc builds
 String buildMetadataValue = System.getenv('BUILD_METADATA')
@@ -140,26 +136,16 @@ task verifyVersions {
     if (gradle.startParameter.isOffline()) {
       throw new GradleException("Must run in online mode to verify versions")
     }
-    // Read the list from maven central
-    Node xml
+    // Read the list from maven central.
+    // Fetch the metadata and parse the xml into Version instances, as it is more straightforward
+    // to do that here than to go through bwcVersions (VersionCollection).
     new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
-      xml = new XmlParser().parse(s)
-    }
-    Set knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }.collect { Version.fromString(it) })
-
-    // Limit the known versions to those that should be index compatible, and are not future versions
-    knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
-
-    /* Limit the listed versions to those that have been marked as released.
-     * Versions not marked as released don't get the same testing and we want
-     * to make sure that we flip all unreleased versions to released as soon
-     * as possible after release. */
-    Set actualVersions = new TreeSet<>(bwcVersions.indexCompatible.findAll { false == it.snapshot })
-
-    // Finally, compare!
-    if (knownVersions.equals(actualVersions) == false) {
-      throw new GradleException("out-of-date released versions\nActual :" + actualVersions + "\nExpected:" + knownVersions +
-        "\nUpdate Version.java. 
Note that Version.CURRENT doesn't count because it is not released.") + bwcVersions.compareToAuthoritative( + new XmlParser().parse(s) + .versioning.versions.version + .collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ } + .collect { Version.fromString(it) } + ) } } } @@ -251,20 +237,17 @@ subprojects { "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', ] - - bwcVersions.snapshotProjectNames.each { snapshotName -> - Version snapshot = bwcVersions.getSnapshotForProject(snapshotName) - if (snapshot != null ) { - String snapshotProject = ":distribution:bwc:${snapshotName}" - project(snapshotProject).ext.bwcVersion = snapshot - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${snapshot}"] = snapshotProject - if (snapshot.onOrAfter('6.3.0')) { - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${snapshot}"] = snapshotProject - } + // substitute unreleased versions with projects that check out and build locally + bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> + Version unreleased = unreleasedVersion.version + String snapshotProject = ":distribution:bwc:${unreleasedVersion.gradleProjectName}" + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${unreleased}"] = snapshotProject + if (unreleased.onOrAfter('6.3.0')) { + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${unreleased}"] = snapshotProject } } @@ -299,7 +282,7 @@ subprojects { // other packages (e.g org.elasticsearch.client) will point to server rather than // their own artifacts. if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) { - String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" + String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> if (dep.group == null || false == dep.group.startsWith('org.elasticsearch')) { @@ -572,11 +555,13 @@ wrapper { } } -/* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish - * artifacts for them. */ gradle.projectsEvaluated { subprojects { - if (project.path.startsWith(':qa')) { + /* + * Remove assemble/dependenciesInfo on all qa projects because we don't + * need to publish artifacts for them. 
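+     * The check below matches both a project literally named `qa` and any project
+     * nested under a `:qa:` path.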
+ */ + if (project.name.equals('qa') || project.path.contains(':qa:')) { Task assemble = project.tasks.findByName('assemble') if (assemble) { assemble.enabled = false diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 71828468e64aa..2acf1288578e2 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -41,46 +41,33 @@ if (project == rootProject) { * Propagating version.properties to the rest of the build * *****************************************************************************/ -Properties props = new Properties() -props.load(project.file('version.properties').newDataInputStream()) -version = props.getProperty('elasticsearch') -boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")); -if (snapshot) { - // we update the version property to reflect if we are building a snapshot or a release build - // we write this back out below to load it in the Build.java which will be shown in rest main action - // to indicate this being a snapshot build or a release build. - version += "-SNAPSHOT" - props.put("elasticsearch", version); -} - -File tempPropertiesFile = new File(project.buildDir, "version.properties") -task writeVersionProperties { - inputs.properties(props) - outputs.file(tempPropertiesFile) +// we update the version property to reflect if we are building a snapshot or a release build +// we write this back out below to load it in the Build.java which will be shown in rest main action +// to indicate this being a snapshot build or a release build. +File propsFile = project.file('version.properties') +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(propsFile) +version = props.getProperty("elasticsearch") +processResources { + inputs.file(propsFile) + // We need to be explicit with the version because we add snapshot and qualifier to it based on properties + inputs.property("dynamic_elasticsearch_version", props.getProperty("elasticsearch")) doLast { - OutputStream stream = Files.newOutputStream(tempPropertiesFile.toPath()); + Writer writer = file("$destinationDir/version.properties").newWriter() try { - props.store(stream, "UTF-8"); + props.store(writer, "Generated version properties") } finally { - stream.close(); + writer.close() } } } -processResources { - dependsOn writeVersionProperties - from tempPropertiesFile -} - - -if (JavaVersion.current() < JavaVersion.VERSION_1_10) { - throw new GradleException('At least Java 10 is required to build elasticsearch gradle tools') -} - /***************************************************************************** * Java version * *****************************************************************************/ +if (JavaVersion.current() < JavaVersion.VERSION_11) { + throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') +} // Gradle 4.10 does not support setting this to 11 yet targetCompatibility = "10" sourceCompatibility = "10" @@ -232,3 +219,42 @@ if (project != rootProject) { generatePomFileForPluginMavenPublication.enabled = false } } + +// Define this here because we need it early. 
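+// It reads the raw X.Y.Z version from version.properties and applies the
+// `build.version_qualifier` (alphaN/betaN/rcN, defaulting to alpha1) and `build.snapshot`
+// (defaulting to true) system properties, so a bare 7.0.0 becomes e.g. 7.0.0-alpha1-SNAPSHOT.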
+class VersionPropertiesLoader { + static Properties loadBuildSrcVersion(File input) throws IOException { + Properties props = new Properties(); + InputStream is = new FileInputStream(input) + try { + props.load(is) + } finally { + is.close() + } + loadBuildSrcVersion(props, System.getProperties()) + return props + } + + protected static void loadBuildSrcVersion(Properties loadedProps, Properties systemProperties) { + String elasticsearch = loadedProps.getProperty("elasticsearch") + if (elasticsearch == null) { + throw new IllegalStateException("Elasticsearch version is missing from properties.") + } + if (elasticsearch.matches("[0-9]+\\.[0-9]+\\.[0-9]+") == false) { + throw new IllegalStateException( + "Expected elasticsearch version to be numbers only of the form X.Y.Z but it was: " + + elasticsearch + ) + } + String qualifier = systemProperties.getProperty("build.version_qualifier", "alpha1"); + if (qualifier.isEmpty() == false) { + if (qualifier.matches("(alpha|beta|rc)\\d+") == false) { + throw new IllegalStateException("Invalid qualifier: " + qualifier) + } + elasticsearch += "-" + qualifier + } + if ("true".equals(systemProperties.getProperty("build.snapshot", "true"))) { + elasticsearch += "-SNAPSHOT" + } + loadedProps.put("elasticsearch", elasticsearch) + } +} \ No newline at end of file diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 4d3fe8f19fcce..a97989c1167c2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -696,18 +696,12 @@ class BuildPlugin implements Plugin { jarTask.destinationDir = new File(project.buildDir, 'distributions') // fixup the jar manifest jarTask.doFirst { - final Version versionWithoutSnapshot = new Version( - VersionProperties.elasticsearch.major, - VersionProperties.elasticsearch.minor, - VersionProperties.elasticsearch.revision, - VersionProperties.elasticsearch.suffix, - false) // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes jarTask.manifest.attributes( - 'X-Compile-Elasticsearch-Version': versionWithoutSnapshot, + 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch.replace("-SNAPSHOT", ""), 'X-Compile-Lucene-Version': VersionProperties.lucene, - 'X-Compile-Elasticsearch-Snapshot': VersionProperties.elasticsearch.isSnapshot(), + 'X-Compile-Elasticsearch-Snapshot': VersionProperties.isElasticsearchSnapshot(), 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), 'Build-Java-Version': project.compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy deleted file mode 100644 index 063dcf7d3bb7d..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle - -import org.gradle.api.GradleException -import org.gradle.api.InvalidUserDataException - -import java.util.regex.Matcher - -/** - * The collection of version constants declared in Version.java, for use in BWC testing. - * - * if major+1 released: released artifacts from $version down to major-1.highestMinor.highestPatch, none of these should be snapshots, period. - * if major+1 unreleased: - * - if released: - * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch - * - if unreleased: - * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch - * -- caveat 1: every same major lower minor branch should also be tested if its released, and if not, its a snapshot. There should only be max 2 of these. - * -- caveat 2: the largest released minor branch before the unreleased minor should also be a snapshot - * -- caveat 3: if the current version is a different major than the previous rules apply to major - 1 of the current version - * - * Please note that the caveat's also correspond with the 4 types of snapshots. - * - Caveat 0 - always maintenanceBugfixSnapshot. - * - Caveat 1 - This is tricky. If caveat 3 applies, the highest matching value is nextMinorSnapshot, if there is another it is the stagedMinorSnapshot. - * If caveat 3 does not apply then the only possible value is the stagedMinorSnapshot. - * - Caveat 2 - always nextBugfixSnapshot - * - Caveat 3 - this only changes the applicability of Caveat 1 - * - * Notes on terminology: - * - The case for major+1 being released is accomplished through the isReleasableBranch value. If this is false, then the branch is no longer - * releasable, meaning not to test against any snapshots. - * - Released is defined as having > 1 suffix-free version in a major.minor series. For instance, only 6.2.0 means unreleased, but a - * 6.2.0 and 6.2.1 mean that 6.2.0 was released already. - */ -class VersionCollection { - - private final List versions - Version nextMinorSnapshot - Version stagedMinorSnapshot - Version nextBugfixSnapshot - Version maintenanceBugfixSnapshot - final Version currentVersion - private final TreeSet versionSet = new TreeSet<>() - final List snapshotProjectNames = ['next-minor-snapshot', - 'staged-minor-snapshot', - 'next-bugfix-snapshot', - 'maintenance-bugfix-snapshot'] - - // When we roll 8.0 its very likely these will need to be extracted from this class - private final boolean isReleasableBranch = true - - /** - * Construct a VersionCollection from the lines of the Version.java file. The basic logic for the following is pretty straight forward. - - * @param versionLines The lines of the Version.java file. - */ - VersionCollection(List versionLines) { - final boolean buildSnapshot = System.getProperty("build.snapshot", "true") == "true" - - List versions = [] - // This class should be converted wholesale to use the treeset - - for (final String line : versionLines) { - final Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_alpha\d+|_beta\d+|_rc\d+)? 
.*/ - if (match.matches()) { - final Version foundVersion = new Version( - Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)), - Integer.parseInt(match.group(3)), (match.group(4) ?: '').replace('_', '-'), false) - safeAddToSet(foundVersion) - } - } - - if (versionSet.empty) { - throw new GradleException("Unexpectedly found no version constants in Versions.java") - } - - // If the major version has been released, then remove all of the alpha/beta/rc versions that exist in the set - versionSet.removeAll { it.suffix.isEmpty() == false && isMajorReleased(it, versionSet) } - - // set currentVersion - Version lastVersion = versionSet.last() - currentVersion = new Version(lastVersion.major, lastVersion.minor, lastVersion.revision, lastVersion.suffix, buildSnapshot) - - // remove all of the potential alpha/beta/rc from the currentVersion - versionSet.removeAll { - it.suffix.isEmpty() == false && - it.major == currentVersion.major && - it.minor == currentVersion.minor && - it.revision == currentVersion.revision } - - // re-add the currentVersion to the set - versionSet.add(currentVersion) - - if (isReleasableBranch) { - if (isReleased(currentVersion)) { - // caveat 0 - if the minor has been released then it only has a maintenance version - // go back 1 version to get the last supported snapshot version of the line, which is a maint bugfix - Version highestMinor = getHighestPreviousMinor(currentVersion.major) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) - } else { - // caveat 3 - if our currentVersion is a X.0.0, we need to check X-1 minors to see if they are released - if (currentVersion.minor == 0) { - for (Version version: getMinorTips(currentVersion.major - 1)) { - if (isReleased(version) == false) { - // caveat 1 - This should only ever contain 2 non released branches in flight. An example is 6.x is frozen, - // and 6.2 is cut but not yet released there is some simple logic to make sure that in the case of more than 2, - // it will bail. The order is that the minor snapshot is fulfilled first, and then the staged minor snapshot - if (nextMinorSnapshot == null) { - // it has not been set yet - nextMinorSnapshot = replaceAsSnapshot(version) - } else if (stagedMinorSnapshot == null) { - stagedMinorSnapshot = replaceAsSnapshot(version) - } else { - throw new GradleException("More than 2 snapshot version existed for the next minor and staged (frozen) minors.") - } - } else { - // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break - nextBugfixSnapshot = replaceAsSnapshot(version) - // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1 - break - } - } - // caveat 0 - the last supported snapshot of the line is on a version that we don't support (N-2) - maintenanceBugfixSnapshot = null - } else { - // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line - // only check till minor == 0 of the major - for (Version version: getMinorTips(currentVersion.major)) { - if (isReleased(version) == false) { - // caveat 1 - This should only ever contain 0 or 1 branch in flight. 
An example is 6.x is frozen, and 6.2 is cut - // but not yet released there is some simple logic to make sure that in the case of more than 1, it will bail - if (stagedMinorSnapshot == null) { - stagedMinorSnapshot = replaceAsSnapshot(version) - } else { - throw new GradleException("More than 1 snapshot version existed for the staged (frozen) minors.") - } - } else { - // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break - nextBugfixSnapshot = replaceAsSnapshot(version) - // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1 - break - } - } - // caveat 0 - now dip back 1 version to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) - } - } - } - - this.versions = Collections.unmodifiableList(versionSet.toList()) - } - - /** - * @return The list of versions read from the Version.java file - */ - List getVersions() { - return versions - } - - /** - * Index compat supports 1 previous entire major version. For instance, any 6.x test for this would test all of 5 up to that 6.x version - * - * @return All earlier versions that should be tested for index BWC with the current version. - */ - List getIndexCompatible() { - int actualMajor = (currentVersion.major == 5 ? 2 : currentVersion.major - 1) - return versionSet - .tailSet(Version.fromString("${actualMajor}.0.0")) - .headSet(currentVersion) - .asList() - } - - /** - * Ensures the types of snapshot are not null and are also in the index compat list - */ - List getSnapshotsIndexCompatible() { - List compatSnapshots = [] - List allCompatVersions = getIndexCompatible() - if (allCompatVersions.contains(nextMinorSnapshot)) { - compatSnapshots.add(nextMinorSnapshot) - } - if (allCompatVersions.contains(stagedMinorSnapshot)) { - compatSnapshots.add(stagedMinorSnapshot) - } - if (allCompatVersions.contains(nextBugfixSnapshot)) { - compatSnapshots.add(nextBugfixSnapshot) - } - if (allCompatVersions.contains(maintenanceBugfixSnapshot)) { - compatSnapshots.add(maintenanceBugfixSnapshot) - } - - return compatSnapshots - } - - /** - * Wire compat supports the last minor of the previous major. For instance, any 6.x test would test 5.6 up to that 6.x version - * - * @return All earlier versions that should be tested for wire BWC with the current version. 
- */ - List getWireCompatible() { - // Get the last minor of the previous major - Version lowerBound = getHighestPreviousMinor(currentVersion.major) - return versionSet - .tailSet(Version.fromString("${lowerBound.major}.${lowerBound.minor}.0")) - .headSet(currentVersion) - .toList() - } - - /** - * Ensures the types of snapshot are not null and are also in the wire compat list - */ - List getSnapshotsWireCompatible() { - List compatSnapshots = [] - List allCompatVersions = getWireCompatible() - if (allCompatVersions.contains(nextMinorSnapshot)) { - compatSnapshots.add(nextMinorSnapshot) - } - if (allCompatVersions.contains(stagedMinorSnapshot)) { - compatSnapshots.add(stagedMinorSnapshot) - } - if (allCompatVersions.contains(nextBugfixSnapshot)) { - compatSnapshots.add(nextBugfixSnapshot) - } - if (allCompatVersions.contains(maintenanceBugfixSnapshot)) { - compatSnapshots.add(maintenanceBugfixSnapshot) - } - // There was no wire compat for the 2.x line - compatSnapshots.removeAll {it.major == 2} - - return compatSnapshots - } - - /** - * Grabs the proper snapshot based on the name passed in. These names should correspond with gradle project names under bwc. If you - * are editing this if/else it is only because you added another project under :distribution:bwc. Do not modify this method or its - * reasoning for throwing the exception unless you are sure that it will not harm :distribution:bwc. - */ - Version getSnapshotForProject(String snapshotProjectName) { - if (snapshotProjectName == 'next-minor-snapshot') { - return nextMinorSnapshot - } else if (snapshotProjectName == 'staged-minor-snapshot') { - return stagedMinorSnapshot - } else if (snapshotProjectName == 'maintenance-bugfix-snapshot') { - return maintenanceBugfixSnapshot - } else if (snapshotProjectName == 'next-bugfix-snapshot') { - return nextBugfixSnapshot - } else { - throw new InvalidUserDataException("Unsupported project name ${snapshotProjectName}") - } - } - - /** - * Uses basic logic about our releases to determine if this version has been previously released - */ - private boolean isReleased(Version version) { - return version.revision > 0 - } - - /** - * Validates that the count of non suffixed (alpha/beta/rc) versions in a given major to major+1 is greater than 1. - * This means that there is more than just a major.0.0 or major.0.0-alpha in a branch to signify it has been prevously released. - */ - private boolean isMajorReleased(Version version, TreeSet items) { - return items - .tailSet(Version.fromString("${version.major}.0.0")) - .headSet(Version.fromString("${version.major + 1}.0.0")) - .count { it.suffix.isEmpty() } // count only non suffix'd versions as actual versions that may be released - .intValue() > 1 - } - - /** - * Gets the largest version previous major version based on the nextMajorVersion passed in. - * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 - */ - private Version getHighestPreviousMinor(Integer nextMajorVersion) { - SortedSet result = versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")) - return result.isEmpty() ? 
null : result.last() - } - - /** - * Helper function for turning a version into a snapshot version, removing and readding it to the tree - */ - private Version replaceAsSnapshot(Version version) { - versionSet.remove(version) - Version snapshotVersion = new Version(version.major, version.minor, version.revision, version.suffix, true) - safeAddToSet(snapshotVersion) - return snapshotVersion - } - - /** - * Safely adds a value to the treeset, or bails if the value already exists. - * @param version - */ - private void safeAddToSet(Version version) { - if (versionSet.add(version) == false) { - throw new GradleException("Versions.java contains duplicate entries for ${version}") - } - } - - /** - * Gets the entire set of major.minor.* given those parameters. - */ - private SortedSet getMinorSetForMajor(Integer major, Integer minor) { - return versionSet - .tailSet(Version.fromString("${major}.${minor}.0")) - .headSet(Version.fromString("${major}.${minor + 1}.0")) - } - - /** - * Gets the entire set of major.* to the currentVersion - */ - private SortedSet getMajorSet(Integer major) { - return versionSet - .tailSet(Version.fromString("${major}.0.0")) - .headSet(currentVersion) - } - - /** - * Gets the tip of each minor set and puts it in a list. - * - * examples: - * [1.0.0, 1.1.0, 1.1.1, 1.2.0, 1.3.1] will return [1.0.0, 1.1.1, 1.2.0, 1.3.1] - * [1.0.0, 1.0.1, 1.0.2, 1.0.3, 1.0.4] will return [1.0.4] - */ - private List getMinorTips(Integer major) { - TreeSet majorSet = getMajorSet(major) - List minorList = new ArrayList<>() - for (int minor = majorSet.last().minor; minor >= 0; minor--) { - TreeSet minorSetInMajor = getMinorSetForMajor(major, minor) - minorList.add(minorSetInMajor.last()) - } - return minorList - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 9b2b1ca215673..881fce443a792 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -42,7 +42,7 @@ public class DocsTestPlugin extends RestTestPlugin { * to the version being built for testing but needs to resolve to * the last released version for docs. */ '\\{version\\}': - VersionProperties.elasticsearch.toString().replace('-SNAPSHOT', ''), + VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), '\\{build_flavor\\}' : project.integTestCluster.distribution.startsWith('oss-') ? 
'oss' : 'default', diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index a14a3a680da1c..28d18e9b876f5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -98,7 +98,7 @@ public class PluginBuildPlugin extends BuildPlugin { project.pluginProperties.extension.name + "-client" ) project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> - generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom" + generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.versions.elasticsearch}.pom" } } else { project.plugins.withType(MavenPublishPlugin).whenPluginAdded { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 9588f77a71db7..633647514ed7d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -76,7 +76,7 @@ class PluginPropertiesTask extends Copy { 'name': extension.name, 'description': extension.description, 'version': stringSnap(extension.version), - 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch.toString()), + 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), 'javaVersion': project.targetCompatibility as String, 'classname': extension.classname, 'extendedPlugins': extension.extendedPlugins.join(','), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy deleted file mode 100644 index d8da9a4207bf7..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/FilePermissionsTask.groovy +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.gradle.precommit - -import org.gradle.api.DefaultTask -import org.gradle.api.GradleException -import org.gradle.api.file.FileCollection -import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.OutputFile -import org.gradle.api.tasks.SourceSet -import org.gradle.api.tasks.TaskAction -import org.gradle.api.tasks.util.PatternSet -import org.gradle.api.tasks.util.PatternFilterable -import org.apache.tools.ant.taskdefs.condition.Os - -import java.nio.file.Files -import java.nio.file.attribute.PosixFilePermission -import java.nio.file.attribute.PosixFileAttributeView - -import static java.nio.file.attribute.PosixFilePermission.OTHERS_EXECUTE -import static java.nio.file.attribute.PosixFilePermission.GROUP_EXECUTE -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE - -/** - * Checks source files for correct file permissions. - */ -public class FilePermissionsTask extends DefaultTask { - - /** A pattern set of which files should be checked. */ - private PatternFilterable filesFilter = new PatternSet() - - @OutputFile - File outputMarker = new File(project.buildDir, 'markers/filePermissions') - - FilePermissionsTask() { - onlyIf { !Os.isFamily(Os.FAMILY_WINDOWS) } - description = "Checks java source files for correct file permissions" - // we always include all source files, and exclude what should not be checked - filesFilter.include('**') - // exclude sh files that might have the executable bit set - filesFilter.exclude('**/*.sh') - } - - /** Returns the files this task will check */ - @InputFiles - FileCollection files() { - List collections = new ArrayList<>() - for (SourceSet sourceSet : project.sourceSets) { - collections.add(sourceSet.allSource.matching(filesFilter)) - } - return project.files(collections.toArray()) - } - - @TaskAction - void checkInvalidPermissions() { - List failures = new ArrayList<>() - for (File f : files()) { - PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(f.toPath(), PosixFileAttributeView.class) - Set permissions = fileAttributeView.readAttributes().permissions() - if (permissions.contains(OTHERS_EXECUTE) || permissions.contains(OWNER_EXECUTE) || - permissions.contains(GROUP_EXECUTE)) { - failures.add("Source file is executable: " + f) - } - } - if (failures.isEmpty() == false) { - throw new GradleException('Found invalid file permissions:\n' + failures.join('\n')) - } - outputMarker.setText('done', 'UTF-8') - } - -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 0e706aa5956f1..b5476ea96621b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -22,6 +22,7 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask +import org.elasticsearch.gradle.VersionProperties import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task @@ -220,7 +221,7 @@ class PrecommitTasks { private static Task configureLoggerUsage(Project project) { project.configurations.create('loggerUsagePlugin') project.dependencies.add('loggerUsagePlugin', - 
"org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}") return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) { classpath = project.configurations.loggerUsagePlugin javaHome = project.runtimeJavaHome diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index d2eb6cc60a576..e8415fa66fd43 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -69,7 +69,11 @@ class ClusterConfiguration { */ @Input Closure minimumMasterNodes = { - return getNumNodes() > 1 ? getNumNodes() : -1 + if (bwcVersion != null && bwcVersion.before("6.5.0-SNAPSHOT")) { + return numNodes > 1 ? numNodes : -1 + } else { + return numNodes > 1 ? numNodes.intdiv(2) + 1 : -1 + } } @Input diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e08fd3f6b75e9..2c034f6e4f4b9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -101,7 +101,7 @@ class ClusterFormationTasks { // from here on everything else works the same as if it's the current version, we fetch the BWC version // from mirrors using gradles built-in mechanism etc. - configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion) + configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion.toString()) for (Map.Entry entry : config.plugins.entrySet()) { configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion) } @@ -112,9 +112,12 @@ class ClusterFormationTasks { // we start N nodes and out of these N nodes there might be M bwc nodes. 
// for each of those nodes we might have a different configuration final Configuration distro - final Version elasticsearchVersion + final String elasticsearchVersion if (i < config.numBwcNodes) { - elasticsearchVersion = config.bwcVersion + elasticsearchVersion = config.bwcVersion.toString() + if (project.bwcVersions.unreleased.contains(config.bwcVersion)) { + elasticsearchVersion += "-SNAPSHOT" + } distro = bwcDistro } else { elasticsearchVersion = VersionProperties.elasticsearch @@ -156,8 +159,10 @@ class ClusterFormationTasks { } /** Adds a dependency on the given distribution */ - static void configureDistributionDependency(Project project, String distro, Configuration configuration, Version elasticsearchVersion) { - if (elasticsearchVersion.before('6.3.0') && distro.startsWith('oss-')) { + static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { + if (Version.fromString(elasticsearchVersion).before('6.3.0') && + distro.startsWith('oss-') + ) { distro = distro.substring('oss-'.length()) } String packaging = distro @@ -227,7 +232,7 @@ class ClusterFormationTasks { setup = configureAddKeystoreFileTasks(prefix, project, setup, node) if (node.config.plugins.isEmpty() == false) { - if (node.nodeVersion == VersionProperties.elasticsearch) { + if (node.nodeVersion == Version.fromString(VersionProperties.elasticsearch)) { setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix) } else { setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix) @@ -343,6 +348,13 @@ class ClusterFormationTasks { // this will also allow new and old nodes in the BWC case to become the master esConfig['discovery.initial_state_timeout'] = '0s' } + if (esConfig.containsKey('discovery.zen.master_election.wait_for_joins_timeout') == false) { + // If a node decides to become master based on partial information from the pinging, don't let it hang for 30 seconds to correct + // its mistake. Instead, only wait 5s to do another round of pinging. + // This is necessary since we use 30s as the default timeout in REST requests waiting for cluster formation + // so we need to bail quicker than the default 30s for the cluster to form in time. 
+ esConfig['discovery.zen.master_election.wait_for_joins_timeout'] = '5s' + } esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort esConfig['transport.tcp.port'] = node.config.transportPort @@ -584,7 +596,7 @@ class ClusterFormationTasks { static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) { final FileCollection pluginZip; - if (node.nodeVersion != VersionProperties.elasticsearch) { + if (node.nodeVersion != Version.fromString(VersionProperties.elasticsearch)) { pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName)) } else { pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName)) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 60362a800db55..cd99950902e55 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -112,7 +112,7 @@ class NodeInfo { Version nodeVersion /** Holds node configuration for part of a test cluster. */ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, Version nodeVersion, File sharedDir) { + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum this.project = project @@ -124,7 +124,7 @@ class NodeInfo { } baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') - this.nodeVersion = nodeVersion + this.nodeVersion = Version.fromString(nodeVersion) homeDir = homeDir(baseDir, config.distribution, nodeVersion) pathConf = pathConf(baseDir, config.distribution, nodeVersion) if (config.dataDir != null) { @@ -173,11 +173,11 @@ class NodeInfo { } - if (nodeVersion.before("6.2.0")) { + if (this.nodeVersion.before("6.2.0")) { javaVersion = 8 - } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { + } else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) { javaVersion = 9 - } else if (nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.5.0")) { + } else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) { javaVersion = 10 } @@ -301,7 +301,7 @@ class NodeInfo { } /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro, Version nodeVersion) { + static File homeDir(File baseDir, String distro, String nodeVersion) { String path switch (distro) { case 'integ-test-zip': @@ -321,7 +321,7 @@ class NodeInfo { return new File(baseDir, path) } - static File pathConf(File baseDir, String distro, Version nodeVersion) { + static File pathConf(File baseDir, String distro, String nodeVersion) { switch (distro) { case 'integ-test-zip': case 'zip': diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java index 53855716840dd..31738f140878d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java @@ -12,44 +12,20 @@ public final class Version implements Comparable { private final int minor; private final int revision; private final int id; - private final 
boolean snapshot; - /** - * Suffix on the version name. - */ - private final String suffix; private static final Pattern pattern = Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); - public Version(int major, int minor, int revision, String suffix, boolean snapshot) { + public Version(int major, int minor, int revision) { Objects.requireNonNull(major, "major version can't be null"); Objects.requireNonNull(minor, "minor version can't be null"); Objects.requireNonNull(revision, "revision version can't be null"); this.major = major; this.minor = minor; this.revision = revision; - this.snapshot = snapshot; - this.suffix = suffix == null ? "" : suffix; - - int suffixOffset = 0; - if (this.suffix.isEmpty()) { - // no suffix will be considered smaller, uncomment to change that - // suffixOffset = 100; - } else { - if (this.suffix.contains("alpha")) { - suffixOffset += parseSuffixNumber(this.suffix.substring(6)); - } else if (this.suffix.contains("beta")) { - suffixOffset += 25 + parseSuffixNumber(this.suffix.substring(5)); - } else if (this.suffix.contains("rc")) { - suffixOffset += 50 + parseSuffixNumber(this.suffix.substring(3)); - } - else { - throw new IllegalArgumentException("Suffix must contain one of: alpha, beta or rc"); - } - } // currently snapshot is not taken into account - this.id = major * 10000000 + minor * 100000 + revision * 1000 + suffixOffset * 10 /*+ (snapshot ? 1 : 0)*/; + this.id = major * 10000000 + minor * 100000 + revision * 1000; } private static int parseSuffixNumber(String substring) { @@ -71,17 +47,13 @@ public static Version fromString(final String s) { return new Version( Integer.parseInt(matcher.group(1)), parseSuffixNumber(matcher.group(2)), - parseSuffixNumber(matcher.group(3)), - matcher.group(4), - matcher.group(5) != null + parseSuffixNumber(matcher.group(3)) ); } @Override public String toString() { - final String snapshotStr = snapshot ? "-SNAPSHOT" : ""; - return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." + String.valueOf(getRevision()) + - (suffix == null ? "" : suffix) + snapshotStr; + return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." 
+ String.valueOf(getRevision()); } public boolean before(Version compareTo) { @@ -116,19 +88,6 @@ public boolean after(String compareTo) { return after(fromString(compareTo)); } - public boolean onOrBeforeIncludingSuffix(Version otherVersion) { - if (id != otherVersion.getId()) { - return id < otherVersion.getId(); - } - - if (suffix.equals("")) { - return otherVersion.getSuffix().equals(""); - } - - - return otherVersion.getSuffix().equals("") || suffix.compareTo(otherVersion.getSuffix()) < 0; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -136,16 +95,12 @@ public boolean equals(Object o) { Version version = (Version) o; return major == version.major && minor == version.minor && - revision == version.revision && - id == version.id && - snapshot == version.snapshot && - Objects.equals(suffix, version.suffix); + revision == version.revision; } @Override public int hashCode() { - - return Objects.hash(major, minor, revision, id, snapshot, suffix); + return Objects.hash(major, minor, revision, id); } public int getMajor() { @@ -164,16 +119,9 @@ protected int getId() { return id; } - public boolean isSnapshot() { - return snapshot; - } - - public String getSuffix() { - return suffix; - } - @Override public int compareTo(Version other) { return Integer.compare(getId(), other.getId()); } + } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java new file mode 100644 index 0000000000000..1cf2fd9e1037c --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java @@ -0,0 +1,328 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; + +/** + * A container for elasticsearch supported version information used in BWC testing. + * + * Parse the Java source file containing the versions declarations and use the known rules to figure out which are all + * the version the current one is wire and index compatible with. + * On top of this, figure out which of these are unreleased and provide the branch they can be built from. + * + * Note that in this context, currentVersion is the unreleased version this build operates on. 
+ * At any point in time there will surely be four such unreleased versions being worked on,
+ * thus currentVersion will be one of these.
+ *
+ * Considering:
+ * <dl>
+ *     <dt>M, M > 0</dt>
+ *     <dd>last released major</dd>
+ *     <dt>N, N > 0</dt>
+ *     <dd>last released minor</dd>
+ * </dl>
+ *
+ * <ul>
+ * <li>the unreleased <b>major</b>, M+1.0.0 on the `master` branch</li>
+ * <li>the unreleased <b>minor</b>, M.N.0 on the `M.x` (x is literal) branch</li>
+ * <li>the unreleased <b>bugfix</b>, M.N.c (c > 0) on the `M.N` branch</li>
+ * <li>the unreleased <b>maintenance</b>, M-1.d.e (d > 0, e > 0) on the `(M-1).d` branch</li>
+ * </ul>
+ * In addition to these, there will be a fifth one when a minor reaches feature freeze, we call this the staged
+ * version:
+ * <ul>
+ * <li>the unreleased <b>staged</b>, M.N-2.0 (N > 2) on the `M.(N-2)` branch</li>
+ * </ul>
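+ *
+ * For example (all version numbers here are hypothetical): if Version.java declares 6.4.0, 6.4.1,
+ * 6.4.2, 6.5.0, 6.6.0 and 7.0.0, and this build's current version is 7.0.0, then 6.6.0 is the
+ * unreleased minor (branch `6.x`, Gradle project `:distribution:bwc:minor`), 6.5.0 is the
+ * unreleased staged minor (branch `6.5`, `:distribution:bwc:staged`) and 6.4.2 is the unreleased
+ * bugfix (branch `6.4`, `:distribution:bwc:bugfix`).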
+ *
+ * Each build is only concerned with versions before it, as those are the ones that need to be tested
+ * for backwards compatibility. We never look forward, and don't add forward-facing version numbers to
+ * branches of previous versions.
+ *
+ * Each branch has a current version, and expected compatible versions are parsed from the server code's
+ * `Version` class. We can reliably figure out which versions are unreleased due to the convention of
+ * always adding the next unreleased version number to the server code in all branches when a version is
+ * released. E.g., when M.N.c is released, M.N.c+1 is added to the Version class mentioned above in all
+ * the following branches: `M.b`, `M.x` and `master`, so we can reliably assume that the leaves of the
+ * version tree are unreleased.
+ * This convention is enforced by checking the versions we consider to be unreleased against an
+ * authoritative source (Maven Central).
+ * We are then able to map the unreleased versions to branches in git and to Gradle projects that are
+ * capable of checking out and building them, so we can include these in the testing plan as well.
+ */
+public class VersionCollection {
+
+    private static final Pattern LINE_PATTERN = Pattern.compile(
+        "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*"
+    );
+
+    private final Version currentVersion;
+    private final Map<Integer, List<Version>> groupByMajor;
+
+    public class UnreleasedVersionInfo {
+        public final Version version;
+        public final String branch;
+        public final String gradleProjectName;
+
+        UnreleasedVersionInfo(Version version, String branch, String gradleProjectName) {
+            this.version = version;
+            this.branch = branch;
+            this.gradleProjectName = gradleProjectName;
+        }
+    }
+
+    public VersionCollection(List<String> versionLines) {
+        this(versionLines, Version.fromString(VersionProperties.getElasticsearch()));
+    }
+
+    protected VersionCollection(List<String> versionLines, Version currentVersionProperty) {
+        groupByMajor = versionLines.stream()
+            .map(LINE_PATTERN::matcher)
+            .filter(Matcher::matches)
+            .map(match -> new Version(
+                Integer.parseInt(match.group(1)),
+                Integer.parseInt(match.group(2)),
+                Integer.parseInt(match.group(3))
+            ))
+            .sorted()
+            .distinct()
+            .collect(Collectors.groupingBy(Version::getMajor, Collectors.toList()));
+
+        if (groupByMajor.isEmpty()) {
+            throw new IllegalArgumentException("Could not parse any versions");
+        }
+
+        currentVersion = getLatestVersionByKey(
+            groupByMajor,
+            groupByMajor.keySet().stream().max(Integer::compareTo)
+                .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection"))
+        );
+
+        assertCurrentVersionMatchesParsed(currentVersionProperty);
+
+        assertNoOlderThanTwoMajors();
+    }
+
+    private void assertNoOlderThanTwoMajors() {
+        Set<Integer> majors = groupByMajor.keySet();
+        if (majors.size() != 2 && currentVersion.getMinor() != 0 && currentVersion.getRevision() != 0) {
+            throw new IllegalStateException(
+                "Expected exactly 2 majors in parsed versions but found: " + majors
+            );
+        }
+    }
+
+    private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) {
+        if (currentVersionProperty.equals(currentVersion) == false) {
+            throw new IllegalStateException(
+                "Parsed versions latest version does not match the one configured in build properties. " +
+                    "Parsed latest version is " + currentVersion + " but the build has " + currentVersionProperty
+            );
+        }
+    }
+
+    public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) {
+        getUnreleased().stream()
+            .filter(version -> version.equals(currentVersion) == false)
+            .forEach(version -> consumer.accept(
+                new UnreleasedVersionInfo(
+                    version,
+                    getBranchFor(version),
+                    getGradleProjectNameFor(version)
+                )
+            ));
+    }
+
+    private String getGradleProjectNameFor(Version version) {
+        if (version.equals(currentVersion)) {
+            throw new IllegalArgumentException("The Gradle project to build " + version + " is the current build.");
+        }
+        Map<Integer, List<Version>> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor();
+
+        if (version.getRevision() == 0) {
+            if (releasedMajorGroupedByMinor
+                    .get(releasedMajorGroupedByMinor.keySet().stream().max(Integer::compareTo).orElse(0))
+                    .contains(version)) {
+                return "minor";
+            } else {
+                return "staged";
+            }
+        } else {
+            if (releasedMajorGroupedByMinor
+                    .getOrDefault(version.getMinor(), emptyList())
+                    .contains(version)) {
+                return "bugfix";
+            } else {
+                return "maintenance";
+            }
+        }
+    }
+
+    private String getBranchFor(Version version) {
+        switch (getGradleProjectNameFor(version)) {
+            case "minor":
+                return version.getMajor() + ".x";
+            case "staged":
+            case "maintenance":
+            case "bugfix":
+                return version.getMajor() + "." + version.getMinor();
+            default:
+                throw new IllegalStateException("Unexpected Gradle project name");
+        }
+    }
+
+    public List<Version> getUnreleased() {
+        List<Version> unreleased = new ArrayList<>();
+        // the current version is being worked on, so it is always unreleased
+        unreleased.add(currentVersion);
+
+        // the tip of the previous major is unreleased for sure, be it a minor or a bugfix
+        unreleased.add(getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1));
+
+        final Map<Integer, List<Version>> groupByMinor = getReleasedMajorGroupedByMinor();
+        int greatestMinor = groupByMinor.keySet().stream().max(Integer::compareTo).orElse(0);
+
+        // the last bugfix for this minor series is always unreleased
+        unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor));
+
+        if (groupByMinor.get(greatestMinor).size() == 1) {
+            // we found an unreleased minor
+            unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1));
+            if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) {
+                // we found that the previous minor is staged but not yet released
+                // in this case, the minor before that has a bugfix
+                unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
+            }
+        }
+
+        return unmodifiableList(
+            unreleased.stream()
+                .sorted()
+                .distinct()
+                .collect(Collectors.toList())
+        );
+    }
+
+    private Version getLatestVersionByKey(Map<Integer, List<Version>> groupByMajor, int key) {
+        return groupByMajor.getOrDefault(key, emptyList()).stream()
+            .max(Version::compareTo)
+            .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection"));
+    }
+
+    private Map<Integer, List<Version>> getReleasedMajorGroupedByMinor() {
+        List<Version> currentMajorVersions = groupByMajor.get(currentVersion.getMajor());
+        List<Version> previousMajorVersions = groupByMajor.get(currentVersion.getMajor() - 1);
+
+        final Map<Integer, List<Version>> groupByMinor;
+        if (currentMajorVersions.size() == 1) {
+            // Current is an unreleased major: x.0.0, so we have to look for other unreleased versions in the previous major
+            groupByMinor = previousMajorVersions.stream()
+                .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList()));
+        } else {
+            groupByMinor = currentMajorVersions.stream()
+                .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList()));
+        }
+        return groupByMinor;
+    }
+
+    public void compareToAuthoritative(List<Version> authoritativeReleasedVersions) {
+        Set<Version> notReallyReleased = new HashSet<>(getReleased());
+        notReallyReleased.removeAll(authoritativeReleasedVersions);
+        if (notReallyReleased.isEmpty() == false) {
+            throw new IllegalStateException(
+                "out-of-date released versions" +
+                    "\nFollowing versions are not really released, but the build thinks they are: " + notReallyReleased
+            );
+        }
+
+        Set<Version> incorrectlyConsideredUnreleased = new HashSet<>(authoritativeReleasedVersions);
+        incorrectlyConsideredUnreleased.retainAll(getUnreleased());
+        if (incorrectlyConsideredUnreleased.isEmpty() == false) {
+            throw new IllegalStateException(
+                "out-of-date released versions" +
+                    "\nBuild considers versions unreleased, " +
+                    "but they are released according to an authoritative source: " + incorrectlyConsideredUnreleased +
+                    "\nThe next versions probably need to be added to Version.java (CURRENT doesn't count)."
+            );
+        }
+    }
+
+    private List<Version> getReleased() {
+        List<Version> unreleased = getUnreleased();
+        return groupByMajor.values().stream()
+            .flatMap(Collection::stream)
+            .filter(each -> unreleased.contains(each) == false)
+            .collect(Collectors.toList());
+    }
+
+    public List<Version> getIndexCompatible() {
+        return unmodifiableList(
+            Stream.concat(
+                groupByMajor.get(currentVersion.getMajor() - 1).stream(),
+                groupByMajor.get(currentVersion.getMajor()).stream()
+            )
+                .filter(version -> version.equals(currentVersion) == false)
+                .collect(Collectors.toList())
+        );
+    }
+
+    public List<Version> getWireCompatible() {
+        List<Version> wireCompat = new ArrayList<>();
+
+        List<Version> prevMajors = groupByMajor.get(currentVersion.getMajor() - 1);
+        int minor = prevMajors.get(prevMajors.size() - 1).getMinor();
+        for (int i = prevMajors.size() - 1;
+             i > 0 && prevMajors.get(i).getMinor() == minor;
+             i--
+        ) {
+            wireCompat.add(prevMajors.get(i));
+        }
+        wireCompat.addAll(groupByMajor.get(currentVersion.getMajor()));
+        wireCompat.remove(currentVersion);
+        wireCompat.sort(Version::compareTo);
+
+        return unmodifiableList(wireCompat);
+    }
+
+    public List<Version> getUnreleasedIndexCompatible() {
+        List<Version> unreleasedIndexCompatible = new ArrayList<>(getIndexCompatible());
+        unreleasedIndexCompatible.retainAll(getUnreleased());
+        return unmodifiableList(unreleasedIndexCompatible);
+    }
+
+    public List<Version> getUnreleasedWireCompatible() {
+        List<Version> unreleasedWireCompatible = new ArrayList<>(getWireCompatible());
+        unreleasedWireCompatible.retainAll(getUnreleased());
+        return unmodifiableList(unreleasedWireCompatible);
+    }
+
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
index 9ee597eb25ad8..23ac9458b961d 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
@@ -10,7 +10,7 @@
  * Accessor for shared dependency versions used by elasticsearch, namely the elasticsearch and lucene versions.
*/ public class VersionProperties { - public static Version getElasticsearch() { + public static String getElasticsearch() { return elasticsearch; } @@ -22,12 +22,12 @@ public static Map getVersions() { return versions; } - private static final Version elasticsearch; + private static final String elasticsearch; private static final String lucene; private static final Map versions = new HashMap(); static { Properties props = getVersionProperties(); - elasticsearch = Version.fromString(props.getProperty("elasticsearch")); + elasticsearch = props.getProperty("elasticsearch"); lucene = props.getProperty("lucene"); for (String property : props.stringPropertyNames()) { versions.put(property, props.getProperty(property)); @@ -38,13 +38,17 @@ private static Properties getVersionProperties() { Properties props = new Properties(); InputStream propsStream = VersionProperties.class.getResourceAsStream("/version.properties"); if (propsStream == null) { - throw new RuntimeException("/version.properties resource missing"); + throw new IllegalStateException("/version.properties resource missing"); } try { props.load(propsStream); } catch (IOException e) { - throw new RuntimeException(e); + throw new IllegalStateException("Failed to load version properties", e); } return props; } + + public static boolean isElasticsearchSnapshot() { + return elasticsearch.endsWith("-SNAPSHOT"); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java new file mode 100644 index 0000000000000..100c3a22700ad --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/FilePermissionsTask.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.precommit; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.tools.ant.taskdefs.condition.Os; +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.file.FileCollection; +import org.gradle.api.file.FileTree; +import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.StopExecutionException; +import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.util.PatternFilterable; +import org.gradle.api.tasks.util.PatternSet; + +/** + * Checks source files for correct file permissions. + */ +public class FilePermissionsTask extends DefaultTask { + + /** + * A pattern set of which files should be checked. + */ + private final PatternFilterable filesFilter = new PatternSet() + // we always include all source files, and exclude what should not be checked + .include("**") + // exclude sh files that might have the executable bit set + .exclude("**/*.sh"); + + private File outputMarker = new File(getProject().getBuildDir(), "markers/filePermissions"); + + public FilePermissionsTask() { + setDescription("Checks java source files for correct file permissions"); + } + + private static boolean isExecutableFile(File file) { + try { + Set permissions = Files.getFileAttributeView(file.toPath(), PosixFileAttributeView.class) + .readAttributes() + .permissions(); + return permissions.contains(PosixFilePermission.OTHERS_EXECUTE) + || permissions.contains(PosixFilePermission.OWNER_EXECUTE) + || permissions.contains(PosixFilePermission.GROUP_EXECUTE); + } catch (IOException e) { + throw new IllegalStateException("unable to read the file " + file + " attributes", e); + } + } + + /** + * Returns the files this task will check + */ + @InputFiles + @SkipWhenEmpty + public FileCollection getFiles() { + SourceSetContainer sourceSets = getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); + return sourceSets.stream() + .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) + .reduce(FileTree::plus) + .orElse(getProject().files().getAsFileTree()); + } + + @TaskAction + public void checkInvalidPermissions() throws IOException { + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + throw new StopExecutionException(); + } + List failures = getFiles().getFiles().stream() + .filter(FilePermissionsTask::isExecutableFile) + .map(file -> "Source file is executable: " + file) + .collect(Collectors.toList()); + + if (!failures.isEmpty()) { + throw new GradleException("Found invalid file permissions:\n" + String.join("\n", failures)); + } + + outputMarker.getParentFile().mkdirs(); + Files.write(outputMarker.toPath(), "done".getBytes("UTF-8")); + } + + @OutputFile + public File getOutputMarker() { + return outputMarker; + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 85931c7846b34..012e05f2f6c8d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ 
b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -25,16 +25,13 @@ import org.gradle.api.logging.Logging; import java.util.Objects; -import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; public class ElasticsearchNode { private final String name; private final GradleServicesAdapter services; - private final AtomicInteger noOfClaims = new AtomicInteger(); - private final AtomicBoolean started = new AtomicBoolean(false); + private final AtomicBoolean configurationFrozen = new AtomicBoolean(false); private final Logger logger = Logging.getLogger(ElasticsearchNode.class); private Distribution distribution; @@ -54,7 +51,7 @@ public Version getVersion() { } public void setVersion(Version version) { - checkNotRunning(); + checkFrozen(); this.version = version; } @@ -63,47 +60,26 @@ public Distribution getDistribution() { } public void setDistribution(Distribution distribution) { - checkNotRunning(); + checkFrozen(); this.distribution = distribution; } - public void claim() { - noOfClaims.incrementAndGet(); + void start() { + logger.info("Starting `{}`", this); } - /** - * Start the cluster if not running. Does nothing if the cluster is already running. - * - * @return future of thread running in the background - */ - public Future start() { - if (started.getAndSet(true)) { - logger.lifecycle("Already started cluster: {}", name); - } else { - logger.lifecycle("Starting cluster: {}", name); - } - return null; + void stop(boolean tailLogs) { + logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs); } - /** - * Stops a running cluster if it's not claimed. Does nothing otherwise. - */ - public void unClaimAndStop() { - int decrementedClaims = noOfClaims.decrementAndGet(); - if (decrementedClaims > 0) { - logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); - return; - } - if (started.get() == false) { - logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); - return; - } - logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + public void freeze() { + logger.info("Locking configuration of `{}`", this); + configurationFrozen.set(true); } - private void checkNotRunning() { - if (started.get()) { - throw new IllegalStateException("Configuration can not be altered while running "); + private void checkFrozen() { + if (configurationFrozen.get()) { + throw new IllegalStateException("Configuration can not be altered, already locked"); } } @@ -119,4 +95,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(name); } + + @Override + public String toString() { + return "ElasticsearchNode{name='" + name + "'}"; + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index fcd83a1f46101..5191c7d4febb2 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -33,73 +33,171 @@ import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; public class TestClustersPlugin implements Plugin { private 
static final String LIST_TASK_NAME = "listTestClusters";
     private static final String NODE_EXTENSION_NAME = "testClusters";
+    public static final String PROPERTY_TESTCLUSTERS_RUN_ONCE = "_testclusters_run_once";

     private final Logger logger = Logging.getLogger(TestClustersPlugin.class);

+    // this is static because we need a single mapping across multi-project builds, as some of the listeners we use,
+    // like the task graph, are singletons across multi-project builds.
+    private static final Map<Task, List<ElasticsearchNode>> usedClusters = new ConcurrentHashMap<>();
+    private static final Map<ElasticsearchNode, Integer> claimsInventory = new ConcurrentHashMap<>();
+    private static final Set<ElasticsearchNode> runningClusters = Collections.synchronizedSet(new HashSet<>());
+
     @Override
     public void apply(Project project) {
-        NamedDomainObjectContainer<ElasticsearchNode> container = project.container(
+        // enable the DSL to describe clusters
+        NamedDomainObjectContainer<ElasticsearchNode> container = createTestClustersContainerExtension(project);
+
+        // provide a task to be able to list defined clusters.
+        createListClustersTask(project, container);
+
+        // create DSL for tasks to mark the clusters they use
+        createUseClusterTaskExtension(project);
+
+        // There's a single Gradle instance for multi-project builds, which means that some configuration needs to be
+        // done only once, even if the plugin is applied multiple times as a part of a multi-project build
+        ExtraPropertiesExtension rootProperties = project.getRootProject().getExtensions().getExtraProperties();
+        if (rootProperties.has(PROPERTY_TESTCLUSTERS_RUN_ONCE) == false) {
+            rootProperties.set(PROPERTY_TESTCLUSTERS_RUN_ONCE, true);
+            // When running in the Daemon it's possible for this to hold references to past runs
+            usedClusters.clear();
+            claimsInventory.clear();
+            runningClusters.clear();
+
+            // When we know what tasks will run, we claim the clusters of those tasks to differentiate between clusters
+            // that are defined in the build script and the ones that will actually be used in this invocation of gradle.
+            // We use this information to determine when the last task that required the cluster executed, so that we can
+            // terminate the cluster right away and free up resources.
+            configureClaimClustersHook(project);
+
+            // Before each task, we determine if a cluster needs to be started for that task.
+            configureStartClustersHook(project);
+
+            // After each task we determine if there are clusters that are no longer needed.
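+            // For example (illustrative): if taskA and taskB both call useCluster with the same cluster,
+            // its claim count starts at 2; the cluster is started before the first of the two tasks runs
+            // and stopped only after the last one finishes, when the claim count drops to 0.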
+            configureStopClustersHook(project);
+        }
+    }
+
+    private NamedDomainObjectContainer<ElasticsearchNode> createTestClustersContainerExtension(Project project) {
+        // Create an extension that allows describing clusters
+        NamedDomainObjectContainer<ElasticsearchNode> container = project.container(
             ElasticsearchNode.class,
-            (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project))
+            name -> new ElasticsearchNode(
+                name,
+                GradleServicesAdapter.getInstance(project)
+            )
         );
         project.getExtensions().add(NODE_EXTENSION_NAME, container);
+        return container;
+    }
+
+    private void createListClustersTask(Project project, NamedDomainObjectContainer<ElasticsearchNode> container) {
         Task listTask = project.getTasks().create(LIST_TASK_NAME);
         listTask.setGroup("ES cluster formation");
         listTask.setDescription("Lists all ES clusters configured for this project");
         listTask.doLast((Task task) ->
-            container.forEach((ElasticsearchNode cluster) ->
+            container.forEach(cluster ->
                 logger.lifecycle("  * {}: {}", cluster.getName(), cluster.getDistribution())
             )
         );
+    }

-        Map<Task, List<ElasticsearchNode>> taskToCluster = new HashMap<>();
-
+    private void createUseClusterTaskExtension(Project project) {
         // register an extension for all current and future tasks, so that any task can declare that it wants to use a
         // specific cluster.
         project.getTasks().all((Task task) ->
             task.getExtensions().findByType(ExtraPropertiesExtension.class)
-                .set(
-                    "useCluster",
-                    new Closure<Void>(this, this) {
-                        public void doCall(ElasticsearchNode conf) {
-                            taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf);
-                        }
-                    })
+                .set(
+                    "useCluster",
+                    new Closure<Void>(this, task) {
+                        public void doCall(ElasticsearchNode node) {
+                            usedClusters.computeIfAbsent(task, k -> new ArrayList<>()).add(node);
+                        }
+                    })
         );
+    }

+    private void configureClaimClustersHook(Project project) {
         project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
             taskExecutionGraph.getAllTasks()
                 .forEach(task ->
-                    taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchNode::claim)
+                    usedClusters.getOrDefault(task, Collections.emptyList()).forEach(each -> {
+                        synchronized (claimsInventory) {
+                            claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) + 1);
+                        }
+                        each.freeze();
+                    })
                 )
         );
+    }
+
+    private void configureStartClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskActionListener() {
                 @Override
                 public void beforeActions(Task task) {
                     // we only start the cluster before the actions, so we'll not start it if the task is up-to-date
-                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::start);
+                    final List<ElasticsearchNode> clustersToStart;
+                    synchronized (runningClusters) {
+                        clustersToStart = usedClusters.getOrDefault(task, Collections.emptyList()).stream()
+                            .filter(each -> runningClusters.contains(each) == false)
+                            .collect(Collectors.toList());
+                        runningClusters.addAll(clustersToStart);
+                    }
+                    clustersToStart.forEach(ElasticsearchNode::start);
+                }
                 @Override
                 public void afterActions(Task task) {}
             }
         );
+    }
+
+    private void configureStopClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskExecutionListener() {
                 @Override
                 public void afterExecute(Task task, TaskState state) {
-                    // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the
-                    // cluster to start.
-                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::unClaimAndStop);
+                    // always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been
+                    // and caused the cluster to start.
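+                    // (illustrative note, not in the original source: stop(true) below tails the cluster
+                    // logs, on the assumption that a failing task is when those logs are most useful)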
+ List clustersUsedByTask = usedClusters.getOrDefault( + task, + Collections.emptyList() + ); + if (state.getFailure() != null) { + // If the task fails, and other tasks use this cluster, the other task will likely never be + // executed at all, so we will never get to un-claim and terminate it. + // The downside is that with multi project builds if that other task is in a different + // project and executing right now, we may terminate the cluster while it's running it. + clustersUsedByTask.forEach(each -> each.stop(true)); + } else { + clustersUsedByTask.forEach(each -> { + synchronized (claimsInventory) { + claimsInventory.put(each, claimsInventory.get(each) - 1); + } + }); + final List stoppable; + synchronized (runningClusters) { + stoppable = claimsInventory.entrySet().stream() + .filter(entry -> entry.getValue() == 0) + .filter(entry -> runningClusters.contains(entry.getKey())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + stoppable.forEach(each -> each.stop(false)); + } } @Override public void beforeExecute(Task task) {} diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 1da4c0f9fbd2d..9c776d419b207 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -25,7 +25,6 @@ Truly temporary suppressions suppression of snippets included in documentation that are so wide that they scroll. --> - @@ -78,39 +77,8 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -121,53 +89,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -182,27 +103,6 @@ - - - - - - - - - - - - - - - - - - - - - @@ -291,105 +191,16 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -397,29 +208,6 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy deleted file mode 100644 index f6b9cb5fc95bf..0000000000000 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy +++ /dev/null @@ -1,236 +0,0 @@ -package org.elasticsearch.gradle - -import org.elasticsearch.gradle.test.GradleUnitTestCase -import org.junit.Test - -class VersionCollectionTests extends GradleUnitTestCase { - - String formatVersion(String version) { - return " public static final Version V_${version.replaceAll("\\.", "_")} " - } - List allVersions = [formatVersion('5.0.0'), formatVersion('5.0.0_alpha1'), formatVersion('5.0.0_alpha2'), formatVersion('5.0.0_beta1'), - formatVersion('5.0.0_rc1'),formatVersion('5.0.0_rc2'),formatVersion('5.0.1'), formatVersion('5.0.2'), - formatVersion('5.1.1'), formatVersion('5.1.2'), formatVersion('5.2.0'), formatVersion('5.2.1'), formatVersion('6.0.0'), - formatVersion('6.0.1'), formatVersion('6.1.0'), formatVersion('6.1.1'), formatVersion('6.2.0'), formatVersion('6.3.0'), - formatVersion('7.0.0_alpha1'), formatVersion('7.0.0_alpha2')] - - /** - * This validates the logic of being on a unreleased major branch with a staged major-1.minor sibling. This case happens when a version is - * branched from Major-1.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. 
This test simulates the behavior - * from 7.0 perspective, or master at the time of this writing. - */ - @Test - void testAgainstMajorUnreleasedWithExistingStagedMinorRelease() { - VersionCollection vc = new VersionCollection(allVersions) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) - assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertNull(vc.maintenanceBugfixSnapshot) - - vc.indexCompatible.containsAll(vc.versions) - - // This should contain the same list sans the current version - List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), - Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"), - Version.fromString("6.2.0-SNAPSHOT"), Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 3) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) - } - - /** - * This validates the logic of being on a unreleased major branch without a staged major-1.minor sibling. This case happens once a staged, - * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test - * simulates the behavior from 7.0 perspective, or master at the time of this writing. 
- */ - @Test - void testAgainstMajorUnreleasedWithoutStagedMinorRelease() { - List localVersion = allVersions.clone() - localVersion.add(formatVersion('6.2.1')) // release 6.2 - - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertNull(vc.maintenanceBugfixSnapshot) - - vc.indexCompatible.containsAll(vc.versions) - - // This should contain the same list sans the current version - List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), - Version.fromString("6.1.0"), Version.fromString("6.1.1"), - Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT"), - Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 2) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) - } - - /** - * This validates the logic of being on a unreleased minor branch with a staged minor sibling. This case happens when a version is - * branched from Major.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. This test simulates the behavior - * from 6.3 perspective. 
- */ - @Test - void testAgainstMinorReleasedBranch() { - List localVersion = allVersions.clone() - localVersion.removeAll { it.toString().contains('7_0_0')} // remove all the 7.x so that the actual version is 6.3 (6.x) - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"), - Version.fromString("6.2.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 3) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 3) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the logic of being on a unreleased minor branch without a staged minor sibling. This case happens once a staged, - * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test - * simulates the behavior from 6.3 perspective. 
- */ - @Test - void testAgainstMinorReleasedBranchNoStagedMinor() { - List localVersion = allVersions.clone() - // remove all the 7.x and add a 6.2.1 which means 6.2 was released - localVersion.removeAll { it.toString().contains('7_0_0')} - localVersion.add(formatVersion('6.2.1')) - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1"), - Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 2) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 2) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the logic of being on a released minor branch. At the time of writing, 6.2 is unreleased, so this is equivalent of being - * on 6.1. - */ - @Test - void testAgainstOldMinor() { - - List localVersion = allVersions.clone() - // remove the 7 alphas and the ones greater than 6.1 - localVersion.removeAll { it.toString().contains('7_0_0') || it.toString().contains('V_6_2') || it.toString().contains('V_6_3') } - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, null) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 1) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the lower bound of wire compat, which is 5.0. It also validates that the span of 2.x to 5.x if it is decided to port - * this fix all the way to the maint 5.6 release. 
- */ - @Test - void testFloorOfWireCompatVersions() { - List localVersion = [formatVersion('2.0.0'), formatVersion('2.0.1'), formatVersion('2.1.0'), formatVersion('2.1.1'), - formatVersion('5.0.0'), formatVersion('5.0.1'), formatVersion('5.1.0'), formatVersion('5.1.1'), - formatVersion('5.2.0'),formatVersion('5.2.1'),formatVersion('5.3.0'),formatVersion('5.3.1'), - formatVersion('5.3.2')] - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("2.1.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("2.1.0"), Version.fromString("2.1.1-SNAPSHOT"), Version.fromString("5.0.0"), - Version.fromString("5.0.1"), Version.fromString("5.1.0"), - Version.fromString("5.1.1"), Version.fromString("5.2.0"), Version.fromString("5.2.1"), - Version.fromString("5.3.0"), Version.fromString("5.3.1")] - - List compatible = vc.wireCompatible - assertTrue(wireCompatList.containsAll(compatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 1) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("2.1.1-SNAPSHOT"))) - - // ensure none of the 2.x snapshots appear here, as this is the floor of bwc for wire compat - assertEquals(vc.snapshotsWireCompatible.size(), 0) - } -} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index aca9906701150..02bbd89a3ea63 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -38,6 +39,7 @@ import java.util.Objects; import java.util.stream.Collectors; +@Ignore public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java new file mode 100644 index 0000000000000..d1b4e893ec6ad --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java @@ -0,0 +1,406 @@ +package org.elasticsearch.gradle; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +public class VersionCollectionTests extends GradleUnitTestCase { + + private static final Map> sampleVersions = new HashMap<>(); + + @Rule + public ExpectedException expectedEx = ExpectedException.none(); + + static { + // unreleased major and two unreleased minors ( minor in feature freeze ) + sampleVersions.put("8.0.0", asList( + "7_0_0", "7_0_1", "7_1_0", "7_1_1", "7_2_0", "7_3_0", "8.0.0" + )); + sampleVersions.put("7.0.0-alpha1", asList( + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", + "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", + "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", + "6_3_0", "6_3_1", "6_3_2", + "6_4_0", "6_4_1", "6_4_2", + "6_5_0", "7_0_0_alpha1" + )); + sampleVersions.put("6.5.0", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", + "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", + "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", + "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", + "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0" + )); + sampleVersions.put("6.6.0", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", + "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", + "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", + "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", + "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0", "6_6_0" + )); + sampleVersions.put("6.4.2", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", + "5_3_1", "5_3_2", "5_3_3", "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", + "5_6_0", "5_6_1", "5_6_2", "5_6_3", "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", + "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", + "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", + "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2" + )); + } + + 
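+    // Illustrative note (not part of the original change): each sampleVersions entry maps a simulated
+    // current version to the Version.java constants that would exist on that branch, so the tests below
+    // exercise VersionCollection exactly as the build would see it for that branch.
+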
@Test(expected = IllegalArgumentException.class) + public void testExceptionOnEmpty() { + new VersionCollection(asList("foo", "bar"), Version.fromString("7.0.0")); + } + + @Test(expected = IllegalStateException.class) + public void testExceptionOnNonCurrent() { + new VersionCollection(singletonList(formatVersionToLine("6.5.0")), Version.fromString("7.0.0")); + } + + @Test(expected = IllegalStateException.class) + public void testExceptionOnTooManyMajors() { + new VersionCollection( + asList( + formatVersionToLine("5.6.12"), + formatVersionToLine("6.5.0"), + formatVersionToLine("7.0.0") + ), + Version.fromString("6.5.0") + ); + } + + public void testWireCompatible() { + assertVersionsEquals( + singletonList("6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getWireCompatible() + ); + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT" + ), + getVersionCollection("6.5.0").getWireCompatible() + ); + + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1" + ), + getVersionCollection("6.4.2").getWireCompatible() + ); + + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("6.6.0").getWireCompatible() + ); + + assertVersionsEquals( + singletonList("7.3.0"), + getVersionCollection("8.0.0").getWireCompatible() + ); + } + + public void testWireCompatibleUnreleased() { + assertVersionsEquals( + singletonList("6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getUnreleasedWireCompatible() + ); + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT"), + getVersionCollection("6.5.0").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + singletonList("5.6.13-SNAPSHOT"), + getVersionCollection("6.4.2").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("6.6.0").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + singletonList("7.3.0"), + getVersionCollection("8.0.0").getUnreleasedWireCompatible() + ); + } + + public void testIndexCompatible() { + assertVersionsEquals( + asList( + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", + "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("7.0.0-alpha1").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", 
"5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT" + ), + getVersionCollection("6.5.0").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1" + ), + getVersionCollection("6.4.2").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("6.6.0").getIndexCompatible() + ); + + assertVersionsEquals( + asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0"), + getVersionCollection("8.0.0").getIndexCompatible() + ); + } + + public void testIndexCompatibleUnreleased() { + assertVersionsEquals( + asList("6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT"), + getVersionCollection("6.5.0").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + singletonList("5.6.13-SNAPSHOT"), + getVersionCollection("6.4.2").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("6.6.0").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("7.1.1", "7.2.0", "7.3.0"), + getVersionCollection("8.0.0").getUnreleasedIndexCompatible() + ); + } + + public void testGetUnreleased() { + assertVersionsEquals( + asList("6.4.2", "6.5.0", "7.0.0-alpha1"), + getVersionCollection("7.0.0-alpha1").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2", "6.5.0"), + getVersionCollection("6.5.0").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2"), + getVersionCollection("6.4.2").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), + getVersionCollection("6.6.0").getUnreleased() + ); + assertVersionsEquals( + asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), + getVersionCollection("8.0.0").getUnreleased() + ); + } + + public void testGetBranch() { + assertUnreleasedBranchNames( + asList("6.4", "6.x"), + getVersionCollection("7.0.0-alpha1") + ); + assertUnreleasedBranchNames( + asList("5.6", "6.4"), + getVersionCollection("6.5.0") + ); + assertUnreleasedBranchNames( + singletonList("5.6"), + getVersionCollection("6.4.2") + ); + assertUnreleasedBranchNames( + asList("5.6", "6.4", "6.5"), + getVersionCollection("6.6.0") + ); + assertUnreleasedBranchNames( + asList("7.1", "7.2", "7.x"), + getVersionCollection("8.0.0") + ); + } + + public 
void testGetGradleProjectName() {
+        assertUnreleasedGradleProjectNames(
+            asList("bugfix", "minor"),
+            getVersionCollection("7.0.0-alpha1")
+        );
+        assertUnreleasedGradleProjectNames(
+            asList("maintenance", "bugfix"),
+            getVersionCollection("6.5.0")
+        );
+        assertUnreleasedGradleProjectNames(
+            singletonList("maintenance"),
+            getVersionCollection("6.4.2")
+        );
+        assertUnreleasedGradleProjectNames(
+            asList("maintenance", "bugfix", "staged"),
+            getVersionCollection("6.6.0")
+        );
+        assertUnreleasedGradleProjectNames(
+            asList("bugfix", "staged", "minor"),
+            getVersionCollection("8.0.0")
+        );
+    }
+
+    public void testCompareToAuthoritative() {
+        List<String> listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0");
+        List<Version> authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0")
+            .map(Version::fromString)
+            .collect(Collectors.toList());
+
+        VersionCollection vc = new VersionCollection(
+            listOfVersions.stream()
+                .map(this::formatVersionToLine)
+                .collect(Collectors.toList()),
+            Version.fromString("8.0.0")
+        );
+        vc.compareToAuthoritative(authoritativeReleasedVersions);
+    }
+
+    public void testCompareToAuthoritativeUnreleasedActuallyReleased() {
+        List<String> listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0");
+        List<Version> authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0", "7.1.1", "8.0.0")
+            .map(Version::fromString)
+            .collect(Collectors.toList());
+
+        VersionCollection vc = new VersionCollection(
+            listOfVersions.stream()
+                .map(this::formatVersionToLine)
+                .collect(Collectors.toList()),
+            Version.fromString("8.0.0")
+        );
+        expectedEx.expect(IllegalStateException.class);
+        expectedEx.expectMessage("but they are released");
+        vc.compareToAuthoritative(authoritativeReleasedVersions);
+    }
+
+    public void testCompareToAuthoritativeNotReallyReleased() {
+        List<String> listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0");
+        List<Version> authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1")
+            .map(Version::fromString)
+            .collect(Collectors.toList());
+        VersionCollection vc = new VersionCollection(
+            listOfVersions.stream()
+                .map(this::formatVersionToLine)
+                .collect(Collectors.toList()),
+            Version.fromString("8.0.0")
+        );
+        expectedEx.expect(IllegalStateException.class);
+        expectedEx.expectMessage("not really released");
+        vc.compareToAuthoritative(authoritativeReleasedVersions);
+    }
+
+    private void assertUnreleasedGradleProjectNames(List<String> expectedNames, VersionCollection versionCollection) {
+        List<String> actualNames = new ArrayList<>();
+        versionCollection.forPreviousUnreleased(unreleasedVersion ->
+            actualNames.add(unreleasedVersion.gradleProjectName)
+        );
+        assertEquals(expectedNames, actualNames);
+    }
+
+    private void assertUnreleasedBranchNames(List<String> expectedBranches, VersionCollection versionCollection) {
+        List<String> actualBranches = new ArrayList<>();
+        versionCollection.forPreviousUnreleased(unreleasedVersionInfo ->
+            actualBranches.add(unreleasedVersionInfo.branch)
+        );
+        assertEquals(expectedBranches, actualBranches);
+    }
+
+    private String formatVersionToLine(final String version) {
+        return " public static final Version V_" + version.replaceAll("\\.", "_") + " ";
+    }
+
+    private void assertVersionsEquals(List<String> expected, List<Version> actual) {
+        assertEquals(
+            expected.stream()
+                .map(Version::fromString)
+                .collect(Collectors.toList()),
+            actual
+        );
+    }
+
+    private VersionCollection getVersionCollection(String currentVersion) {
+        return new VersionCollection(
+            sampleVersions.get(currentVersion).stream()
.map(this::formatVersionToLine) + .collect(Collectors.toList()), + Version.fromString(currentVersion) + ); + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java index d3c3b4a43cb41..3394285157e17 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -33,27 +33,23 @@ public class VersionTests extends GradleUnitTestCase { public ExpectedException expectedEx = ExpectedException.none(); public void testVersionParsing() { - assertVersionEquals("7.0.1", 7, 0, 1, "", false); - assertVersionEquals("7.0.1-alpha2", 7, 0, 1, "-alpha2", false); - assertVersionEquals("5.1.2-rc3", 5, 1, 2, "-rc3", false); - assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2, "", true); - assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2, "-beta1", true); + assertVersionEquals("7.0.1", 7, 0, 1); + assertVersionEquals("7.0.1-alpha2", 7, 0, 1); + assertVersionEquals("5.1.2-rc3", 5, 1, 2); + assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2); + assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2); } public void testCompareWithStringVersions() { assertTrue("1.10.20 is not interpreted as before 2.0.0", Version.fromString("1.10.20").before("2.0.0") ); - assertTrue("7.0.0-alpha1 is not interpreted as before 7.0.0-alpha2", - Version.fromString("7.0.0-alpha1").before("7.0.0-alpha2") - ); assertTrue("7.0.0-alpha1 should be equal to 7.0.0-alpha1", Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) ); assertTrue("7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT")) ); - assertEquals(Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("5.2.1-SNAPSHOT")); } public void testCollections() { @@ -78,62 +74,12 @@ public void testCollections() { } public void testToString() { - assertEquals("7.0.1", new Version(7, 0, 1, null, false).toString()); + assertEquals("7.0.1", new Version(7, 0, 1).toString()); } public void testCompareVersions() { - assertEquals(0, new Version(7, 0, 0, null, true).compareTo( - new Version(7, 0, 0, null, true) - )); - assertEquals(0, new Version(7, 0, 0, null, true).compareTo( - new Version(7, 0, 0, "", true) - )); - - // snapshot is not taken into account TODO inconsistent with equals - assertEquals( - 0, - new Version(7, 0, 0, "", false).compareTo( - new Version(7, 0, 0, null, true)) - ); - // without sufix is smaller than with TODO - assertOrder( - new Version(7, 0, 0, null, false), - new Version(7, 0, 0, "-alpha1", false) - ); - // numbered sufix - assertOrder( - new Version(7, 0, 0, "-alpha1", false), - new Version(7, 0, 0, "-alpha2", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-alpha8", false), - new Version(7, 0, 0, "-rc1", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-alpha8", false), - new Version(7, 0, 0, "-beta1", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-beta8", false), - new Version(7, 0, 0, "-rc1", false) - ); - // major takes precedence - assertOrder( - new Version(6, 10, 10, "-alpha8", true), - new Version(7, 0, 0, "-alpha2", false) - ); - // then minor - assertOrder( - new Version(7, 0, 10, "-alpha8", true), - new Version(7, 1, 0, "-alpha2", false) - ); - // then revision - assertOrder( - new Version(7, 1, 0, "-alpha8", true), - new Version(7, 1, 10, "-alpha2", false) + assertEquals(0, + new Version(7, 0, 0).compareTo(new 
Version(7, 0, 0)) ); } @@ -149,33 +95,15 @@ public void testExceptionSyntax() { Version.fromString("foo.bar.baz"); } - public void testExceptionSuffixNumber() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Invalid suffix"); - new Version(7, 1, 1, "-alpha", true); - } - - public void testExceptionSuffix() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Suffix must contain one of:"); - new Version(7, 1, 1, "foo1", true); - } - private void assertOrder(Version smaller, Version bigger) { assertEquals(smaller + " should be smaller than " + bigger, -1, smaller.compareTo(bigger)); } - private void assertVersionEquals(String stringVersion, int major, int minor, int revision, String sufix, boolean snapshot) { + private void assertVersionEquals(String stringVersion, int major, int minor, int revision) { Version version = Version.fromString(stringVersion); assertEquals(major, version.getMajor()); assertEquals(minor, version.getMinor()); assertEquals(revision, version.getRevision()); - if (snapshot) { - assertTrue("Expected version to be a snapshot but it was not", version.isSnapshot()); - } else { - assertFalse("Expected version not to be a snapshot but it was", version.isSnapshot()); - } - assertEquals(sufix, version.getSuffix()); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java new file mode 100644 index 0000000000000..1b3593d52424f --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/FilePermissionsTaskTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.precommit; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.util.List; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.tools.ant.taskdefs.condition.Os; +import org.elasticsearch.gradle.test.GradleUnitTestCase; +import org.gradle.api.GradleException; +import org.gradle.api.Project; +import org.gradle.api.plugins.JavaPlugin; +import org.gradle.testfixtures.ProjectBuilder; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; + +public class FilePermissionsTaskTests extends GradleUnitTestCase { + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + public void testCheckPermissionsWhenAnExecutableFileExists() throws Exception { + RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + + Project project = createProject(); + + FilePermissionsTask filePermissionsTask = createTask(project); + + File file = new File(project.getProjectDir(), "src/main/java/Code.java"); + file.getParentFile().mkdirs(); + file.createNewFile(); + file.setExecutable(true); + + try { + filePermissionsTask.checkInvalidPermissions(); + Assert.fail("the check should have failed because of the executable file permission"); + } catch (GradleException e) { + assertTrue(e.getMessage().startsWith("Found invalid file permissions")); + } + file.delete(); + } + + + public void testCheckPermissionsWhenNoFileExists() throws Exception { + RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + + Project project = createProject(); + + FilePermissionsTask filePermissionsTask = createTask(project); + + filePermissionsTask.checkInvalidPermissions(); + + File outputMarker = new File(project.getBuildDir(), "markers/filePermissions"); + List result = Files.readAllLines(outputMarker.toPath(), Charset.forName("UTF-8")); + assertEquals("done", result.get(0)); + } + + public void testCheckPermissionsWhenNoExecutableFileExists() throws Exception { + RandomizedTest.assumeFalse("Functionality is Unix specific", Os.isFamily(Os.FAMILY_WINDOWS)); + + Project project = createProject(); + + FilePermissionsTask filePermissionsTask = createTask(project); + + File file = new File(project.getProjectDir(), "src/main/java/Code.java"); + file.getParentFile().mkdirs(); + file.createNewFile(); + + filePermissionsTask.checkInvalidPermissions(); + + File outputMarker = new File(project.getBuildDir(), "markers/filePermissions"); + List result = Files.readAllLines(outputMarker.toPath(), Charset.forName("UTF-8")); + assertEquals("done", result.get(0)); + + file.delete(); + + } + + private Project createProject() throws IOException { + Project project = ProjectBuilder.builder().withProjectDir(temporaryFolder.newFolder()).build(); + project.getPlugins().apply(JavaPlugin.class); + return project; + } + + private FilePermissionsTask createTask(Project project) { + return project.getTasks().create("filePermissionsTask", FilePermissionsTask.class); + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f8e3cf88c4094..025c549489afa 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -78,8 +78,16 @@ protected void 
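The FilePermissionsTask tests above hinge on spotting files with an executable bit set. As a hedged sketch of that check (the task's actual implementation is not part of this diff, and the helper below is illustrative), plain NIO is enough:

---------------------------------------------------
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ExecutableFileCheck {

    private static final Set<PosixFilePermission> EXECUTE_BITS = EnumSet.of(
        PosixFilePermission.OWNER_EXECUTE,
        PosixFilePermission.GROUP_EXECUTE,
        PosixFilePermission.OTHERS_EXECUTE);

    // Collect regular files under root that carry any execute bit; finding one
    // is the condition the tests above expect to raise a GradleException.
    static List<Path> findExecutables(Path root) throws IOException {
        try (Stream<Path> files = Files.walk(root)) {
            return files.filter(Files::isRegularFile)
                        .filter(ExecutableFileCheck::isExecutable)
                        .collect(Collectors.toList());
        }
    }

    private static boolean isExecutable(Path path) {
        try {
            return Files.getPosixFilePermissions(path).stream().anyMatch(EXECUTE_BITS::contains);
        } catch (IOException | UnsupportedOperationException e) {
            // Non-POSIX file systems land here, mirroring the tests being skipped on Windows.
            return false;
        }
    }
}
---------------------------------------------------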
assertTaskFailed(BuildResult result, String taskName) { assertTaskOutcome(result, taskName, TaskOutcome.FAILED); } - protected void assertTaskSuccessful(BuildResult result, String taskName) { - assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + protected void assertTaskSuccessful(BuildResult result, String... taskNames) { + for (String taskName : taskNames) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + } + + protected void assertTaskSkipped(BuildResult result, String... taskNames) { + for (String taskName : taskNames) { + assertTaskOutcome(result, taskName, TaskOutcome.SKIPPED); + } } private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { @@ -96,17 +104,19 @@ private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome ); } - protected void assertTaskUpToDate(BuildResult result, String taskName) { - BuildTask task = result.task(taskName); - if (task == null) { - fail("Expected task `" + taskName + "` to be up-to-date, but it did not run"); + protected void assertTaskUpToDate(BuildResult result, String... taskNames) { + for (String taskName : taskNames) { + BuildTask task = result.task(taskName); + if (task == null) { + fail("Expected task `" + taskName + "` to be up-to-date, but it did not run"); + } + assertEquals( + "Expected task to be up to date but it was: " + task.getOutcome() + + "\n\nOutput is:\n" + result.getOutput(), + TaskOutcome.UP_TO_DATE, + task.getOutcome() + ); } - assertEquals( - "Expected task to be up to date but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , - TaskOutcome.UP_TO_DATE, - task.getOutcome() - ); } protected void assertBuildFileExists(BuildResult result, String projectName, String path) { @@ -139,4 +149,16 @@ protected String getLocalTestRepoPath() { return file.getAbsolutePath(); } } + + public void assertOutputOnlyOnce(String output, String... 
text) { + for (String each : text) { + int i = output.indexOf(each); + if (i == -1) { + fail("Expected `" + each + "` to appear exactly once, but it did not appear at all.\n\nOutput is:\n" + output); + } + if (i != output.lastIndexOf(each)) { + fail("Expected `" + each + "` to appear exactly once, but it appeared multiple times.\n\nOutput is:\n" + output); + } + } + } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index f1461dbbd3d97..c6e3b2ca370ce 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,124 +21,130 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testkit.runner.TaskOutcome; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import java.util.Arrays; public class TestClustersPluginIT extends GradleIntegrationTestCase { public void testListClusters() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("listTestClusters", "-s") - .withPluginClasspath() - .build(); + BuildResult result = getTestClustersRunner("listTestClusters").build(); - assertEquals(TaskOutcome.SUCCESS, result.task(":listTestClusters").getOutcome()); + assertTaskSuccessful(result, ":listTestClusters"); assertOutputContains( result.getOutput(), - " * myTestCluster:" + " * myTestCluster:" ); - } public void testUseClusterByOne() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("user1", "-s") - .withPluginClasspath() - .build(); - - assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); - assertOutputContains( - result.getOutput(), - "Starting cluster: myTestCluster", - "Stopping myTestCluster, number of claims is 0" - ); + BuildResult result = getTestClustersRunner("user1").build(); + assertTaskSuccessful(result, ":user1"); + assertStartedAndStoppedOnce(result); } public void testUseClusterByOneWithDryRun() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("user1", "-s", "--dry-run") - .withPluginClasspath() - .build(); - + BuildResult result = getTestClustersRunner("--dry-run", "user1").build(); assertNull(result.task(":user1")); - assertOutputDoesNotContain( - result.getOutput(), - "Starting cluster: myTestCluster", - "Stopping myTestCluster, number of claims is 0" - ); + assertNotStarted(result); } public void testUseClusterByTwo() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("user1", "user2", "-s") - .withPluginClasspath() - .build(); + BuildResult result = getTestClustersRunner("user1", "user2").build(); + assertTaskSuccessful(result, ":user1", ":user2"); + assertStartedAndStoppedOnce(result); + } - assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); - assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + public void testUseClusterByUpToDateTask() { + BuildResult result = getTestClustersRunner("upToDate1", "upToDate2").build(); + assertTaskUpToDate(result, ":upToDate1", ":upToDate2"); + assertNotStarted(result); + } + + public void
testUseClusterBySkippedTask() { + BuildResult result = getTestClustersRunner("skipped1", "skipped2").build(); + assertTaskSkipped(result, ":skipped1", ":skipped2"); + assertNotStarted(result); + } + + public void testUseClusterBySkippedAndWorkingTask() { + BuildResult result = getTestClustersRunner("skipped1", "user1").build(); + assertTaskSkipped(result, ":skipped1"); + assertTaskSuccessful(result, ":user1"); assertOutputContains( result.getOutput(), - "Starting cluster: myTestCluster", - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "Stopping myTestCluster, number of claims is 0" + "> Task :user1", + "Starting `ElasticsearchNode{name='myTestCluster'}`", + "Stopping `ElasticsearchNode{name='myTestCluster'}`" ); } - public void testUseClusterByUpToDateTask() { + public void testMultiProject() { BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("upToDate1", "upToDate2", "-s") + .withProjectDir(getProjectDir("testclusters_multiproject")) + .withArguments("user1", "user2", "-s", "-i", "--parallel") .withPluginClasspath() .build(); + assertTaskSuccessful(result, ":user1", ":user2"); + + assertStartedAndStoppedOnce(result); + } - assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); - assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + public void testUseClusterByFailingOne() { + BuildResult result = getTestClustersRunner("itAlwaysFails").buildAndFail(); + assertTaskFailed(result, ":itAlwaysFails"); + assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "cluster was not running: myTestCluster" + "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true", + "Execution failed for task ':itAlwaysFails'." ); - assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); } - public void testUseClusterBySkippedTask() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("skipped1", "skipped2", "-s") - .withPluginClasspath() - .build(); + public void testUseClusterByFailingDependency() { + BuildResult result = getTestClustersRunner("dependsOnFailed").buildAndFail(); + assertTaskFailed(result, ":itAlwaysFails"); + assertNull(result.task(":dependsOnFailed")); + assertStartedAndStoppedOnce(result); + assertOutputContains( + result.getOutput(), + "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true", + "Execution failed for task ':itAlwaysFails'." + ); + } - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + public void testConfigurationLocked() { + BuildResult result = getTestClustersRunner("illegalConfigAlter").buildAndFail(); + assertTaskFailed(result, ":illegalConfigAlter"); assertOutputContains( result.getOutput(), - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "cluster was not running: myTestCluster" + "Configuration can not be altered, already locked" ); - assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); } - public void tetUseClusterBySkippedAndWorkingTask() { - BuildResult result = GradleRunner.create() + private void assertNotStarted(BuildResult result) { + assertOutputDoesNotContain( + result.getOutput(), + "Starting ", + "Stopping " + ); + } + + private GradleRunner getTestClustersRunner(String... 
tasks) { + String[] arguments = Arrays.copyOf(tasks, tasks.length + 2); + arguments[tasks.length] = "-s"; + arguments[tasks.length + 1] = "-i"; + return GradleRunner.create() .withProjectDir(getProjectDir("testclusters")) - .withArguments("skipped1", "user1", "-s") - .withPluginClasspath() - .build(); + .withArguments(arguments) + .withPluginClasspath(); + } - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); - assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); - assertOutputContains( + + private void assertStartedAndStoppedOnce(BuildResult result) { + assertOutputOnlyOnce( result.getOutput(), - "> Task :user1", - "Starting cluster: myTestCluster", - "Stopping myTestCluster, number of claims is 0" + "Starting `ElasticsearchNode{name='myTestCluster'}`", + "Stopping `ElasticsearchNode{name='myTestCluster'}`" ); } - } diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index 470111f056ef9..bd1cfc143f42d 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -39,3 +39,23 @@ task skipped2 { enabled = false useCluster testClusters.myTestCluster } + +task itAlwaysFails { + doLast { + throw new GradleException("Task 1 failed!") + } + useCluster testClusters.myTestCluster +} + +task dependsOnFailed { + dependsOn itAlwaysFails + useCluster testClusters.myTestCluster +} + +task illegalConfigAlter { + useCluster testClusters.myTestCluster + doFirst { + println "Going to alter configuration after use" + testClusters.myTestCluster.distribution = 'ZIP_OSS' + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle new file mode 100644 index 0000000000000..d9f18afd68b90 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle @@ -0,0 +1,18 @@ +plugins { + id 'elasticsearch.testclusters' +} +testClusters { + myTestCluster +} +task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle new file mode 100644 index 0000000000000..2e1461f0b0f28 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle @@ -0,0 +1,21 @@ +plugins { + id 'elasticsearch.testclusters' +} + +testClusters { + myTestCluster +} + +task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} + +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/build.gradle new file mode 100644 index 0000000000000..3527d1821d212 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/build.gradle @@ -0,0 +1,21 @@ +plugins { + id 'elasticsearch.testclusters' +} + +testClusters { + myTestCluster +} + +task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} + +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle new file mode 100644 index 
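These testKit sample builds are driven through Gradle TestKit by the integration tests above. A minimal standalone invocation, assuming the same project layout, would look like:

---------------------------------------------------
import java.io.File;

import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.gradle.testkit.runner.TaskOutcome;

public class TestClustersRunnerExample {
    public static void main(String[] args) {
        // "-s" and "-i" are the same flags getTestClustersRunner appends:
        // stacktraces and info-level logging for easier debugging.
        BuildResult result = GradleRunner.create()
            .withProjectDir(new File("buildSrc/src/testKit/testclusters"))
            .withArguments("user1", "-s", "-i")
            .withPluginClasspath() // exposes the plugin under test to the sample build
            .build();
        if (result.task(":user1").getOutcome() != TaskOutcome.SUCCESS) {
            throw new AssertionError("expected :user1 to succeed");
        }
    }
}
---------------------------------------------------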
0000000000000..f63a77aaea42d --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle @@ -0,0 +1,5 @@ +task hello() { + doLast { + println "This task does not use the testclusters plugin, so it will have no extension." + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/settings.gradle b/buildSrc/src/testKit/testclusters_multiproject/settings.gradle new file mode 100644 index 0000000000000..aa91948920148 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/settings.gradle @@ -0,0 +1,3 @@ +include ':alpha' +include ':bravo' +include ':charlie' \ No newline at end of file diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 5f76b232ecb30..e792cfa3378ff 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ -elasticsearch = 7.0.0-alpha1 -lucene = 8.0.0-snapshot-7d0a7782fa +elasticsearch = 7.0.0 +lucene = 8.0.0-snapshot-31d7dfe6b1 # optional dependencies spatial4j = 0.7 diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index b9520e667be67..6580d213548f9 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -38,8 +37,8 @@ public class TransportNoopBulkAction extends HandledTransportAction { @Inject - public TransportNoopSearchAction(Settings settings, TransportService transportService, ActionFilters actionFilters) { - super(settings, NoopSearchAction.NAME, transportService, actionFilters, (Writeable.Reader) SearchRequest::new); + public TransportNoopSearchAction(TransportService transportService, ActionFilters actionFilters) { + super(NoopSearchAction.NAME, transportService, actionFilters, (Writeable.Reader) SearchRequest::new); } @Override diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index b3a8da10eb610..6cfcebff29040 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -51,7 +51,6 @@ dependencies { compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" compile "org.elasticsearch.plugin:rank-eval-client:${version}" compile "org.elasticsearch.plugin:lang-mustache-client:${version}" - bundle project(':x-pack:protocol') testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" @@ -88,6 +87,7 @@ integTestCluster { systemProperty 'es.scripting.update.ctx_in_params', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' // Truststore settings are not used since TLS is not enabled.
Included for testing the get certificates API setting 'xpack.ssl.certificate_authorities', 'testnode.crt' setting 'xpack.security.transport.ssl.truststore.path', 'testnode.jks' diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java index 5099bf8d51d32..70912b094d023 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -20,8 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; -import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; +import org.elasticsearch.client.graph.GraphExploreRequest; +import org.elasticsearch.client.graph.GraphExploreResponse; import java.io.IOException; @@ -44,7 +44,7 @@ public class GraphClient { public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, - options, GraphExploreResponse::fromXContext, emptySet()); + options, GraphExploreResponse::fromXContent, emptySet()); } /** @@ -57,7 +57,7 @@ public final void exploreAsync(GraphExploreRequest graphExploreRequest, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, - options, GraphExploreResponse::fromXContext, listener, emptySet()); + options, GraphExploreResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java index f5387047db158..fae987fb337c2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphRequestConverters.java @@ -20,7 +20,7 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.client.graph.GraphExploreRequest; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java new file mode 100644 index 0000000000000..88456f8dcc095 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -0,0 +1,303 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + +public class IndexLifecycleClient { + private final RestHighLevelClient restHighLevelClient; + + IndexLifecycleClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Retrieve one or more lifecycle policy definitions + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetLifecyclePolicyResponse getLifecyclePolicy(GetLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + GetLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retrieve one or more lifecycle policy definitions + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + GetLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putLifecyclePolicy(PutLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putLifecyclePolicyAsync(PutLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Delete a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse deleteLifecyclePolicy(DeleteLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously delete a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteLifecyclePolicyAsync(DeleteLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RemoveIndexLifecyclePolicyResponse removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, + options, RemoveIndexLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void removeIndexLifecyclePolicyAsync(RemoveIndexLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, options, + RemoveIndexLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse startILM(StartILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void startILMAsync(StartILMRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Stop the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse stopILM(StopILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Get the status of index lifecycle management + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public LifecycleManagementStatusResponse lifecycleManagementStatus(LifecycleManagementStatusRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, + options, LifecycleManagementStatusResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get the status of index lifecycle management + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void lifecycleManagementStatusAsync(LifecycleManagementStatusRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, options, + LifecycleManagementStatusResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously stop the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void stopILMAsync(StopILMRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Explain the lifecycle state for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ExplainLifecycleResponse explainLifecycle(ExplainLifecycleRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, + ExplainLifecycleResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously explain the lifecycle state for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void explainLifecycleAsync(ExplainLifecycleRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, + ExplainLifecycleResponse::fromXContent, listener, emptySet()); + } + + /** + * Retry lifecycle step for given indices + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse retryLifecycleStep(RetryLifecyclePolicyRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retry the lifecycle step for given indices + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void retryLifecycleStepAsync(RetryLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java new file mode 100644 index 0000000000000..0ca4f22edf282 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.Strings; + +import java.io.IOException; + +final class IndexLifecycleRequestConverters { + + private IndexLifecycleRequestConverters() {} + + static Request getLifecyclePolicy(GetLifecyclePolicyRequest getLifecyclePolicyRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_ilm/policy") + .addCommaSeparatedPathParts(getLifecyclePolicyRequest.getPolicyNames()).build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(getLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(getLifecyclePolicyRequest.timeout()); + return request; + } + + static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + 
.addPathPartAsIs(putLifecycleRequest.getName()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); + params.withTimeout(putLifecycleRequest.timeout()); + request.setEntity(RequestConverters.createEntity(putLifecycleRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecyclePolicyRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(deleteLifecyclePolicyRequest.timeout()); + return request; + } + + static Request removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest removePolicyRequest) { + String[] indices = removePolicyRequest.indices() == null ? + Strings.EMPTY_ARRAY : removePolicyRequest.indices().toArray(new String[] {}); + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm", "remove") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(removePolicyRequest.indicesOptions()); + params.withMasterTimeout(removePolicyRequest.masterNodeTimeout()); + return request; + } + + static Request startILM(StartILMRequest startILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("start") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(startILMRequest.masterNodeTimeout()); + params.withTimeout(startILMRequest.timeout()); + return request; + } + + static Request stopILM(StopILMRequest stopILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("stop") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); + params.withTimeout(stopILMRequest.timeout()); + return request; + } + + static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecycleManagementStatusRequest){ + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("status") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); + params.withTimeout(lifecycleManagementStatusRequest.timeout()); + return request; + } + + static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) { + String[] indices = explainLifecycleRequest.indices() == null ? 
Strings.EMPTY_ARRAY : explainLifecycleRequest.indices(); + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("explain") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); + params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); + return request; + } + + static Request retryLifecycle(RetryLifecyclePolicyRequest retryLifecyclePolicyRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(retryLifecyclePolicyRequest.getIndices()) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("retry") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(retryLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(retryLifecyclePolicyRequest.timeout()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 6a242b65a7312..be513b0985c55 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -49,9 +49,11 @@ import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedRequest; +import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -209,6 +211,19 @@ static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOExcep return request; } + static Request updateDatafeed(UpdateDatafeedRequest updateDatafeedRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("datafeeds") + .addPathPart(updateDatafeedRequest.getDatafeedUpdate().getId()) + .addPathPartAsIs("_update") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(updateDatafeedRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getDatafeed(GetDatafeedRequest getDatafeedRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -449,4 +464,16 @@ static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) { Request request = new Request(HttpDelete.METHOD_NAME, endpoint); return request; } + + static Request putFilter(PutFilterRequest putFilterRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("filters") + .addPathPart(putFilterRequest.getMlFilter().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putFilterRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git 
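To make the new ILM surface concrete, here is a hedged usage sketch of the client methods these converters back (it assumes an existing RestHighLevelClient named client and no-arg request constructors, neither of which is shown in this diff); the endpoint comments are read directly from the builders above:

---------------------------------------------------
AcknowledgedResponse stopped = client.indexLifecycle()
    .stopILM(new StopILMRequest(), RequestOptions.DEFAULT);           // POST _ilm/stop

LifecycleManagementStatusResponse status = client.indexLifecycle()
    .lifecycleManagementStatus(new LifecycleManagementStatusRequest(),
        RequestOptions.DEFAULT);                                      // GET _ilm/status

AcknowledgedResponse started = client.indexLifecycle()
    .startILM(new StartILMRequest(), RequestOptions.DEFAULT);         // POST _ilm/start
---------------------------------------------------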
a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 8c442d8ffa646..3f7a938f9ce1a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -61,12 +61,15 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedResponse; +import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.job.stats.JobStats; @@ -494,6 +497,46 @@ public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, Collections.emptySet()); } + /** + * Updates a Machine Learning Datafeed + *
+ * For additional info + * see + * ML Update datafeed documentation + * + * @param request The UpdateDatafeedRequest containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedUpdate} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutDatafeedResponse with enclosed, updated {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutDatafeedResponse updateDatafeed(UpdateDatafeedRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::updateDatafeed, + options, + PutDatafeedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Updates a Machine Learning Datafeed asynchronously and notifies listener on completion + *
+ * For additional info + * see + * ML Update datafeed documentation + * + * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedUpdate} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void updateDatafeedAsync(UpdateDatafeedRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::updateDatafeed, + options, + PutDatafeedResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets one or more Machine Learning datafeed configuration info. * @@ -1125,4 +1168,43 @@ public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions op listener, Collections.emptySet()); } + + /** + * Creates a new Machine Learning Filter + *
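A hedged sketch of the update round trip above; the DatafeedUpdate builder shape is an assumption, while the request and response types and the POST _xpack/ml/datafeeds/{id}/_update endpoint come from this diff:

---------------------------------------------------
// Builder shape is assumed for illustration; only the id is required here.
DatafeedUpdate update = new DatafeedUpdate.Builder("datafeed-1")
    .setQueryDelay(TimeValue.timeValueMinutes(10))
    .build();

PutDatafeedResponse updated = client.machineLearning()
    .updateDatafeed(new UpdateDatafeedRequest(update), RequestOptions.DEFAULT);
---------------------------------------------------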
+ * For additional info + * see ML PUT Filter documentation + * + * @param request The PutFilterRequest containing the {@link org.elasticsearch.client.ml.job.config.MlFilter} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutFilterResponse with enclosed {@link org.elasticsearch.client.ml.job.config.MlFilter} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutFilterResponse putFilter(PutFilterRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putFilter, + options, + PutFilterResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates a new Machine Learning Filter asynchronously and notifies listener on completion + *
+ * For additional info + * see ML PUT Filter documentation + * + * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.MlFilter} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void putFilterAsync(PutFilterRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::putFilter, + options, + PutFilterResponse::fromXContent, + listener, + Collections.emptySet()); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java index 8717943d79718..15bf71bc8db8f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java @@ -19,8 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; +import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java index ddd1a2a43456e..50d21f844d4e3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java @@ -20,7 +20,7 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; final class MigrationRequestConverters { @@ -28,7 +28,7 @@ private MigrationRequestConverters() {} static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack/migration/assistance") + .addPathPartAsIs("_xpack", "migration", "assistance") .addCommaSeparatedPathParts(indexUpgradeInfoRequest.indices()); String endpoint = endpointBuilder.build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 2ff944b0a5343..38dbbb8f1519b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -49,6 +49,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; @@ -84,6 +85,7 @@ import 
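Similarly, a hedged sketch for the new filter API; the MlFilter builder calls are assumptions, while PutFilterRequest, PutFilterResponse, and the PUT _xpack/ml/filters/{id} endpoint come from this diff:

---------------------------------------------------
// MlFilter builder calls are assumed for illustration.
MlFilter filter = MlFilter.builder("safe_domains")
    .setDescription("Domains excluded from analysis")
    .setItems("example.com", "example.net")
    .build();

PutFilterResponse response = client.machineLearning()
    .putFilter(new PutFilterRequest(filter), RequestOptions.DEFAULT);
---------------------------------------------------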
java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.util.List; import java.util.Locale; import java.util.StringJoiner; @@ -118,7 +120,8 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { Params parameters = new Params(request); parameters.withTimeout(bulkRequest.timeout()); parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); - + parameters.withPipeline(bulkRequest.pipeline()); + parameters.withRouting(bulkRequest.routing()); // Bulk API only supports newline delimited JSON or Smile. Before executing // the bulk, we need to check that all requests have the same content-type // and this content-type is supported by the Bulk API. @@ -440,6 +443,16 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat return request; } + static Request count(CountRequest countRequest) throws IOException { + Request request = new Request(HttpPost.METHOD_NAME, endpoint(countRequest.indices(), countRequest.types(), "_count")); + Params params = new Params(request); + params.withRouting(countRequest.routing()); + params.withPreference(countRequest.preference()); + params.withIndicesOptions(countRequest.indicesOptions()); + request.setEntity(createEntity(countRequest.source(), REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request explain(ExplainRequest explainRequest) throws IOException { Request request = new Request(HttpGet.METHOD_NAME, endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain")); @@ -705,10 +718,10 @@ Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { putParam("_source", Boolean.FALSE.toString()); } if (fetchSourceContext.includes() != null && fetchSourceContext.includes().length > 0) { - putParam("_source_include", String.join(",", fetchSourceContext.includes())); + putParam("_source_includes", String.join(",", fetchSourceContext.includes())); } if (fetchSourceContext.excludes() != null && fetchSourceContext.excludes().length > 0) { - putParam("_source_exclude", String.join(",", fetchSourceContext.excludes())); + putParam("_source_excludes", String.join(",", fetchSourceContext.excludes())); } } return this; @@ -1006,7 +1019,12 @@ EndpointBuilder addCommaSeparatedPathParts(String[] parts) { return this; } - EndpointBuilder addPathPartAsIs(String... parts) { + EndpointBuilder addCommaSeparatedPathParts(List parts) { + addPathPart(String.join(",", parts)); + return this; + } + + EndpointBuilder addPathPartAsIs(String ... 
parts) { for (String part : parts) { if (Strings.hasLength(part)) { joiner.add(part); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 7e8a965361426..11fff4c0a6b4d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -56,6 +56,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.common.CheckedConsumer; @@ -156,6 +158,8 @@ import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -222,6 +226,7 @@ public class RestHighLevelClient implements Closeable { private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); private final SecurityClient securityClient = new SecurityClient(this); + private final IndexLifecycleClient ilmClient = new IndexLifecycleClient(this); private final RollupClient rollupClient = new RollupClient(this); /** @@ -368,6 +373,17 @@ public final XPackClient xpack() { */ public LicenseClient license() { return licenseClient; } + /** + * A wrapper for the {@link RestHighLevelClient} that provides methods for + * accessing the Elastic Index Lifecycle APIs. + *
+ * See the X-Pack APIs + * on elastic.co for more information. + */ + public IndexLifecycleClient indexLifecycle() { + return ilmClient; + } + /** * Provides methods for accessing the Elastic Licensed Licensing APIs that * are shipped with the default distribution of Elasticsearch. All of @@ -777,6 +793,31 @@ public final void indexAsync(IndexRequest indexRequest, RequestOptions options, emptySet()); } + /** + * Executes a count request using the Count API. + * See Count API on elastic.co + * @param countRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final CountResponse count(CountRequest countRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(countRequest, RequestConverters::count, options, CountResponse::fromXContent, + emptySet()); + } + + /** + * Asynchronously executes a count request using the Count API. + * See Count API on elastic.co + * @param countRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void countAsync(CountRequest countRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(countRequest, RequestConverters::count, options,CountResponse::fromXContent, + listener, emptySet()); + } + /** * Updates a document using the Update API. * See Update API on elastic.co @@ -1537,6 +1578,7 @@ static List getDefaultNamedXContents() { map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c)); map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c)); map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c)); + map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c)); map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c)); map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index 3a9759e47a20e..fa36c02d02c22 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -28,6 +28,8 @@ import org.elasticsearch.client.rollup.GetRollupCapsResponse; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobResponse; +import org.elasticsearch.client.rollup.StartRollupJobRequest; +import org.elasticsearch.client.rollup.StartRollupJobResponse; import java.io.IOException; import java.util.Collections; @@ -80,6 +82,40 @@ public void putRollupJobAsync(PutRollupJobRequest request, RequestOptions option listener, Collections.emptySet()); } + /** + * Start a rollup job + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public StartRollupJobResponse startRollupJob(StartRollupJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + RollupRequestConverters::startJob, + options, + StartRollupJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Asynchronously start a rollup job + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void startRollupJobAsync(StartRollupJobRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + RollupRequestConverters::startJob, + options, + StartRollupJobResponse::fromXContent, + listener, Collections.emptySet()); + } + /** * Delete a rollup job from the cluster * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index 6b6a05ed03c65..f662d592d1a62 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -20,11 +20,13 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; -import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupCapsRequest; +import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobRequest; +import org.elasticsearch.client.rollup.StartRollupJobRequest; import java.io.IOException; @@ -38,9 +40,7 @@ private RollupRequestConverters() { static Request putJob(final PutRollupJobRequest putRollupJobRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("rollup") - .addPathPartAsIs("job") + .addPathPartAsIs("_xpack", "rollup", "job") .addPathPart(putRollupJobRequest.getConfig().getId()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -48,11 +48,19 @@ static Request putJob(final PutRollupJobRequest putRollupJobRequest) throws IOEx return request; } + static Request startJob(final StartRollupJobRequest startRollupJobRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack", "rollup", "job") + .addPathPart(startRollupJobRequest.getJobId()) + .addPathPartAsIs("_start") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + return request; + } + static Request getJob(final GetRollupJobRequest getRollupJobRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("rollup") - .addPathPartAsIs("job") + .addPathPartAsIs("_xpack", "rollup", "job") .addPathPart(getRollupJobRequest.getJobId()) .build(); return new Request(HttpGet.METHOD_NAME, 
endpoint); @@ -60,9 +68,7 @@ static Request getJob(final GetRollupJobRequest getRollupJobRequest) { static Request deleteJob(final DeleteRollupJobRequest deleteRollupJobRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("rollup") - .addPathPartAsIs("job") + .addPathPartAsIs("_xpack", "rollup", "job") .addPathPart(deleteRollupJobRequest.getId()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); @@ -72,9 +78,7 @@ static Request deleteJob(final DeleteRollupJobRequest deleteRollupJobRequest) th static Request getRollupCaps(final GetRollupCapsRequest getRollupCapsRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("rollup") - .addPathPartAsIs("data") + .addPathPartAsIs("_xpack", "rollup", "data") .addPathPart(getRollupCapsRequest.getIndexPattern()) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index e528116da6c5d..aee6eb5efccd5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -20,22 +20,30 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.security.AuthenticateRequest; +import org.elasticsearch.client.security.AuthenticateResponse; +import org.elasticsearch.client.security.ChangePasswordRequest; import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.ClearRolesCacheResponse; +import org.elasticsearch.client.security.CreateTokenRequest; +import org.elasticsearch.client.security.CreateTokenResponse; +import org.elasticsearch.client.security.DeleteRoleMappingRequest; +import org.elasticsearch.client.security.DeleteRoleMappingResponse; import org.elasticsearch.client.security.DeleteRoleRequest; import org.elasticsearch.client.security.DeleteRoleResponse; -import org.elasticsearch.client.security.PutRoleMappingRequest; -import org.elasticsearch.client.security.PutRoleMappingResponse; import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EmptyResponse; import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.GetRoleMappingsRequest; +import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesRequest; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.InvalidateTokenRequest; +import org.elasticsearch.client.security.InvalidateTokenResponse; +import org.elasticsearch.client.security.PutRoleMappingRequest; +import org.elasticsearch.client.security.PutRoleMappingResponse; import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; -import org.elasticsearch.client.security.ChangePasswordRequest; -import org.elasticsearch.client.security.DeleteRoleMappingRequest; -import org.elasticsearch.client.security.DeleteRoleMappingResponse; import java.io.IOException; @@ -112,6 +120,40 @@ public void putRoleMappingAsync(final PutRoleMappingRequest request, final Reque PutRoleMappingResponse::fromXContent, 
listener, emptySet()); } + /** + * Synchronously get role mapping(s). + * See + * the docs for more. + * + * @param request {@link GetRoleMappingsRequest} with role mapping name(s). + * If no role mapping name is provided then retrieves all role mappings. + * @param options the request options (e.g. headers), use + * {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the get role mapping call + * @throws IOException in case there is a problem sending the request or + * parsing back the response + */ + public GetRoleMappingsResponse getRoleMappings(final GetRoleMappingsRequest request, final RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::getRoleMappings, + options, GetRoleMappingsResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get role mapping(s). + * See + * the docs for more. + * + * @param request {@link GetRoleMappingsRequest} with role mapping name(s). + * If no role mapping name is provided then retrieves all role mappings. + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getRoleMappingsAsync(final GetRoleMappingsRequest request, final RequestOptions options, + final ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getRoleMappings, + options, GetRoleMappingsResponse::fromXContent, listener, emptySet()); + } + /** + * Enable a native realm or built-in user synchronously. + * See + * the docs for more. @@ -172,6 +214,32 @@ public void disableUserAsync(DisableUserRequest request, RequestOptions options, EmptyResponse::fromXContent, listener, emptySet()); } + /** + * Authenticate the current user and return all the information about the authenticated user. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the authenticate user call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AuthenticateResponse authenticate(RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options, + AuthenticateResponse::fromXContent, emptySet()); + } + + /** + * Authenticate the current user asynchronously and return all the information about the authenticated user. + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void authenticateAsync(RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options, + AuthenticateResponse::fromXContent, listener, emptySet()); + } + /** + * Clears the native roles cache for a set of roles. + * See + * the docs for more. @@ -314,4 +382,63 @@ public void deleteRoleAsync(DeleteRoleRequest request, RequestOptions options, A DeleteRoleResponse::fromXContent, listener, singleton(404)); } + /** + * Creates an OAuth2 token. + * See + * the docs for more. + * + * @param request the request for the token + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the create token call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CreateTokenResponse createToken(CreateTokenRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::createToken, options, + CreateTokenResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously creates an OAuth2 token. + * See + * the docs for more. + * + * @param request the request for the token + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void createTokenAsync(CreateTokenRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::createToken, options, + CreateTokenResponse::fromXContent, listener, emptySet()); + } + + /** + * Invalidates an OAuth2 token. + * See + * the docs for more. + * + * @param request the request to invalidate the token + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the invalidate token call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public InvalidateTokenResponse invalidateToken(InvalidateTokenRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::invalidateToken, options, + InvalidateTokenResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously invalidates an OAuth2 token. + * See + * the docs for more. + * + * @param request the request to invalidate the token + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void invalidateTokenAsync(InvalidateTokenRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateToken, options, + InvalidateTokenResponse::fromXContent, listener, emptySet()); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index e3acda9313a65..c8e3fe2b04dfb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -19,18 +19,23 @@ package org.elasticsearch.client; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.security.ClearRolesCacheRequest; +import org.elasticsearch.client.security.CreateTokenRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; +import org.elasticsearch.client.security.InvalidateTokenRequest; import org.elasticsearch.client.security.PutRoleMappingRequest; import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.GetRoleMappingsRequest; import org.elasticsearch.client.security.ChangePasswordRequest; import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.SetUserEnabledRequest; +import org.elasticsearch.common.Strings; import java.io.IOException; @@ -78,6 +83,15 @@ static Request putRoleMapping(final PutRoleMappingRequest putRoleMappingRequest) return request; } + static Request getRoleMappings(final GetRoleMappingsRequest getRoleMappingRequest) throws IOException { + RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder(); + builder.addPathPartAsIs("_xpack/security/role_mapping"); + if (getRoleMappingRequest.getRoleMappingNames().size() > 0) { + builder.addPathPart(Strings.collectionToCommaDelimitedString(getRoleMappingRequest.getRoleMappingNames())); + } + return new Request(HttpGet.METHOD_NAME, builder.build()); + } + static Request enableUser(EnableUserRequest enableUserRequest) { return setUserEnabled(enableUserRequest); } @@ -128,4 +142,16 @@ static Request deleteRole(DeleteRoleRequest deleteRoleRequest) { params.withRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); return request; } + + static Request createToken(CreateTokenRequest createTokenRequest) throws IOException { + Request request = new Request(HttpPost.METHOD_NAME, "/_xpack/security/oauth2/token"); + request.setEntity(createEntity(createTokenRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request invalidateToken(InvalidateTokenRequest invalidateTokenRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/security/oauth2/token"); + request.setEntity(createEntity(invalidateTokenRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java index c26a7ba48ca17..be2b2f5ed5c6b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java @@ -28,7 +28,7 @@ * Please note, any requests that use an ackTimeout should set timeout as they * represent the same backing field on the server. */ -public class TimedRequest implements Validatable { +public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); @@ -38,7 +38,6 @@ public class TimedRequest implements Validatable { public void setTimeout(TimeValue timeout) { this.timeout = timeout; - } public void setMasterTimeout(TimeValue masterTimeout) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java index a2b11772c1277..712477645669b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java @@ -28,10 +28,10 @@ import org.elasticsearch.client.watcher.AckWatchResponse; import org.elasticsearch.client.watcher.StartWatchServiceRequest; import org.elasticsearch.client.watcher.StopWatchServiceRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.client.watcher.DeleteWatchRequest; +import org.elasticsearch.client.watcher.DeleteWatchResponse; +import org.elasticsearch.client.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.PutWatchResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java index 49764025273b6..57e817e083ad3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherRequestConverters.java @@ -30,8 +30,8 @@ import org.elasticsearch.client.watcher.StartWatchServiceRequest; import org.elasticsearch.client.watcher.StopWatchServiceRequest; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.DeleteWatchRequest; +import org.elasticsearch.client.watcher.PutWatchRequest; final class WatcherRequestConverters { @@ -59,9 +59,7 @@ static Request stopWatchService(StopWatchServiceRequest stopWatchServiceRequest) static Request putWatch(PutWatchRequest putWatchRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") + .addPathPartAsIs("_xpack", "watcher", "watch") .addPathPart(putWatchRequest.getId()) .build(); @@ -89,9 +87,7 @@ static Request deactivateWatch(DeactivateWatchRequest deactivateWatchRequest) { static Request deleteWatch(DeleteWatchRequest 
deleteWatchRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") + .addPathPartAsIs("_xpack", "watcher", "watch") .addPathPart(deleteWatchRequest.getId()) .build(); @@ -101,9 +97,7 @@ static Request deleteWatch(DeleteWatchRequest deleteWatchRequest) { public static Request ackWatch(AckWatchRequest ackWatchRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") + .addPathPartAsIs("_xpack", "watcher", "watch") .addPathPart(ackWatchRequest.getWatchId()) .addPathPartAsIs("_ack") .addCommaSeparatedPathParts(ackWatchRequest.getActionIds()) @@ -114,9 +108,7 @@ public static Request ackWatch(AckWatchRequest ackWatchRequest) { static Request activateWatch(ActivateWatchRequest activateWatchRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_xpack") - .addPathPartAsIs("watcher") - .addPathPartAsIs("watch") + .addPathPartAsIs("_xpack", "watcher", "watch") .addPathPart(activateWatchRequest.getWatchId()) .addPathPartAsIs("_activate") .build(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java new file mode 100644 index 0000000000000..6d4589c7861f6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java @@ -0,0 +1,206 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.util.Arrays; +import java.util.Objects; + +import static org.elasticsearch.action.search.SearchRequest.DEFAULT_INDICES_OPTIONS; + +/** + * Encapsulates a request to _count API against one, several or all indices. + */ +public final class CountRequest extends ActionRequest implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private String[] types = Strings.EMPTY_ARRAY; + private String routing; + private String preference; + private SearchSourceBuilder searchSourceBuilder; + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; + + public CountRequest() { + this.searchSourceBuilder = new SearchSourceBuilder(); + } + + /** + * Constructs a new count request against the indices. No indices provided here means that count will execute on all indices. 
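+ * For example, a construction sketch (the index names are placeholders, not part of this change):
+ * <pre>
+ * CountRequest countAllIndices = new CountRequest();
+ * CountRequest countOrders = new CountRequest("orders", "orders-archive");
+ * </pre>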
+ */ + public CountRequest(String... indices) { + this(indices, new SearchSourceBuilder()); + } + + /** + * Constructs a new count request against the provided indices with the given search source. + */ + public CountRequest(String[] indices, SearchSourceBuilder searchSourceBuilder) { + indices(indices); + this.searchSourceBuilder = searchSourceBuilder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + /** + * Sets the indices the count will be executed on. + */ + public CountRequest indices(String... indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + this.indices = indices; + return this; + } + + /** + * The source of the count request. + */ + public CountRequest source(SearchSourceBuilder searchSourceBuilder) { + this.searchSourceBuilder = Objects.requireNonNull(searchSourceBuilder, "source must not be null"); + return this; + } + + /** + * The document types to execute the count against. Defaults to executing against all types. + * + * @deprecated Types are going away, prefer filtering on a type. + */ + @Deprecated + public CountRequest types(String... types) { + Objects.requireNonNull(types, "types must not be null"); + for (String type : types) { + Objects.requireNonNull(type, "type must not be null"); + } + this.types = types; + return this; + } + + /** + * The routing values to control the shards that the count will be executed on. + */ + public CountRequest routing(String routing) { + this.routing = routing; + return this; + } + + /** + * A comma-separated list of routing values to control the shards the count will be executed on. + */ + public CountRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + /** + * Returns the indices options used to resolve indices. They tell for instance whether a single index is accepted, whether an empty + * array will be converted to _all, and how wildcards will be expanded if needed. + * + * @see org.elasticsearch.action.support.IndicesOptions + */ + public CountRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + /** + * Sets the preference to execute the count. Defaults to randomize across shards. Can be set to {@code _local} to prefer local shards + * or a custom value, which guarantees that the same order will be used across different requests. 
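+ * For illustration, the setters above chain naturally (all values here are placeholders):
+ * <pre>
+ * CountRequest request = new CountRequest("orders")
+ *     .routing("user-42")
+ *     .preference("_local")
+ *     .indicesOptions(IndicesOptions.lenientExpandOpen());
+ * </pre>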
+ */ + public CountRequest preference(String preference) { + this.preference = preference; + return this; + } + + public IndicesOptions indicesOptions() { + return this.indicesOptions; + } + + public String routing() { + return this.routing; + } + + public String preference() { + return this.preference; + } + + public String[] indices() { + return Arrays.copyOf(this.indices, this.indices.length); + } + + public Float minScore() { + return this.searchSourceBuilder.minScore(); + } + + public CountRequest minScore(Float minScore) { + this.searchSourceBuilder.minScore(minScore); + return this; + } + + public int terminateAfter() { + return this.searchSourceBuilder.terminateAfter(); + } + + public CountRequest terminateAfter(int terminateAfter) { + this.searchSourceBuilder.terminateAfter(terminateAfter); + return this; + } + + public String[] types() { + return Arrays.copyOf(this.types, this.types.length); + } + + public SearchSourceBuilder source() { + return this.searchSourceBuilder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CountRequest that = (CountRequest) o; + return Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(indices, that.indices) && + Arrays.equals(types, that.types) && + Objects.equals(routing, that.routing) && + Objects.equals(preference, that.preference); + } + + @Override + public int hashCode() { + int result = Objects.hash(indicesOptions, routing, preference); + result = 31 * result + Arrays.hashCode(indices); + result = 31 * result + Arrays.hashCode(types); + return result; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java new file mode 100644 index 0000000000000..f97f79127e690 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * A response to _count API request. 
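+ * A typical synchronous round trip, assuming an already-configured {@code RestHighLevelClient client} (the index name is illustrative):
+ * <pre>
+ * CountRequest countRequest = new CountRequest("orders");
+ * countRequest.source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
+ * CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
+ * long count = countResponse.getCount();
+ * </pre>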
+ */ +public final class CountResponse extends ActionResponse { + + static final ParseField COUNT = new ParseField("count"); + static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); + static final ParseField SHARDS = new ParseField("_shards"); + + private final long count; + private final Boolean terminatedEarly; + private final ShardStats shardStats; + + public CountResponse(long count, Boolean terminatedEarly, ShardStats shardStats) { + this.count = count; + this.terminatedEarly = terminatedEarly; + this.shardStats = shardStats; + } + + public ShardStats getShardStats() { + return shardStats; + } + + /** + * Number of documents matching request. + */ + public long getCount() { + return count; + } + + /** + * The total number of shards the search was executed on. + */ + public int getTotalShards() { + return shardStats.totalShards; + } + + /** + * The successful number of shards the search was executed on. + */ + public int getSuccessfulShards() { + return shardStats.successfulShards; + } + + /** + * The number of shards skipped due to pre-filtering + */ + public int getSkippedShards() { + return shardStats.skippedShards; + } + + /** + * The failed number of shards the search was executed on. + */ + public int getFailedShards() { + return shardStats.shardFailures.length; + } + + /** + * The failures that occurred during the search. + */ + public ShardSearchFailure[] getShardFailures() { + return shardStats.shardFailures; + } + + public RestStatus status() { + return RestStatus.status(shardStats.successfulShards, shardStats.totalShards, shardStats.shardFailures); + } + + public static CountResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + String currentName = parser.currentName(); + Boolean terminatedEarly = null; + long count = 0; + ShardStats shardStats = new ShardStats(-1, -1,0, ShardSearchFailure.EMPTY_ARRAY); + + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (COUNT.match(currentName, parser.getDeprecationHandler())) { + count = parser.longValue(); + } else if (TERMINATED_EARLY.match(currentName, parser.getDeprecationHandler())) { + terminatedEarly = parser.booleanValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (SHARDS.match(currentName, parser.getDeprecationHandler())) { + shardStats = ShardStats.fromXContent(parser); + } else { + parser.skipChildren(); + } + } + } + return new CountResponse(count, terminatedEarly, shardStats); + } + + @Override + public String toString() { + String s = "{" + + "count=" + count + + (isTerminatedEarly() != null ? ", terminatedEarly=" + terminatedEarly : "") + + ", " + shardStats + + '}'; + return s; + } + + public Boolean isTerminatedEarly() { + return terminatedEarly; + } + + /** + * Encapsulates _shards section of count api response. 
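+ * For example, given a parsed {@code CountResponse countResponse}, shard coverage can be checked before trusting the count (sketch):
+ * <pre>
+ * if (countResponse.getFailedShards() > 0) {
+ *     for (ShardSearchFailure failure : countResponse.getShardFailures()) {
+ *         // inspect or log each per-shard failure
+ *     }
+ * }
+ * </pre>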
+ */ + public static final class ShardStats { + + static final ParseField FAILED = new ParseField("failed"); + static final ParseField SKIPPED = new ParseField("skipped"); + static final ParseField TOTAL = new ParseField("total"); + static final ParseField SUCCESSFUL = new ParseField("successful"); + static final ParseField FAILURES = new ParseField("failures"); + + private final int successfulShards; + private final int totalShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + + public ShardStats(int successfulShards, int totalShards, int skippedShards, ShardSearchFailure[] shardFailures) { + this.successfulShards = successfulShards; + this.totalShards = totalShards; + this.skippedShards = skippedShards; + this.shardFailures = Arrays.copyOf(shardFailures, shardFailures.length); + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getTotalShards() { + return totalShards; + } + + public int getSkippedShards() { + return skippedShards; + } + + public ShardSearchFailure[] getShardFailures() { + return Arrays.copyOf(shardFailures, shardFailures.length, ShardSearchFailure[].class); + } + + static ShardStats fromXContent(XContentParser parser) throws IOException { + int successfulShards = -1; + int totalShards = -1; + int skippedShards = 0; //BWC @see org.elasticsearch.action.search.SearchResponse + List failures = new ArrayList<>(); + XContentParser.Token token; + String currentName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (FAILED.match(currentName, parser.getDeprecationHandler())) { + parser.intValue(); + } else if (SKIPPED.match(currentName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else if (TOTAL.match(currentName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (SUCCESSFUL.match(currentName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (FAILURES.match(currentName, parser.getDeprecationHandler())) { + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + failures.add(ShardSearchFailure.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + return new ShardStats(successfulShards, totalShards, skippedShards, failures.toArray(new ShardSearchFailure[failures.size()])); + } + + @Override + public String toString() { + return "_shards : {" + + "total=" + totalShards + + ", successful=" + successfulShards + + ", skipped=" + skippedShards + + ", failed=" + (shardFailures != null && shardFailures.length > 0 ? shardFailures.length : 0 ) + + (shardFailures != null && shardFailures.length > 0 ? 
", failures: " + Arrays.asList(shardFailures): "") + + '}'; + } + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Connection.java similarity index 89% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Connection.java index 455434f7ac4a9..07aff3888d260 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Connection.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Connection.java @@ -16,22 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; import com.carrotsearch.hppc.ObjectIntHashMap; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; +import org.elasticsearch.client.graph.Vertex.VertexId; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -55,25 +52,9 @@ public Connection(Vertex from, Vertex to, double weight, long docCount) { this.docCount = docCount; } - public Connection(StreamInput in, Map vertices) throws IOException { - from = vertices.get(new VertexId(in.readString(), in.readString())); - to = vertices.get(new VertexId(in.readString(), in.readString())); - weight = in.readDouble(); - docCount = in.readVLong(); - } - Connection() { } - void writeTo(StreamOutput out) throws IOException { - out.writeString(from.getField()); - out.writeString(from.getTerm()); - out.writeString(to.getField()); - out.writeString(to.getTerm()); - out.writeDouble(weight); - out.writeVLong(docCount); - } - public ConnectionId getId() { return new ConnectionId(from.getId(), to.getId()); } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java similarity index 82% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java index 495ea5fd28ac3..4d2a000a00c89 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java @@ -16,13 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -37,14 +36,14 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Iterator; import java.util.List; +import java.util.Optional; /** * Holds the criteria required to guide the exploration of connected terms which * can be returned as a graph. */ -public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { +public class GraphExploreRequest implements IndicesRequest.Replaceable, ToXContentObject, Validatable { public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; @@ -74,15 +73,15 @@ public GraphExploreRequest(String... indices) { } @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; + public Optional validate() { + ValidationException validationException = new ValidationException(); if (hops.size() == 0) { - validationException = ValidateActions.addValidationError(NO_HOPS_ERROR_MESSAGE, validationException); + validationException.addValidationError(NO_HOPS_ERROR_MESSAGE); } for (Hop hop : hops) { - validationException = hop.validate(validationException); + hop.validate(validationException); } - return validationException; + return validationException.validationErrors().isEmpty() ? 
Optional.empty() : Optional.of(validationException); } @Override @@ -159,55 +158,6 @@ public GraphExploreRequest timeout(String timeout) { return this; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - types = in.readStringArray(); - routing = in.readOptionalString(); - timeout = in.readOptionalTimeValue(); - sampleSize = in.readInt(); - sampleDiversityField = in.readOptionalString(); - maxDocsPerDiversityValue = in.readInt(); - - useSignificance = in.readBoolean(); - returnDetailedInfo = in.readBoolean(); - - int numHops = in.readInt(); - Hop parentHop = null; - for (int i = 0; i < numHops; i++) { - Hop hop = new Hop(parentHop); - hop.readFrom(in); - hops.add(hop); - parentHop = hop; - } - - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - out.writeStringArray(types); - out.writeOptionalString(routing); - out.writeOptionalTimeValue(timeout); - - out.writeInt(sampleSize); - out.writeOptionalString(sampleDiversityField); - out.writeInt(maxDocsPerDiversityValue); - - out.writeBoolean(useSignificance); - out.writeBoolean(returnDetailedInfo); - out.writeInt(hops.size()); - for (Iterator iterator = hops.iterator(); iterator.hasNext();) { - Hop hop = iterator.next(); - hop.writeTo(out); - } - } - @Override public String toString() { return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]"; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java similarity index 76% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java index baaaedf0163ed..dddc4bedfe466 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java @@ -16,24 +16,21 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; import com.carrotsearch.hppc.ObjectIntHashMap; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; -import org.elasticsearch.protocol.xpack.graph.Connection.UnresolvedConnection; -import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; +import org.elasticsearch.client.graph.Connection.ConnectionId; +import org.elasticsearch.client.graph.Connection.UnresolvedConnection; +import org.elasticsearch.client.graph.Vertex.VertexId; import java.io.IOException; import java.util.Collection; @@ -41,7 +38,6 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -51,7 +47,7 @@ * * @see GraphExploreRequest */ -public class GraphExploreResponse extends ActionResponse implements ToXContentObject { +public class GraphExploreResponse implements ToXContentObject { private long tookInMillis; private boolean timedOut = false; @@ -94,76 +90,32 @@ public ShardOperationFailedException[] getShardFailures() { return shardFailures; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - tookInMillis = in.readVLong(); - timedOut = in.readBoolean(); - - int size = in.readVInt(); - if (size == 0) { - shardFailures = ShardSearchFailure.EMPTY_ARRAY; - } else { - shardFailures = new ShardSearchFailure[size]; - for (int i = 0; i < shardFailures.length; i++) { - shardFailures[i] = readShardSearchFailure(in); - } - } - // read vertices - size = in.readVInt(); - vertices = new HashMap<>(); - for (int i = 0; i < size; i++) { - Vertex n = Vertex.readFrom(in); - vertices.put(n.getId(), n); - } - - size = in.readVInt(); - - connections = new HashMap<>(); - for (int i = 0; i < size; i++) { - Connection e = new Connection(in, vertices); - connections.put(e.getId(), e); - } - - returnDetailedInfo = in.readBoolean(); + public Collection getConnections() { + return connections.values(); + } + public Collection getConnectionIds() { + return connections.keySet(); } - public Collection getConnections() { - return connections.values(); + public Connection getConnection(ConnectionId connectionId) { + return connections.get(connectionId); } public Collection getVertices() { return vertices.values(); } - + + public Collection getVertexIds() { + return vertices.keySet(); + } + public Vertex getVertex(VertexId id) { return vertices.get(id); } - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVLong(tookInMillis); - out.writeBoolean(timedOut); - - out.writeVInt(shardFailures.length); - for (ShardOperationFailedException shardSearchFailure : shardFailures) { - 
shardSearchFailure.writeTo(out); - } - - out.writeVInt(vertices.size()); - for (Vertex vertex : vertices.values()) { - vertex.writeTo(out); - } - - out.writeVInt(connections.size()); - for (Connection connection : connections.values()) { - connection.writeTo(out); - } - - out.writeBoolean(returnDetailedInfo); - + public boolean isReturnDetailedInfo() { + return returnDetailedInfo; } private static final ParseField TOOK = new ParseField("took"); @@ -254,7 +206,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); } - public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException { return PARSER.apply(parser, null); } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java similarity index 76% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java index 70ec61067f5b8..83196aada7061 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java @@ -16,12 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.client.ValidationException; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -56,7 +53,7 @@ *
* */ -public class Hop implements ToXContentFragment{ +public class Hop implements ToXContentFragment { final Hop parentHop; List vertices = null; QueryBuilder guidingQuery = null; @@ -65,44 +62,16 @@ public Hop(Hop parent) { this.parentHop = parent; } - public ActionRequestValidationException validate(ActionRequestValidationException validationException) { - + public void validate(ValidationException validationException) { if (getEffectiveVertexRequests().size() == 0) { - validationException = ValidateActions.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE, validationException); + validationException.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE); } - return validationException; - } public Hop getParentHop() { return parentHop; } - void writeTo(StreamOutput out) throws IOException { - out.writeOptionalNamedWriteable(guidingQuery); - if (vertices == null) { - out.writeVInt(0); - } else { - out.writeVInt(vertices.size()); - for (VertexRequest vr : vertices) { - vr.writeTo(out); - } - } - } - - void readFrom(StreamInput in) throws IOException { - guidingQuery = in.readOptionalNamedWriteable(QueryBuilder.class); - int size = in.readVInt(); - if (size > 0) { - vertices = new ArrayList<>(); - for (int i = 0; i < size; i++) { - VertexRequest vr = new VertexRequest(); - vr.readFrom(in); - vertices.add(vr); - } - } - } - public QueryBuilder guidingQuery() { if (guidingQuery != null) { return guidingQuery; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java similarity index 93% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java index cfc26f44fac04..54b0b5223277d 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/Vertex.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java @@ -16,11 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -64,19 +62,6 @@ public Vertex(String field, String term, double weight, int depth, long bg, long this.bg = bg; this.fg = fg; } - - static Vertex readFrom(StreamInput in) throws IOException { - return new Vertex(in.readString(), in.readString(), in.readDouble(), in.readVInt(), in.readVLong(), in.readVLong()); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeString(field); - out.writeString(term); - out.writeDouble(weight); - out.writeVInt(depth); - out.writeVLong(bg); - out.writeVLong(fg); - } @Override public int hashCode() { @@ -235,6 +220,14 @@ public VertexId(String field, String term) { this.term = term; } + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + @Override public boolean equals(Object o) { if (this == o) diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/VertexRequest.java similarity index 78% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/VertexRequest.java index 116497fe2301c..4947244a6c8bd 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/VertexRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/VertexRequest.java @@ -16,13 +16,11 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; +import org.elasticsearch.client.graph.GraphExploreRequest.TermBoost; import java.io.IOException; import java.util.HashMap; @@ -52,57 +50,6 @@ public VertexRequest() { } - void readFrom(StreamInput in) throws IOException { - fieldName = in.readString(); - size = in.readVInt(); - minDocCount = in.readVInt(); - shardMinDocCount = in.readVInt(); - - int numIncludes = in.readVInt(); - if (numIncludes > 0) { - includes = new HashMap<>(); - for (int i = 0; i < numIncludes; i++) { - TermBoost tb = new TermBoost(); - tb.readFrom(in); - includes.put(tb.term, tb); - } - } - - int numExcludes = in.readVInt(); - if (numExcludes > 0) { - excludes = new HashSet<>(); - for (int i = 0; i < numExcludes; i++) { - excludes.add(in.readString()); - } - } - - } - - void writeTo(StreamOutput out) throws IOException { - out.writeString(fieldName); - out.writeVInt(size); - out.writeVInt(minDocCount); - out.writeVInt(shardMinDocCount); - - if (includes != null) { - out.writeVInt(includes.size()); - for (TermBoost tb : includes.values()) { - tb.writeTo(out); - } - } else { - out.writeVInt(0); - } - - if (excludes != null) { - out.writeVInt(excludes.size()); - for (String term : excludes) { - out.writeString(term); - } - } else { - out.writeVInt(0); - } - } - public String fieldName() { return fieldName; } @@ -224,7 +171,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) { builder.field("shard_min_doc_count", shardMinDocCount); } - if(includes!=null) { + if (includes != null) { builder.startArray("include"); for (TermBoost tb : includes.values()) { builder.startObject(); @@ -234,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); } - if(excludes!=null) { + if (excludes != null) { builder.startArray("exclude"); for (String value : excludes) { builder.value(value); diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/package-info.java similarity index 94% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/graph/package-info.java index f4f666074a118..27cfb29d381d9 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/graph/package-info.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/package-info.java @@ -21,4 +21,4 @@ * Request and Response objects for the default distribution's Graph * APIs. 
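+ * A hypothetical end-to-end sketch (the {@code client.graph()} accessor, {@code createNextHop}, and the
+ * index name are assumptions for illustration, not guaranteed by this change):
+ * <pre>
+ * GraphExploreRequest request = new GraphExploreRequest("clicklogs");
+ * Hop hop = request.createNextHop(QueryBuilders.termQuery("category", "phones"));
+ * hop.addVertexRequest("product").size(5);
+ * GraphExploreResponse response = client.graph().explore(request, RequestOptions.DEFAULT);
+ * for (Vertex vertex : response.getVertices()) {
+ *     // vertex.getTerm(), vertex.getWeight(), ...
+ * }
+ * </pre>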
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java new file mode 100644 index 0000000000000..702db15b965c7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class AllocateAction implements LifecycleAction, ToXContentObject { + + public static final String NAME = "allocate"; + static final ParseField NUMBER_OF_REPLICAS_FIELD = new ParseField("number_of_replicas"); + static final ParseField INCLUDE_FIELD = new ParseField("include"); + static final ParseField EXCLUDE_FIELD = new ParseField("exclude"); + static final ParseField REQUIRE_FIELD = new ParseField("require"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new AllocateAction((Integer) a[0], (Map) a[1], (Map) a[2], (Map) a[3])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_REPLICAS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), INCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), EXCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), REQUIRE_FIELD); + } + + private final Integer numberOfReplicas; + private final Map include; + private final Map exclude; + private final Map require; + + public static AllocateAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public AllocateAction(Integer numberOfReplicas, Map include, Map exclude, Map require) { + if (include == null) { + this.include = Collections.emptyMap(); + } else { + this.include = include; + } + if (exclude == null) { + this.exclude = Collections.emptyMap(); + } else { + this.exclude = exclude; + } + if (require == null) { + this.require = Collections.emptyMap(); + 
} else { + this.require = require; + } + if (this.include.isEmpty() && this.exclude.isEmpty() && this.require.isEmpty() && numberOfReplicas == null) { + throw new IllegalArgumentException( + "At least one of " + INCLUDE_FIELD.getPreferredName() + ", " + EXCLUDE_FIELD.getPreferredName() + " or " + + REQUIRE_FIELD.getPreferredName() + " must contain attributes for action " + NAME); + } + if (numberOfReplicas != null && numberOfReplicas < 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0"); + } + this.numberOfReplicas = numberOfReplicas; + } + + public Integer getNumberOfReplicas() { + return numberOfReplicas; + } + + public Map getInclude() { + return include; + } + + public Map getExclude() { + return exclude; + } + + public Map getRequire() { + return require; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (numberOfReplicas != null) { + builder.field(NUMBER_OF_REPLICAS_FIELD.getPreferredName(), numberOfReplicas); + } + builder.field(INCLUDE_FIELD.getPreferredName(), include); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); + builder.field(REQUIRE_FIELD.getPreferredName(), require); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(numberOfReplicas, include, exclude, require); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + AllocateAction other = (AllocateAction) obj; + return Objects.equals(numberOfReplicas, other.numberOfReplicas) && + Objects.equals(include, other.include) && + Objects.equals(exclude, other.exclude) && + Objects.equals(require, other.require); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java new file mode 100644 index 0000000000000..299b0ac582771 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
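A hedged usage sketch for the new `AllocateAction`, assuming the raw `Map` parameters are `Map<String, Object>`; the node attribute name and value are illustrative:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.AllocateAction;

import java.util.Collections;
import java.util.Map;

public class AllocateActionExample {
    public static void main(String[] args) {
        // One replica, no include/exclude rules, require nodes tagged box_type=warm.
        Map<String, Object> require = Collections.singletonMap("box_type", "warm");
        AllocateAction action = new AllocateAction(1, null, null, require);
        System.out.println(action); // Strings.toString(...) renders the action as JSON
        // new AllocateAction(null, null, null, null) would throw: the constructor
        // insists on at least one routing rule or a replica count.
    }
}
---------------------------------------------------------------------------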
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class DeleteAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "delete"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + + public static DeleteAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public DeleteAction() { + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..fc029f37ac928 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
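`DeleteAction` carries no configuration, so equality is by type alone and `toXContent` emits an empty object. A short sketch:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.DeleteAction;

public class DeleteActionExample {
    public static void main(String[] args) {
        DeleteAction delete = new DeleteAction();
        // Renders as {}: the action's presence in a policy's delete phase
        // is the entire configuration.
        System.out.println(delete);
        System.out.println(delete.equals(new DeleteAction())); // true
    }
}
---------------------------------------------------------------------------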
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Objects; + +public class DeleteLifecyclePolicyRequest extends TimedRequest { + + private final String lifecyclePolicy; + + public DeleteLifecyclePolicyRequest(String lifecyclePolicy) { + if (Strings.isNullOrEmpty(lifecyclePolicy)) { + throw new IllegalArgumentException("lifecycle name must be present"); + } + this.lifecyclePolicy = lifecyclePolicy; + } + + public String getLifecyclePolicy() { + return lifecyclePolicy; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DeleteLifecyclePolicyRequest that = (DeleteLifecyclePolicyRequest) o; + return Objects.equals(getLifecyclePolicy(), that.getLifecyclePolicy()); + } + + @Override + public int hashCode() { + return Objects.hash(getLifecyclePolicy()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java new file mode 100644 index 0000000000000..9d9e80bf1eeee --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; +import java.util.Objects; +import java.util.Optional; + +/** + * The request object used by the Explain Lifecycle API. + * + * Multiple indices may be queried in the same request using the + * {@link #indices(String...)} method + */ +public class ExplainLifecycleRequest extends TimedRequest { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public ExplainLifecycleRequest() { + super(); + } + + public ExplainLifecycleRequest indices(String... 
indices) { + this.indices = indices; + return this; + } + + public String[] indices() { + return indices; + } + + public ExplainLifecycleRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public Optional validate() { + return Optional.empty(); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; + return Objects.deepEquals(indices(), other.indices()) && + Objects.equals(indicesOptions(), other.indicesOptions()); + } + + @Override + public String toString() { + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java new file mode 100644 index 0000000000000..de2803afe5415 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The response object returned by the Explain Lifecycle API. + * + * Since the API can be run over multiple indices the response provides a map of + * index to the explanation of the lifecycle status for that index. 
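A sketch of assembling the explain request; the index names are illustrative, and `IndicesOptions.lenientExpandOpen()` simply swaps out the strict default shown above:

---------------------------------------------------------------------------
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest;

public class ExplainRequestExample {
    public static void main(String[] args) {
        ExplainLifecycleRequest request = new ExplainLifecycleRequest()
                .indices("logs-2018.10.*", "metrics")                // illustrative names
                .indicesOptions(IndicesOptions.lenientExpandOpen()); // tolerate missing indices
        System.out.println(request);
    }
}
---------------------------------------------------------------------------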
+ */ +public class ExplainLifecycleResponse implements ToXContentObject { + + private static final ParseField INDICES_FIELD = new ParseField("indices"); + + private Map indexResponses; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", a -> new ExplainLifecycleResponse(((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + INDICES_FIELD); + } + + public static ExplainLifecycleResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ExplainLifecycleResponse(Map indexResponses) { + this.indexResponses = indexResponses; + } + + /** + * @return a map of the responses from each requested index. The maps key is + * the index name and the value is the + * {@link IndexLifecycleExplainResponse} describing the current + * lifecycle status of that index + */ + public Map getIndexResponses() { + return indexResponses; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(INDICES_FIELD.getPreferredName()); + for (IndexLifecycleExplainResponse indexResponse : indexResponses.values()) { + builder.field(indexResponse.getIndex(), indexResponse); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(indexResponses); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleResponse other = (ExplainLifecycleResponse) obj; + return Objects.equals(indexResponses, other.indexResponses); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java new file mode 100644 index 0000000000000..eb564b7cd27b6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
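A hedged sketch of consuming the per-index map, assuming `getIndexResponses()` returns a `Map<String, IndexLifecycleExplainResponse>` as its javadoc describes; `response` stands in for the value returned by an earlier explain call, and the getters used are the ones defined on `IndexLifecycleExplainResponse` below:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse;
import org.elasticsearch.client.indexlifecycle.IndexLifecycleExplainResponse;

public class ExplainResponseExample {
    static void printPhases(ExplainLifecycleResponse response) {
        for (IndexLifecycleExplainResponse explain : response.getIndexResponses().values()) {
            if (explain.managedByILM()) {
                System.out.println(explain.getIndex() + " -> phase " + explain.getPhase()
                        + ", step " + explain.getStep());
            }
        }
    }
}
---------------------------------------------------------------------------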
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ForceMergeAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "forcemerge"; + private static final ParseField MAX_NUM_SEGMENTS_FIELD = new ParseField("max_num_segments"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + false, a -> { + int maxNumSegments = (int) a[0]; + return new ForceMergeAction(maxNumSegments); + }); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_NUM_SEGMENTS_FIELD); + } + + private final int maxNumSegments; + + public static ForceMergeAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ForceMergeAction(int maxNumSegments) { + if (maxNumSegments <= 0) { + throw new IllegalArgumentException("[" + MAX_NUM_SEGMENTS_FIELD.getPreferredName() + + "] must be a positive integer"); + } + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MAX_NUM_SEGMENTS_FIELD.getPreferredName(), maxNumSegments); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ForceMergeAction other = (ForceMergeAction) obj; + return Objects.equals(maxNumSegments, other.maxNumSegments); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..af17a3ea48cf9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
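A sketch of the force-merge action; the argument is the target segment count, and non-positive values are rejected by the constructor above:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.ForceMergeAction;

public class ForceMergeExample {
    public static void main(String[] args) {
        ForceMergeAction merge = new ForceMergeAction(1); // merge down to one segment
        System.out.println(merge); // {"max_num_segments":1}
        // new ForceMergeAction(0) throws: max_num_segments must be a positive integer
    }
}
---------------------------------------------------------------------------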
+ */ + + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; + +public class GetLifecyclePolicyRequest extends TimedRequest { + + private final String[] policyNames; + + public GetLifecyclePolicyRequest(String... policyNames) { + if (policyNames == null) { + this.policyNames = Strings.EMPTY_ARRAY; + } else { + for (String name : policyNames) { + if (name == null) { + throw new IllegalArgumentException("cannot include null policy name"); + } + } + this.policyNames = policyNames; + } + } + + public String[] getPolicyNames() { + return policyNames; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetLifecyclePolicyRequest request = (GetLifecyclePolicyRequest) o; + return Arrays.equals(getPolicyNames(), request.getPolicyNames()); + } + + @Override + public int hashCode() { + return Arrays.hashCode(getPolicyNames()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..fc007cb5aebd4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
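A sketch of the two ways to build the get-policy request; the policy name is illustrative:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest;

public class GetPolicyRequestExample {
    public static void main(String[] args) {
        GetLifecyclePolicyRequest all = new GetLifecyclePolicyRequest();           // every policy
        GetLifecyclePolicyRequest one = new GetLifecyclePolicyRequest("hot-warm"); // named policy
        // new GetLifecyclePolicyRequest("hot-warm", null) would throw: null entries
        // are rejected, only the whole argument may be omitted.
        System.out.println(one.getPolicyNames().length); // 1
    }
}
---------------------------------------------------------------------------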
+ */ + + +package org.elasticsearch.client.indexlifecycle; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +public class GetLifecyclePolicyResponse implements ToXContentObject { + + private final ImmutableOpenMap policies; + + public GetLifecyclePolicyResponse(ImmutableOpenMap policies) { + this.policies = policies; + } + + public ImmutableOpenMap getPolicies() { + return policies; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + for (ObjectObjectCursor cursor : policies) { + builder.field(cursor.key, cursor.value); + } + builder.endObject(); + return builder; + } + + public static GetLifecyclePolicyResponse fromXContent(XContentParser parser) throws IOException { + ImmutableOpenMap.Builder policies = ImmutableOpenMap.builder(); + + if (parser.currentToken() == null) { + parser.nextToken(); + } + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + + while (!parser.isClosed()) { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + String policyName = parser.currentName(); + LifecyclePolicyMetadata policyDefinition = LifecyclePolicyMetadata.parse(parser, policyName); + policies.put(policyName, policyDefinition); + } else { + parser.nextToken(); + } + } + + return new GetLifecyclePolicyResponse(policies.build()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetLifecyclePolicyResponse that = (GetLifecyclePolicyResponse) o; + return Objects.equals(getPolicies(), that.getPolicies()); + } + + @Override + public int hashCode() { + return Objects.hash(getPolicies()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java new file mode 100644 index 0000000000000..58ba7e63c03a7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java @@ -0,0 +1,263 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
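Because the policies come back as an `ImmutableOpenMap` (assumed to be `ImmutableOpenMap<String, LifecyclePolicyMetadata>`), iteration uses hppc cursors rather than `Map.Entry`. A hedged sketch, with `response` standing in for the result of an earlier get-lifecycle-policy call:

---------------------------------------------------------------------------
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse;
import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata;

public class GetPolicyResponseExample {
    static void listPolicies(GetLifecyclePolicyResponse response) {
        for (ObjectObjectCursor<String, LifecyclePolicyMetadata> cursor : response.getPolicies()) {
            System.out.println(cursor.key + " (version " + cursor.value.getVersion()
                    + ", modified " + cursor.value.getModifiedDateString() + ")");
        }
    }
}
---------------------------------------------------------------------------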
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Objects; + +public class IndexLifecycleExplainResponse implements ToXContentObject { + + private static final ParseField INDEX_FIELD = new ParseField("index"); + private static final ParseField MANAGED_BY_ILM_FIELD = new ParseField("managed"); + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField LIFECYCLE_DATE_MILLIS_FIELD = new ParseField("lifecycle_date_millis"); + private static final ParseField LIFECYCLE_DATE_FIELD = new ParseField("lifecycle_date"); + private static final ParseField PHASE_FIELD = new ParseField("phase"); + private static final ParseField ACTION_FIELD = new ParseField("action"); + private static final ParseField STEP_FIELD = new ParseField("step"); + private static final ParseField FAILED_STEP_FIELD = new ParseField("failed_step"); + private static final ParseField PHASE_TIME_MILLIS_FIELD = new ParseField("phase_time_millis"); + private static final ParseField PHASE_TIME_FIELD = new ParseField("phase_time"); + private static final ParseField ACTION_TIME_MILLIS_FIELD = new ParseField("action_time_millis"); + private static final ParseField ACTION_TIME_FIELD = new ParseField("action_time"); + private static final ParseField STEP_TIME_MILLIS_FIELD = new ParseField("step_time_millis"); + private static final ParseField STEP_TIME_FIELD = new ParseField("step_time"); + private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); + private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_lifecycle_explain_response", + a -> new IndexLifecycleExplainResponse( + (String) a[0], + (boolean) a[1], + (String) a[2], + (long) (a[3] == null ? -1L: a[3]), + (String) a[4], + (String) a[5], + (String) a[6], + (String) a[7], + (long) (a[8] == null ? -1L: a[8]), + (long) (a[9] == null ? -1L: a[9]), + (long) (a[10] == null ? 
-1L: a[10]), + (BytesReference) a[11], + (PhaseExecutionInfo) a[12])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), POLICY_NAME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LIFECYCLE_DATE_MILLIS_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), STEP_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), PHASE_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ACTION_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), STEP_TIME_MILLIS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.copyCurrentStructure(p); + return BytesArray.bytes(builder); + }, STEP_INFO_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), + PHASE_EXECUTION_INFO); + } + + private final String index; + private final String policyName; + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final long lifecycleDate; + private final long phaseTime; + private final long actionTime; + private final long stepTime; + private final boolean managedByILM; + private final BytesReference stepInfo; + private final PhaseExecutionInfo phaseExecutionInfo; + + public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, long lifecycleDate, + String phase, String action, String step, String failedStep, + long phaseTime, long actionTime, long stepTime, + BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, + actionTime, stepTime, stepInfo, phaseExecutionInfo); + } + + public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) { + return new IndexLifecycleExplainResponse(index, false, null, -1L, null, null, null, null, -1L, -1L, -1L, null, null); + } + + private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, long lifecycleDate, + String phase, String action, String step, String failedStep, long phaseTime, long actionTime, + long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + if (managedByILM) { + if (policyName == null) { + throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); + } + } else { + if (policyName != null || lifecycleDate >= 0 || phase != null || action != null || step != null || failedStep != null + || phaseTime >= 0 || actionTime >= 0 || stepTime >= 0 || stepInfo != null || phaseExecutionInfo != null) { + throw new IllegalArgumentException( + "Unmanaged index response must only contain fields: [" + MANAGED_BY_ILM_FIELD + ", " + INDEX_FIELD + "]"); + } + } + this.index = index; + this.policyName 
= policyName; + this.managedByILM = managedByILM; + this.lifecycleDate = lifecycleDate; + this.phase = phase; + this.action = action; + this.step = step; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseExecutionInfo = phaseExecutionInfo; + } + + public String getIndex() { + return index; + } + + public boolean managedByILM() { + return managedByILM; + } + + public String getPolicyName() { + return policyName; + } + + public long getLifecycleDate() { + return lifecycleDate; + } + + public String getPhase() { + return phase; + } + + public long getPhaseTime() { + return phaseTime; + } + + public String getAction() { + return action; + } + + public long getActionTime() { + return actionTime; + } + + public String getStep() { + return step; + } + + public long getStepTime() { + return stepTime; + } + + public String getFailedStep() { + return failedStep; + } + + public BytesReference getStepInfo() { + return stepInfo; + } + + public PhaseExecutionInfo getPhaseExecutionInfo() { + return phaseExecutionInfo; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_FIELD.getPreferredName(), index); + builder.field(MANAGED_BY_ILM_FIELD.getPreferredName(), managedByILM); + if (managedByILM) { + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + builder.timeField(LIFECYCLE_DATE_MILLIS_FIELD.getPreferredName(), LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + builder.field(PHASE_FIELD.getPreferredName(), phase); + builder.timeField(PHASE_TIME_MILLIS_FIELD.getPreferredName(), PHASE_TIME_FIELD.getPreferredName(), phaseTime); + builder.field(ACTION_FIELD.getPreferredName(), action); + builder.timeField(ACTION_TIME_MILLIS_FIELD.getPreferredName(), ACTION_TIME_FIELD.getPreferredName(), actionTime); + builder.field(STEP_FIELD.getPreferredName(), step); + builder.timeField(STEP_TIME_MILLIS_FIELD.getPreferredName(), STEP_TIME_FIELD.getPreferredName(), stepTime); + if (Strings.hasLength(failedStep)) { + builder.field(FAILED_STEP_FIELD.getPreferredName(), failedStep); + } + if (stepInfo != null && stepInfo.length() > 0) { + builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON); + } + if (phaseExecutionInfo != null) { + builder.field(PHASE_EXECUTION_INFO.getPreferredName(), phaseExecutionInfo); + } + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, actionTime, + stepTime, stepInfo, phaseExecutionInfo); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleExplainResponse other = (IndexLifecycleExplainResponse) obj; + return Objects.equals(index, other.index) && + Objects.equals(managedByILM, other.managedByILM) && + Objects.equals(policyName, other.policyName) && + Objects.equals(lifecycleDate, other.lifecycleDate) && + Objects.equals(phase, other.phase) && + Objects.equals(action, other.action) && + Objects.equals(step, other.step) && + Objects.equals(failedStep, other.failedStep) && + Objects.equals(phaseTime, other.phaseTime) && + Objects.equals(actionTime, other.actionTime) && + Objects.equals(stepTime, other.stepTime) && + Objects.equals(stepInfo, 
other.stepInfo) && + Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java new file mode 100644 index 0000000000000..22935f197731c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.plugins.spi.NamedXContentProvider; + +import java.util.Arrays; +import java.util.List; + +public class IndexLifecycleNamedXContentProvider implements NamedXContentProvider { + + + @Override + public List getNamedXContentParsers() { + return Arrays.asList( + // ILM + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(AllocateAction.NAME), + AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(DeleteAction.NAME), + DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ForceMergeAction.NAME), + ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ReadOnlyAction.NAME), + ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(RolloverAction.NAME), + RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ShrinkAction.NAME), + ShrinkAction::parse) + ); + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/package-info.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java similarity index 78% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/package-info.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java index fab18ccc637e8..3787d26f5f889 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/package-info.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java @@ -16,8 +16,15 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.client.indexlifecycle; /** - * Request and Response objects for miscellaneous X-Pack APIs. 
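Because the concrete action parsers are looked up by name (see `p.namedObject(LifecycleAction.class, n, null)` in `Phase` below), they must be registered before a policy can be parsed. A sketch of wiring the provider into a registry:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.IndexLifecycleNamedXContentProvider;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

public class RegistryExample {
    public static void main(String[] args) {
        NamedXContentRegistry registry = new NamedXContentRegistry(
                new IndexLifecycleNamedXContentProvider().getNamedXContentParsers());
        // Parsers created from this registry can now resolve the "allocate",
        // "delete", "forcemerge", "readonly", "rollover" and "shrink" actions.
    }
}
---------------------------------------------------------------------------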
+ * interface for index lifecycle management actions */ -package org.elasticsearch.protocol.xpack; +public interface LifecycleAction { + + /** + * @return the name of this action + */ + String getName(); +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java new file mode 100644 index 0000000000000..5db3d2d8c4e11 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +/** + * A {@link TimedRequest} to get the current status of index lifecycle management. + */ +public class LifecycleManagementStatusRequest extends TimedRequest { +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java new file mode 100644 index 0000000000000..c1586d7e1c738 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +/** + * The current status of index lifecycle management. See {@link OperationMode} for available statuses. 
+ */ +public class LifecycleManagementStatusResponse { + + private final OperationMode operationMode; + private static final String OPERATION_MODE = "operation_mode"; + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + OPERATION_MODE, a -> new LifecycleManagementStatusResponse((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(OPERATION_MODE)); + } + + //package private for testing + LifecycleManagementStatusResponse(String operationMode) { + this.operationMode = OperationMode.fromString(operationMode); + } + + public OperationMode getOperationMode() { + return operationMode; + } + + public static LifecycleManagementStatusResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LifecycleManagementStatusResponse that = (LifecycleManagementStatusResponse) o; + return operationMode == that.operationMode; + } + + @Override + public int hashCode() { + return Objects.hash(operationMode); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java new file mode 100644 index 0000000000000..2dc4e3644d1e4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. A + * {@link LifecyclePolicy} is made up of a set of {@link Phase}s which it will + * move through. 
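A hedged sketch of parsing a status body by hand; the JSON literal matches the single `operation_mode` field declared above, and the parser plumbing is standard `XContent` usage with an empty registry:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

public class StatusParseExample {
    public static void main(String[] args) throws IOException {
        XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                "{\"operation_mode\":\"RUNNING\"}");
        LifecycleManagementStatusResponse status = LifecycleManagementStatusResponse.fromXContent(parser);
        System.out.println(status.getOperationMode()); // RUNNING
    }
}
---------------------------------------------------------------------------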
+ */ +public class LifecyclePolicy implements ToXContentObject { + static final ParseField PHASES_FIELD = new ParseField("phases"); + + @SuppressWarnings("unchecked") + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("lifecycle_policy", false, + (a, name) -> { + List phases = (List) a[0]; + Map phaseMap = phases.stream().collect(Collectors.toMap(Phase::getName, Function.identity())); + return new LifecyclePolicy(name, phaseMap); + }); + private static Map<String, Set<String>> ALLOWED_ACTIONS = new HashMap<>(); + + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Phase.parse(p, n), v -> { + throw new IllegalArgumentException("ordered " + PHASES_FIELD.getPreferredName() + " are not supported"); + }, PHASES_FIELD); + + ALLOWED_ACTIONS.put("hot", Sets.newHashSet(RolloverAction.NAME)); + ALLOWED_ACTIONS.put("warm", Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME, ReadOnlyAction.NAME, ShrinkAction.NAME)); + ALLOWED_ACTIONS.put("cold", Sets.newHashSet(AllocateAction.NAME)); + ALLOWED_ACTIONS.put("delete", Sets.newHashSet(DeleteAction.NAME)); + } + + private final String name; + private final Map phases; + + /** + * @param name + * the name of this {@link LifecyclePolicy} + * @param phases + * a {@link Map} of {@link Phase}s which make up this + * {@link LifecyclePolicy}. + */ + public LifecyclePolicy(String name, Map phases) { + phases.values().forEach(phase -> { + if (ALLOWED_ACTIONS.containsKey(phase.getName()) == false) { + throw new IllegalArgumentException("Lifecycle does not support phase [" + phase.getName() + "]"); + } + phase.getActions().forEach((actionName, action) -> { + if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) { + throw new IllegalArgumentException("invalid action [" + actionName + "] " + + "defined in phase [" + phase.getName() + "]"); + } + }); + }); + this.name = name; + this.phases = phases; + } + + public static LifecyclePolicy parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + /** + * @return the name of this {@link LifecyclePolicy} + */ + public String getName() { + return name; + } + + /** + * @return the {@link Phase}s for this {@link LifecyclePolicy}, keyed by + * phase name.
+ */ + public Map getPhases() { + return phases; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(PHASES_FIELD.getPreferredName()); + for (Phase phase : phases.values()) { + builder.field(phase.getName(), phase); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(name, phases); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + LifecyclePolicy other = (LifecyclePolicy) obj; + return Objects.equals(name, other.name) && + Objects.equals(phases, other.phases); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java new file mode 100644 index 0000000000000..84de81437065d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
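A sketch of building a policy programmatically with actions the `ALLOWED_ACTIONS` table above permits; the policy name and ages are illustrative, and `Phase` is defined later in this change:

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.DeleteAction;
import org.elasticsearch.client.indexlifecycle.ForceMergeAction;
import org.elasticsearch.client.indexlifecycle.LifecycleAction;
import org.elasticsearch.client.indexlifecycle.LifecyclePolicy;
import org.elasticsearch.client.indexlifecycle.Phase;
import org.elasticsearch.common.unit.TimeValue;

import java.util.HashMap;
import java.util.Map;

public class PolicyExample {
    public static void main(String[] args) {
        Map<String, LifecycleAction> warmActions = new HashMap<>();
        warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1));
        Map<String, LifecycleAction> deleteActions = new HashMap<>();
        deleteActions.put(DeleteAction.NAME, new DeleteAction());

        Map<String, Phase> phases = new HashMap<>();
        phases.put("warm", new Phase("warm", TimeValue.timeValueDays(1), warmActions));
        phases.put("delete", new Phase("delete", TimeValue.timeValueDays(30), deleteActions));

        // The constructor validates each phase against ALLOWED_ACTIONS;
        // an AllocateAction in the delete phase would throw here.
        LifecyclePolicy policy = new LifecyclePolicy("logs-policy", phases);
        System.out.println(policy);
    }
}
---------------------------------------------------------------------------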
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Objects; + +public class LifecyclePolicyMetadata implements ToXContentObject { + + static final ParseField POLICY = new ParseField("policy"); + static final ParseField VERSION = new ParseField("version"); + static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("policy_metadata", + a -> { + LifecyclePolicy policy = (LifecyclePolicy) a[0]; + return new LifecyclePolicyMetadata(policy, (long) a[1], ZonedDateTime.parse((String) a[2]).toInstant().toEpochMilli()); + }); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareString(ConstructingObjectParser.constructorArg(), MODIFIED_DATE); + } + + public static LifecyclePolicyMetadata parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final LifecyclePolicy policy; + private final long version; + private final long modifiedDate; + + public LifecyclePolicyMetadata(LifecyclePolicy policy, long version, long modifiedDate) { + this.policy = policy; + this.version = version; + this.modifiedDate = modifiedDate; + } + + public LifecyclePolicy getPolicy() { + return policy; + } + + public String getName() { + return policy.getName(); + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public String getModifiedDateString() { + ZonedDateTime modifiedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC); + return modifiedDateTime.toString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), policy); + builder.field(VERSION.getPreferredName(), version); + builder.field(MODIFIED_DATE.getPreferredName(), + ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC).toString()); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(policy, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + LifecyclePolicyMetadata other = (LifecyclePolicyMetadata) obj; + return Objects.equals(policy, other.policy) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java new file mode 100644 index 0000000000000..81634e5824ec8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under 
one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; + +import java.util.EnumSet; +import java.util.Locale; + +/** + * Enum representing the different modes that Index Lifecycle Service can operate in. + */ +public enum OperationMode { + /** + * This represents a state where no policies are executed + */ + STOPPED { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING; + } + }, + + /** + * this represents a state where only sensitive actions (like {@link ShrinkAction}) will be executed + * until they finish, at which point the operation mode will move to STOPPED. + */ + STOPPING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING || nextMode == STOPPED; + } + }, + + /** + * Normal operation where all policies are executed as normal. + */ + RUNNING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == STOPPING; + } + }; + + public abstract boolean isValidChange(OperationMode nextMode); + + static OperationMode fromString(String string) { + return EnumSet.allOf(OperationMode.class).stream() + .filter(e -> string.equalsIgnoreCase(e.name())).findFirst() + .orElseThrow(() -> new IllegalArgumentException(String.format(Locale.ROOT, "%s is not a valid operation_mode", string))); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java new file mode 100644 index 0000000000000..0c19d39c85964 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java @@ -0,0 +1,143 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
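The enum encodes a small state machine; a sketch checking the legal transitions (RUNNING must pass through STOPPING before reaching STOPPED):

---------------------------------------------------------------------------
import org.elasticsearch.client.indexlifecycle.OperationMode;

public class OperationModeExample {
    public static void main(String[] args) {
        System.out.println(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); // true
        System.out.println(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); // true
        System.out.println(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING));  // true
        System.out.println(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED));  // false
    }
}
---------------------------------------------------------------------------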
+ */
+package org.elasticsearch.client.indexlifecycle;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Represents a set of {@link LifecycleAction}s which should be executed at a
+ * particular point in the lifecycle of an index.
+ */
+public class Phase implements ToXContentObject {
+
+    static final ParseField MIN_AGE = new ParseField("min_age");
+    static final ParseField ACTIONS_FIELD = new ParseField("actions");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("phase", false,
+        (a, name) -> new Phase(name, (TimeValue) a[0], ((List) a[1]).stream()
+            .collect(Collectors.toMap(LifecycleAction::getName, Function.identity()))));
+    static {
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
+            (p, c) -> TimeValue.parseTimeValue(p.text(), MIN_AGE.getPreferredName()), MIN_AGE, ValueType.VALUE);
+        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(),
+            (p, c, n) -> p.namedObject(LifecycleAction.class, n, null), v -> {
+                throw new IllegalArgumentException("ordered " + ACTIONS_FIELD.getPreferredName() + " are not supported");
+            }, ACTIONS_FIELD);
+    }
+
+    public static Phase parse(XContentParser parser, String name) {
+        return PARSER.apply(parser, name);
+    }
+
+    private final String name;
+    private final Map actions;
+    private final TimeValue minimumAge;
+
+    /**
+     * @param name
+     *            the name of this {@link Phase}.
+     * @param minimumAge
+     *            the age of the index when the index should move to this
+     *            {@link Phase}.
+     * @param actions
+     *            a {@link Map} of the {@link LifecycleAction}s to run during
+     *            this {@link Phase}. The keys in this map are the associated
+     *            action names.
+     */
+    public Phase(String name, TimeValue minimumAge, Map actions) {
+        this.name = name;
+        if (minimumAge == null) {
+            this.minimumAge = TimeValue.ZERO;
+        } else {
+            this.minimumAge = minimumAge;
+        }
+        this.actions = actions;
+    }
+
+    /**
+     * @return the age of the index when the index should move to this
+     *         {@link Phase}.
+     */
+    public TimeValue getMinimumAge() {
+        return minimumAge;
+    }
+
+    /**
+     * @return the name of this {@link Phase}
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * @return a {@link Map} of the {@link LifecycleAction}s to run during
+     *         this {@link Phase}.
+ */ + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN_AGE.getPreferredName(), minimumAge.getStringRep()); + builder.field(ACTIONS_FIELD.getPreferredName(), actions); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(name, minimumAge, actions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Phase other = (Phase) obj; + return Objects.equals(name, other.name) && + Objects.equals(minimumAge, other.minimumAge) && + Objects.equals(actions, other.actions); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java new file mode 100644 index 0000000000000..802ca8834cdd3 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class contains information about the current phase being executed by Index + * Lifecycle Management on the specific index. 
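+ *
+ * As an illustrative sketch only (the index and policy names below are invented,
+ * not part of this change), a caller that has positioned an {@link XContentParser}
+ * on a phase execution object could read it back as:
+ * <pre>{@code
+ * PhaseExecutionInfo info = PhaseExecutionInfo.parse(parser, "logs-000001");
+ * String policy = info.getPolicyName(); // e.g. "logs_policy"
+ * long version = info.getVersion();     // version of the policy that produced this phase
+ * }</pre>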
+ */
+public class PhaseExecutionInfo implements ToXContentObject {
+    private static final ParseField POLICY_NAME_FIELD = new ParseField("policy");
+    private static final ParseField PHASE_DEFINITION_FIELD = new ParseField("phase_definition");
+    private static final ParseField VERSION_FIELD = new ParseField("version");
+    private static final ParseField MODIFIED_DATE_IN_MILLIS_FIELD = new ParseField("modified_date_in_millis");
+
+    private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+        "phase_execution_info", false,
+        (a, name) -> new PhaseExecutionInfo((String) a[0], (Phase) a[1], (long) a[2], (long) a[3]));
+    static {
+        PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_NAME_FIELD);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Phase::parse, PHASE_DEFINITION_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD);
+        PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_IN_MILLIS_FIELD);
+    }
+
+    public static PhaseExecutionInfo parse(XContentParser parser, String name) {
+        return PARSER.apply(parser, name);
+    }
+
+    private final String policyName;
+    private final Phase phase;
+    private final long version;
+    private final long modifiedDate;
+
+    /**
+     * Creates an object that holds information about the phase currently being executed.
+     *
+     * @param policyName the name of the policy being executed; this may not be the policy currently assigned to the index
+     * @param phase the phase definition currently being executed
+     * @param version the version of the policy being executed
+     * @param modifiedDate the time the executing version of the phase was modified
+     */
+    public PhaseExecutionInfo(String policyName, Phase phase, long version, long modifiedDate) {
+        this.policyName = policyName;
+        this.phase = phase;
+        this.version = version;
+        this.modifiedDate = modifiedDate;
+    }
+
+    public String getPolicyName() {
+        return policyName;
+    }
+
+    public Phase getPhase() {
+        return phase;
+    }
+
+    public long getVersion() {
+        return version;
+    }
+
+    public long getModifiedDate() {
+        return modifiedDate;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(policyName, phase, version, modifiedDate);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        PhaseExecutionInfo other = (PhaseExecutionInfo) obj;
+        return Objects.equals(policyName, other.policyName) &&
+            Objects.equals(phase, other.phase) &&
+            Objects.equals(version, other.version) &&
+            Objects.equals(modifiedDate, other.modifiedDate);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this, false, true);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName);
+        if (phase != null) {
+            builder.field(PHASE_DEFINITION_FIELD.getPreferredName(), phase);
+        }
+        builder.field(VERSION_FIELD.getPreferredName(), version);
+        builder.timeField(MODIFIED_DATE_IN_MILLIS_FIELD.getPreferredName(), "modified_date", modifiedDate);
+        builder.endObject();
+        return builder;
+    }
+}
+
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java
new file mode 100644
index 0000000000000..ddfcc6bf6e65a
---
/dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class PutLifecyclePolicyRequest extends TimedRequest implements ToXContentObject { + + private final LifecyclePolicy policy; + + public PutLifecyclePolicyRequest(LifecyclePolicy policy) { + if (policy == null) { + throw new IllegalArgumentException("policy definition cannot be null"); + } + if (Strings.isNullOrEmpty(policy.getName())) { + throw new IllegalArgumentException("policy name must be present"); + } + this.policy = policy; + } + + public String getName() { + return policy.getName(); + } + + public LifecyclePolicy getLifecyclePolicy() { + return policy; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("policy", policy); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutLifecyclePolicyRequest that = (PutLifecyclePolicyRequest) o; + return Objects.equals(getLifecyclePolicy(), that.getLifecyclePolicy()); + } + + @Override + public int hashCode() { + return Objects.hash(getLifecyclePolicy()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java new file mode 100644 index 0000000000000..7734e792bbc5b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ReadOnlyAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "readonly"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new); + + public static ReadOnlyAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ReadOnlyAction() { + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return ReadOnlyAction.class.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..88bdf4dd6868d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
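+ */
+
+// A hedged usage sketch, not part of this change; the index pattern below is an
+// invented example. Removing the lifecycle policy from all matching indices:
+//
+//   RemoveIndexLifecyclePolicyRequest request =
+//       new RemoveIndexLifecyclePolicyRequest(Collections.singletonList("logs-*"));
+//
+/*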
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyRequest extends TimedRequest { + + private final List indices; + private final IndicesOptions indicesOptions; + + public RemoveIndexLifecyclePolicyRequest(List indices) { + this(indices, IndicesOptions.strictExpandOpen()); + } + + public RemoveIndexLifecyclePolicyRequest(List indices, IndicesOptions indicesOptions) { + this.indices = Collections.unmodifiableList(Objects.requireNonNull(indices)); + this.indicesOptions = Objects.requireNonNull(indicesOptions); + } + + public List indices() { + return indices; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public int hashCode() { + return Objects.hash(indices, indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyRequest other = (RemoveIndexLifecyclePolicyRequest) obj; + return Objects.deepEquals(indices, other.indices) && + Objects.equals(indicesOptions, other.indicesOptions); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..3aae1537faa29 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
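+ */
+
+// Illustrative sketch (not part of this change): after parsing a response with
+// fromXContent, callers can check whether any index failed to have its policy removed.
+//
+//   RemoveIndexLifecyclePolicyResponse response =
+//       RemoveIndexLifecyclePolicyResponse.fromXContent(parser);
+//   if (response.hasFailures()) {
+//       List<String> failed = response.getFailedIndexes();
+//   }
+//
+/*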
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyResponse { + + public static final ParseField HAS_FAILURES_FIELD = new ParseField("has_failures"); + public static final ParseField FAILED_INDEXES_FIELD = new ParseField("failed_indexes"); + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "change_policy_for_index_response", true, args -> new RemoveIndexLifecyclePolicyResponse((List)args[0])); + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FAILED_INDEXES_FIELD); + // Needs to be declared but not used in constructing the response object + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), HAS_FAILURES_FIELD); + } + + private final List failedIndexes; + + public RemoveIndexLifecyclePolicyResponse(List failedIndexes) { + if (failedIndexes == null) { + throw new IllegalArgumentException(FAILED_INDEXES_FIELD.getPreferredName() + " cannot be null"); + } + this.failedIndexes = Collections.unmodifiableList(failedIndexes); + } + + public List getFailedIndexes() { + return failedIndexes; + } + + public boolean hasFailures() { + return failedIndexes.isEmpty() == false; + } + + public static RemoveIndexLifecyclePolicyResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(failedIndexes); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyResponse other = (RemoveIndexLifecyclePolicyResponse) obj; + return Objects.equals(failedIndexes, other.failedIndexes); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..6f3acaf19aaea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
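+ */
+
+// Illustrative sketch (index names invented): retry lifecycle execution for one or
+// more indices; the constructor rejects an empty list of indices.
+//
+//   RetryLifecyclePolicyRequest request =
+//       new RetryLifecyclePolicyRequest("logs-000007", "logs-000008");
+//
+/*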
+ */
+
+package org.elasticsearch.client.indexlifecycle;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import org.elasticsearch.client.TimedRequest;
+
+public class RetryLifecyclePolicyRequest extends TimedRequest {
+
+    private final List indices;
+
+    public RetryLifecyclePolicyRequest(String... indices) {
+        if (indices.length == 0) {
+            throw new IllegalArgumentException("Must specify at least one index to retry");
+        }
+        this.indices = Arrays.asList(indices);
+    }
+
+    public List getIndices() {
+        return indices;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        RetryLifecyclePolicyRequest that = (RetryLifecyclePolicyRequest) o;
+        return indices.size() == that.indices.size() && indices.containsAll(that.indices);
+    }
+
+    @Override
+    public int hashCode() {
+        // Hash a set of the indices so that the result is order-insensitive,
+        // matching equals() above, which ignores the order of the indices.
+        return Objects.hash(new HashSet<>(indices));
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java
new file mode 100644
index 0000000000000..0cc9dcf234969
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
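+ */
+
+// Illustrative sketch (threshold values invented): a rollover action must set at
+// least one of max_size, max_age, or max_docs; unset conditions are passed as null.
+//
+//   RolloverAction action = new RolloverAction(
+//       new ByteSizeValue(50, ByteSizeUnit.GB), // max_size
+//       TimeValue.timeValueDays(30),            // max_age
+//       null);                                  // max_docs left unset
+//
+/*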
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + + +public class RolloverAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "rollover"; + private static final ParseField MAX_SIZE_FIELD = new ParseField("max_size"); + private static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs"); + private static final ParseField MAX_AGE_FIELD = new ParseField("max_age"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new RolloverAction((ByteSizeValue) a[0], (TimeValue) a[1], (Long) a[2])); + static { + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SIZE_FIELD.getPreferredName()), MAX_SIZE_FIELD, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_AGE_FIELD.getPreferredName()), MAX_AGE_FIELD, ValueType.VALUE); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD); + } + + private final ByteSizeValue maxSize; + private final Long maxDocs; + private final TimeValue maxAge; + + public static RolloverAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public RolloverAction(ByteSizeValue maxSize, TimeValue maxAge, Long maxDocs) { + if (maxSize == null && maxAge == null && maxDocs == null) { + throw new IllegalArgumentException("At least one rollover condition must be set."); + } + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + public ByteSizeValue getMaxSize() { + return maxSize; + } + + public TimeValue getMaxAge() { + return maxAge; + } + + public Long getMaxDocs() { + return maxDocs; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (maxSize != null) { + builder.field(MAX_SIZE_FIELD.getPreferredName(), maxSize.getStringRep()); + } + if (maxAge != null) { + builder.field(MAX_AGE_FIELD.getPreferredName(), maxAge.getStringRep()); + } + if (maxDocs != null) { + builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverAction other = (RolloverAction) obj; + return Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java new file mode 100644 index 0000000000000..345356380145e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ShrinkAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "shrink"; + private static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0])); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD); + } + + private int numberOfShards; + + public static ShrinkAction parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public ShrinkAction(int numberOfShards) { + if (numberOfShards <= 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0"); + } + this.numberOfShards = numberOfShards; + } + + int getNumberOfShards() { + return numberOfShards; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShrinkAction that = (ShrinkAction) o; + return Objects.equals(numberOfShards, that.numberOfShards); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfShards); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java new file mode 100644 index 0000000000000..84cc844a92a98 --- /dev/null +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StartILMRequest extends TimedRequest { + + public StartILMRequest() { + } + + @Override + public int hashCode() { + return 64; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java new file mode 100644 index 0000000000000..1695fc0dd7aea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StopILMRequest extends TimedRequest { + + public StopILMRequest() { + } + + @Override + public int hashCode() { + return 75; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java index f6ab026402462..c2596f3e38a4e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java @@ -84,14 +84,13 @@ public class StartBasicResponse { } } return new Tuple<>(message, acknowledgeMessages); - }, - new ParseField("acknowledge")); + }, new ParseField("acknowledge")); } private Map acknowledgeMessages; private String acknowledgeMessage; - enum Status { + public enum Status { GENERATED_BASIC(true, null, RestStatus.OK), ALREADY_USING_BASIC(false, "Operation failed: Current license is basic.", RestStatus.FORBIDDEN), NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK); @@ -141,6 +140,10 @@ public StartBasicResponse() { this.acknowledgeMessage = acknowledgeMessage; } + public Status getStatus() { + return status; + } + public boolean isAcknowledged() { return status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java similarity index 65% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java index ae26bc4de8d0e..fb37a449435f4 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java @@ -16,21 +16,17 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.migration; +package org.elasticsearch.client.migration; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.TimedRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.IOException; import java.util.Arrays; import java.util.Objects; -public class IndexUpgradeInfoRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class IndexUpgradeInfoRequest extends TimedRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); @@ -39,19 +35,6 @@ public IndexUpgradeInfoRequest(String... 
indices) { indices(indices); } - public IndexUpgradeInfoRequest(StreamInput in) throws IOException { - super(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - indicesOptions.writeIndicesOptions(out); - } - @Override public String[] indices() { return indices; @@ -72,16 +55,6 @@ public void indicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java similarity index 68% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java index 4c1208f960ebd..a9af1e36cc258 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java @@ -16,25 +16,19 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.migration; +package org.elasticsearch.client.migration; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import java.io.IOException; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public class IndexUpgradeInfoResponse extends ActionResponse implements ToXContentObject { +public class IndexUpgradeInfoResponse { private static final ParseField INDICES = new ParseField("indices"); private static final ParseField ACTION_REQUIRED = new ParseField("action_required"); @@ -70,50 +64,16 @@ public class IndexUpgradeInfoResponse extends ActionResponse implements ToXConte } - private Map actions; - - public IndexUpgradeInfoResponse() { - - } + private final Map actions; public IndexUpgradeInfoResponse(Map actions) { this.actions = actions; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); - } - public Map getActions() { return actions; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
IOException {
-        builder.startObject();
-        {
-            builder.startObject(INDICES.getPreferredName());
-            for (Map.Entry entry : actions.entrySet()) {
-                builder.startObject(entry.getKey());
-                {
-                    builder.field(ACTION_REQUIRED.getPreferredName(), entry.getValue().toString());
-                }
-                builder.endObject();
-            }
-            builder.endObject();
-        }
-        builder.endObject();
-        return builder;
-    }
-
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java
similarity index 72%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java
index c87e37be7a55a..26b7b1e815d7f 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/UpgradeActionRequired.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java
@@ -16,19 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.protocol.xpack.migration;
+package org.elasticsearch.client.migration;
 
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
-
-import java.io.IOException;
 import java.util.Locale;
 
 /**
  * Indicates the type of the upgrade required for the index
  */
-public enum UpgradeActionRequired implements Writeable {
+public enum UpgradeActionRequired {
     NOT_APPLICABLE,   // Indicates that the check is not applicable to this index type; the next check will be performed
     UP_TO_DATE,       // Indicates that the check finds this index to be up to date - no additional checks are required
     REINDEX,          // The index should be reindexed
@@ -38,15 +33,6 @@ public static UpgradeActionRequired fromString(String value) {
         return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT));
     }
 
-    public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException {
-        return in.readEnum(UpgradeActionRequired.class);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeEnum(this);
-    }
-
     @Override
     public String toString() {
         return name().toLowerCase(Locale.ROOT);
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/package-info.java
similarity index 94%
rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java
rename to client/rest-high-level/src/main/java/org/elasticsearch/client/migration/package-info.java
index 12dc8eebc1773..dcb29a3776e3c 100644
--- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/migration/package-info.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/package-info.java
@@ -21,4 +21,4 @@
  * Request and Response objects for the default distribution's Migration
  * APIs.
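+ *
+ * For example (an illustrative sketch, not part of this change; the index names
+ * are invented), checking which indices require action before a version upgrade:
+ * <pre>{@code
+ * IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest("index1", "index2");
+ * request.indicesOptions(IndicesOptions.lenientExpandOpen());
+ * }</pre>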
 */
-package org.elasticsearch.protocol.xpack.migration;
+package org.elasticsearch.client.migration;
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java
new file mode 100644
index 0000000000000..5414c86258111
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.client.ml.job.config.MlFilter;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Request to create a new machine learning filter given a {@link MlFilter} configuration
+ */
+public class PutFilterRequest extends ActionRequest implements ToXContentObject {
+
+    private final MlFilter filter;
+
+    /**
+     * Construct a new PutFilterRequest
+     *
+     * @param filter a {@link MlFilter} configuration to create
+     */
+    public PutFilterRequest(MlFilter filter) {
+        this.filter = filter;
+    }
+
+    public MlFilter getMlFilter() {
+        return filter;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return filter.toXContent(builder, params);
+    }
+
+    @Override
+    public boolean equals(Object object) {
+        if (this == object) {
+            return true;
+        }
+
+        if (object == null || getClass() != object.getClass()) {
+            return false;
+        }
+
+        PutFilterRequest request = (PutFilterRequest) object;
+        return Objects.equals(filter, request.filter);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(filter);
+    }
+
+    @Override
+    public final String toString() {
+        return Strings.toString(this);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java
new file mode 100644
index 0000000000000..56164bd5be08e
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response containing the newly created {@link MlFilter} + */ +public class PutFilterResponse implements ToXContentObject { + + private MlFilter filter; + + public static PutFilterResponse fromXContent(XContentParser parser) throws IOException { + return new PutFilterResponse(MlFilter.PARSER.parse(parser, null).build()); + } + + PutFilterResponse(MlFilter filter) { + this.filter = filter; + } + + public MlFilter getResponse() { + return filter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + filter.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + PutFilterResponse response = (PutFilterResponse) object; + return Objects.equals(filter, response.filter); + } + + @Override + public int hashCode() { + return Objects.hash(filter); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateDatafeedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateDatafeedRequest.java new file mode 100644 index 0000000000000..e434c5f9a5728 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateDatafeedRequest.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Requests an update to a {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} with the passed {@link DatafeedUpdate} + * settings + */ +public class UpdateDatafeedRequest extends ActionRequest implements ToXContentObject { + + private final DatafeedUpdate update; + + public UpdateDatafeedRequest(DatafeedUpdate update) { + this.update = update; + } + + public DatafeedUpdate getDatafeedUpdate() { + return update; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return update.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + UpdateDatafeedRequest that = (UpdateDatafeedRequest) o; + return Objects.equals(update, that.update); + } + + @Override + public int hashCode() { + return Objects.hash(update); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 1e59ea067ca7b..119f70fc79756 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -37,6 +37,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -292,6 +293,10 @@ public Builder setIndices(List indices) { return this; } + public Builder setIndices(String... indices) { + return setIndices(Arrays.asList(indices)); + } + public Builder setTypes(List types) { this.types = types; return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java index e0d1bd0849b3b..1b67fc4459b50 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java @@ -32,6 +32,14 @@ import java.util.SortedSet; import java.util.TreeSet; +/** + * An MlFilter Object + * + * A filter contains a list of strings. + * It can be used by one or more jobs. + * + * Specifically, filters are referenced in the custom_rules property of detector configuration objects. 
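+ *
+ * A minimal construction sketch (illustrative only; the filter ID, description
+ * and items below are invented):
+ * <pre>{@code
+ * MlFilter safeDomains = MlFilter.builder("safe_domains")
+ *     .setDescription("Domains that are known to be safe")
+ *     .setItems("*.elastic.co", "wikipedia.org")
+ *     .build();
+ * }</pre>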
+ */
 public class MlFilter implements ToXContentObject {
 
     public static final ParseField TYPE = new ParseField("type");
@@ -105,6 +113,10 @@ public int hashCode() {
         return Objects.hash(id, description, items);
     }
 
+    /**
+     * Creates a new Builder object for creating an MlFilter object
+     * @param filterId The ID of the filter to create
+     */
     public static Builder builder(String filterId) {
         return new Builder().setId(filterId);
     }
@@ -118,6 +130,10 @@ public static class Builder {
 
         private Builder() {
         }
 
+        /**
+         * Sets the ID of the filter
+         * @param id the ID for the filter
+         */
         public Builder setId(String id) {
             this.id = Objects.requireNonNull(id);
             return this;
@@ -128,6 +144,10 @@ public String getId() {
             return id;
         }
 
+        /**
+         * Sets the description of the filter
+         * @param description the description for the filter
+         */
         public Builder setDescription(String description) {
             this.description = description;
             return this;
@@ -143,6 +163,13 @@ public Builder setItems(List items) {
             return this;
         }
 
+        /**
+         * Sets the items of the filter.
+         *
+         * A wildcard * can be used at the beginning or the end of an item.
+         * Up to 10000 items are allowed in each filter.
+         *
+         * @param items the items to be applied in the filter
+         */
         public Builder setItems(String... items) {
             setItems(Arrays.asList(items));
             return this;
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java
index 29cd1787c704a..4e279844afc59 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java
@@ -19,14 +19,21 @@
 
 package org.elasticsearch.client.rollup;
 
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Objects;
+import java.util.function.Function;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 
 public abstract class AcknowledgedResponse implements ToXContentObject {
+
+    protected static final String PARSE_FIELD_NAME = "acknowledged";
     private final boolean acknowledged;
 
     public AcknowledgedResponse(final boolean acknowledged) {
@@ -37,6 +44,12 @@ public boolean isAcknowledged() {
         return acknowledged;
     }
 
+    protected static ConstructingObjectParser generateParser(String name, Function ctor, String parseField) {
+        ConstructingObjectParser p = new ConstructingObjectParser<>(name, true, args -> ctor.apply((boolean) args[0]));
+        p.declareBoolean(constructorArg(), new ParseField(parseField));
+        return p;
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) {
@@ -58,10 +71,16 @@ public int hashCode() {
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         builder.startObject();
         {
-            builder.field("acknowledged", isAcknowledged());
+            builder.field(getFieldName(), isAcknowledged());
         }
         builder.endObject();
         return builder;
     }
 
+    /**
+     * @return the field name this response uses to output the acknowledged flag
+     */
+    protected String getFieldName() {
+        return PARSE_FIELD_NAME;
+    }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java
b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java
index f9d3025d38a4f..35734c4a8358a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java
@@ -19,28 +19,21 @@
 
 package org.elasticsearch.client.rollup;
 
-import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.io.IOException;
 
-import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
-
 public class DeleteRollupJobResponse extends AcknowledgedResponse {
 
     public DeleteRollupJobResponse(boolean acknowledged) {
         super(acknowledged);
     }
 
+    private static final ConstructingObjectParser PARSER = AcknowledgedResponse
+        .generateParser("delete_rollup_job_response", DeleteRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME);
+
     public static DeleteRollupJobResponse fromXContent(final XContentParser parser) throws IOException {
         return PARSER.parse(parser, null);
     }
-
-    private static final ConstructingObjectParser PARSER
-        = new ConstructingObjectParser<>("delete_rollup_job_response", true,
-            args -> new DeleteRollupJobResponse((boolean) args[0]));
-    static {
-        PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged"));
-    }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java
index d991313bb5e62..31c656b033479 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java
@@ -18,28 +18,21 @@
  */
 package org.elasticsearch.client.rollup;
 
-import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.io.IOException;
 
-import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
-
 public class PutRollupJobResponse extends AcknowledgedResponse {
-
     public PutRollupJobResponse(boolean acknowledged) {
         super(acknowledged);
     }
 
+    private static final ConstructingObjectParser PARSER = AcknowledgedResponse
+        .generateParser("put_rollup_job_response", PutRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME);
+
     public static PutRollupJobResponse fromXContent(final XContentParser parser) throws IOException {
         return PARSER.parse(parser, null);
     }
-
-    private static final ConstructingObjectParser PARSER
-        = new ConstructingObjectParser<>("put_rollup_job_response", true, args -> new PutRollupJobResponse((boolean) args[0]));
-    static {
-        PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged"));
-    }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobRequest.java
new file mode 100644
index 0000000000000..5b67d8c9e9d20
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobRequest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +public class StartRollupJobRequest implements Validatable { + + private final String jobId; + + public StartRollupJobRequest(final String jobId) { + this.jobId = Objects.requireNonNull(jobId, "id parameter must not be null"); + } + + public String getJobId() { + return jobId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final StartRollupJobRequest that = (StartRollupJobRequest) o; + return Objects.equals(jobId, that.jobId); + } + + @Override + public int hashCode() { + return Objects.hash(jobId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java new file mode 100644 index 0000000000000..b953901ce0c84 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
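The request class above is a thin, eagerly validated holder for the job id. A quick usage sketch (the job id is illustrative, not from this change):

    import org.elasticsearch.client.rollup.StartRollupJobRequest;

    public class StartRollupJobRequestExample {
        public static void main(String[] args) {
            // The constructor fails fast on null: "id parameter must not be null".
            StartRollupJobRequest request = new StartRollupJobRequest("my_rollup_job");
            System.out.println(request.getJobId()); // prints: my_rollup_job
        }
    }
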
+ */ + +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class StartRollupJobResponse extends AcknowledgedResponse { + + private static final String PARSE_FIELD_NAME = "started"; + + private static final ConstructingObjectParser PARSER = AcknowledgedResponse + .generateParser("start_rollup_job_response", StartRollupJobResponse::new, PARSE_FIELD_NAME); + + public StartRollupJobResponse(boolean acknowledged) { + super(acknowledged); + } + + public static StartRollupJobResponse fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + protected String getFieldName() { + return PARSE_FIELD_NAME; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java new file mode 100644 index 0000000000000..2aefa97cb8bf1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
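The rollup responses above all delegate to the generateParser helper added on AcknowledgedResponse earlier in this diff. A minimal sketch of the pattern as a further subclass (the class StopRollupJobResponse is hypothetical and not part of this change; generic type parameters are spelled out here even though extraction stripped them from the surrounding diff):

    package org.elasticsearch.client.rollup;

    import org.elasticsearch.common.xcontent.ConstructingObjectParser;
    import org.elasticsearch.common.xcontent.XContentParser;

    import java.io.IOException;

    // Hypothetical subclass following the exact pattern of the rollup responses in this diff.
    public class StopRollupJobResponse extends AcknowledgedResponse {

        private static final ConstructingObjectParser<StopRollupJobResponse, Void> PARSER = AcknowledgedResponse
            .generateParser("stop_rollup_job_response", StopRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME);

        public StopRollupJobResponse(boolean acknowledged) {
            super(acknowledged);
        }

        public static StopRollupJobResponse fromXContent(final XContentParser parser) throws IOException {
            return PARSER.parse(parser, null);
        }
    }
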
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * The response for the authenticate call. The response contains two fields: a + * user field and a boolean flag signaling whether the user is enabled. The + * user object contains all user metadata which Elasticsearch uses to map roles, + * etc. + */ +public final class AuthenticateResponse { + + static final ParseField USERNAME = new ParseField("username"); + static final ParseField ROLES = new ParseField("roles"); + static final ParseField METADATA = new ParseField("metadata"); + static final ParseField FULL_NAME = new ParseField("full_name"); + static final ParseField EMAIL = new ParseField("email"); + static final ParseField ENABLED = new ParseField("enabled"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "client_security_authenticate_response", + a -> new AuthenticateResponse(new User((String) a[0], ((List) a[1]), (Map) a[2], + (String) a[3], (String) a[4]), (Boolean) a[5])); + static { + PARSER.declareString(constructorArg(), USERNAME); + PARSER.declareStringArray(constructorArg(), ROLES); + PARSER.declareObject(constructorArg(), (parser, c) -> parser.map(), METADATA); + PARSER.declareStringOrNull(optionalConstructorArg(), FULL_NAME); + PARSER.declareStringOrNull(optionalConstructorArg(), EMAIL); + PARSER.declareBoolean(constructorArg(), ENABLED); + } + + private final User user; + private final boolean enabled; + + public AuthenticateResponse(User user, boolean enabled) { + this.user = user; + this.enabled = enabled; + } + + /** + * @return The effective user. This is the authenticated user, or, when + * submitting requests on behalf of other users, it is the + * impersonated user.
+ */ + public User getUser() { + return user; + } + + /** + * @return whether the user is enabled or not + */ + public boolean enabled() { + return enabled; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AuthenticateResponse that = (AuthenticateResponse) o; + return user.equals(that.user) && enabled == that.enabled; + } + + @Override + public int hashCode() { + return Objects.hash(user, enabled); + } + + public static AuthenticateResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java new file mode 100644 index 0000000000000..6c1b394355e1e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Request to create a new OAuth2 token from the Elasticsearch cluster. + */ +public final class CreateTokenRequest implements Validatable, ToXContentObject { + + private final String grantType; + private final String scope; + private final String username; + private final char[] password; + private final String refreshToken; + + /** + * General purpose constructor. This constructor is typically not useful, and one of the following factory methods should be used + * instead: + *
+ * <ul>
+ * <li>{@link #passwordGrant(String, char[])}</li>
+ * <li>{@link #refreshTokenGrant(String)}</li>
+ * <li>{@link #clientCredentialsGrant()}</li>
+ * </ul>
+ */ + public CreateTokenRequest(String grantType, @Nullable String scope, @Nullable String username, @Nullable char[] password, + @Nullable String refreshToken) { + if (Strings.isNullOrEmpty(grantType)) { + throw new IllegalArgumentException("grant_type is required"); + } + this.grantType = grantType; + this.username = username; + this.password = password; + this.scope = scope; + this.refreshToken = refreshToken; + } + + public static CreateTokenRequest passwordGrant(String username, char[] password) { + if (Strings.isNullOrEmpty(username)) { + throw new IllegalArgumentException("username is required"); + } + if (password == null || password.length == 0) { + throw new IllegalArgumentException("password is required"); + } + return new CreateTokenRequest("password", null, username, password, null); + } + + public static CreateTokenRequest refreshTokenGrant(String refreshToken) { + if (Strings.isNullOrEmpty(refreshToken)) { + throw new IllegalArgumentException("refresh_token is required"); + } + return new CreateTokenRequest("refresh_token", null, null, null, refreshToken); + } + + public static CreateTokenRequest clientCredentialsGrant() { + return new CreateTokenRequest("client_credentials", null, null, null, null); + } + + public String getGrantType() { + return grantType; + } + + public String getScope() { + return scope; + } + + public String getUsername() { + return username; + } + + public char[] getPassword() { + return password; + } + + public String getRefreshToken() { + return refreshToken; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("grant_type", grantType); + if (scope != null) { + builder.field("scope", scope); + } + if (username != null) { + builder.field("username", username); + } + if (password != null) { + byte[] passwordBytes = CharArrays.toUtf8Bytes(password); + try { + builder.field("password").utf8Value(passwordBytes, 0, passwordBytes.length); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + if (refreshToken != null) { + builder.field("refresh_token", refreshToken); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final CreateTokenRequest that = (CreateTokenRequest) o; + return Objects.equals(grantType, that.grantType) && + Objects.equals(scope, that.scope) && + Objects.equals(username, that.username) && + Arrays.equals(password, that.password) && + Objects.equals(refreshToken, that.refreshToken); + } + + @Override + public int hashCode() { + int result = Objects.hash(grantType, scope, username, refreshToken); + result = 31 * result + Arrays.hashCode(password); + return result; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenResponse.java new file mode 100644 index 0000000000000..32d298d1a9bc0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenResponse.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
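A short sketch of the three factory methods listed in the constructor javadoc above; all credential values are placeholders:

    import org.elasticsearch.client.security.CreateTokenRequest;

    public class CreateTokenRequestExamples {
        public static void main(String[] args) {
            // Password grant: requires a username and a non-empty password.
            CreateTokenRequest byPassword = CreateTokenRequest.passwordGrant("jdoe", "placeholder-password".toCharArray());
            // Refresh-token grant: exchanges a previously issued refresh token.
            CreateTokenRequest byRefreshToken = CreateTokenRequest.refreshTokenGrant("placeholder-refresh-token");
            // Client-credentials grant: takes no further arguments.
            CreateTokenRequest byClientCredentials = CreateTokenRequest.clientCredentialsGrant();
            System.out.println(byPassword.getGrantType());          // password
            System.out.println(byRefreshToken.getGrantType());      // refresh_token
            System.out.println(byClientCredentials.getGrantType()); // client_credentials
        }
    }
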
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Response when creating a new OAuth2 token in the Elasticsearch cluster. Contains an access token, the token's expiry, and an optional + * refresh token. + */ +public final class CreateTokenResponse { + + private final String accessToken; + private final String type; + private final TimeValue expiresIn; + private final String scope; + private final String refreshToken; + + public CreateTokenResponse(String accessToken, String type, TimeValue expiresIn, String scope, String refreshToken) { + this.accessToken = accessToken; + this.type = type; + this.expiresIn = expiresIn; + this.scope = scope; + this.refreshToken = refreshToken; + } + + public String getAccessToken() { + return accessToken; + } + + public String getType() { + return type; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + public String getScope() { + return scope; + } + + public String getRefreshToken() { + return refreshToken; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final CreateTokenResponse that = (CreateTokenResponse) o; + return Objects.equals(accessToken, that.accessToken) && + Objects.equals(type, that.type) && + Objects.equals(expiresIn, that.expiresIn) && + Objects.equals(scope, that.scope) && + Objects.equals(refreshToken, that.refreshToken); + } + + @Override + public int hashCode() { + return Objects.hash(accessToken, type, expiresIn, scope, refreshToken); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "create_token_response", true, args -> new CreateTokenResponse( + (String) args[0], (String) args[1], TimeValue.timeValueSeconds((Long) args[2]), (String) args[3], (String) args[4])); + + static { + PARSER.declareString(constructorArg(), new ParseField("access_token")); + PARSER.declareString(constructorArg(), new ParseField("type")); + PARSER.declareLong(constructorArg(), new ParseField("expires_in")); + PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("scope")); + PARSER.declareStringOrNull(optionalConstructorArg(), new ParseField("refresh_token")); + } + + public static CreateTokenResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java new file mode 100644 index 0000000000000..9cb78dd9c83ec --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java @@ -0,0 +1,153 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; +import org.elasticsearch.client.security.support.expressiondsl.parser.RoleMapperExpressionParser; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A representation of a single role-mapping. + * + * @see RoleMapperExpression + * @see RoleMapperExpressionParser + */ +public final class ExpressionRoleMapping { + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("role-mapping", true, + (args, name) -> new ExpressionRoleMapping(name, (RoleMapperExpression) args[0], (List) args[1], + (Map) args[2], (boolean) args[3])); + + static { + PARSER.declareField(constructorArg(), (parser, context) -> RoleMapperExpressionParser.fromXContent(parser), Fields.RULES, + ObjectParser.ValueType.OBJECT); + PARSER.declareStringArray(constructorArg(), Fields.ROLES); + PARSER.declareField(constructorArg(), XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); + PARSER.declareBoolean(constructorArg(), Fields.ENABLED); + } + + private final String name; + private final RoleMapperExpression expression; + private final List roles; + private final Map metadata; + private final boolean enabled; + + /** + * Constructor for role mapping + * + * @param name role mapping name + * @param expr {@link RoleMapperExpression} Expression used for role mapping + * @param roles list of roles to be associated with the user + * @param metadata metadata that helps to identify which roles are assigned + * to the user + * @param enabled a flag when {@code true} signifies the role mapping is active + */ + public ExpressionRoleMapping(final String name, final RoleMapperExpression expr, final List roles, + final Map metadata, boolean enabled) { + this.name = name; + this.expression = expr; + this.roles = Collections.unmodifiableList(roles); + this.metadata = (metadata == null) ? 
Collections.emptyMap() : Collections.unmodifiableMap(metadata); + this.enabled = enabled; + } + + public String getName() { + return name; + } + + public RoleMapperExpression getExpression() { + return expression; + } + + public List getRoles() { + return roles; + } + + public Map getMetadata() { + return metadata; + } + + public boolean isEnabled() { + return enabled; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (enabled ? 1231 : 1237); + result = prime * result + ((expression == null) ? 0 : expression.hashCode()); + result = prime * result + ((metadata == null) ? 0 : metadata.hashCode()); + result = prime * result + ((name == null) ? 0 : name.hashCode()); + result = prime * result + ((roles == null) ? 0 : roles.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + final ExpressionRoleMapping other = (ExpressionRoleMapping) obj; + if (enabled != other.enabled) + return false; + if (expression == null) { + if (other.expression != null) + return false; + } else if (!expression.equals(other.expression)) + return false; + if (metadata == null) { + if (other.metadata != null) + return false; + } else if (!metadata.equals(other.metadata)) + return false; + if (name == null) { + if (other.name != null) + return false; + } else if (!name.equals(other.name)) + return false; + if (roles == null) { + if (other.roles != null) + return false; + } else if (!roles.equals(other.roles)) + return false; + return true; + } + + public interface Fields { + ParseField ROLES = new ParseField("roles"); + ParseField ENABLED = new ParseField("enabled"); + ParseField RULES = new ParseField("rules"); + ParseField METADATA = new ParseField("metadata"); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsRequest.java new file mode 100644 index 0000000000000..ca9b85c724d6f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.util.set.Sets; + +import java.util.Collections; +import java.util.Objects; +import java.util.Set; + +/** + * Request object to get role mappings + */ +public final class GetRoleMappingsRequest implements Validatable { + private final Set roleMappingNames; + + public GetRoleMappingsRequest(final String... 
roleMappingNames) { + if (roleMappingNames != null) { + this.roleMappingNames = Collections.unmodifiableSet(Sets.newHashSet(roleMappingNames)); + } else { + this.roleMappingNames = Collections.emptySet(); + } + } + + public Set getRoleMappingNames() { + return roleMappingNames; + } + + @Override + public int hashCode() { + return Objects.hash(roleMappingNames); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final GetRoleMappingsRequest other = (GetRoleMappingsRequest) obj; + + return Objects.equals(roleMappingNames, other.roleMappingNames); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsResponse.java new file mode 100644 index 0000000000000..05e63cefbe5e3 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetRoleMappingsResponse.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
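Usage sketch for the request above. The mapping names are illustrative; passing no names leaves the set empty, presumably meaning "fetch all mappings":

    import org.elasticsearch.client.security.GetRoleMappingsRequest;

    public class GetRoleMappingsRequestExample {
        public static void main(String[] args) {
            GetRoleMappingsRequest all = new GetRoleMappingsRequest();   // empty, unmodifiable set of names
            GetRoleMappingsRequest some = new GetRoleMappingsRequest("mapping_one", "mapping_two");
            System.out.println(all.getRoleMappingNames());  // []
            System.out.println(some.getRoleMappingNames());
        }
    }
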
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Get role mappings response + */ +public final class GetRoleMappingsResponse { + + private final List mappings; + + public GetRoleMappingsResponse(List mappings) { + this.mappings = Collections.unmodifiableList(mappings); + } + + public List getMappings() { + return mappings; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final GetRoleMappingsResponse that = (GetRoleMappingsResponse) o; + return this.mappings.equals(that.mappings); + } + + @Override + public int hashCode() { + return mappings.hashCode(); + } + + public static GetRoleMappingsResponse fromXContent(XContentParser parser) throws IOException { + final List roleMappings = new ArrayList<>(); + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + roleMappings.add(ExpressionRoleMapping.PARSER.parse(parser, parser.currentName())); + } + + return new GetRoleMappingsResponse(roleMappings); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenRequest.java new file mode 100644 index 0000000000000..4e767335ffdbd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenRequest.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to invalidate an OAuth2 token within the Elasticsearch cluster.
+ */ +public final class InvalidateTokenRequest implements Validatable, ToXContentObject { + + private final String accessToken; + private final String refreshToken; + + InvalidateTokenRequest(@Nullable String accessToken, @Nullable String refreshToken) { + if (Strings.isNullOrEmpty(accessToken)) { + if (Strings.isNullOrEmpty(refreshToken)) { + throw new IllegalArgumentException("Either access-token or refresh-token is required"); + } + } else if (Strings.isNullOrEmpty(refreshToken) == false) { + throw new IllegalArgumentException("Cannot supply both access-token and refresh-token"); + } + this.accessToken = accessToken; + this.refreshToken = refreshToken; + } + + public static InvalidateTokenRequest accessToken(String accessToken) { + if (Strings.isNullOrEmpty(accessToken)) { + throw new IllegalArgumentException("token is required"); + } + return new InvalidateTokenRequest(accessToken, null); + } + + public static InvalidateTokenRequest refreshToken(String refreshToken) { + if (Strings.isNullOrEmpty(refreshToken)) { + throw new IllegalArgumentException("refresh_token is required"); + } + return new InvalidateTokenRequest(null, refreshToken); + } + + public String getAccessToken() { + return accessToken; + } + + public String getRefreshToken() { + return refreshToken; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (accessToken != null) { + builder.field("token", accessToken); + } + if (refreshToken != null) { + builder.field("refresh_token", refreshToken); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final InvalidateTokenRequest that = (InvalidateTokenRequest) o; + return Objects.equals(this.accessToken, that.accessToken) && + Objects.equals(this.refreshToken, that.refreshToken); + } + + @Override + public int hashCode() { + return Objects.hash(accessToken, refreshToken); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenResponse.java new file mode 100644 index 0000000000000..876faa485c236 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/InvalidateTokenResponse.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
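The package-private constructor above enforces that exactly one token kind is supplied, so the static factories are the intended entry points. A sketch with placeholder token values:

    import org.elasticsearch.client.security.InvalidateTokenRequest;

    public class InvalidateTokenRequestExample {
        public static void main(String[] args) {
            // One of the two factories must be used; each validates its argument.
            InvalidateTokenRequest byAccessToken = InvalidateTokenRequest.accessToken("placeholder-access-token");
            InvalidateTokenRequest byRefreshToken = InvalidateTokenRequest.refreshToken("placeholder-refresh-token");
            // Supplying both tokens, or neither, makes the constructor throw IllegalArgumentException.
            System.out.println(byAccessToken.getAccessToken());
            System.out.println(byRefreshToken.getRefreshToken());
        }
    }
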
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Response when invalidating an OAuth2 token. Returns a + * single boolean field for whether the invalidation record was created or updated. + */ +public final class InvalidateTokenResponse { + + private final boolean created; + + public InvalidateTokenResponse(boolean created) { + this.created = created; + } + + public boolean isCreated() { + return created; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InvalidateTokenResponse that = (InvalidateTokenResponse) o; + return created == that.created; + } + + @Override + public int hashCode() { + return Objects.hash(created); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "invalidate_token_response", true, args -> new InvalidateTokenResponse((boolean) args[0])); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("created")); + } + + public static InvalidateTokenResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java index 73b57fb57ecc6..e97e4f0125892 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserResponse.java @@ -62,7 +62,6 @@ public int hashCode() { static { PARSER.declareBoolean(constructorArg(), new ParseField("created")); - PARSER.declareObject((a,b) -> {}, (parser, context) -> null, new ParseField("user")); // ignore the user field! } public static PutUserResponse fromXContent(XContentParser parser) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/expressiondsl/parser/RoleMapperExpressionParser.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/expressiondsl/parser/RoleMapperExpressionParser.java index 98de4f4c2092c..0f5aec7f596b7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/expressiondsl/parser/RoleMapperExpressionParser.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/support/expressiondsl/parser/RoleMapperExpressionParser.java @@ -45,6 +45,18 @@ public final class RoleMapperExpressionParser { public static final ParseField FIELD = new ParseField("field"); + public static RoleMapperExpression fromXContent(final XContentParser parser) throws IOException { + return new RoleMapperExpressionParser().parse("rules", parser); + } + + /** + * This function exists to be compatible with + * {@link org.elasticsearch.common.xcontent.ContextParser#parse(XContentParser, Object)} + */ + public static RoleMapperExpression parseObject(XContentParser parser, String id) throws IOException { + return new RoleMapperExpressionParser().parse(id, parser); + } + /** * @param name The name of the expression tree within its containing object. 
* Used to provide descriptive error messages. diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java new file mode 100644 index 0000000000000..977780b46b79b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security.user; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + + +/** + * An authenticated user + */ +public final class User { + + private final String username; + private final Collection roles; + private final Map metadata; + @Nullable private final String fullName; + @Nullable private final String email; + + public User(String username, Collection roles, Map metadata, @Nullable String fullName, + @Nullable String email) { + Objects.requireNonNull(username, "`username` cannot be null"); + Objects.requireNonNull(roles, "`roles` cannot be null. Pass an empty collection instead."); + Objects.requireNonNull(metadata, "`metadata` cannot be null. Pass an empty map instead."); + this.username = username; + this.roles = roles; + this.metadata = Collections.unmodifiableMap(metadata); + this.fullName = fullName; + this.email = email; + } + + /** + * @return The principal of this user - effectively serving as the + * unique identity of the user. Can never be {@code null}. + */ + public String username() { + return this.username; + } + + /** + * @return The roles this user is associated with. The roles are + * identified by their unique names and each represents a + * set of permissions. Can never be {@code null}. + */ + public Collection roles() { + return this.roles; + } + + /** + * @return The metadata that is associated with this user. Can never be {@code null}. + */ + public Map metadata() { + return metadata; + } + + /** + * @return The full name of this user. May be {@code null}. + */ + public @Nullable String fullName() { + return fullName; + } + + /** + * @return The email of this user. May be {@code null}.
+ */ + public @Nullable String email() { + return email; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append("User[username=").append(username); + sb.append(",roles=[").append(Strings.collectionToCommaDelimitedString(roles)).append("]"); + sb.append(",metadata=").append(metadata); + sb.append(",fullName=").append(fullName); + sb.append(",email=").append(email); + sb.append("]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o instanceof User == false) { + return false; + } + + final User user = (User) o; + + if (!username.equals(user.username)) { + return false; + } + if (!roles.equals(user.roles)) { + return false; + } + if (!metadata.equals(user.metadata)) { + return false; + } + if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) { + return false; + } + return !(email != null ? !email.equals(user.email) : user.email != null); + } + + @Override + public int hashCode() { + return Objects.hash(username, roles, metadata, fullName, email); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/AckWatchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/AckWatchRequest.java index 1381544744dae..8c36caf9dbd5b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/AckWatchRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/AckWatchRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import java.util.Locale; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/ActivateWatchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/ActivateWatchRequest.java index 7f2849ff39c0c..e242ea4f20ba9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/ActivateWatchRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/ActivateWatchRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.client.watcher; import org.elasticsearch.client.Validatable; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import java.util.Objects; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java index b20a56c361f8f..ccbd64358e154 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeactivateWatchRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.watcher; import org.elasticsearch.client.Validatable; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.PutWatchRequest; import java.util.Objects; @@ -27,7 +27,6 @@ public class DeactivateWatchRequest implements Validatable { private final String watchId; public DeactivateWatchRequest(String watchId) { - Objects.requireNonNull(watchId, "watch id is missing"); if (PutWatchRequest.isValidId(watchId) == false) { throw new IllegalArgumentException("watch id contains whitespace"); diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchRequest.java new file mode 100644 index 0000000000000..3abee6fb47e3f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchRequest.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.watcher; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * A delete watch request to delete an watch by name (id) + */ +public class DeleteWatchRequest implements Validatable { + + private final String id; + + public DeleteWatchRequest(String id) { + Objects.requireNonNull(id, "watch id is missing"); + if (PutWatchRequest.isValidId(id) == false) { + throw new IllegalArgumentException("watch id contains whitespace"); + } + this.id = id; + } + + /** + * @return The name of the watch to be deleted + */ + public String getId() { + return id; + } + + @Override + public String toString() { + return "delete [" + id + "]"; + } +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchResponse.java similarity index 81% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchResponse.java index b644a6a854cfa..4e946ad459cf8 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/DeleteWatchResponse.java @@ -16,12 +16,9 @@ * specific language governing permissions and limitations * under the License. 
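The request class above validates its id eagerly in the constructor rather than through a validate() override. A sketch with an illustrative watch id:

    import org.elasticsearch.client.watcher.DeleteWatchRequest;

    public class DeleteWatchRequestExample {
        public static void main(String[] args) {
            DeleteWatchRequest request = new DeleteWatchRequest("my_watch");
            System.out.println(request); // prints: delete [my_watch]
            // new DeleteWatchRequest(null)       -> NullPointerException: watch id is missing
            // new DeleteWatchRequest("my watch") -> IllegalArgumentException: watch id contains whitespace
        }
    }
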
*/ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,7 +27,7 @@ import java.io.IOException; import java.util.Objects; -public class DeleteWatchResponse extends ActionResponse implements ToXContentObject { +public class DeleteWatchResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("x_pack_delete_watch_response", DeleteWatchResponse::new); @@ -92,22 +89,6 @@ public int hashCode() { return Objects.hash(id, version, found); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - version = in.readVLong(); - found = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(id); - out.writeVLong(version); - out.writeBoolean(found); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject() diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchRequest.java similarity index 51% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchRequest.java index 0bfa7dc7d343f..88f47aeaeee7a 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchRequest.java @@ -16,67 +16,43 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.client.Validatable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentType; -import java.io.IOException; +import java.util.Objects; import java.util.regex.Pattern; /** * This request class contains the data needed to create a watch along with the name of the watch. * The name of the watch will become the ID of the indexed document. 
*/ -public final class PutWatchRequest extends ActionRequest { +public final class PutWatchRequest implements Validatable { private static final Pattern NO_WS_PATTERN = Pattern.compile("\\S+"); - private String id; - private BytesReference source; - private XContentType xContentType = XContentType.JSON; + private final String id; + private final BytesReference source; + private final XContentType xContentType; private boolean active = true; private long version = Versions.MATCH_ANY; - public PutWatchRequest() {} - - public PutWatchRequest(StreamInput in) throws IOException { - readFrom(in); - } - public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + Objects.requireNonNull(id, "watch id is missing"); + if (isValidId(id) == false) { + throw new IllegalArgumentException("watch id contains whitespace"); + } + Objects.requireNonNull(source, "watch source is missing"); + Objects.requireNonNull(xContentType, "request body is missing"); this.id = id; this.source = source; this.xContentType = xContentType; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - source = in.readBytesReference(); - active = in.readBoolean(); - xContentType = in.readEnum(XContentType.class); - version = in.readZLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(id); - out.writeBytesReference(source); - out.writeBoolean(active); - out.writeEnum(xContentType); - out.writeZLong(version); - } - /** * @return The name that will be the ID of the indexed document */ @@ -84,13 +60,6 @@ public String getId() { return id; } - /** - * Set the watch name - */ - public void setId(String id) { - this.id = id; - } - /** * @return The source of the watch */ @@ -98,14 +67,6 @@ public BytesReference getSource() { return source; } - /** - * Set the source of the watch - */ - public void setSource(BytesReference source, XContentType xContentType) { - this.source = source; - this.xContentType = xContentType; - } - /** * @return The initial active state of the watch (defaults to {@code true}, e.g. 
"active") */ @@ -135,23 +96,6 @@ public void setVersion(long version) { this.version = version; } - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (id == null) { - validationException = ValidateActions.addValidationError("watch id is missing", validationException); - } else if (isValidId(id) == false) { - validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); - } - if (source == null) { - validationException = ValidateActions.addValidationError("watch source is missing", validationException); - } - if (xContentType == null) { - validationException = ValidateActions.addValidationError("request body is missing", validationException); - } - return validationException; - } - public static boolean isValidId(String id) { return Strings.isEmpty(id) == false && NO_WS_PATTERN.matcher(id).matches(); } diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchResponse.java similarity index 74% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchResponse.java index 98467f32dd860..5c8d7bde9b158 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/PutWatchResponse.java @@ -16,12 +16,9 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,14 +27,15 @@ import java.io.IOException; import java.util.Objects; -public class PutWatchResponse extends ActionResponse implements ToXContentObject { +public class PutWatchResponse implements ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("x_pack_put_watch_response", PutWatchResponse::new); + static { - PARSER.declareString(PutWatchResponse::setId, new ParseField("_id")); - PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version")); - PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created")); + PARSER.declareString(PutWatchResponse::setId, new ParseField("_id")); + PARSER.declareLong(PutWatchResponse::setVersion, new ParseField("_version")); + PARSER.declareBoolean(PutWatchResponse::setCreated, new ParseField("created")); } private String id; @@ -92,22 +90,6 @@ public int hashCode() { return Objects.hash(id, version, created); } - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(id); - out.writeVLong(version); - out.writeBoolean(created); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - version = in.readVLong(); - created = in.readBoolean(); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws IOException { return builder.startObject() diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/package-info.java similarity index 94% rename from x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/package-info.java index d34fd598ab170..cadd7d7558fd8 100644 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/package-info.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/watcher/package-info.java @@ -21,4 +21,4 @@ * Request and Response objects for the default distribution's Watcher * APIs. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java index 2b7d205447f69..f9a92d2fbbe02 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java @@ -21,9 +21,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -252,7 +249,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class BuildInfo implements ToXContentObject, Writeable { + public static class BuildInfo implements ToXContentObject { private final String hash; private final String timestamp; @@ -261,16 +258,6 @@ public BuildInfo(String hash, String timestamp) { this.timestamp = timestamp; } - public BuildInfo(StreamInput input) throws IOException { - this(input.readString(), input.readString()); - } - - @Override - public void writeTo(StreamOutput output) throws IOException { - output.writeString(hash); - output.writeString(timestamp); - } - public String getHash() { return hash; } @@ -309,7 +296,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class FeatureSetsInfo implements ToXContentObject, Writeable { + public static class FeatureSetsInfo implements ToXContentObject { private final Map featureSets; public FeatureSetsInfo(Set featureSets) { @@ -320,24 +307,6 @@ public FeatureSetsInfo(Set featureSets) { this.featureSets = Collections.unmodifiableMap(map); } - public FeatureSetsInfo(StreamInput in) throws IOException { - int size = in.readVInt(); - Map featureSets = new HashMap<>(size); - for (int i = 0; i < size; i++) { - FeatureSet featureSet = new FeatureSet(in); - featureSets.put(featureSet.name, featureSet); - } - this.featureSets = Collections.unmodifiableMap(featureSets); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(featureSets.size()); - for (FeatureSet featureSet : featureSets.values()) { - featureSet.writeTo(out); - } - } - public Map getFeatureSets() { return featureSets; } @@ 
-365,7 +334,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - public static class FeatureSet implements ToXContentObject, Writeable { + public static class FeatureSet implements ToXContentObject { private final String name; @Nullable private final String description; private final boolean available; @@ -381,19 +350,6 @@ public FeatureSet(String name, @Nullable String description, boolean available, this.nativeCodeInfo = nativeCodeInfo; } - public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeOptionalString(description); - out.writeBoolean(available); - out.writeBoolean(enabled); - out.writeMap(nativeCodeInfo); - } - public String name() { return name; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java index b51a2d7de9fbd..2f9c99cc65e09 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java @@ -34,7 +34,7 @@ public class XPackUsageResponse { private final Map> usages; - private XPackUsageResponse(Map> usages) throws IOException { + private XPackUsageResponse(Map> usages) { this.usages = usages; } diff --git a/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 0000000000000..4204a868246a5 --- /dev/null +++ b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.client.indexlifecycle.IndexLifecycleNamedXContentProvider \ No newline at end of file diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index cc179e12e3163..a9214e9333c4e 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -22,7 +22,6 @@ org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameV @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.elasticsearch.common.logging.DeprecationLogger -org.elasticsearch.common.logging.ESLoggerFactory org.elasticsearch.common.logging.LogConfigurator org.elasticsearch.common.logging.LoggerMessageFormat org.elasticsearch.common.logging.Loggers diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index fdd5634ddd6bd..95b2fc0a43bb5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -28,14 +28,18 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; import 
org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.SearchHit; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -44,10 +48,19 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - +import java.util.stream.IntStream; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.fieldFromSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasIndex; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasType; import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -268,23 +281,124 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception assertMultiGetResponse(highLevelClient().mget(multiGetRequest, RequestOptions.DEFAULT), testDocs); } - private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + @SuppressWarnings("unchecked") + public void testGlobalParametersAndSingleRequest() throws Exception { + createIndexWithMultipleShards("test"); + + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); + + // tag::bulk-processor-mix-parameters + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setGlobalIndex("tweets") + .setGlobalType("_doc") + .setGlobalRouting("routing") + .setGlobalPipeline("pipeline_id") + .build()) { + + + processor.add(new IndexRequest() // <1> + .source(XContentType.JSON, "user", "some user")); + processor.add(new IndexRequest("blogs", "post_type", "1") // <2> + .source(XContentType.JSON, "title", "some title")); + } + // end::bulk-processor-mix-parameters + latch.await(); + + Iterable<SearchHit> hits = searchAll(new SearchRequest("tweets").routing("routing")); + assertThat(hits, everyItem(hasProperty(fieldFromSource("user"), equalTo("some user")))); + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + + + Iterable<SearchHit> blogs = searchAll(new SearchRequest("blogs").routing("routing")); + assertThat(blogs, everyItem(hasProperty(fieldFromSource("title"), equalTo("some title")))); + assertThat(blogs, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + } + + @SuppressWarnings("unchecked") + public void testGlobalParametersAndBulkProcessor() throws
Exception { + createIndexWithMultipleShards("test"); + + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); + + int numDocs = randomIntBetween(10, 10); + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setGlobalIndex("test") + .setGlobalType("test") + .setGlobalRouting("routing") + .setGlobalPipeline("pipeline_id") + .build()) { + + indexDocs(processor, numDocs, null, null, "test", "test", "pipeline_id"); + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + + Iterable<SearchHit> hits = searchAll(new SearchRequest("test").routing("routing")); + + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + assertThat(hits, everyItem(Matchers.allOf(hasIndex("test"), hasType("test")))); + assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); + } + } + + @SuppressWarnings("unchecked") + private Matcher<SearchHit>[] expectedIds(int numDocs) { + return IntStream.rangeClosed(1, numDocs) + .boxed() + .map(n -> hasId(n.toString())) + .<Matcher<SearchHit>>toArray(Matcher[]::new); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String localIndex, String localType, + String globalIndex, String globalType, String globalPipeline) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); for (int i = 1; i <= numDocs; i++) { if (randomBoolean()) { - processor.add(new IndexRequest("test", "test", Integer.toString(i)) - .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); + processor.add(new IndexRequest(localIndex, localType, Integer.toString(i)) + .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); } else { - final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" - + Strings.toString(JsonXContent.contentBuilder() - .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; - processor.add(new BytesArray(source), null, null, XContentType.JSON); + BytesArray data = bytesBulkRequest(localIndex, localType, i); + processor.add(data, globalIndex, globalType, globalPipeline, null, XContentType.JSON); } - multiGetRequest.add("test", "test", Integer.toString(i)); + multiGetRequest.add(localIndex, localType, Integer.toString(i)); } return multiGetRequest; } + private static BytesArray bytesBulkRequest(String localIndex, String localType, int id) throws IOException { + String action = Strings.toString(jsonBuilder() + .startObject() + .startObject("index") + .field("_index", localIndex) + .field("_type", localType) + .field("_id", Integer.toString(id)) + .endObject() + .endObject() + ); + String source = Strings.toString(jsonBuilder() + .startObject() + .field("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .endObject() + ); + + String request = action + "\n" + source + "\n"; + return new BytesArray(request); + } + + private static MultiGetRequest 
indexDocs(BulkProcessor processor, int numDocs) throws Exception { + return indexDocs(processor, numDocs, "test", "test", null, null, null); + } + private static void assertResponseItems(List bulkItemResponses, int numDocs) { assertThat(bulkItemResponses.size(), is(numDocs)); int i = 1; @@ -343,4 +457,5 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java new file mode 100644 index 0000000000000..cf8f1ebfdbd76 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; + +import java.io.IOException; +import java.util.function.Function; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasIndex; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasType; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTestCase { + + @SuppressWarnings("unchecked") + public void testGlobalPipelineOnBulkRequest() throws IOException { + createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ"); + + BulkRequest request = new BulkRequest(); + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2")); + request.pipeline("xyz"); + + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + } + + public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { + 
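+ // The request-level pipeline is expected to win over the bulk-level global
+ // pipeline. A rough sketch of the precedence rule this test relies on
+ // (illustrative pseudocode only, not the client's actual implementation):
+ //   String effectivePipeline = indexRequest.getPipeline() != null
+ //       ? indexRequest.getPipeline() // per-request value wins
+ //       : bulkRequest.pipeline();    // otherwise fall back to the global value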
createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + + BulkRequest request = new BulkRequest(); + request.pipeline("globalId"); + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1") + .setPipeline("perIndexId")); + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2") + .setPipeline("perIndexId")); + + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, everyItem(hasProperty(fieldFromSource("someNewField"), equalTo("someValue")))); + // global pipeline was not applied + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldXYZ"), nullValue()))); + } + + @SuppressWarnings("unchecked") + public void testMixPipelineOnRequestAndGlobal() throws IOException { + createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + + // tag::bulk-request-mix-pipeline + BulkRequest request = new BulkRequest(); + request.pipeline("globalId"); + + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1") + .setPipeline("perIndexId")); // <1> + + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2")); // <2> + // end::bulk-request-mix-pipeline + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasProperty(fieldFromSource("someNewField"), equalTo("someValue"))), + both(hasId("2")) + .and(hasProperty(fieldFromSource("fieldXYZ"), equalTo("valueXYZ"))))); + } + + public void testGlobalIndex() throws IOException { + BulkRequest request = new BulkRequest("global_index", null); + request.add(new IndexRequest().type("doc").id("1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest().type("doc").id("2") + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("global_index"); + assertThat(hits, everyItem(hasIndex("global_index"))); + } + + @SuppressWarnings("unchecked") + public void testIndexGlobalAndPerRequest() throws IOException { + BulkRequest request = new BulkRequest("global_index", null); + request.add(new IndexRequest("local_index", "doc", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest().type("doc").id("2") // will take global index + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("local_index", "global_index"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasIndex("local_index")), + both(hasId("2")) + .and(hasIndex("global_index")))); + } + + public void testGlobalType() throws IOException { + BulkRequest request = new BulkRequest(null, "global_type"); + request.add(new IndexRequest("index").id("1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("2") + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("index"); + assertThat(hits, everyItem(hasType("global_type"))); + } + + @SuppressWarnings("unchecked") + public void testTypeGlobalAndPerRequest() throws IOException { + BulkRequest request = new BulkRequest(null, "global_type"); + request.add(new IndexRequest("index1", "local_type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index2").id("2") // will take global type + 
.source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("index1", "index2"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasType("local_type")), + both(hasId("2")) + .and(hasType("global_type")))); + } + + @SuppressWarnings("unchecked") + public void testGlobalRouting() throws IOException { + createIndexWithMultipleShards("index"); + BulkRequest request = new BulkRequest(null, null); + request.add(new IndexRequest("index", "type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index", "type", "2") + .source(XContentType.JSON, "field", "bulk1")); + request.routing("1"); + bulk(request); + + Iterable emptyHits = searchAll(new SearchRequest("index").routing("xxx")); + assertThat(emptyHits, is(emptyIterable())); + + Iterable hits = searchAll(new SearchRequest("index").routing("1")); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + } + + @SuppressWarnings("unchecked") + public void testMixLocalAndGlobalRouting() throws IOException { + BulkRequest request = new BulkRequest(null, null); + request.routing("globalRouting"); + request.add(new IndexRequest("index", "type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index", "type", "2") + .routing("localRouting") + .source(XContentType.JSON, "field", "bulk1")); + + bulk(request); + + Iterable hits = searchAll(new SearchRequest("index").routing("globalRouting", "localRouting")); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + } + + private BulkResponse bulk(BulkRequest request) throws IOException { + BulkResponse bulkResponse = execute(request, highLevelClient()::bulk, highLevelClient()::bulkAsync); + assertFalse(bulkResponse.hasFailures()); + return bulkResponse; + } + + @SuppressWarnings("unchecked") + private static Function fieldFromSource(String fieldName) { + return (response) -> (T) response.getSourceAsMap().get(fieldName); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 07c0d818bfa85..af3112ec7e1d8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -21,7 +21,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -30,15 +33,20 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.Objects; +import java.util.stream.Collectors; + +import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { @@ -76,16 +84,42 @@ protected static Resp execute(Req request, SyncMethod syn } } + /** + * Executes the provided request using either the sync method or its async + * variant, both provided as functions. This variant is used when the call does + * not have a request object (only headers and the request path). + */ + protected static Resp execute(SyncMethodNoRequest syncMethodNoRequest, AsyncMethodNoRequest asyncMethodNoRequest, + RequestOptions requestOptions) throws IOException { + if (randomBoolean()) { + return syncMethodNoRequest.execute(requestOptions); + } else { + PlainActionFuture future = PlainActionFuture.newFuture(); + asyncMethodNoRequest.execute(requestOptions, future); + return future.actionGet(); + } + } + @FunctionalInterface protected interface SyncMethod { Response execute(Request request, RequestOptions options) throws IOException; } + @FunctionalInterface + protected interface SyncMethodNoRequest { + Response execute(RequestOptions options) throws IOException; + } + @FunctionalInterface protected interface AsyncMethod { void execute(Request request, RequestOptions options, ActionListener listener); } + @FunctionalInterface + protected interface AsyncMethodNoRequest { + void execute(RequestOptions options, ActionListener listener); + } + private static class HighLevelClient extends RestHighLevelClient { private HighLevelClient(RestClient restClient) { super(restClient, (client) -> {}, Collections.emptyList()); @@ -125,6 +159,22 @@ protected static XContentBuilder buildRandomXContentPipeline() throws IOExceptio return buildRandomXContentPipeline(pipelineBuilder); } + protected static void createFieldAddingPipleine(String id, String fieldName, String value) throws IOException { + XContentBuilder pipeline = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("set") + .field("field", fieldName) + .field("value", value) + .endObject() + .endObject() + .endArray() + .endObject(); + + createPipeline(new PutPipelineRequest(id, BytesReference.bytes(pipeline), XContentType.JSON)); + } + protected static void createPipeline(String pipelineId) throws IOException { XContentBuilder builder = buildRandomXContentPipeline(); createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType())); @@ -154,4 +204,32 @@ protected Settings restClientSettings() { .put(ThreadContext.PREFIX + ".Authorization", token) .build(); } + + protected Iterable searchAll(String... indices) throws IOException { + SearchRequest searchRequest = new SearchRequest(indices); + return searchAll(searchRequest); + } + + protected Iterable searchAll(SearchRequest searchRequest) throws IOException { + refreshIndexes(searchRequest.indices()); + SearchResponse search = highLevelClient().search(searchRequest, RequestOptions.DEFAULT); + return search.getHits(); + } + + protected void refreshIndexes(String... 
indices) throws IOException { + String joinedIndices = Arrays.stream(indices) + .collect(Collectors.joining(",")); + Response refreshResponse = client().performRequest(new Request("POST", "/" + joinedIndices + "/_refresh")); + assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); + } + + protected void createIndexWithMultipleShards(String index) throws IOException { + CreateIndexRequest indexRequest = new CreateIndexRequest(index); + int shards = randomIntBetween(8,10); + indexRequest.settings(Settings.builder() + .put("index.number_of_shards", shards) + .put("index.number_of_replicas", 0) + ); + highLevelClient().indices().create(indexRequest, RequestOptions.DEFAULT); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java index 6598800d76edb..965f1f627af7d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GrapRequestConvertersTests.java @@ -24,17 +24,16 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; -import org.elasticsearch.protocol.xpack.graph.Hop; +import org.elasticsearch.client.graph.GraphExploreRequest; +import org.elasticsearch.client.graph.Hop; import org.elasticsearch.test.ESTestCase; -import org.junit.Assert; import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.is; -public class GrapRequestConvertersTests extends ESTestCase{ +public class GrapRequestConvertersTests extends ESTestCase { public void testGraphExplore() throws Exception { Map expectedParams = new HashMap<>(); @@ -43,14 +42,14 @@ public void testGraphExplore() throws Exception { graphExploreRequest.sampleDiversityField("diversity"); graphExploreRequest.indices("index1", "index2"); graphExploreRequest.types("type1", "type2"); - int timeout = ESTestCase.randomIntBetween(10000, 20000); + int timeout = randomIntBetween(10000, 20000); graphExploreRequest.timeout(TimeValue.timeValueMillis(timeout)); - graphExploreRequest.useSignificance(ESTestCase.randomBoolean()); - int numHops = ESTestCase.randomIntBetween(1, 5); + graphExploreRequest.useSignificance(randomBoolean()); + int numHops = randomIntBetween(1, 5); for (int i = 0; i < numHops; i++) { int hopNumber = i + 1; QueryBuilder guidingQuery = null; - if (ESTestCase.randomBoolean()) { + if (randomBoolean()) { guidingQuery = new TermQueryBuilder("field" + hopNumber, "value" + hopNumber); } Hop hop = graphExploreRequest.createNextHop(guidingQuery); @@ -58,10 +57,10 @@ public void testGraphExplore() throws Exception { hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber); } Request request = GraphRequestConverters.explore(graphExploreRequest); - Assert.assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - Assert.assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint()); - Assert.assertEquals(expectedParams, request.getParameters()); - Assert.assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/index1,index2/type1,type2/_xpack/graph/_explore", request.getEndpoint()); + assertEquals(expectedParams, 
request.getParameters()); + assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters())); RequestConvertersTests.assertToXContentBody(graphExploreRequest, request.getEntity()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java index 4376b47d737b4..3673afa13896d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GraphIT.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; -import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; -import org.elasticsearch.protocol.xpack.graph.Hop; -import org.elasticsearch.protocol.xpack.graph.Vertex; -import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.client.graph.GraphExploreRequest; +import org.elasticsearch.client.graph.GraphExploreResponse; +import org.elasticsearch.client.graph.Hop; +import org.elasticsearch.client.graph.Vertex; +import org.elasticsearch.client.graph.VertexRequest; import org.hamcrest.Matchers; import org.junit.Before; @@ -136,4 +136,4 @@ public void testBadExplore() throws Exception { } -} \ No newline at end of file +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java new file mode 100644 index 0000000000000..f2040bc88da34 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -0,0 +1,286 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.indexlifecycle.AllocateAction; +import org.elasticsearch.client.indexlifecycle.DeleteAction; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.indexlifecycle.ForceMergeAction; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.client.indexlifecycle.OperationMode; +import org.elasticsearch.client.indexlifecycle.Phase; +import org.elasticsearch.client.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.is; + +public class IndexLifecycleIT extends ESRestHighLevelClientTestCase { + + public void testRemoveIndexLifecyclePolicy() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("rbh", Settings.builder().put("index.lifecycle.name", policyName).build()); + + 
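+ // Indices opt in to ILM through the "index.lifecycle.name" index setting, e.g.
+ //   Settings.builder().put("index.lifecycle.name", policyName).build()
+ // The settings check below confirms all three indices are managed by the policy
+ // before it is removed from "foo" and "rbh" only, leaving "baz" still managed.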
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + GetSettingsResponse settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertThat(settingsResponse.getSetting("foo", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("rbh", "index.lifecycle.name"), equalTo(policyName)); + + List indices = new ArrayList<>(); + indices.add("foo"); + indices.add("rbh"); + RemoveIndexLifecyclePolicyRequest removeReq = new RemoveIndexLifecyclePolicyRequest(indices); + RemoveIndexLifecyclePolicyResponse removeResp = execute(removeReq, highLevelClient().indexLifecycle()::removeIndexLifecyclePolicy, + highLevelClient().indexLifecycle()::removeIndexLifecyclePolicyAsync); + assertThat(removeResp.hasFailures(), is(false)); + assertThat(removeResp.getFailedIndexes().isEmpty(), is(true)); + + getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertNull(settingsResponse.getSetting("foo", "index.lifecycle.name")); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertNull(settingsResponse.getSetting("rbh", "index.lifecycle.name")); + } + + public void testStartStopILM() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", "bar").build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", "eggplant").build()); + createIndex("squash", Settings.EMPTY); + + LifecycleManagementStatusRequest statusRequest = new LifecycleManagementStatusRequest(); + LifecycleManagementStatusResponse statusResponse = execute( + statusRequest, + highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + + StopILMRequest stopReq = new StopILMRequest(); + AcknowledgedResponse stopResponse = execute(stopReq, highLevelClient().indexLifecycle()::stopILM, + highLevelClient().indexLifecycle()::stopILMAsync); + assertTrue(stopResponse.isAcknowledged()); + + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertThat(statusResponse.getOperationMode(), + Matchers.anyOf(equalTo(OperationMode.STOPPING), + equalTo(OperationMode.STOPPED))); + + StartILMRequest startReq = new StartILMRequest(); + AcknowledgedResponse startResponse = execute(startReq, highLevelClient().indexLifecycle()::startILM, + highLevelClient().indexLifecycle()::startILMAsync); + assertTrue(startResponse.isAcknowledged()); + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + } + + public void testExplainLifecycle() throws 
Exception { + Map<String, Phase> lifecyclePhases = new HashMap<>(); + Map<String, LifecycleAction> hotActions = Collections.singletonMap( + RolloverAction.NAME, + new RolloverAction(null, TimeValue.timeValueHours(50 * 24), null)); + Phase hotPhase = new Phase("hot", randomFrom(TimeValue.ZERO, null), hotActions); + lifecyclePhases.put("hot", hotPhase); + + Map<String, LifecycleAction> warmActions = new HashMap<>(); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, null, Collections.singletonMap("_name", "node-1"))); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1000)); + lifecyclePhases.put("warm", new Phase("warm", TimeValue.timeValueSeconds(1000), warmActions)); + + Map<String, LifecycleAction> coldActions = new HashMap<>(); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null)); + lifecyclePhases.put("cold", new Phase("cold", TimeValue.timeValueSeconds(2000), coldActions)); + + Map<String, LifecycleAction> deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + lifecyclePhases.put("delete", new Phase("delete", TimeValue.timeValueSeconds(3000), deleteActions)); + + LifecyclePolicy policy = new LifecyclePolicy(randomAlphaOfLength(10), lifecyclePhases); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + AcknowledgedResponse putResponse = execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync); + assertTrue(putResponse.isAcknowledged()); + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policy.getName()); + GetLifecyclePolicyResponse getResponse = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + long expectedPolicyModifiedDate = getResponse.getPolicies().get(policy.getName()).getModifiedDate(); + + + createIndex("foo-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "foo-alias").build(), "", "\"foo-alias\" : {}"); + + createIndex("baz-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "baz-alias").build(), "", "\"baz-alias\" : {}"); + + createIndex("squash", Settings.EMPTY); + + ExplainLifecycleRequest req = new ExplainLifecycleRequest(); + req.indices("foo-01", "baz-01", "squash"); + ExplainLifecycleResponse response = execute(req, highLevelClient().indexLifecycle()::explainLifecycle, + highLevelClient().indexLifecycle()::explainLifecycleAsync); + Map<String, IndexLifecycleExplainResponse> indexResponses = response.getIndexResponses(); + assertEquals(3, indexResponses.size()); + IndexLifecycleExplainResponse fooResponse = indexResponses.get("foo-01"); + assertNotNull(fooResponse); + assertTrue(fooResponse.managedByILM()); + assertEquals("foo-01", fooResponse.getIndex()); + assertEquals("hot", fooResponse.getPhase()); + assertEquals("rollover", fooResponse.getAction()); + assertEquals("attempt_rollover", fooResponse.getStep()); + assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), + 1L, expectedPolicyModifiedDate), fooResponse.getPhaseExecutionInfo()); + IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); + assertNotNull(bazResponse); + assertTrue(bazResponse.managedByILM()); + assertEquals("baz-01", bazResponse.getIndex()); + assertEquals("hot", bazResponse.getPhase()); + assertEquals("rollover", bazResponse.getAction()); + assertEquals("attempt_rollover",
bazResponse.getStep()); + IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); + assertNotNull(squashResponse); + assertFalse(squashResponse.managedByILM()); + assertEquals("squash", squashResponse.getIndex()); + } + + public void testDeleteLifecycle() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + DeleteLifecyclePolicyRequest deleteRequest = new DeleteLifecyclePolicyRequest(policy.getName()); + assertAcked(execute(deleteRequest, highLevelClient().indexLifecycle()::deleteLifecyclePolicy, + highLevelClient().indexLifecycle()::deleteLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policyName); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync)); + assertEquals(404, ex.status().getStatus()); + } + + public void testPutLifecycle() throws IOException { + String name = randomAlphaOfLengthBetween(5, 20); + LifecyclePolicy policy = createRandomPolicy(name); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(name); + GetLifecyclePolicyResponse response = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + assertEquals(policy, response.getPolicies().get(name).getPolicy()); + } + + public void testGetMultipleLifecyclePolicies() throws IOException { + int numPolicies = randomIntBetween(1, 10); + String[] policyNames = new String[numPolicies]; + LifecyclePolicy[] policies = new LifecyclePolicy[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(5, 10); + policies[i] = createRandomPolicy(policyNames[i]); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policies[i]); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + } + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(randomFrom(policyNames, null)); + GetLifecyclePolicyResponse response = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + List retrievedPolicies = Arrays.stream(response.getPolicies().values().toArray()) + .map(p -> ((LifecyclePolicyMetadata) p).getPolicy()).collect(Collectors.toList()); + assertThat(retrievedPolicies, hasItems(policies)); + } + + public void testRetryLifecycleStep() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); 
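+ // ILM's retry API only applies to an index whose lifecycle execution is in the
+ // ERROR step; the index created below has not failed any step, so the retry is
+ // expected to be rejected with a 400 illegal_argument_exception, as asserted next.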
+ createIndex("retry", Settings.builder().put("index.lifecycle.name", policy.getName()).build()); + RetryLifecyclePolicyRequest retryRequest = new RetryLifecyclePolicyRequest("retry"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute( + retryRequest, highLevelClient().indexLifecycle()::retryLifecycleStep, + highLevelClient().indexLifecycle()::retryLifecycleStepAsync + ) + ); + assertEquals(400, ex.status().getStatus()); + assertEquals( + "Elasticsearch exception [type=illegal_argument_exception, reason=cannot retry an action for an index [retry]" + + " that has not encountered an error when running a Lifecycle Policy]", + ex.getRootCause().getMessage() + ); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java new file mode 100644 index 0000000000000..1af29701bc7c8 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.client.RequestConvertersTests.randomIndicesNames; +import static org.elasticsearch.client.RequestConvertersTests.setRandomIndicesOptions; +import static org.elasticsearch.client.RequestConvertersTests.setRandomMasterTimeout; +import static org.elasticsearch.client.RequestConvertersTests.setRandomTimeoutTimeValue; +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; +import static org.hamcrest.CoreMatchers.equalTo; + +public class IndexLifecycleRequestConvertersTests extends ESTestCase { + + public void testGetLifecyclePolicy() { + String[] policies = rarely() ? null : randomIndicesNames(0, 10); + GetLifecyclePolicyRequest req = new GetLifecyclePolicyRequest(policies); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.getLifecyclePolicy(req); + assertEquals(request.getMethod(), HttpGet.METHOD_NAME); + String policiesStr = Strings.arrayToCommaDelimitedString(policies); + assertEquals(request.getEndpoint(), "/_ilm/policy" + (policiesStr.isEmpty() ? 
"" : ("/" + policiesStr))); + assertEquals(request.getParameters(), expectedParams); + } + + public void testPutLifecyclePolicy() throws Exception { + String name = randomAlphaOfLengthBetween(2, 20); + LifecyclePolicy policy = createRandomPolicy(name); + PutLifecyclePolicyRequest req = new PutLifecyclePolicyRequest(policy); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.putLifecyclePolicy(req); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_ilm/policy/" + name, request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + } + + public void testDeleteLifecycle() { + String lifecycleName = randomAlphaOfLengthBetween(2,20); + DeleteLifecyclePolicyRequest req = new DeleteLifecyclePolicyRequest(lifecycleName); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.deleteLifecyclePolicy(req); + assertEquals(request.getMethod(), HttpDelete.METHOD_NAME); + assertEquals(request.getEndpoint(), "/_ilm/policy/" + lifecycleName); + assertEquals(request.getParameters(), expectedParams); + } + + public void testRemoveIndexLifecyclePolicy() { + Map expectedParams = new HashMap<>(); + String[] indices = randomIndicesNames(0, 10); + IndicesOptions indicesOptions = setRandomIndicesOptions(IndicesOptions.strictExpandOpen(), expectedParams); + RemoveIndexLifecyclePolicyRequest req = new RemoveIndexLifecyclePolicyRequest(Arrays.asList(indices), indicesOptions); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.removeIndexLifecyclePolicy(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? 
"" : (idxString + "/")) + "_ilm/remove")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testStartILM() throws Exception { + StartILMRequest req = new StartILMRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.startILM(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/start")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testStopILM() throws Exception { + StopILMRequest req = new StopILMRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.stopILM(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/stop")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testLifecycleManagementStatus() throws Exception { + LifecycleManagementStatusRequest req = new LifecycleManagementStatusRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = IndexLifecycleRequestConverters.lifecycleManagementStatus(req); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getEndpoint(), equalTo("/_ilm/status")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testExplainLifecycle() throws Exception { + ExplainLifecycleRequest req = new ExplainLifecycleRequest(); + String[] indices = rarely() ? null : randomIndicesNames(0, 10); + req.indices(indices); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req, expectedParams); + setRandomIndicesOptions(req::indicesOptions, req::indicesOptions, expectedParams); + + Request request = IndexLifecycleRequestConverters.explainLifecycle(req); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm/explain")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } + + public void testRetryLifecycle() throws Exception { + String[] indices = randomIndicesNames(1, 10); + RetryLifecyclePolicyRequest req = new RetryLifecyclePolicyRequest(indices); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); + setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + Request request = IndexLifecycleRequestConverters.retryLifecycle(req); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + String idxString = Strings.arrayToCommaDelimitedString(indices); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? 
"" : (idxString + "/")) + "_ilm/retry")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 832aba51e2b41..053f46f8496b0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -522,6 +522,9 @@ public void testUpdateAliases() throws IOException { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(index).aliases(alias); + if (randomBoolean()) { + addAction.writeIndex(randomBoolean()); + } addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}"); aliasesAddRequest.addAliasAction(addAction); AcknowledgedResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases, @@ -535,6 +538,8 @@ public void testUpdateAliases() throws IOException { Map filter = (Map) getAlias.get("filter"); Map term = (Map) filter.get("term"); assertEquals(2016, term.get("year")); + Boolean isWriteIndex = (Boolean) getAlias.get("is_write_index"); + assertThat(isWriteIndex, equalTo(addAction.writeIndex())); String alias2 = "alias2"; IndicesAliasesRequest aliasesAddRemoveRequest = new IndicesAliasesRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index d4fdfb2c995df..bb0dbf6680127 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedRequestTests; @@ -59,6 +60,8 @@ import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.JobUpdateTests; +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.client.ml.job.config.MlFilterTests; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; @@ -511,6 +514,20 @@ public void testDeleteCalendar() { assertEquals("/_xpack/ml/calendars/" + deleteCalendarRequest.getCalendarId(), request.getEndpoint()); } + public void testPutFilter() throws IOException { + MlFilter filter = MlFilterTests.createRandom("foo"); + PutFilterRequest putFilterRequest = new PutFilterRequest(filter); + + Request request = MLRequestConverters.putFilter(putFilterRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/filters/foo")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + MlFilter parsedFilter = MlFilter.PARSER.apply(parser, null).build(); + assertThat(parsedFilter, 
equalTo(filter)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index cac9f533501b5..ff3218795e435 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -58,24 +58,29 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedResponse; +import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.calendars.CalendarTests; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedState; import org.elasticsearch.client.ml.datafeed.DatafeedStats; +import org.elasticsearch.client.ml.datafeed.DatafeedUpdate; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobState; import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; @@ -357,6 +362,33 @@ public void testPutDatafeed() throws Exception { assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices())); } + public void testUpdateDatafeed() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + execute(new PutJobRequest(job), machineLearningClient::putJob, machineLearningClient::putJobAsync); + + String datafeedId = "datafeed-" + jobId; + DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build(); + + PutDatafeedResponse response = machineLearningClient.putDatafeed(new PutDatafeedRequest(datafeedConfig), RequestOptions.DEFAULT); + + DatafeedConfig createdDatafeed = response.getResponse(); + assertThat(createdDatafeed.getId(), equalTo(datafeedId)); + assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices())); + + DatafeedUpdate datafeedUpdate = DatafeedUpdate.builder(datafeedId).setIndices("some_other_data_index").setScrollSize(10).build(); + + response = execute(new UpdateDatafeedRequest(datafeedUpdate), + machineLearningClient::updateDatafeed, + machineLearningClient::updateDatafeedAsync); + + DatafeedConfig 
updatedDatafeed = response.getResponse(); + assertThat(datafeedUpdate.getId(), equalTo(updatedDatafeed.getId())); + assertThat(datafeedUpdate.getIndices(), equalTo(updatedDatafeed.getIndices())); + assertThat(datafeedUpdate.getScrollSize(), equalTo(updatedDatafeed.getScrollSize())); + } + public void testGetDatafeed() throws Exception { String jobId1 = "test-get-datafeed-job-1"; String jobId2 = "test-get-datafeed-job-2"; @@ -830,6 +862,22 @@ public void testDeleteCalendar() throws IOException { assertThat(exception.status().getStatus(), equalTo(404)); } + public void testFilterJob() throws Exception { + String filterId = "filter-job-test"; + MlFilter mlFilter = MlFilter.builder(filterId) + .setDescription(randomAlphaOfLength(10)) + .setItems(generateRandomStringArray(10, 10, false, false)) + .build(); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + PutFilterResponse putFilterResponse = execute(new PutFilterRequest(mlFilter), + machineLearningClient::putFilter, + machineLearningClient::putFilterAsync); + MlFilter createdFilter = putFilterResponse.getResponse(); + + assertThat(createdFilter, equalTo(mlFilter)); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java index 03614537bfe78..f83986829d529 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java @@ -20,8 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; +import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java index 97a2cc16a7ef9..e3adefcb262a3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -28,7 +28,7 @@ public class MigrationRequestConvertersTests extends ESTestCase { - public static void testGetMigrationAssistance() { + public void testGetMigrationAssistance() { IndexUpgradeInfoRequest upgradeInfoRequest = new IndexUpgradeInfoRequest(); String expectedEndpoint = "/_xpack/migration/assistance"; if (randomBoolean()) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java index 15272ad80a655..9a0a861cc132d 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java @@ -108,7 +108,7 @@ public void testRankEvalRequest() throws IOException { // now try this when test2 is closed client().performRequest(new Request("POST", "index2/_close")); - rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 0dc0a67cf7e16..066fb5d8cc903 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -100,6 +101,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; +import org.hamcrest.Matchers; import java.io.IOException; import java.io.InputStream; @@ -860,6 +862,21 @@ public void testBulkWithDifferentContentTypes() throws IOException { } } + public void testGlobalPipelineOnBulkRequest() throws IOException { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.pipeline("xyz"); + bulkRequest.add(new IndexRequest("test", "doc", "11") + .source(XContentType.JSON, "field", "bulk1")); + bulkRequest.add(new IndexRequest("test", "doc", "12") + .source(XContentType.JSON, "field", "bulk2")); + bulkRequest.add(new IndexRequest("test", "doc", "13") + .source(XContentType.JSON, "field", "bulk3")); + + Request request = RequestConverters.bulk(bulkRequest); + + assertThat(request.getParameters(), Matchers.hasEntry("pipeline", "xyz")); + } + public void testSearchNullSource() throws IOException { SearchRequest searchRequest = new SearchRequest(); Request request = RequestConverters.search(searchRequest); @@ -952,6 +969,72 @@ public void testSearchNullIndicesAndTypes() { expectThrows(NullPointerException.class, () -> new SearchRequest().types((String[]) null)); } + public void testCountNotNullSource() throws IOException { + // the CountRequest constructor always creates a SearchSourceBuilder, so the entity is never null + CountRequest countRequest = new CountRequest(); + Request request = RequestConverters.count(countRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_count", request.getEndpoint()); + assertNotNull(request.getEntity()); + } + + public void testCount() throws Exception { + String[] indices = randomIndicesNames(0, 5); + CountRequest countRequest = new CountRequest(indices); + + int numTypes = randomIntBetween(0, 5); + String[] types = new String[numTypes]; + for (int i = 0; i < numTypes; i++) { + types[i] = "type-" + randomAlphaOfLengthBetween(2, 5); + } + countRequest.types(types); + + Map<String, String> expectedParams = new HashMap<>();
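+ // Editor's illustration (hedged, not part of the original change): the converter
+ // under test joins indices and types into an endpoint of the shape
+ // /{indices}/{types}/_count, collapsing empty segments, e.g.
+ //   new CountRequest()         -> POST /_count
+ //   new CountRequest("a", "b") -> POST /a,b/_count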
+ setRandomCountParams(countRequest, expectedParams); + setRandomIndicesOptions(countRequest::indicesOptions, countRequest::indicesOptions, expectedParams); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + if (frequently()) { + if (randomBoolean()) { + searchSourceBuilder.minScore(randomFloat()); + } + } + countRequest.source(searchSourceBuilder); + Request request = RequestConverters.count(countRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + String type = String.join(",", types); + if (Strings.hasLength(type)) { + endpoint.add(type); + } + endpoint.add("_count"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(searchSourceBuilder, request.getEntity()); + } + + public void testCountNullIndicesAndTypes() { + expectThrows(NullPointerException.class, () -> new CountRequest((String[]) null)); + expectThrows(NullPointerException.class, () -> new CountRequest().indices((String[]) null)); + expectThrows(NullPointerException.class, () -> new CountRequest().types((String[]) null)); + } + + private static void setRandomCountParams(CountRequest countRequest, + Map<String, String> expectedParams) { + if (randomBoolean()) { + countRequest.routing(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("routing", countRequest.routing()); + } + if (randomBoolean()) { + countRequest.preference(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("preference", countRequest.preference()); + } + } + public void testMultiSearch() throws IOException { int numberOfSearchRequests = randomIntBetween(0, 32); MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); @@ -972,7 +1055,8 @@ public void testMultiSearch() throws IOException { IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions(); searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(), - msearchDefault.allowAliasesToMultipleIndices(), msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases())); + msearchDefault.allowAliasesToMultipleIndices(), msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases(), + msearchDefault.ignoreThrottled())); multiSearchRequest.add(searchRequest); } @@ -1514,13 +1598,13 @@ private static void randomizeFetchSourceContextParams(Consumer<FetchSourceContext> consumer, Map<String, String> expectedParams) { int numIncludes = randomIntBetween(0, 5); String[] includes = new String[numIncludes]; String includesParam = randomFields(includes); if (numIncludes > 0) { - expectedParams.put("_source_include", includesParam); + expectedParams.put("_source_includes", includesParam); } int numExcludes = randomIntBetween(0, 5); String[] excludes = new String[numExcludes]; String excludesParam = randomFields(excludes); if (numExcludes > 0) { - expectedParams.put("_source_exclude", excludesParam); + expectedParams.put("_source_excludes", excludesParam); } consumer.accept(new FetchSourceContext(true, includes, excludes)); } @@ -1579,6 +1663,24 @@ static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter, + static IndicesOptions setRandomIndicesOptions(IndicesOptions indicesOptions, Map<String, String> expectedParams) { + if (randomBoolean()) { + indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + } + expectedParams.put("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable())); + expectedParams.put("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices()));
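+ // Hedged aside (editor's note, not in the original PR): ignore_unavailable and
+ // allow_no_indices are always emitted above, while the two wildcard flags fold
+ // into a single expand_wildcards value ("open,closed", "open", "closed", or
+ // "none"), as the branches below spell out.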
+ if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) { + expectedParams.put("expand_wildcards", "open,closed"); + } else if (indicesOptions.expandWildcardsOpen()) { + expectedParams.put("expand_wildcards", "open"); + } else if (indicesOptions.expandWildcardsClosed()) { + expectedParams.put("expand_wildcards", "closed"); + } else { + expectedParams.put("expand_wildcards", "none"); + } + return indicesOptions; + } + static void setRandomIncludeDefaults(GetIndexRequest request, Map<String, String> expectedParams) { if (randomBoolean()) { boolean includeDefaults = randomBoolean(); @@ -1629,6 +1731,17 @@ static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout, } } + static void setRandomTimeoutTimeValue(Consumer<TimeValue> setter, TimeValue defaultTimeout, + Map<String, String> expectedParams) { + if (randomBoolean()) { + TimeValue timeout = TimeValue.parseTimeValue(randomTimeValue(), "random_timeout"); + setter.accept(timeout); + expectedParams.put("timeout", timeout.getStringRep()); + } else { + expectedParams.put("timeout", defaultTimeout.getStringRep()); + } + } + static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) { setRandomMasterTimeout(request::masterNodeTimeout, expectedParams); } @@ -1649,6 +1762,16 @@ static void setRandomMasterTimeout(Consumer<String> setter, Map<String, String> } } + static void setRandomMasterTimeout(Consumer<TimeValue> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) { + if (randomBoolean()) { + TimeValue masterTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout"); + setter.accept(masterTimeout); + expectedParams.put("master_timeout", masterTimeout.getStringRep()); + } else { + expectedParams.put("master_timeout", defaultTimeout.getStringRep()); + } + } + static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, Map<String, String> expectedParams) { setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index d40c3196e54f4..38810285a5d1c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -49,6 +49,13 @@ import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.indexlifecycle.AllocateAction; +import org.elasticsearch.client.indexlifecycle.DeleteAction; +import org.elasticsearch.client.indexlifecycle.ForceMergeAction; +import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import org.elasticsearch.client.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; @@ -618,7 +625,7 @@ public void testDefaultNamedXContents() { public void testProvidedNamedXContents() { List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(10, namedXContents.size()); + assertEquals(16, namedXContents.size()); Map<Class<?>, Integer> categories = new HashMap<>(); List<String> names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -628,7 +635,7
@@ public void testProvidedNamedXContents() { categories.put(namedXContent.categoryClass, counter + 1); } } - assertEquals(3, categories.size()); + assertEquals(4, categories.size()); assertEquals(Integer.valueOf(2), categories.get(Aggregation.class)); assertTrue(names.contains(ChildrenAggregationBuilder.NAME)); assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME)); @@ -642,13 +649,19 @@ public void testProvidedNamedXContents() { assertTrue(names.contains(MeanReciprocalRank.NAME)); assertTrue(names.contains(DiscountedCumulativeGain.NAME)); assertTrue(names.contains(ExpectedReciprocalRank.NAME)); + assertEquals(Integer.valueOf(6), categories.get(LifecycleAction.class)); + assertTrue(names.contains(AllocateAction.NAME)); + assertTrue(names.contains(DeleteAction.NAME)); + assertTrue(names.contains(ForceMergeAction.NAME)); + assertTrue(names.contains(ReadOnlyAction.NAME)); + assertTrue(names.contains(RolloverAction.NAME)); + assertTrue(names.contains(ShrinkAction.NAME)); } public void testApiNamingConventions() throws Exception { //this list should be empty once the high-level client is feature complete String[] notYetSupportedApi = new String[]{ "cluster.remote_info", - "count", "create", "get_source", "indices.delete_alias", @@ -719,7 +732,7 @@ public void testApiNamingConventions() throws Exception { methods.containsKey(apiName.substring(0, apiName.length() - 6))); assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE)); assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); - if (apiName.equals("security.get_ssl_certificates_async")) { + if (apiName.equals("security.authenticate_async") || apiName.equals("security.get_ssl_certificates_async")) { assertEquals(2, method.getParameterTypes().length); assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); @@ -744,7 +757,8 @@ public void testApiNamingConventions() throws Exception { assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); //a few methods don't accept a request object as argument - if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates")) { + if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates") + || apiName.equals("security.authenticate")) { assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); assertThat("the parameter to method [" + method + "] is the wrong type", method.getParameterTypes()[0], equalTo(RequestOptions.class)); @@ -770,7 +784,8 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("watcher.") == false && apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false && - apiName.startsWith("security.") == false) { + apiName.startsWith("security.") == false && + apiName.startsWith("index_lifecycle.") == false) { apiNotFound.add(apiName); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index 7a5f873d45cc7..ffe75bfc1b1c3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -39,6 +39,8 @@ import 
org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobResponse; +import org.elasticsearch.client.rollup.StartRollupJobRequest; +import org.elasticsearch.client.rollup.StartRollupJobResponse; import org.elasticsearch.client.rollup.RollableIndexCaps; import org.elasticsearch.client.rollup.RollupJobCaps; import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; @@ -150,7 +152,7 @@ public void testDeleteRollupJob() throws Exception { PutRollupJobRequest putRollupJobRequest = new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); final RollupClient rollupClient = highLevelClient().rollup(); - PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); DeleteRollupJobRequest deleteRollupJobRequest = new DeleteRollupJobRequest(id); DeleteRollupJobResponse deleteRollupJobResponse = highLevelClient().rollup() .deleteRollupJob(deleteRollupJobRequest, RequestOptions.DEFAULT); @@ -164,8 +166,7 @@ public void testDeleteMissingRollupJob() { assertThat(responseException.status().getStatus(), is(404)); } - @SuppressWarnings("unchecked") - public void testPutAndGetRollupJob() throws Exception { + public void testPutStartAndGetRollupJob() throws Exception { // TODO expand this to also test with histogram and terms? final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); @@ -178,9 +179,9 @@ public void testPutAndGetRollupJob() throws Exception { PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); assertTrue(response.isAcknowledged()); - // TODO Replace this with the Rollup Start Job API - Response startResponse = client().performRequest(new Request("POST", "/_xpack/rollup/job/" + id + "/_start")); - assertEquals(RestStatus.OK.getStatus(), startResponse.getHttpResponse().getStatusLine().getStatusCode()); + StartRollupJobRequest startRequest = new StartRollupJobRequest(id); + StartRollupJobResponse startResponse = execute(startRequest, rollupClient::startRollupJob, rollupClient::startRollupJobAsync); + assertTrue(startResponse.isAcknowledged()); assertBusy(() -> { SearchResponse searchResponse = highLevelClient().search(new SearchRequest(rollupIndex), RequestOptions.DEFAULT); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java index df7b2bbfca19e..f9363c7a429db 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupRequestConvertersTests.java @@ -20,17 +20,19 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobRequest; +import org.elasticsearch.client.rollup.StartRollupJobRequest; import 
org.elasticsearch.client.rollup.job.config.RollupJobConfig; import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; public class RollupRequestConvertersTests extends ESTestCase { @@ -47,6 +49,18 @@ public void testPutJob() throws IOException { RequestConvertersTests.assertToXContentBody(put, request.getEntity()); } + public void testStartJob() throws IOException { + String jobId = randomAlphaOfLength(5); + + StartRollupJobRequest startJob = new StartRollupJobRequest(jobId); + + Request request = RollupRequestConverters.startJob(startJob); + assertThat(request.getEndpoint(), equalTo("/_xpack/rollup/job/" + jobId + "/_start")); + assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); + assertThat(request.getParameters().keySet(), empty()); + assertThat(request.getEntity(), nullValue()); + } + public void testGetJob() { boolean getAll = randomBoolean(); String job = getAll ? "_all" : RequestConvertersTests.randomIndicesNames(1, 1)[0]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index f6aa97def28e4..e6a2f5d617899 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -35,6 +35,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; @@ -1233,4 +1235,69 @@ private static void assertSearchHeader(SearchResponse searchResponse) { assertEquals(0, searchResponse.getShardFailures().length); assertEquals(SearchResponse.Clusters.EMPTY, searchResponse.getClusters()); } + + public void testCountOneIndexNoQuery() throws IOException { + CountRequest countRequest = new CountRequest("index"); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(5, countResponse.getCount()); + } + + public void testCountMultipleIndicesNoQuery() throws IOException { + CountRequest countRequest = new CountRequest("index", "index1"); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(7, countResponse.getCount()); + } + + public void testCountAllIndicesNoQuery() throws IOException { + CountRequest countRequest = new CountRequest(); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(12, countResponse.getCount()); + } + + public void testCountOneIndexMatchQuery() throws IOException { + CountRequest countRequest = new CountRequest("index"); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); 
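+ // Editor's sketch (assumptions flagged, not part of the original test): the
+ // count call above is roughly equivalent to this low-level request, assuming
+ // the default JSON content type:
+ //   Request r = new Request("POST", "/index/_count");
+ //   r.setJsonEntity("{\"query\": {\"match\": {\"num\": 10}}}");
+ // Only the hit total and a shard header come back; no documents are fetched.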
+ assertCountHeader(countResponse); + assertEquals(1, countResponse.getCount()); + } + + public void testCountMultipleIndicesMatchQueryUsingConstructor() throws IOException { + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1")); + CountRequest countRequest = new CountRequest(new String[]{"index1", "index2", "index3"}, sourceBuilder); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + + } + + public void testCountMultipleIndicesMatchQuery() throws IOException { + + CountRequest countRequest = new CountRequest("index1", "index2", "index3"); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + } + + public void testCountAllIndicesMatchQuery() throws IOException { + + CountRequest countRequest = new CountRequest(); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + } + + private static void assertCountHeader(CountResponse countResponse) { + assertEquals(0, countResponse.getSkippedShards()); + assertEquals(0, countResponse.getFailedShards()); + assertThat(countResponse.getTotalShards(), greaterThan(0)); + assertEquals(countResponse.getTotalShards(), countResponse.getSuccessfulShards()); + assertEquals(0, countResponse.getShardFailures().length); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java new file mode 100644 index 0000000000000..74a4d58e2bf77 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.security.AuthenticateResponse; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.common.CharArrays; + +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; + +public class SecurityIT extends ESRestHighLevelClientTestCase { + + public void testAuthenticate() throws Exception { + final SecurityClient securityClient = highLevelClient().security(); + // test fixture: put enabled user + final PutUserRequest putUserRequest = randomPutUserRequest(true); + final PutUserResponse putUserResponse = execute(putUserRequest, securityClient::putUser, securityClient::putUserAsync); + assertThat(putUserResponse.isCreated(), is(true)); + + // authenticate correctly + final String basicAuthHeader = basicAuthHeader(putUserRequest.getUsername(), putUserRequest.getPassword()); + final AuthenticateResponse authenticateResponse = execute(securityClient::authenticate, securityClient::authenticateAsync, + authorizationRequestOptions(basicAuthHeader)); + + assertThat(authenticateResponse.getUser().username(), is(putUserRequest.getUsername())); + if (putUserRequest.getRoles().isEmpty()) { + assertThat(authenticateResponse.getUser().roles(), is(empty())); + } else { + assertThat(authenticateResponse.getUser().roles(), contains(putUserRequest.getRoles().toArray())); + } + assertThat(authenticateResponse.getUser().metadata(), is(putUserRequest.getMetadata())); + assertThat(authenticateResponse.getUser().fullName(), is(putUserRequest.getFullName())); + assertThat(authenticateResponse.getUser().email(), is(putUserRequest.getEmail())); + assertThat(authenticateResponse.enabled(), is(true)); + + // delete user + final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, "/_xpack/security/user/" + putUserRequest.getUsername()); + highLevelClient().getLowLevelClient().performRequest(deleteUserRequest); + + // authentication no longer works + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> execute(securityClient::authenticate, + securityClient::authenticateAsync, authorizationRequestOptions(basicAuthHeader))); + assertThat(e.getMessage(), containsString("unable to authenticate user [" + putUserRequest.getUsername() + "]")); + } + + private static PutUserRequest randomPutUserRequest(boolean enabled) { + final String username = randomAlphaOfLengthBetween(1, 4); + final char[] password = randomAlphaOfLengthBetween(6, 10).toCharArray(); + final List roles = Arrays.asList(generateRandomStringArray(3, 3, false, true)); + final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); + final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); + final Map metadata; + metadata = new HashMap<>(); + if (randomBoolean()) { + metadata.put("string", null); + } else { + metadata.put("string", randomAlphaOfLengthBetween(0, 4)); + } + if (randomBoolean()) { + metadata.put("string_list", null); + } else { + metadata.put("string_list", 
Arrays.asList(generateRandomStringArray(4, 4, false, true))); + } + return new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, RefreshPolicy.IMMEDIATE); + } + + private static String basicAuthHeader(String username, char[] password) { + final String concat = new StringBuilder().append(username).append(':').append(password).toString(); + final byte[] concatBytes = CharArrays.toUtf8Bytes(concat.toCharArray()); + return "Basic " + Base64.getEncoder().encodeToString(concatBytes); + } + + private static RequestOptions authorizationRequestOptions(String authorizationHeader) { + final RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader("Authorization", authorizationHeader); + return builder.build(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 4267e2385801c..e0499c621f7ba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -19,13 +19,16 @@ package org.elasticsearch.client; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.security.CreateTokenRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.GetRoleMappingsRequest; import org.elasticsearch.client.security.ChangePasswordRequest; import org.elasticsearch.client.security.PutRoleMappingRequest; import org.elasticsearch.client.security.PutUserRequest; @@ -33,6 +36,7 @@ import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -102,6 +106,25 @@ public void testPutRoleMapping() throws IOException { assertToXContentBody(putRoleMappingRequest, request.getEntity()); } + public void testGetRoleMappings() throws IOException { + int noOfRoleMappingNames = randomIntBetween(0, 2); + final String[] roleMappingNames = + randomArray(noOfRoleMappingNames, noOfRoleMappingNames, String[]::new, () -> randomAlphaOfLength(5)); + final GetRoleMappingsRequest getRoleMappingsRequest = new GetRoleMappingsRequest(roleMappingNames); + + final Request request = SecurityRequestConverters.getRoleMappings(getRoleMappingsRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + if (noOfRoleMappingNames == 0) { + assertEquals("/_xpack/security/role_mapping", request.getEndpoint()); + } else { + assertEquals("/_xpack/security/role_mapping/" + + Strings.collectionToCommaDelimitedString(getRoleMappingsRequest.getRoleMappingNames()), request.getEndpoint()); + } + assertEquals(Collections.emptyMap(), request.getParameters()); + assertNull(request.getEntity()); + } + public void testEnableUser() { final String username = 
randomAlphaOfLengthBetween(1, 12); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); @@ -189,4 +212,34 @@ public void testDeleteRole() { assertEquals(expectedParams, request.getParameters()); assertNull(request.getEntity()); } + + public void testCreateTokenWithPasswordGrant() throws Exception { + final String username = randomAlphaOfLengthBetween(1, 12); + final String password = randomAlphaOfLengthBetween(8, 12); + CreateTokenRequest createTokenRequest = CreateTokenRequest.passwordGrant(username, password.toCharArray()); + Request request = SecurityRequestConverters.createToken(createTokenRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/oauth2/token", request.getEndpoint()); + assertEquals(0, request.getParameters().size()); + assertToXContentBody(createTokenRequest, request.getEntity()); + } + + public void testCreateTokenWithRefreshTokenGrant() throws Exception { + final String refreshToken = randomAlphaOfLengthBetween(8, 24); + CreateTokenRequest createTokenRequest = CreateTokenRequest.refreshTokenGrant(refreshToken); + Request request = SecurityRequestConverters.createToken(createTokenRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/oauth2/token", request.getEndpoint()); + assertEquals(0, request.getParameters().size()); + assertToXContentBody(createTokenRequest, request.getEntity()); + } + + public void testCreateTokenWithClientCredentialsGrant() throws Exception { + CreateTokenRequest createTokenRequest = CreateTokenRequest.clientCredentialsGrant(); + Request request = SecurityRequestConverters.createToken(createTokenRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/security/oauth2/token", request.getEndpoint()); + assertEquals(0, request.getParameters().size()); + assertToXContentBody(createTokenRequest, request.getEntity()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java new file mode 100644 index 0000000000000..8024aa0188598 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +public class TimedRequestTests extends ESTestCase { + + public void testDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + assertEquals(timedRequest.timeout(), TimedRequest.DEFAULT_ACK_TIMEOUT); + assertEquals(timedRequest.masterNodeTimeout(), TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT); + } + + public void testNonDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); + TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0,1000)); + timedRequest.setTimeout(timeout); + timedRequest.setMasterTimeout(masterTimeout); + assertEquals(timedRequest.timeout(), timeout); + assertEquals(timedRequest.masterNodeTimeout(), masterTimeout); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java index b069d211b2ee8..c4a2242f901f7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java @@ -35,10 +35,10 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.client.watcher.DeleteWatchRequest; +import org.elasticsearch.client.watcher.DeleteWatchResponse; +import org.elasticsearch.client.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.PutWatchResponse; import org.elasticsearch.rest.RestStatus; import static org.hamcrest.Matchers.is; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java index df6f697fb975a..b0b04fd0e5bd5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherRequestConvertersTests.java @@ -29,8 +29,8 @@ import org.elasticsearch.client.watcher.StopWatchServiceRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.DeleteWatchRequest; +import org.elasticsearch.client.watcher.PutWatchRequest; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayOutputStream; @@ -56,11 +56,9 @@ public void testStopWatchService() { } public void testPutWatch() throws Exception { - PutWatchRequest putWatchRequest = new PutWatchRequest(); String watchId = randomAlphaOfLength(10); - putWatchRequest.setId(watchId); String body = randomAlphaOfLength(20); - putWatchRequest.setSource(new BytesArray(body), XContentType.JSON); + PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, new BytesArray(body), XContentType.JSON); Map expectedParams = new HashMap<>(); if (randomBoolean()) { @@ -94,9 
+92,8 @@ public void testDeactivateWatch() { } public void testDeleteWatch() { - DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(); String watchId = randomAlphaOfLength(10); - deleteWatchRequest.setId(watchId); + DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(watchId); Request request = WatcherRequestConverters.deleteWatch(deleteWatchRequest); assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java new file mode 100644 index 0000000000000..1030f4401e160 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +//similar to SearchRequestTests as CountRequest inline several members (and functionality) from SearchRequest +public class CountRequestTests extends ESTestCase { + + public void testIllegalArguments() { + CountRequest countRequest = new CountRequest(); + assertNotNull(countRequest.indices()); + assertNotNull(countRequest.indicesOptions()); + assertNotNull(countRequest.types()); + + NullPointerException e = expectThrows(NullPointerException.class, () -> countRequest.indices((String[]) null)); + assertEquals("indices must not be null", e.getMessage()); + e = expectThrows(NullPointerException.class, () -> countRequest.indices((String) null)); + assertEquals("index must not be null", e.getMessage()); + + e = expectThrows(NullPointerException.class, () -> countRequest.indicesOptions(null)); + assertEquals("indicesOptions must not be null", e.getMessage()); + + e = expectThrows(NullPointerException.class, () -> countRequest.types((String[]) null)); + assertEquals("types must not be null", e.getMessage()); + e = expectThrows(NullPointerException.class, () -> countRequest.types((String) null)); + assertEquals("type must not be null", e.getMessage()); + + e = expectThrows(NullPointerException.class, () -> countRequest.source(null)); + assertEquals("source must not be null", e.getMessage()); + + } + + public void testEqualsAndHashcode() { + checkEqualsAndHashCode(createCountRequest(), CountRequestTests::copyRequest, this::mutate); + } + + private CountRequest 
createCountRequest() { + CountRequest countRequest = new CountRequest("index"); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); + return countRequest; + } + + private CountRequest mutate(CountRequest countRequest) { + CountRequest mutation = copyRequest(countRequest); + List mutators = new ArrayList<>(); + mutators.add(() -> mutation.indices(ArrayUtils.concat(countRequest.indices(), new String[]{randomAlphaOfLength(10)}))); + mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(countRequest.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())))); + mutators.add(() -> mutation.types(ArrayUtils.concat(countRequest.types(), new String[]{randomAlphaOfLength(10)}))); + mutators.add(() -> mutation.preference(randomValueOtherThan(countRequest.preference(), () -> randomAlphaOfLengthBetween(3, 10)))); + mutators.add(() -> mutation.routing(randomValueOtherThan(countRequest.routing(), () -> randomAlphaOfLengthBetween(3, 10)))); + randomFrom(mutators).run(); + return mutation; + } + + private static CountRequest copyRequest(CountRequest countRequest) { + CountRequest result = new CountRequest(); + result.indices(countRequest.indices()); + result.indicesOptions(countRequest.indicesOptions()); + result.types(countRequest.types()); + result.routing(countRequest.routing()); + result.preference(countRequest.preference()); + if (countRequest.source() != null) { + result.source(countRequest.source()); + } + return result; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java new file mode 100644 index 0000000000000..c2fc668d604e5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class CountResponseTests extends ESTestCase { + + // Not comparing XContent for equivalence as we cannot compare the ShardSearchFailure#cause, because it will be wrapped in an outer + // ElasticSearchException. Best effort: try to check that the original message appears somewhere in the rendered xContent + // For more see ShardSearchFailureTests. + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + CountResponse::fromXContent) + .supportsUnknownFields(false) + .assertEqualsConsumer(this::assertEqualInstances) + .assertToXContentEquivalence(false) + .test(); + } + + private CountResponse createTestInstance() { + long count = 5; + Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + int numFailures = randomIntBetween(1, 5); + ShardSearchFailure[] failures = new ShardSearchFailure[numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = createShardFailureTestItem(); + } + CountResponse.ShardStats shardStats = new CountResponse.ShardStats(successfulShards, totalShards, skippedShards, + randomBoolean() ? 
ShardSearchFailure.EMPTY_ARRAY : failures); + return new CountResponse(count, terminatedEarly, shardStats); + } + + private void toXContent(CountResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(CountResponse.COUNT.getPreferredName(), response.getCount()); + if (response.isTerminatedEarly() != null) { + builder.field(CountResponse.TERMINATED_EARLY.getPreferredName(), response.isTerminatedEarly()); + } + toXContent(response.getShardStats(), builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + } + + private void toXContent(CountResponse.ShardStats stats, XContentBuilder builder, ToXContent.Params params) throws IOException { + RestActions.buildBroadcastShardsHeader(builder, params, stats.getTotalShards(), stats.getSuccessfulShards(), stats + .getSkippedShards(), stats.getShardFailures().length, stats.getShardFailures()); + } + + @SuppressWarnings("Duplicates") + private static ShardSearchFailure createShardFailureTestItem() { + String randomMessage = randomAlphaOfLengthBetween(3, 20); + Exception ex = new ParsingException(0, 0, randomMessage, new IllegalArgumentException("some bad argument")); + SearchShardTarget searchShardTarget = null; + if (randomBoolean()) { + String nodeId = randomAlphaOfLengthBetween(5, 10); + String indexName = randomAlphaOfLengthBetween(5, 10); + searchShardTarget = new SearchShardTarget(nodeId, + new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), null, null); + } + return new ShardSearchFailure(ex, searchShardTarget); + } + + private void assertEqualInstances(CountResponse expectedInstance, CountResponse newInstance) { + assertEquals(expectedInstance.getCount(), newInstance.getCount()); + assertEquals(expectedInstance.status(), newInstance.status()); + assertEquals(expectedInstance.isTerminatedEarly(), newInstance.isTerminatedEarly()); + assertEquals(expectedInstance.getTotalShards(), newInstance.getTotalShards()); + assertEquals(expectedInstance.getFailedShards(), newInstance.getFailedShards()); + assertEquals(expectedInstance.getSkippedShards(), newInstance.getSkippedShards()); + assertEquals(expectedInstance.getSuccessfulShards(), newInstance.getSuccessfulShards()); + assertEquals(expectedInstance.getShardFailures().length, newInstance.getShardFailures().length); + + ShardSearchFailure[] expectedFailures = expectedInstance.getShardFailures(); + ShardSearchFailure[] newFailures = newInstance.getShardFailures(); + + for (int i = 0; i < newFailures.length; i++) { + ShardSearchFailure parsedFailure = newFailures[i]; + ShardSearchFailure originalFailure = expectedFailures[i]; + assertEquals(originalFailure.index(), parsedFailure.index()); + assertEquals(originalFailure.shard(), parsedFailure.shard()); + assertEquals(originalFailure.shardId(), parsedFailure.shardId()); + String originalMsg = originalFailure.getCause().getMessage(); + assertEquals(parsedFailure.getCause().getMessage(), "Elasticsearch exception [type=parsing_exception, reason=" + + originalMsg + "]"); + String nestedMsg = originalFailure.getCause().getCause().getMessage(); + assertEquals(parsedFailure.getCause().getCause().getMessage(), + "Elasticsearch exception [type=illegal_argument_exception, reason=" + nestedMsg + "]"); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 7f3b980becd8d..29bb860df7307 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -708,14 +708,15 @@ public void testBulk() throws Exception { for (BulkItemResponse bulkItemResponse : bulkResponse) { // <1> DocWriteResponse itemResponse = bulkItemResponse.getResponse(); // <2> - if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX - || bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) { // <3> + switch (bulkItemResponse.getOpType()) { + case INDEX: // <3> + case CREATE: IndexResponse indexResponse = (IndexResponse) itemResponse; - - } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) { // <4> + break; + case UPDATE: // <4> UpdateResponse updateResponse = (UpdateResponse) itemResponse; - - } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) { // <5> + break; + case DELETE: // <5> DeleteResponse deleteResponse = (DeleteResponse) itemResponse; } } @@ -728,8 +729,8 @@ public void testBulk() throws Exception { // tag::bulk-errors for (BulkItemResponse bulkItemResponse : bulkResponse) { if (bulkItemResponse.isFailed()) { // <1> - BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); // <2> - + BulkItemResponse.Failure failure = + bulkItemResponse.getFailure(); // <2> } } // end::bulk-errors @@ -748,6 +749,16 @@ public void testBulk() throws Exception { request.waitForActiveShards(2); // <1> request.waitForActiveShards(ActiveShardCount.ALL); // <2> // end::bulk-request-active-shards + // tag::bulk-request-pipeline + request.pipeline("pipelineId"); // <1> + // end::bulk-request-pipeline + // tag::bulk-request-routing + request.routing("routingId"); // <1> + // end::bulk-request-routing + + // tag::bulk-request-index-type + BulkRequest defaulted = new BulkRequest("posts","_doc"); // <1> + // end::bulk-request-index-type // tag::bulk-execute-listener ActionListener listener = new ActionListener() { @@ -839,8 +850,10 @@ public void testReindex() throws Exception { // tag::reindex-request-remote request.setRemoteInfo( new RemoteInfo( - "https", "localhost", 9002, null, new BytesArray(new MatchAllQueryBuilder().toString()), - "user", "pass", Collections.emptyMap(), new TimeValue(100, TimeUnit.MILLISECONDS), + "https", "localhost", 9002, null, + new BytesArray(new MatchAllQueryBuilder().toString()), + "user", "pass", Collections.emptyMap(), + new TimeValue(100, TimeUnit.MILLISECONDS), new TimeValue(100, TimeUnit.SECONDS) ) ); // <1> @@ -861,7 +874,8 @@ public void testReindex() throws Exception { // tag::reindex-execute - BulkByScrollResponse bulkResponse = client.reindex(request, RequestOptions.DEFAULT); + BulkByScrollResponse bulkResponse = + client.reindex(request, RequestOptions.DEFAULT); // end::reindex-execute assertSame(0, bulkResponse.getSearchFailures().size()); assertSame(0, bulkResponse.getBulkFailures().size()); @@ -878,9 +892,12 @@ public void testReindex() throws Exception { long bulkRetries = bulkResponse.getBulkRetries(); // <10> long searchRetries = bulkResponse.getSearchRetries(); // <11> TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <12> - TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <13> - List searchFailures = bulkResponse.getSearchFailures(); // <14> - List bulkFailures = bulkResponse.getBulkFailures(); // <15> + TimeValue throttledUntilMillis = + bulkResponse.getStatus().getThrottledUntil(); // <13> + List 
searchFailures = + bulkResponse.getSearchFailures(); // <14> + List bulkFailures = + bulkResponse.getBulkFailures(); // <15> // end::reindex-response } { @@ -888,8 +905,9 @@ public void testReindex() throws Exception { request.setSourceIndices("source1"); request.setDestIndex("dest"); + ActionListener listener; // tag::reindex-execute-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkResponse) { // <1> @@ -939,8 +957,9 @@ public void testReindexRethrottle() throws Exception { // end::rethrottle-request-execution } + ActionListener listener; // tag::rethrottle-request-async-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(ListTasksResponse response) { // <1> @@ -959,9 +978,12 @@ public void onFailure(Exception e) { RethrottleRequest request = new RethrottleRequest(taskId); // tag::rethrottle-execute-async - client.reindexRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <1> - client.updateByQueryRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <2> - client.deleteByQueryRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <3> + client.reindexRethrottleAsync(request, + RequestOptions.DEFAULT, listener); // <1> + client.updateByQueryRethrottleAsync(request, + RequestOptions.DEFAULT, listener); // <2> + client.deleteByQueryRethrottleAsync(request, + RequestOptions.DEFAULT, listener); // <3> // end::rethrottle-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -990,7 +1012,8 @@ public void testUpdateByQuery() throws Exception { } { // tag::update-by-query-request - UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // <1> + UpdateByQueryRequest request = + new UpdateByQueryRequest("source1", "source2"); // <1> // end::update-by-query-request // tag::update-by-query-request-conflicts request.setConflicts("proceed"); // <1> @@ -1034,7 +1057,8 @@ public void testUpdateByQuery() throws Exception { // end::update-by-query-request-indicesOptions // tag::update-by-query-execute - BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT); + BulkByScrollResponse bulkResponse = + client.updateByQuery(request, RequestOptions.DEFAULT); // end::update-by-query-execute assertSame(0, bulkResponse.getSearchFailures().size()); assertSame(0, bulkResponse.getBulkFailures().size()); @@ -1050,17 +1074,21 @@ public void testUpdateByQuery() throws Exception { long bulkRetries = bulkResponse.getBulkRetries(); // <9> long searchRetries = bulkResponse.getSearchRetries(); // <10> TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <11> - TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <12> - List searchFailures = bulkResponse.getSearchFailures(); // <13> - List bulkFailures = bulkResponse.getBulkFailures(); // <14> + TimeValue throttledUntilMillis = + bulkResponse.getStatus().getThrottledUntil(); // <12> + List searchFailures = + bulkResponse.getSearchFailures(); // <13> + List bulkFailures = + bulkResponse.getBulkFailures(); // <14> // end::update-by-query-response } { UpdateByQueryRequest request = new UpdateByQueryRequest(); request.indices("source1"); + ActionListener listener; // tag::update-by-query-execute-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkResponse) { 
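Reindex, update-by-query and delete-by-query all return a BulkByScrollResponse whose search-side and bulk-side failures arrive in separate lists, as the snippets above enumerate. A minimal synchronous sketch, assuming a configured RestHighLevelClient; the method and index names here are illustrative:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.index.reindex.BulkByScrollResponse;
    import org.elasticsearch.index.reindex.ReindexRequest;

    import java.io.IOException;

    static long reindexOrFail(RestHighLevelClient client) throws IOException {
        ReindexRequest request = new ReindexRequest();
        request.setSourceIndices("source1"); // read from here
        request.setDestIndex("dest");        // write to here
        BulkByScrollResponse response = client.reindex(request, RequestOptions.DEFAULT);
        // Failures while scrolling the source and failures while indexing into
        // the destination are reported in two separate lists; check both.
        if (response.getSearchFailures().isEmpty() == false
                || response.getBulkFailures().isEmpty() == false) {
            throw new IllegalStateException("reindex completed with failures");
        }
        return response.getCreated(); // documents created in the destination
    }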
// <1> @@ -1108,7 +1136,8 @@ public void testDeleteByQuery() throws Exception { } { // tag::delete-by-query-request - DeleteByQueryRequest request = new DeleteByQueryRequest("source1", "source2"); // <1> + DeleteByQueryRequest request = + new DeleteByQueryRequest("source1", "source2"); // <1> // end::delete-by-query-request // tag::delete-by-query-request-conflicts request.setConflicts("proceed"); // <1> @@ -1142,7 +1171,8 @@ public void testDeleteByQuery() throws Exception { // end::delete-by-query-request-indicesOptions // tag::delete-by-query-execute - BulkByScrollResponse bulkResponse = client.deleteByQuery(request, RequestOptions.DEFAULT); + BulkByScrollResponse bulkResponse = + client.deleteByQuery(request, RequestOptions.DEFAULT); // end::delete-by-query-execute assertSame(0, bulkResponse.getSearchFailures().size()); assertSame(0, bulkResponse.getBulkFailures().size()); @@ -1157,17 +1187,21 @@ public void testDeleteByQuery() throws Exception { long bulkRetries = bulkResponse.getBulkRetries(); // <8> long searchRetries = bulkResponse.getSearchRetries(); // <9> TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <10> - TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <11> - List searchFailures = bulkResponse.getSearchFailures(); // <12> - List bulkFailures = bulkResponse.getBulkFailures(); // <13> + TimeValue throttledUntilMillis = + bulkResponse.getStatus().getThrottledUntil(); // <11> + List searchFailures = + bulkResponse.getSearchFailures(); // <12> + List bulkFailures = + bulkResponse.getBulkFailures(); // <13> // end::delete-by-query-response } { DeleteByQueryRequest request = new DeleteByQueryRequest(); request.indices("source1"); + ActionListener listener; // tag::delete-by-query-execute-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkResponse) { // <1> @@ -1430,14 +1464,16 @@ public void afterBulk(long executionId, BulkRequest request, } @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + public void afterBulk(long executionId, BulkRequest request, + Throwable failure) { // <4> } }; BulkProcessor bulkProcessor = BulkProcessor.builder( - (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), - listener).build(); // <5> + (request, bulkListener) -> + client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), + listener).build(); // <5> // end::bulk-processor-init assertNotNull(bulkProcessor); @@ -1488,7 +1524,8 @@ public void afterBulk(long executionId, BulkRequest request, } @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + public void afterBulk(long executionId, BulkRequest request, + Throwable failure) { logger.error("Failed to execute bulk", failure); // <3> } }; @@ -1496,7 +1533,9 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) // tag::bulk-processor-options BulkProcessor.Builder builder = BulkProcessor.builder( - (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), listener); + (request, bulkListener) -> + client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener), + listener); builder.setBulkActions(500); // <1> builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)); // <2> builder.setConcurrentRequests(0); // <3> @@ -1510,23 +1549,23 @@ public void afterBulk(long executionId, BulkRequest request, 
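The BulkProcessor wiring shown above condenses into one factory method. A sketch under the same assumptions (configured RestHighLevelClient; the thresholds mirror the snippet):

    import org.elasticsearch.action.bulk.BulkProcessor;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    static BulkProcessor buildProcessor(RestHighLevelClient client) {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override public void beforeBulk(long executionId, BulkRequest request) { }
            @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }
            @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
        };
        return BulkProcessor.builder(
                (request, bulkListener) ->
                    client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                listener)
            .setBulkActions(500)                                 // flush after 500 actions...
            .setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB)) // ...or 1MB of payload
            .setConcurrentRequests(0)                            // flush on the calling thread
            .build();
    }

Index, update and delete requests added through BulkProcessor#add are then batched and flushed against those thresholds.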
Throwable failure) // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here public void testTermVectors() throws Exception { RestHighLevelClient client = highLevelClient(); - CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("doc", "user", "type=keyword"); + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("_doc", "user", "type=keyword"); CreateIndexResponse authorsResponse = client.indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); - client.index(new IndexRequest("index", "doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); + client.index(new IndexRequest("index", "_doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); Response refreshResponse = client().performRequest(new Request("POST", "/authors/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { // tag::term-vectors-request - TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1"); request.setFields("user"); // end::term-vectors-request } { // tag::term-vectors-request-artificial - TermVectorsRequest request = new TermVectorsRequest("authors", "doc"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc"); XContentBuilder docBuilder = XContentFactory.jsonBuilder(); docBuilder.startObject().field("user", "guest-user").endObject(); request.setDoc(docBuilder); // <1> @@ -1559,11 +1598,12 @@ public void testTermVectors() throws Exception { // end::term-vectors-request-optional-arguments } - TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1"); request.setFields("user"); // tag::term-vectors-execute - TermVectorsResponse response = client.termvectors(request, RequestOptions.DEFAULT); + TermVectorsResponse response = + client.termvectors(request, RequestOptions.DEFAULT); // end::term-vectors-execute @@ -1574,16 +1614,17 @@ public void testTermVectors() throws Exception { boolean found = response.getFound(); // <4> // end::term-vectors-response - // tag::term-vectors-term-vectors if (response.getTermVectorsList() != null) { - List tvList = response.getTermVectorsList(); - for (TermVectorsResponse.TermVector tv : tvList) { + // tag::term-vectors-term-vectors + for (TermVectorsResponse.TermVector tv : response.getTermVectorsList()) { String fieldname = tv.getFieldName(); // <1> int docCount = tv.getFieldStatistics().getDocCount(); // <2> - long sumTotalTermFreq = tv.getFieldStatistics().getSumTotalTermFreq(); // <3> + long sumTotalTermFreq = + tv.getFieldStatistics().getSumTotalTermFreq(); // <3> long sumDocFreq = tv.getFieldStatistics().getSumDocFreq(); // <4> if (tv.getTerms() != null) { - List terms = tv.getTerms(); // <5> + List terms = + tv.getTerms(); // <5> for (TermVectorsResponse.TermVector.Term term : terms) { String termStr = term.getTerm(); // <6> int termFreq = term.getTermFreq(); // <7> @@ -1591,7 +1632,8 @@ public void testTermVectors() throws Exception { long totalTermFreq = term.getTotalTermFreq(); // <9> float score = term.getScore(); // <10> if (term.getTokens() != null) { - List tokens = term.getTokens(); // <11> + List tokens = + term.getTokens(); // <11> for (TermVectorsResponse.TermVector.Token token : tokens) { int position = token.getPosition(); // <12> int startOffset = token.getStartOffset(); // 
<13> @@ -1602,11 +1644,12 @@ public void testTermVectors() throws Exception { } } } + // end::term-vectors-term-vectors } - // end::term-vectors-term-vectors + ActionListener listener; // tag::term-vectors-execute-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(TermVectorsResponse termVectorsResponse) { // <1> @@ -1664,7 +1707,7 @@ public void testMultiGet() throws Exception { "index", // <1> "type", // <2> "example_id")); // <3> - request.add(new MultiGetRequest.Item("index", "type", "another_id")); // <4> + request.add(new MultiGetRequest.Item("index", "type", "another_id")); // <4> // end::multi-get-request // Add a missing index so we can test it. @@ -1715,11 +1758,12 @@ public void testMultiGet() throws Exception { // TODO status is broken! fix in a followup // assertEquals(RestStatus.NOT_FOUND, ee.status()); // <4> assertThat(e.getMessage(), - containsString("reason=no such index [missing_index]")); // <5> + containsString("reason=no such index [missing_index]")); // <5> // end::multi-get-indexnotfound + ActionListener listener; // tag::multi-get-execute-listener - ActionListener listener = new ActionListener() { + listener = new ActionListener() { @Override public void onResponse(MultiGetResponse response) { // <1> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java index 8631e18b8739b..bf507242bc89e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java @@ -26,12 +26,12 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.protocol.xpack.graph.Connection; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; -import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; -import org.elasticsearch.protocol.xpack.graph.Hop; -import org.elasticsearch.protocol.xpack.graph.Vertex; -import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.client.graph.Connection; +import org.elasticsearch.client.graph.GraphExploreRequest; +import org.elasticsearch.client.graph.GraphExploreResponse; +import org.elasticsearch.client.graph.Hop; +import org.elasticsearch.client.graph.Vertex; +import org.elasticsearch.client.graph.VertexRequest; import org.junit.Before; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java index c8310be8053b2..57f8a8314fa97 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java @@ -24,9 +24,9 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -import 
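Pulling the term vectors request and response traversal above into one piece: a sketch assuming the authors index created earlier in the test and the client-side TermVectorsRequest/TermVectorsResponse classes in org.elasticsearch.client.core on this branch:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.core.TermVectorsRequest;
    import org.elasticsearch.client.core.TermVectorsResponse;

    import java.io.IOException;

    static void printTermFrequencies(RestHighLevelClient client) throws IOException {
        TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1");
        request.setFields("user"); // only fetch vectors for this field
        TermVectorsResponse response = client.termvectors(request, RequestOptions.DEFAULT);
        if (response.getTermVectorsList() == null) {
            return; // document or vectors not found
        }
        for (TermVectorsResponse.TermVector tv : response.getTermVectorsList()) {
            if (tv.getTerms() == null) {
                continue;
            }
            for (TermVectorsResponse.TermVector.Term term : tv.getTerms()) {
                System.out.println(tv.getFieldName() + ": " + term.getTerm() + " x" + term.getTermFreq());
            }
        }
    }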
org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; +import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; +import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; +import org.elasticsearch.client.migration.UpgradeActionRequired; import java.io.IOException; import java.util.Map; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index e8383b9ba7441..90337ebf6053e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -74,17 +74,21 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedResponse; +import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.datafeed.ChunkingConfig; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedStats; +import org.elasticsearch.client.ml.datafeed.DatafeedUpdate; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.AnalysisLimits; import org.elasticsearch.client.ml.job.config.DataDescription; @@ -92,6 +96,7 @@ import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.config.ModelPlotConfig; import org.elasticsearch.client.ml.job.config.Operator; import org.elasticsearch.client.ml.job.config.RuleCondition; @@ -630,6 +635,77 @@ public void onFailure(Exception e) { } } + public void testUpdateDatafeed() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("update-datafeed-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + String datafeedId = job.getId() + "-feed"; + DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, job.getId()).setIndices("foo").build(); + client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT); + + { + AggregatorFactories.Builder aggs = AggregatorFactories.builder(); + List scriptFields = Collections.emptyList(); + // tag::update-datafeed-config + DatafeedUpdate.Builder datafeedUpdateBuilder = new DatafeedUpdate.Builder(datafeedId) // <1> + .setAggregations(aggs) // <2> + .setIndices("index_1", "index_2") // <3> + .setChunkingConfig(ChunkingConfig.newAuto()) // <4> + .setFrequency(TimeValue.timeValueSeconds(30)) // <5> + .setQuery(QueryBuilders.matchAllQuery()) // <6> + .setQueryDelay(TimeValue.timeValueMinutes(1)) // <7> + 
.setScriptFields(scriptFields) // <8> + .setScrollSize(1000) // <9> + .setJobId("update-datafeed-job"); // <10> + // end::update-datafeed-config + + // Clearing aggregation to avoid complex validation rules + datafeedUpdateBuilder.setAggregations((String) null); + + // tag::update-datafeed-request + UpdateDatafeedRequest request = new UpdateDatafeedRequest(datafeedUpdateBuilder.build()); // <1> + // end::update-datafeed-request + + // tag::update-datafeed-execute + PutDatafeedResponse response = client.machineLearning().updateDatafeed(request, RequestOptions.DEFAULT); + // end::update-datafeed-execute + + // tag::update-datafeed-response + DatafeedConfig updatedDatafeed = response.getResponse(); // <1> + // end::update-datafeed-response + assertThat(updatedDatafeed.getId(), equalTo(datafeedId)); + } + { + DatafeedUpdate datafeedUpdate = new DatafeedUpdate.Builder(datafeedId).setIndices("index_1", "index_2").build(); + + UpdateDatafeedRequest request = new UpdateDatafeedRequest(datafeedUpdate); + // tag::update-datafeed-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PutDatafeedResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-datafeed-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::update-datafeed-execute-async + client.machineLearning().updateDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-datafeed-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetDatafeed() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -1934,4 +2010,58 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testCreateFilter() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + // tag::put-filter-config + MlFilter.Builder filterBuilder = MlFilter.builder("my_safe_domains") // <1> + .setDescription("A list of safe domains") // <2> + .setItems("*.google.com", "wikipedia.org"); // <3> + // end::put-filter-config + + // tag::put-filter-request + PutFilterRequest request = new PutFilterRequest(filterBuilder.build()); // <1> + // end::put-filter-request + + // tag::put-filter-execute + PutFilterResponse response = client.machineLearning().putFilter(request, RequestOptions.DEFAULT); + // end::put-filter-execute + + // tag::put-filter-response + MlFilter createdFilter = response.getResponse(); // <1> + // end::put-filter-response + assertThat(createdFilter.getId(), equalTo("my_safe_domains")); + } + { + MlFilter.Builder filterBuilder = MlFilter.builder("safe_domains_async") + .setDescription("A list of safe domains") + .setItems("*.google.com", "wikipedia.org"); + + PutFilterRequest request = new PutFilterRequest(filterBuilder.build()); + // tag::put-filter-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PutFilterResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-filter-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-filter-execute-async + client.machineLearning().putFilterAsync(request, 
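A partial DatafeedUpdate only touches the fields that are set, which is the point of the builder shown above. A minimal sketch, assuming a configured RestHighLevelClient and an existing datafeed id; the method name is illustrative:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.ml.PutDatafeedResponse;
    import org.elasticsearch.client.ml.UpdateDatafeedRequest;
    import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
    import org.elasticsearch.client.ml.datafeed.DatafeedUpdate;

    import java.io.IOException;

    static DatafeedConfig repointDatafeed(RestHighLevelClient client, String datafeedId) throws IOException {
        DatafeedUpdate update = new DatafeedUpdate.Builder(datafeedId) // id of the datafeed to change
            .setIndices("index_1", "index_2")                          // only fields that are set get updated
            .build();
        PutDatafeedResponse response = client.machineLearning()
            .updateDatafeed(new UpdateDatafeedRequest(update), RequestOptions.DEFAULT);
        return response.getResponse(); // the updated datafeed configuration as persisted
    }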
+                RequestOptions.DEFAULT, listener); // <1>
+            // end::put-filter-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
 }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
index 86562297306b2..0a0ca9215c93d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
@@ -33,6 +33,7 @@
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.RollupClient;
 import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
 import org.elasticsearch.client.rollup.DeleteRollupJobResponse;
 import org.elasticsearch.client.rollup.GetRollupCapsRequest;
@@ -46,6 +47,8 @@
 import org.elasticsearch.client.rollup.PutRollupJobResponse;
 import org.elasticsearch.client.rollup.RollableIndexCaps;
 import org.elasticsearch.client.rollup.RollupJobCaps;
+import org.elasticsearch.client.rollup.StartRollupJobRequest;
+import org.elasticsearch.client.rollup.StartRollupJobResponse;
 import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig;
 import org.elasticsearch.client.rollup.job.config.GroupConfig;
 import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig;
@@ -186,6 +189,7 @@ public void onFailure(Exception e) {
         }
     }

+    @SuppressWarnings("unused")
     public void testGetRollupJob() throws Exception {
         testCreateRollupJob();
         RestHighLevelClient client = highLevelClient();
@@ -236,6 +240,62 @@ public void onFailure(Exception e) {
         assertTrue(latch.await(30L, TimeUnit.SECONDS));
     }
+
+    @SuppressWarnings("unused")
+    public void testStartRollupJob() throws Exception {
+        testCreateRollupJob();
+        RestHighLevelClient client = highLevelClient();
+
+        String id = "job_1";
+        // tag::rollup-start-job-request
+        StartRollupJobRequest request = new StartRollupJobRequest(id); // <1>
+        // end::rollup-start-job-request
+
+        try {
+            // tag::rollup-start-job-execute
+            RollupClient rc = client.rollup();
+            StartRollupJobResponse response = rc.startRollupJob(request, RequestOptions.DEFAULT);
+            // end::rollup-start-job-execute
+
+            // tag::rollup-start-job-response
+            response.isAcknowledged(); // <1>
+            // end::rollup-start-job-response
+        } catch (Exception e) {
+            // Swallow any exception; this test does not verify that the job actually starts.
+        }
+
+        // tag::rollup-start-job-execute-listener
+        ActionListener listener = new ActionListener() {
+            @Override
+            public void onResponse(StartRollupJobResponse response) {
+                // <1>
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                // <2>
+            }
+        };
+        // end::rollup-start-job-execute-listener
+
+        final CountDownLatch latch = new CountDownLatch(1);
+        listener = new LatchedActionListener<>(listener, latch);
+
+        // tag::rollup-start-job-execute-async
+        RollupClient rc = client.rollup();
+        rc.startRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
+        // end::rollup-start-job-execute-async
+
+        assertTrue(latch.await(30L, TimeUnit.SECONDS));
+
+        // stop the job so it can correctly be deleted by the test teardown
+        // TODO Replace this with the Rollup Stop Job API
+        Response stopResponse = client().performRequest(new Request("POST", "/_xpack/rollup/job/" + id + "/_stop"));
+        assertEquals(RestStatus.OK.getStatus(), stopResponse.getStatusLine().getStatusCode());
+    }
+
+    @SuppressWarnings("unused")
     public void testGetRollupCaps() throws Exception {
         RestHighLevelClient client = highLevelClient();
@@ -329,6 +389,7 @@ public void testGetRollupCaps() throws Exception {
         ActionListener listener = new ActionListener() {
             @Override
             public void onResponse(GetRollupCapsResponse response) {
+                // <1>
             }
@@ -406,6 +467,7 @@ private void waitForPendingRollupTasks() throws Exception {
         });
     }

+    @SuppressWarnings("unused")
     public void testDeleteRollupJob() throws Exception {
         RestHighLevelClient client = highLevelClient();
@@ -450,4 +512,4 @@ public void onFailure(Exception e) {
         assertTrue(latch.await(30L, TimeUnit.SECONDS));
     }
-}
+}
\ No newline at end of file
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
index 1e596158750fa..831c39ed28be6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
@@ -49,6 +49,8 @@
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.core.CountRequest;
+import org.elasticsearch.client.core.CountResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.text.Text;
@@ -1287,4 +1289,124 @@ private void indexSearchTestData() throws IOException {
         assertSame(RestStatus.OK, bulkResponse.status());
         assertFalse(bulkResponse.hasFailures());
     }
+
+    @SuppressWarnings({"unused", "unchecked"})
+    public void testCount() throws Exception {
+        indexCountTestData();
+        RestHighLevelClient client = highLevelClient();
+        {
+            // tag::count-request-basic
+            CountRequest countRequest = new CountRequest(); // <1>
+            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // <2>
+            searchSourceBuilder.query(QueryBuilders.matchAllQuery()); // <3>
+            countRequest.source(searchSourceBuilder); // <4>
+            // end::count-request-basic
+        }
+        {
+            // tag::count-request-indices-types
+            CountRequest countRequest = new CountRequest("blog"); // <1>
+            countRequest.types("doc"); // <2>
+            // end::count-request-indices-types
+            // tag::count-request-routing
+            countRequest.routing("routing"); // <1>
+            // end::count-request-routing
+            //
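The start-rollup-job walkthrough above reduces to a few lines when used synchronously. A sketch assuming a configured RestHighLevelClient and an already created rollup job:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.rollup.StartRollupJobRequest;
    import org.elasticsearch.client.rollup.StartRollupJobResponse;

    import java.io.IOException;

    static boolean startRollupJob(RestHighLevelClient client, String jobId) throws IOException {
        StartRollupJobRequest request = new StartRollupJobRequest(jobId); // id of an existing rollup job
        StartRollupJobResponse response = client.rollup().startRollupJob(request, RequestOptions.DEFAULT);
        return response.isAcknowledged(); // true once the job has been asked to start
    }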
tag::count-request-indicesOptions + countRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::count-request-indicesOptions + // tag::count-request-preference + countRequest.preference("_local"); // <1> + // end::count-request-preference + assertNotNull(client.count(countRequest, RequestOptions.DEFAULT)); + } + { + // tag::count-source-basics + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); // <1> + sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy")); // <2> + // end::count-source-basics + + // tag::count-source-setter + CountRequest countRequest = new CountRequest(); + countRequest.indices("blog", "author"); + countRequest.source(sourceBuilder); + // end::count-source-setter + + // tag::count-execute + CountResponse countResponse = client + .count(countRequest, RequestOptions.DEFAULT); + // end::count-execute + + // tag::count-execute-listener + ActionListener listener = + new ActionListener() { + + @Override + public void onResponse(CountResponse countResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::count-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::count-execute-async + client.countAsync(countRequest, RequestOptions.DEFAULT, listener); // <1> + // end::count-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + // tag::count-response-1 + long count = countResponse.getCount(); + RestStatus status = countResponse.status(); + Boolean terminatedEarly = countResponse.isTerminatedEarly(); + // end::count-response-1 + + // tag::count-response-2 + int totalShards = countResponse.getTotalShards(); + int skippedShards = countResponse.getSkippedShards(); + int successfulShards = countResponse.getSuccessfulShards(); + int failedShards = countResponse.getFailedShards(); + for (ShardSearchFailure failure : countResponse.getShardFailures()) { + // failures should be handled here + } + // end::count-response-2 + assertNotNull(countResponse); + assertEquals(4, countResponse.getCount()); + } + } + + private static void indexCountTestData() throws IOException { + CreateIndexRequest authorsRequest = new CreateIndexRequest("author") + .mapping("doc", "user", "type=keyword,doc_values=false"); + CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT); + assertTrue(authorsResponse.isAcknowledged()); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("blog", "doc", "1") + .source(XContentType.JSON, "title", "Doubling Down on Open?", "user", + Collections.singletonList("kimchy"), "innerObject", Collections.singletonMap("key", "value"))); + bulkRequest.add(new IndexRequest("blog", "doc", "2") + .source(XContentType.JSON, "title", "Swiftype Joins Forces with Elastic", "user", + Arrays.asList("kimchy", "matt"), "innerObject", Collections.singletonMap("key", "value"))); + bulkRequest.add(new IndexRequest("blog", "doc", "3") + .source(XContentType.JSON, "title", "On Net Neutrality", "user", + Arrays.asList("tyler", "kimchy"), "innerObject", Collections.singletonMap("key", "value"))); + + bulkRequest.add(new IndexRequest("author", "doc", "1") + .source(XContentType.JSON, "user", "kimchy")); + + + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, 
RequestOptions.DEFAULT); + assertSame(RestStatus.OK, bulkResponse.status()); + assertFalse(bulkResponse.hasFailures()); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 96b4b311490b7..3fc787fa8585d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -24,13 +24,17 @@ import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.security.AuthenticateResponse; import org.elasticsearch.client.security.ChangePasswordRequest; import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.ClearRolesCacheResponse; +import org.elasticsearch.client.security.CreateTokenRequest; +import org.elasticsearch.client.security.CreateTokenResponse; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleMappingResponse; import org.elasticsearch.client.security.DeleteRoleRequest; @@ -38,30 +42,43 @@ import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EmptyResponse; import org.elasticsearch.client.security.EnableUserRequest; +import org.elasticsearch.client.security.ExpressionRoleMapping; +import org.elasticsearch.client.security.GetRoleMappingsRequest; +import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.InvalidateTokenRequest; +import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.client.security.PutRoleMappingRequest; import org.elasticsearch.client.security.PutRoleMappingResponse; import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; import org.elasticsearch.client.security.RefreshPolicy; -import org.elasticsearch.client.security.support.CertificateInfo; import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; -import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.client.security.support.CertificateInfo; +import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; 
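Condensing the count walkthrough above into one call chain; the index and field names follow the data set up by indexCountTestData(), and a configured RestHighLevelClient is assumed:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.core.CountRequest;
    import org.elasticsearch.client.core.CountResponse;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    import java.io.IOException;

    static long countKimchyDocs(RestHighLevelClient client) throws IOException {
        CountRequest countRequest = new CountRequest();
        countRequest.indices("blog", "author");                 // restrict the count to these indices
        countRequest.source(new SearchSourceBuilder()
            .query(QueryBuilders.termQuery("user", "kimchy"))); // count only matching documents
        CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
        return countResponse.getCount();                        // hit count across all shards
    }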
+import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.not; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.nullValue; public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase { @@ -119,11 +136,11 @@ public void testPutRoleMapping() throws Exception { { // tag::put-role-mapping-execute final RoleMapperExpression rules = AnyRoleMapperExpression.builder() - .addExpression(FieldRoleMapperExpression.ofUsername("*")) - .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) - .build(); + .addExpression(FieldRoleMapperExpression.ofUsername("*")) + .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) + .build(); final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); // end::put-role-mapping-execute // tag::put-role-mapping-response @@ -134,11 +151,11 @@ public void testPutRoleMapping() throws Exception { { final RoleMapperExpression rules = AnyRoleMapperExpression.builder() - .addExpression(FieldRoleMapperExpression.ofUsername("*")) - .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) - .build(); + .addExpression(FieldRoleMapperExpression.ofUsername("*")) + .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) + .build(); final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + rules, null, RefreshPolicy.NONE); // tag::put-role-mapping-execute-listener ActionListener listener = new ActionListener() { @Override @@ -165,6 +182,119 @@ public void onFailure(Exception e) { } } + public void testGetRoleMappings() throws Exception { + final RestHighLevelClient client = highLevelClient(); + + final RoleMapperExpression rules1 = AnyRoleMapperExpression.builder().addExpression(FieldRoleMapperExpression.ofUsername("*")) + .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")).build(); + final PutRoleMappingRequest putRoleMappingRequest1 = new PutRoleMappingRequest("mapping-example-1", true, Collections.singletonList( + "superuser"), rules1, null, RefreshPolicy.NONE); + final PutRoleMappingResponse putRoleMappingResponse1 = client.security().putRoleMapping(putRoleMappingRequest1, + RequestOptions.DEFAULT); + boolean isCreated1 = putRoleMappingResponse1.isCreated(); + assertTrue(isCreated1); + final RoleMapperExpression rules2 = AnyRoleMapperExpression.builder().addExpression(FieldRoleMapperExpression.ofGroups( + "cn=admins,dc=example,dc=com")).build(); + final Map metadata2 = new HashMap<>(); + metadata2.put("k1", "v1"); + final PutRoleMappingRequest putRoleMappingRequest2 = new PutRoleMappingRequest("mapping-example-2", true, Collections.singletonList( + "monitoring"), rules2, metadata2, RefreshPolicy.NONE); + final PutRoleMappingResponse putRoleMappingResponse2 = client.security().putRoleMapping(putRoleMappingRequest2, + RequestOptions.DEFAULT); + boolean isCreated2 = putRoleMappingResponse2.isCreated(); + assertTrue(isCreated2); + 
+ { + // tag::get-role-mappings-execute + final GetRoleMappingsRequest request = new GetRoleMappingsRequest("mapping-example-1"); + final GetRoleMappingsResponse response = client.security().getRoleMappings(request, RequestOptions.DEFAULT); + // end::get-role-mappings-execute + // tag::get-role-mappings-response + List mappings = response.getMappings(); + // end::get-role-mappings-response + assertNotNull(mappings); + assertThat(mappings.size(), is(1)); + assertThat(mappings.get(0).isEnabled(), is(true)); + assertThat(mappings.get(0).getName(), is("mapping-example-1")); + assertThat(mappings.get(0).getExpression(), equalTo(rules1)); + assertThat(mappings.get(0).getMetadata(), equalTo(Collections.emptyMap())); + assertThat(mappings.get(0).getRoles(), contains("superuser")); + } + + { + // tag::get-role-mappings-list-execute + final GetRoleMappingsRequest request = new GetRoleMappingsRequest("mapping-example-1", "mapping-example-2"); + final GetRoleMappingsResponse response = client.security().getRoleMappings(request, RequestOptions.DEFAULT); + // end::get-role-mappings-list-execute + List mappings = response.getMappings(); + assertNotNull(mappings); + assertThat(mappings.size(), is(2)); + for (ExpressionRoleMapping roleMapping : mappings) { + assertThat(roleMapping.isEnabled(), is(true)); + assertThat(roleMapping.getName(), isIn(new String[]{"mapping-example-1", "mapping-example-2"})); + if (roleMapping.getName().equals("mapping-example-1")) { + assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); + assertThat(roleMapping.getExpression(), equalTo(rules1)); + assertThat(roleMapping.getRoles(), contains("superuser")); + } else { + assertThat(roleMapping.getMetadata(), equalTo(metadata2)); + assertThat(roleMapping.getExpression(), equalTo(rules2)); + assertThat(roleMapping.getRoles(), contains("monitoring")); + } + } + } + + { + // tag::get-role-mappings-all-execute + final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); + final GetRoleMappingsResponse response = client.security().getRoleMappings(request, RequestOptions.DEFAULT); + // end::get-role-mappings-all-execute + List mappings = response.getMappings(); + assertNotNull(mappings); + assertThat(mappings.size(), is(2)); + for (ExpressionRoleMapping roleMapping : mappings) { + assertThat(roleMapping.isEnabled(), is(true)); + assertThat(roleMapping.getName(), isIn(new String[]{"mapping-example-1", "mapping-example-2"})); + if (roleMapping.getName().equals("mapping-example-1")) { + assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); + assertThat(roleMapping.getExpression(), equalTo(rules1)); + assertThat(roleMapping.getRoles(), contains("superuser")); + } else { + assertThat(roleMapping.getMetadata(), equalTo(metadata2)); + assertThat(roleMapping.getExpression(), equalTo(rules2)); + assertThat(roleMapping.getRoles(), contains("monitoring")); + } + } + } + + { + final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); + // tag::get-role-mappings-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetRoleMappingsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-role-mappings-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-role-mappings-execute-async + client.security().getRoleMappingsAsync(request, 
RequestOptions.DEFAULT, listener); // <1> + // end::get-role-mappings-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testEnableUser() throws Exception { RestHighLevelClient client = highLevelClient(); char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; @@ -254,6 +384,51 @@ public void onFailure(Exception e) { } } + public void testAuthenticate() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + //tag::authenticate-execute + AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT); + //end::authenticate-execute + + //tag::authenticate-response + User user = response.getUser(); // <1> + boolean enabled = response.enabled(); // <2> + //end::authenticate-response + + assertThat(user.username(), is("test_user")); + assertThat(user.roles(), contains(new String[] {"superuser"})); + assertThat(user.fullName(), nullValue()); + assertThat(user.email(), nullValue()); + assertThat(user.metadata().isEmpty(), is(true)); + assertThat(enabled, is(true)); + } + + { + // tag::authenticate-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AuthenticateResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::authenticate-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + // tag::authenticate-execute-async + client.security().authenticateAsync(RequestOptions.DEFAULT, listener); // <1> + // end::authenticate-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } public void testClearRolesCache() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -432,7 +607,7 @@ public void testDeleteRoleMapping() throws Exception { // Create role mappings final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("*"); final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); boolean isCreated = response.isCreated(); assertTrue(isCreated); @@ -546,4 +721,151 @@ private void addRole(String roleName) throws IOException { client().performRequest(addRoleRequest); } + public void testCreateToken() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // Setup user + PutUserRequest putUserRequest = new PutUserRequest("token_user", "password".toCharArray(), + Collections.singletonList("kibana_user"), null, null, true, null, RefreshPolicy.IMMEDIATE); + PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); + assertTrue(putUserResponse.isCreated()); + } + { + // tag::create-token-password-request + final char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; + CreateTokenRequest createTokenRequest = CreateTokenRequest.passwordGrant("token_user", password); + // end::create-token-password-request + + // tag::create-token-execute + CreateTokenResponse createTokenResponse = client.security().createToken(createTokenRequest, RequestOptions.DEFAULT); + // end::create-token-execute + + // tag::create-token-response + String accessToken = createTokenResponse.getAccessToken(); // <1> + String 
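The authenticate API exercised above needs no request object: it reports on whichever user the client itself is configured as. A compact sketch against a configured RestHighLevelClient:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.security.AuthenticateResponse;
    import org.elasticsearch.client.security.user.User;

    import java.io.IOException;

    static void printCurrentUser(RestHighLevelClient client) throws IOException {
        AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
        User user = response.getUser();
        System.out.println(user.username() + " roles=" + user.roles() + " enabled=" + response.enabled());
    }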
refreshToken = createTokenResponse.getRefreshToken(); // <2> + // end::create-token-response + assertNotNull(accessToken); + assertNotNull(refreshToken); + assertNotNull(createTokenResponse.getExpiresIn()); + + // tag::create-token-refresh-request + createTokenRequest = CreateTokenRequest.refreshTokenGrant(refreshToken); + // end::create-token-refresh-request + + CreateTokenResponse refreshResponse = client.security().createToken(createTokenRequest, RequestOptions.DEFAULT); + assertNotNull(refreshResponse.getAccessToken()); + assertNotNull(refreshResponse.getRefreshToken()); + } + + { + // tag::create-token-client-credentials-request + CreateTokenRequest createTokenRequest = CreateTokenRequest.clientCredentialsGrant(); + // end::create-token-client-credentials-request + + ActionListener listener; + //tag::create-token-execute-listener + listener = new ActionListener() { + @Override + public void onResponse(CreateTokenResponse createTokenResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::create-token-execute-listener + + // Avoid unused variable warning + assertNotNull(listener); + + // Replace the empty listener by a blocking listener in test + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; + + //tag::create-token-execute-async + client.security().createTokenAsync(createTokenRequest, RequestOptions.DEFAULT, listener); // <1> + //end::create-token-execute-async + + assertNotNull(future.get(30, TimeUnit.SECONDS)); + assertNotNull(future.get().getAccessToken()); + // "client-credentials" grants aren't refreshable + assertNull(future.get().getRefreshToken()); + } + } + + public void testInvalidateToken() throws Exception { + RestHighLevelClient client = highLevelClient(); + + String accessToken; + String refreshToken; + { + // Setup user + final char[] password = "password".toCharArray(); + PutUserRequest putUserRequest = new PutUserRequest("invalidate_token", password, + Collections.singletonList("kibana_user"), null, null, true, null, RefreshPolicy.IMMEDIATE); + PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); + assertTrue(putUserResponse.isCreated()); + + // Create tokens + final CreateTokenRequest createTokenRequest = CreateTokenRequest.passwordGrant("invalidate_token", password); + final CreateTokenResponse tokenResponse = client.security().createToken(createTokenRequest, RequestOptions.DEFAULT); + accessToken = tokenResponse.getAccessToken(); + refreshToken = tokenResponse.getRefreshToken(); + } + { + // tag::invalidate-access-token-request + InvalidateTokenRequest invalidateTokenRequest = InvalidateTokenRequest.accessToken(accessToken); + // end::invalidate-access-token-request + + // tag::invalidate-token-execute + InvalidateTokenResponse invalidateTokenResponse = + client.security().invalidateToken(invalidateTokenRequest, RequestOptions.DEFAULT); + // end::invalidate-token-execute + + // tag::invalidate-token-response + boolean isCreated = invalidateTokenResponse.isCreated(); + // end::invalidate-token-response + assertTrue(isCreated); + } + + { + // tag::invalidate-refresh-token-request + InvalidateTokenRequest invalidateTokenRequest = InvalidateTokenRequest.refreshToken(refreshToken); + // end::invalidate-refresh-token-request + + ActionListener listener; + //tag::invalidate-token-execute-listener + listener = new ActionListener() { + @Override + public void onResponse(InvalidateTokenResponse invalidateTokenResponse) { + // <1> + } + + @Override 
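The three CreateTokenRequest factory methods above cover the supported grant types. A sketch chaining the password and refresh-token grants; the user name is illustrative and the user must already exist:

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.security.CreateTokenRequest;
    import org.elasticsearch.client.security.CreateTokenResponse;

    import java.io.IOException;

    static String freshAccessToken(RestHighLevelClient client, char[] password) throws IOException {
        // Exchange user credentials for an access/refresh token pair.
        CreateTokenResponse initial = client.security().createToken(
            CreateTokenRequest.passwordGrant("token_user", password), RequestOptions.DEFAULT);
        // Later, trade the refresh token for a new access token without re-sending the password.
        CreateTokenResponse refreshed = client.security().createToken(
            CreateTokenRequest.refreshTokenGrant(initial.getRefreshToken()), RequestOptions.DEFAULT);
        // CreateTokenRequest.clientCredentialsGrant() also works here, but such
        // tokens carry no refresh token.
        return refreshed.getAccessToken();
    }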
+ public void onFailure(Exception e) { + // <2> + } + }; + //end::invalidate-token-execute-listener + + // Avoid unused variable warning + assertNotNull(listener); + + // Replace the empty listener by a blocking listener in test + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; + + //tag::invalidate-token-execute-async + client.security().invalidateTokenAsync(invalidateTokenRequest, RequestOptions.DEFAULT, listener); // <1> + //end::invalidate-token-execute-async + + final InvalidateTokenResponse response = future.get(30, TimeUnit.SECONDS); + assertNotNull(response); + assertTrue(response.isCreated());// technically, this should be false, but the API is broken + // See https://github.com/elastic/elasticsearch/issues/35115 + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java index 165bda95dfc3d..ac4fca82b2e10 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java @@ -40,10 +40,10 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.client.watcher.DeleteWatchRequest; +import org.elasticsearch.client.watcher.DeleteWatchResponse; +import org.elasticsearch.client.watcher.PutWatchRequest; +import org.elasticsearch.client.watcher.PutWatchResponse; import org.elasticsearch.rest.RestStatus; import java.util.concurrent.CountDownLatch; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java similarity index 98% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java index 2e6e325c4a009..ef76d8d32e624 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.graph; +package org.elasticsearch.client.graph; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ShardOperationFailedException; @@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.equalTo; -public class GraphExploreResponseTests extends AbstractXContentTestCase< GraphExploreResponse> { +public class GraphExploreResponseTests extends AbstractXContentTestCase { @Override protected GraphExploreResponse createTestInstance() { @@ -81,7 +81,7 @@ private static GraphExploreResponse createTestInstanceWithFailures() { @Override protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { - return GraphExploreResponse.fromXContext(parser); + return GraphExploreResponse.fromXContent(parser); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java new file mode 100644 index 0000000000000..e44eb0da0e188 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class AllocateActionTests extends AbstractXContentTestCase { + + @Override + protected AllocateAction createTestInstance() { + return randomInstance(); + } + + static AllocateAction randomInstance() { + boolean hasAtLeastOneMap = false; + Map includes; + if (randomBoolean()) { + includes = randomMap(1, 100); + hasAtLeastOneMap = true; + } else { + includes = randomBoolean() ? null : Collections.emptyMap(); + } + Map excludes; + if (randomBoolean()) { + hasAtLeastOneMap = true; + excludes = randomMap(1, 100); + } else { + excludes = randomBoolean() ? null : Collections.emptyMap(); + } + Map requires; + if (hasAtLeastOneMap == false || randomBoolean()) { + requires = randomMap(1, 100); + } else { + requires = randomBoolean() ? null : Collections.emptyMap(); + } + Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); + return new AllocateAction(numberOfReplicas, includes, excludes, requires); + } + + @Override + protected AllocateAction doParseInstance(XContentParser parser) { + return AllocateAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testAllMapsNullOrEmpty() { + Map include = randomBoolean() ? 
null : Collections.emptyMap(); + Map<String, String> exclude = randomBoolean() ? null : Collections.emptyMap(); + Map<String, String> require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(null, include, exclude, require)); + assertEquals("At least one of " + AllocateAction.INCLUDE_FIELD.getPreferredName() + ", " + + AllocateAction.EXCLUDE_FIELD.getPreferredName() + " or " + AllocateAction.REQUIRE_FIELD.getPreferredName() + + "must contain attributes for action " + AllocateAction.NAME, exception.getMessage()); + } + + public void testInvalidNumberOfReplicas() { + Map<String, String> include = randomMap(1, 5); + Map<String, String> exclude = randomBoolean() ? null : Collections.emptyMap(); + Map<String, String> require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(randomIntBetween(-1000, -1), include, exclude, require)); + assertEquals("[" + AllocateAction.NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0", exception.getMessage()); + } + + public static Map<String, String> randomMap(int minEntries, int maxEntries) { + Map<String, String> map = new HashMap<>(); + int numIncludes = randomIntBetween(minEntries, maxEntries); + for (int i = 0; i < numIncludes; i++) { + map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + return map; + } +}
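The two negative tests above pin down AllocateAction's construction rules: at least one of include/exclude/require must be non-empty, and number_of_replicas, when set, must be non-negative. A minimal valid construction, sketched with made-up attribute values (the "box_type"/"warm" pair is illustrative, not from the diff):

----
// Valid: one non-empty attribute map and a non-negative replica count.
Map<String, String> require = Collections.singletonMap("box_type", "warm");
AllocateAction action = new AllocateAction(1, null, null, require);
----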
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java new file mode 100644 index 0000000000000..fb7deb97a2787 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class DeleteActionTests extends AbstractXContentTestCase<DeleteAction> { + + @Override + protected DeleteAction createTestInstance() { + return new DeleteAction(); + } + + @Override + protected DeleteAction doParseInstance(XContentParser parser) { + return DeleteAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..01f6288d81d4b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class DeleteLifecyclePolicyRequestTests extends ESTestCase { + + private DeleteLifecyclePolicyRequest createTestInstance() { + return new DeleteLifecyclePolicyRequest(randomAlphaOfLengthBetween(2, 20)); + } + + public void testValidate() { + DeleteLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + } + + public void testValidationFailure() { + expectThrows(IllegalArgumentException.class, () -> new DeleteLifecyclePolicyRequest(randomFrom("", null))); + } +}
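Most of the new test classes in this diff extend AbstractXContentTestCase, so its round-trip contract is worth spelling out once. A simplified sketch of what one iteration of the harness does (the real framework class also shuffles field order and can inject unknown fields when supportsUnknownFields() returns true):

----
// Simplified view of one AbstractXContentTestCase round trip:
DeleteAction original = createTestInstance();           // random instance
XContentBuilder builder = JsonXContent.contentBuilder();
original.toXContent(builder, ToXContent.EMPTY_PARAMS);  // serialize
try (XContentParser parser = createParser(builder)) {
    DeleteAction parsed = doParseInstance(parser);      // parse back
    assertEquals(original, parsed);                     // equality must survive the trip
}
----

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java new file mode 100644 index 0000000000000..933503e629b06 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.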
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.Arrays; + +public class ExplainLifecycleRequestTests extends ESTestCase { + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copy, this::mutateInstance); + } + + private ExplainLifecycleRequest createTestInstance() { + ExplainLifecycleRequest request = new ExplainLifecycleRequest(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false, true)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + private ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + private ExplainLifecycleRequest copy(ExplainLifecycleRequest original) { + ExplainLifecycleRequest copy = new ExplainLifecycleRequest(); + copy.indices(original.indices()); + copy.indicesOptions(original.indicesOptions()); + return copy; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java new file mode 100644 index 0000000000000..26eacb04b024f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ExplainLifecycleResponseTests extends AbstractXContentTestCase<ExplainLifecycleResponse> { + + @Override + protected ExplainLifecycleResponse createTestInstance() { + Map<String, IndexLifecycleExplainResponse> indexResponses = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 2); i++) { + IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse(); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + return new ExplainLifecycleResponse(indexResponses); + } + + @Override + protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException { + return ExplainLifecycleResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + }
+}
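xContentRegistry() is overridden here because lifecycle actions are parsed polymorphically: the field name inside a phase's actions object selects a parser through NamedXContentRegistry. A brief sketch of the dispatch that registration enables (names taken from the diff; the commented namedObject call is how an XContentParser built with such a registry resolves the entry):

----
// Registering DeleteAction under its name...
List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class,
        new ParseField(DeleteAction.NAME), DeleteAction::parse));
NamedXContentRegistry registry = new NamedXContentRegistry(entries);
// ...lets a parser turn a "delete" field into a DeleteAction instance:
// LifecycleAction action = parser.namedObject(LifecycleAction.class, DeleteAction.NAME, null);
----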
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java new file mode 100644 index 0000000000000..16fafcfa24015 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ForceMergeActionTests extends AbstractXContentTestCase<ForceMergeAction> { + + @Override + protected ForceMergeAction doParseInstance(XContentParser parser) { + return ForceMergeAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ForceMergeAction createTestInstance() { + return randomInstance(); + } + + static ForceMergeAction randomInstance() { + return new ForceMergeAction(randomIntBetween(1, 100)); + } + + public void testMissingMaxNumSegments() throws IOException { + BytesReference emptyObject = BytesReference.bytes(JsonXContent.contentBuilder().startObject().endObject()); + XContentParser parser = XContentHelper.createParser(null, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + emptyObject, XContentType.JSON); + Exception e = expectThrows(IllegalArgumentException.class, () -> ForceMergeAction.parse(parser)); + assertThat(e.getMessage(), equalTo("Required [max_num_segments]")); + } + + public void testInvalidNegativeSegmentNumber() { + Exception r = expectThrows(IllegalArgumentException.class, () -> new ForceMergeAction(randomIntBetween(-10, 0))); + assertThat(r.getMessage(), equalTo("[max_num_segments] must be a positive integer")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..06d28207ce93a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class GetLifecyclePolicyRequestTests extends ESTestCase { + + private GetLifecyclePolicyRequest createTestInstance() { + int numPolicies = randomIntBetween(0, 10); + String[] policyNames = new String[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(2, 5); + } + return new GetLifecyclePolicyRequest(policyNames); + } + + public void testValidation() { + GetLifecyclePolicyRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testNullPolicyNameShouldFail() { + expectThrows(IllegalArgumentException.class, + () -> new GetLifecyclePolicyRequest(randomAlphaOfLengthBetween(2,20), null, randomAlphaOfLengthBetween(2,20))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..89dfbb8635332 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class GetLifecyclePolicyResponseTests extends AbstractXContentTestCase<GetLifecyclePolicyResponse> { + + @Override + protected GetLifecyclePolicyResponse createTestInstance() { + int numPolicies = randomIntBetween(1, 10); + ImmutableOpenMap.Builder<String, LifecyclePolicyMetadata> policies = ImmutableOpenMap.builder(); + for (int i = 0; i < numPolicies; i++) { + String policyName = "policy-" + randomAlphaOfLengthBetween(2, 5); + LifecyclePolicy policy = createRandomPolicy(policyName); + policies.put(policyName, new LifecyclePolicyMetadata(policy, randomLong(), randomLong())); + } + return new GetLifecyclePolicyResponse(policies.build()); + } + + @Override + protected GetLifecyclePolicyResponse doParseInstance(XContentParser parser) throws IOException { + return GetLifecyclePolicyResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java new file mode 100644 index 0000000000000..fb7e73ee62191 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java @@ -0,0 +1,122 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class IndexExplainResponseTests extends AbstractXContentTestCase<IndexLifecycleExplainResponse> { + + static IndexLifecycleExplainResponse randomIndexExplainResponse() { + if (frequently()) { + return randomManagedIndexExplainResponse(); + } else { + return randomUnmanagedIndexExplainResponse(); + } + } + + private static IndexLifecycleExplainResponse randomUnmanagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(randomAlphaOfLength(10)); + } + + private static IndexLifecycleExplainResponse randomManagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newManagedIndexResponse(randomAlphaOfLength(10), randomAlphaOfLength(10), + randomNonNegativeLong(), randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), + randomBoolean() ?
null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + } + + @Override + protected IndexLifecycleExplainResponse createTestInstance() { + return randomIndexExplainResponse(); + } + + @Override + protected IndexLifecycleExplainResponse doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleExplainResponse.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + private static class RandomStepInfo implements ToXContentObject { + + private final String key; + private final String value; + + RandomStepInfo(Supplier<String> randomStringSupplier) { + this.key = randomStringSupplier.get(); + this.value = randomStringSupplier.get(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(key, value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RandomStepInfo other = (RandomStepInfo) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java new file mode 100644 index 0000000000000..144039b8995c6 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.stream.Collectors; + +public class LifecycleManagementStatusResponseTests extends ESTestCase { + + public void testAllValidStatuses() { + EnumSet.allOf(OperationMode.class) + .forEach(e -> assertEquals(new LifecycleManagementStatusResponse(e.name()).getOperationMode(), e)); + } + + public void testXContent() throws IOException { + XContentType xContentType = XContentType.JSON; + String mode = randomFrom(EnumSet.allOf(OperationMode.class) + .stream().map(Enum::name).collect(Collectors.toList())); + XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}"); + assertEquals(LifecycleManagementStatusResponse.fromXContent(parser).getOperationMode(), OperationMode.fromString(mode)); + } + + public void testXContentInvalid() throws IOException { + XContentType xContentType = XContentType.JSON; + String mode = randomAlphaOfLength(10); + XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}"); + Exception e = expectThrows(IllegalArgumentException.class, () -> LifecycleManagementStatusResponse.fromXContent(parser)); + assertThat(e.getMessage(), CoreMatchers.containsString("failed to parse field [operation_mode]")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java new file mode 100644 index 0000000000000..548ba366b640e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class LifecyclePolicyMetadataTests extends AbstractXContentTestCase<LifecyclePolicyMetadata> { + + private String policyName; + + @Override + protected LifecyclePolicyMetadata createTestInstance() { + policyName = randomAlphaOfLengthBetween(5, 20); + LifecyclePolicy policy = createRandomPolicy(policyName); + return new LifecyclePolicyMetadata(policy, randomLong(), randomLong()); + } + + @Override + protected LifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException { + return LifecyclePolicyMetadata.parse(parser, policyName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java new file mode 100644 index 0000000000000..024cb13d8df37 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java @@ -0,0 +1,243 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class LifecyclePolicyTests extends AbstractXContentTestCase<LifecyclePolicy> { + private static final Set<String> VALID_HOT_ACTIONS = Sets.newHashSet(RolloverAction.NAME); + private static final Set<String> VALID_WARM_ACTIONS = Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME, + ReadOnlyAction.NAME, ShrinkAction.NAME); + private static final Set<String> VALID_COLD_ACTIONS = Sets.newHashSet(AllocateAction.NAME); + private static final Set<String> VALID_DELETE_ACTIONS = Sets.newHashSet(DeleteAction.NAME); + + private String lifecycleName; + + @Override + protected LifecyclePolicy doParseInstance(XContentParser parser) { + return LifecyclePolicy.parse(parser, lifecycleName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + @Override + protected LifecyclePolicy createTestInstance() { + lifecycleName = randomAlphaOfLength(5); + return createRandomPolicy(lifecycleName); + } + + public void testValidatePhases() { + boolean invalid = randomBoolean(); + String phaseName = randomFrom("hot", "warm", "cold", "delete"); + if (invalid) { + phaseName += randomAlphaOfLength(5); + } + Map<String, Phase> phases = Collections.singletonMap(phaseName, + new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + if (invalid) { + Exception e = expectThrows(IllegalArgumentException.class, () -> new LifecyclePolicy(lifecycleName, phases)); + assertThat(e.getMessage(), equalTo("Lifecycle does not support phase [" + phaseName + "]")); + } else { + new LifecyclePolicy(lifecycleName, phases); + } + } + + public void testValidateHotPhase() { + LifecycleAction invalidAction = null; + Map<String, LifecycleAction> actions = randomSubsetOf(VALID_HOT_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", "forcemerge", "delete", "shrink")); +
actions.put(invalidAction.getName(), invalidAction); + } + Map<String, Phase> hotPhase = Collections.singletonMap("hot", + new Phase("hot", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, hotPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [hot]")); + } else { + new LifecyclePolicy(lifecycleName, hotPhase); + } + } + + public void testValidateWarmPhase() { + LifecycleAction invalidAction = null; + Map<String, LifecycleAction> actions = randomSubsetOf(VALID_WARM_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete")); + actions.put(invalidAction.getName(), invalidAction); + } + Map<String, Phase> warmPhase = Collections.singletonMap("warm", + new Phase("warm", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, warmPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [warm]")); + } else { + new LifecyclePolicy(lifecycleName, warmPhase); + } + } + + public void testValidateColdPhase() { + LifecycleAction invalidAction = null; + Map<String, LifecycleAction> actions = randomSubsetOf(VALID_COLD_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); + actions.put(invalidAction.getName(), invalidAction); + } + Map<String, Phase> coldPhase = Collections.singletonMap("cold", + new Phase("cold", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, coldPhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [cold]")); + } else { + new LifecyclePolicy(lifecycleName, coldPhase); + } + } + + public void testValidateDeletePhase() { + LifecycleAction invalidAction = null; + Map<String, LifecycleAction> actions = VALID_DELETE_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink")); + actions.put(invalidAction.getName(), invalidAction); + } + Map<String, Phase> deletePhase = Collections.singletonMap("delete", + new Phase("delete", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> new LifecyclePolicy(lifecycleName, deletePhase)); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [delete]")); + } else { + new LifecyclePolicy(lifecycleName, deletePhase); + } + } + + public static LifecyclePolicy createRandomPolicy(String lifecycleName) { + List<String> phaseNames = randomSubsetOf(Arrays.asList("hot", "warm", "cold", "delete")); + Map<String, Phase> phases = new HashMap<>(phaseNames.size()); + Function<String, Set<String>> validActions = (phase) -> { + switch (phase) { + case "hot": + return VALID_HOT_ACTIONS; + case "warm": + return VALID_WARM_ACTIONS; + case "cold": + return VALID_COLD_ACTIONS; + case "delete": + return VALID_DELETE_ACTIONS; + default: + throw new IllegalArgumentException("invalid phase [" + phase + "]"); + }}; +
Function<String, LifecycleAction> randomAction = (action) -> { + switch (action) { + case AllocateAction.NAME: + return AllocateActionTests.randomInstance(); + case DeleteAction.NAME: + return new DeleteAction(); + case ForceMergeAction.NAME: + return ForceMergeActionTests.randomInstance(); + case ReadOnlyAction.NAME: + return new ReadOnlyAction(); + case RolloverAction.NAME: + return RolloverActionTests.randomInstance(); + case ShrinkAction.NAME: + return ShrinkActionTests.randomInstance(); + default: + throw new IllegalArgumentException("invalid action [" + action + "]"); + }}; + for (String phase : phaseNames) { + TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + Map<String, LifecycleAction> actions = new HashMap<>(); + List<String> actionNames = randomSubsetOf(validActions.apply(phase)); + for (String action : actionNames) { + actions.put(action, randomAction.apply(action)); + } + phases.put(phase, new Phase(phase, after, actions)); + } + return new LifecyclePolicy(lifecycleName, phases); + } + + private LifecycleAction getTestAction(String actionName) { + switch (actionName) { + case AllocateAction.NAME: + return AllocateActionTests.randomInstance(); + case DeleteAction.NAME: + return new DeleteAction(); + case ForceMergeAction.NAME: + return ForceMergeActionTests.randomInstance(); + case ReadOnlyAction.NAME: + return new ReadOnlyAction(); + case RolloverAction.NAME: + return RolloverActionTests.randomInstance(); + case ShrinkAction.NAME: + return ShrinkActionTests.randomInstance(); + default: + throw new IllegalArgumentException("unsupported phase action [" + actionName + "]"); + } + } +}
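The validation tests above encode a phase/action matrix: "hot" allows rollover; "warm" allows allocate, forcemerge, readonly, and shrink; "cold" allows allocate; "delete" allows delete. One concretely valid policy under those constraints, sketched with illustrative names and TimeValues only (not from the diff):

----
// Roll over after seven days in "hot", then delete thirty days later.
Map<String, Phase> phases = new HashMap<>();
phases.put("hot", new Phase("hot", TimeValue.ZERO,
        Collections.singletonMap(RolloverAction.NAME,
                new RolloverAction(null, TimeValue.timeValueDays(7), null))));
phases.put("delete", new Phase("delete", TimeValue.timeValueDays(30),
        Collections.singletonMap(DeleteAction.NAME, new DeleteAction())));
LifecyclePolicy policy = new LifecyclePolicy("logs-policy", phases);
----

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java new file mode 100644 index 0000000000000..27651ba4a8c41 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.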
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.util.EnumSet; + +public class OperationModeTests extends ESTestCase { + + public void testIsValidChange() { + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.RUNNING)); + assertTrue(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPING.isValidChange(OperationMode.STOPPING)); + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPED)); + } + + public void testFromName() { + EnumSet.allOf(OperationMode.class).forEach(e -> assertEquals(OperationMode.fromString(e.name()), e)); + } + + public void testFromNameInvalid() { + String invalidName = randomAlphaOfLength(10); + Exception e = expectThrows(IllegalArgumentException.class, () -> OperationMode.fromString(invalidName)); + assertThat(e.getMessage(), CoreMatchers.containsString(invalidName + " is not a valid operation_mode")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java new file mode 100644 index 0000000000000..0db9b56aea93c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PhaseExecutionInfoTests extends AbstractXContentTestCase<PhaseExecutionInfo> { + + static PhaseExecutionInfo randomPhaseExecutionInfo(String phaseName) { + return new PhaseExecutionInfo(randomAlphaOfLength(5), PhaseTests.randomPhase(phaseName), + randomNonNegativeLong(), randomNonNegativeLong()); + } + + String phaseName; + + @Before + public void setupPhaseName() { + phaseName = randomAlphaOfLength(7); + } + + @Override + protected PhaseExecutionInfo createTestInstance() { + return randomPhaseExecutionInfo(phaseName); + } + + @Override + protected PhaseExecutionInfo doParseInstance(XContentParser parser) throws IOException { + return PhaseExecutionInfo.parse(parser, phaseName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java new file mode 100644 index 0000000000000..3b4fc2fec6059 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class PhaseTests extends AbstractXContentTestCase<Phase> { + private String phaseName; + + @Before + public void setup() { + phaseName = randomAlphaOfLength(20); + } + + @Override + protected Phase createTestInstance() { + return randomPhase(phaseName); + } + + static Phase randomPhase(String phaseName) { + TimeValue after = null; + if (randomBoolean()) { + after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + } + Map<String, LifecycleAction> actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + } + return new Phase(phaseName, after, actions); + } + + @Override + protected Phase doParseInstance(XContentParser parser) { + return Phase.parse(parser, phaseName); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testDefaultAfter() { + Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + assertEquals(TimeValue.ZERO, phase.getMinimumAge()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..26cfe1946ac4d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class PutLifecyclePolicyRequestTests extends ESTestCase { + + private PutLifecyclePolicyRequest createTestInstance() { + return new PutLifecyclePolicyRequest(createRandomPolicy(randomAlphaOfLengthBetween(5, 20))); + } + + public void testValidation() { + PutLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + } + + public void testNullPolicy() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(null)); + } + + public void testNullPolicyName() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(createRandomPolicy(randomFrom("", null)))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java new file mode 100644 index 0000000000000..bf57478425cc9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class ReadOnlyActionTests extends AbstractXContentTestCase<ReadOnlyAction> { + + @Override + protected ReadOnlyAction doParseInstance(XContentParser parser) { + return ReadOnlyAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ReadOnlyAction createTestInstance() { + return new ReadOnlyAction(); + } +}
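The request tests in this diff (delete, get, put) all assert against validate().isPresent(), reflecting the high-level client's convention of reporting problems through an optional validation result rather than throwing at send time. A hedged sketch of how calling code might surface such a failure ('policy' here stands for a LifecyclePolicy built elsewhere, e.g. as in LifecyclePolicyTests.createRandomPolicy; the Optional-returning validate() is assumed from the tests above):

----
PutLifecyclePolicyRequest request = new PutLifecyclePolicyRequest(policy);
request.validate().ifPresent(validationException -> {
    // ValidationException is unchecked, so it can be rethrown directly,
    // or its individual messages collected via validationErrors().
    throw validationException;
});
----

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..d5ccabc748df5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.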
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; + +public class RemoveIndexLifecyclePolicyRequestTests extends ESTestCase { + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(null)); + } + + public void testNullIndicesOptions() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(Collections.emptyList(), null)); + } + + public void testValidate() { + RemoveIndexLifecyclePolicyRequest request = new RemoveIndexLifecyclePolicyRequest(Collections.emptyList()); + assertFalse(request.validate().isPresent()); + } + + protected RemoveIndexLifecyclePolicyRequest createInstance() { + if (randomBoolean()) { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false)), + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } else { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false))); + } + } + + private RemoveIndexLifecyclePolicyRequest copyInstance(RemoveIndexLifecyclePolicyRequest req) { + return new RemoveIndexLifecyclePolicyRequest(new ArrayList<>(req.indices()), IndicesOptions.fromOptions( + req.indicesOptions().ignoreUnavailable(), req.indicesOptions().allowNoIndices(), + req.indicesOptions().expandWildcardsOpen(), req.indicesOptions().expandWildcardsClosed(), + req.indicesOptions().allowAliasesToMultipleIndices(), req.indicesOptions().forbidClosedIndices(), + req.indicesOptions().ignoreAliases(), req.indicesOptions().ignoreThrottled())); + } + + private RemoveIndexLifecyclePolicyRequest mutateInstance(RemoveIndexLifecyclePolicyRequest req) { + if (randomBoolean()) { + return new RemoveIndexLifecyclePolicyRequest(req.indices(), + randomValueOtherThan(req.indicesOptions(), () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))); + } else { + return new RemoveIndexLifecyclePolicyRequest( + randomValueOtherThan(req.indices(), () -> Arrays.asList(generateRandomStringArray(20, 20, false))), + req.indicesOptions()); + } + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..1f99a2dfdfac4 --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class RemoveIndexLifecyclePolicyResponseTests extends ESTestCase { + + private void toXContent(RemoveIndexLifecyclePolicyResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(RemoveIndexLifecyclePolicyResponse.HAS_FAILURES_FIELD.getPreferredName(), response.hasFailures()); + builder.field(RemoveIndexLifecyclePolicyResponse.FAILED_INDEXES_FIELD.getPreferredName(), response.getFailedIndexes()); + builder.endObject(); + } + + private RemoveIndexLifecyclePolicyResponse createInstance() { + List<String> failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + return new RemoveIndexLifecyclePolicyResponse(failedIndexes); + } + + private RemoveIndexLifecyclePolicyResponse copyInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(new ArrayList<>(req.getFailedIndexes())); + } + + private RemoveIndexLifecyclePolicyResponse mutateInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(randomValueOtherThan(req.getFailedIndexes(), + () -> Arrays.asList(generateRandomStringArray(20, 20, false)))); + } + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createInstance, + this::toXContent, + RemoveIndexLifecyclePolicyResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + public void testNullFailedIndices() { + IllegalArgumentException exception = + expectThrows(IllegalArgumentException.class, () -> new RemoveIndexLifecyclePolicyResponse(null)); + assertEquals("failed_indexes cannot be null", exception.getMessage()); + } + + public void testHasFailures() { + RemoveIndexLifecyclePolicyResponse response = new RemoveIndexLifecyclePolicyResponse(new ArrayList<>()); + assertFalse(response.hasFailures()); + assertEquals(Collections.emptyList(), response.getFailedIndexes()); + + int size = randomIntBetween(1, 10); + List<String> failedIndexes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + failedIndexes.add(randomAlphaOfLength(20)); + } + response = new RemoveIndexLifecyclePolicyResponse(failedIndexes);
assertTrue(response.hasFailures()); + assertEquals(failedIndexes, response.getFailedIndexes()); + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java new file mode 100644 index 0000000000000..bbbdba37e5640 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class RolloverActionTests extends AbstractXContentTestCase<RolloverAction> { + + @Override + protected RolloverAction doParseInstance(XContentParser parser) { + return RolloverAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected RolloverAction createTestInstance() { + return randomInstance(); + } + + static RolloverAction randomInstance() { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + public void testNoConditions() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new RolloverAction(null, null, null)); + assertEquals("At least one rollover condition must be set.", exception.getMessage()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java new file mode 100644 index 0000000000000..adeec1ff825a9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrinkActionTests extends AbstractXContentTestCase<ShrinkAction> { + + @Override + protected ShrinkAction doParseInstance(XContentParser parser) throws IOException { + return ShrinkAction.parse(parser); + } + + @Override + protected ShrinkAction createTestInstance() { + return randomInstance(); + } + + static ShrinkAction randomInstance() { + return new ShrinkAction(randomIntBetween(1, 100)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testNonPositiveShardNumber() { + Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0))); + assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java new file mode 100644 index 0000000000000..449ef7d1678eb --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StartILMRequestTests extends ESTestCase { + + protected StartILMRequest createTestInstance() { + return new StartILMRequest(); + } + + public void testValidate() { + StartILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java new file mode 100644 index 0000000000000..f1618f3f0f0e3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StopILMRequestTests extends ESTestCase { + + protected StopILMRequest createTestInstance() { + return new StopILMRequest(); + } + + public void testValidate() { + StopILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java deleted file mode 100644 index 8370a6ba9afed..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client.license; - -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.common.ProtocolUtils; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class StartBasicResponseTests extends ESTestCase { - - public void testFromXContent() throws Exception { - StartBasicResponse.Status status = randomFrom(StartBasicResponse.Status.values()); - - boolean acknowledged = status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; - String acknowledgeMessage = null; - Map<String, String[]> ackMessages = Collections.emptyMap(); - if (status != StartBasicResponse.Status.GENERATED_BASIC) { - acknowledgeMessage = randomAlphaOfLength(10); - ackMessages = randomAckMessages(); - } - - final StartBasicResponse startBasicResponse = new StartBasicResponse(status, ackMessages, acknowledgeMessage); - - XContentType xContentType = randomFrom(XContentType.values()); - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); - - toXContent(startBasicResponse, builder); - - final StartBasicResponse response = StartBasicResponse.fromXContent(createParser(builder)); - assertThat(response.isAcknowledged(), equalTo(acknowledged)); - assertThat(response.isBasicStarted(), equalTo(status.isBasicStarted())); - assertThat(response.getAcknowledgeMessage(), equalTo(acknowledgeMessage)); - assertThat(ProtocolUtils.equals(response.getAcknowledgeMessages(), ackMessages), equalTo(true)); - } - - private static void toXContent(StartBasicResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.field("acknowledged", response.isAcknowledged()); - if (response.isBasicStarted()) { - builder.field("basic_was_started", true); - } else { - builder.field("basic_was_started", false); - builder.field("error_message", response.getErrorMessage()); - } - if (response.getAcknowledgeMessages().isEmpty() == false) { - builder.startObject("acknowledge"); - builder.field("message", response.getAcknowledgeMessage()); - for (Map.Entry<String, String[]> entry : response.getAcknowledgeMessages().entrySet()) { - builder.startArray(entry.getKey()); - for (String message : entry.getValue()) { - builder.value(message); - } - builder.endArray(); - } - builder.endObject(); - } - builder.endObject(); - } - - private static Map<String, String[]> randomAckMessages() { - int nFeatures = randomIntBetween(1, 5); - - Map<String, String[]> ackMessages = new HashMap<>(); - - for (int i = 0; i < nFeatures; i++) { - String feature = randomAlphaOfLengthBetween(9, 15); - int nMessages = randomIntBetween(1, 5); - String[] messages = new String[nMessages]; - for (int j = 0; j < nMessages; j++) { - messages[j] = randomAlphaOfLengthBetween(10, 30); - } - ackMessages.put(feature, messages); - } - - return ackMessages; - } - -} diff --git a/x-pack/protocol/build.gradle b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java similarity index 66% rename from x-pack/protocol/build.gradle rename to client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java index 7ca81c05e3947..86250fdaec274 100644 --- a/x-pack/protocol/build.gradle +++
b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java @@ -17,13 +17,14 @@ * under the License. */ -apply plugin: 'elasticsearch.build' +package org.elasticsearch.client.migration; -description = 'Request and Response objects for x-pack that are used by the' + - ' high level rest client and x-pack itself' +import org.elasticsearch.test.ESTestCase; -dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" +public class IndexUpgradeInfoRequestTests extends ESTestCase { - testCompile "org.elasticsearch.test:framework:${version}" + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); + expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java new file mode 100644 index 0000000000000..6b39d81f171ca --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.client.ml.job.config.MlFilterTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + + +public class PutFilterRequestTests extends AbstractXContentTestCase<PutFilterRequest> { + + @Override + protected PutFilterRequest createTestInstance() { + return new PutFilterRequest(MlFilterTests.createRandom()); + } + + @Override + protected PutFilterRequest doParseInstance(XContentParser parser) { + return new PutFilterRequest(MlFilter.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java new file mode 100644 index 0000000000000..29eda47598b2f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.MlFilterTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class PutFilterResponseTests extends AbstractXContentTestCase<PutFilterResponse> { + + @Override + protected PutFilterResponse createTestInstance() { + return new PutFilterResponse(MlFilterTests.createRandom()); + } + + @Override + protected PutFilterResponse doParseInstance(XContentParser parser) throws IOException { + return PutFilterResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateDatafeedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateDatafeedRequestTests.java new file mode 100644 index 0000000000000..9c0d87661a973 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateDatafeedRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.client.ml.datafeed.DatafeedUpdateTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + + +public class UpdateDatafeedRequestTests extends AbstractXContentTestCase<UpdateDatafeedRequest> { + + @Override + protected UpdateDatafeedRequest createTestInstance() { + return new UpdateDatafeedRequest(DatafeedUpdateTests.createRandom()); + } + + @Override + protected UpdateDatafeedRequest doParseInstance(XContentParser parser) { + return new UpdateDatafeedRequest(DatafeedUpdate.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java index 1c3723fd0a631..1f1675a330e1b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java @@ -32,8 +32,7 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> { - @Override - protected DatafeedUpdate createTestInstance() { + public static DatafeedUpdate createRandom() { DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(DatafeedConfigTests.randomValidDatafeedId()); if (randomBoolean()) { builder.setJobId(randomAlphaOfLength(10)); @@ -87,6 +86,11 @@ protected DatafeedUpdate createTestInstance() { return builder.build(); } + @Override + protected DatafeedUpdate createTestInstance() { + return createRandom(); + } + @Override protected DatafeedUpdate doParseInstance(XContentParser parser) { return DatafeedUpdate.PARSER.apply(parser, null).build(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobRequestTests.java new file mode 100644 index 0000000000000..6a44bac0c159e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobRequestTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StartRollupJobRequestTests extends ESTestCase { + + public void testConstructor() { + String jobId = randomAlphaOfLength(5); + assertEquals(jobId, new StartRollupJobRequest(jobId).getJobId()); + } + + public void testEqualsAndHash() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new StartRollupJobRequest(randomAlphaOfLength(5)), + orig -> new StartRollupJobRequest(orig.getJobId()), + orig -> new StartRollupJobRequest(orig.getJobId() + "_suffix")); + } + + public void testRequireJobId() { + final NullPointerException e = expectThrows(NullPointerException.class, ()-> new StartRollupJobRequest(null)); + assertEquals("id parameter must not be null", e.getMessage()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java new file mode 100644 index 0000000000000..724e60d2d4a75 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/StartRollupJobResponseTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.io.IOException; + +public class StartRollupJobResponseTests extends AbstractXContentTestCase<StartRollupJobResponse> { + + private boolean acknowledged; + + @Before + public void setupAcknowledged() { + acknowledged = randomBoolean(); + } + + @Override + protected StartRollupJobResponse createTestInstance() { + return new StartRollupJobResponse(acknowledged); + } + + @Override + protected StartRollupJobResponse doParseInstance(XContentParser parser) throws IOException { + return StartRollupJobResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java new file mode 100644 index 0000000000000..ce813f5ecf59c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class AuthenticateResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + AuthenticateResponse::fromXContent) + .supportsUnknownFields(false) + .test(); + } + + public void testEqualsAndHashCode() { + final AuthenticateResponse response = createTestInstance(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy, + this::mutate); + } + + protected AuthenticateResponse createTestInstance() { + final String username = randomAlphaOfLengthBetween(1, 4); + final List<String> roles = Arrays.asList(generateRandomStringArray(4, 4, false, true)); + final Map<String, Object> metadata; + metadata = new HashMap<>(); + if (randomBoolean()) { + metadata.put("string", null); + } else { + metadata.put("string", randomAlphaOfLengthBetween(0, 4)); + } + if (randomBoolean()) { + metadata.put("string_list", null); + } else { + metadata.put("string_list", Arrays.asList(generateRandomStringArray(4, 4, false, true))); + } + final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4)); + final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4)); + final boolean enabled = randomBoolean(); + return new AuthenticateResponse(new User(username, roles, metadata, fullName, email), enabled); + } + + private void toXContent(AuthenticateResponse response, XContentBuilder builder) throws IOException { + final User user = response.getUser(); + final boolean enabled = response.enabled(); + builder.startObject(); + builder.field(AuthenticateResponse.USERNAME.getPreferredName(), user.username()); + builder.field(AuthenticateResponse.ROLES.getPreferredName(), user.roles()); + builder.field(AuthenticateResponse.METADATA.getPreferredName(), user.metadata()); + if (user.fullName() != null) { + builder.field(AuthenticateResponse.FULL_NAME.getPreferredName(), user.fullName()); + } + if (user.email() != null) { + builder.field(AuthenticateResponse.EMAIL.getPreferredName(), user.email()); + } + builder.field(AuthenticateResponse.ENABLED.getPreferredName(), enabled); + builder.endObject(); + } + + private AuthenticateResponse copy(AuthenticateResponse response) { + final User originalUser = response.getUser(); + final User copyUser = new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), originalUser.fullName(),
originalUser.email()); + return new AuthenticateResponse(copyUser, response.enabled()); + } + + private AuthenticateResponse mutate(AuthenticateResponse response) { + final User originalUser = response.getUser(); + switch (randomIntBetween(1, 6)) { + case 1: + return new AuthenticateResponse(new User(originalUser.username() + "wrong", originalUser.roles(), originalUser.metadata(), + originalUser.fullName(), originalUser.email()), response.enabled()); + case 2: + final Collection<String> wrongRoles = new ArrayList<>(originalUser.roles()); + wrongRoles.add(randomAlphaOfLengthBetween(1, 4)); + return new AuthenticateResponse(new User(originalUser.username(), wrongRoles, originalUser.metadata(), + originalUser.fullName(), originalUser.email()), response.enabled()); + case 3: + final Map<String, Object> wrongMetadata = new HashMap<>(originalUser.metadata()); + wrongMetadata.put("wrong_string", randomAlphaOfLengthBetween(0, 4)); + return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), wrongMetadata, + originalUser.fullName(), originalUser.email()), response.enabled()); + case 4: + return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), + originalUser.fullName() + "wrong", originalUser.email()), response.enabled()); + case 5: + return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), + originalUser.fullName(), originalUser.email() + "wrong"), response.enabled()); + case 6: + return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), + originalUser.fullName(), originalUser.email()), !response.enabled()); + } + throw new IllegalStateException("Bad random number"); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java new file mode 100644 index 0000000000000..53f3e1d0f368c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenRequestTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class CreateTokenRequestTests extends ESTestCase { + + public void testCreateTokenFromPassword() { + final CreateTokenRequest request = CreateTokenRequest.passwordGrant("jsmith", "top secret password".toCharArray()); + assertThat(request.getGrantType(), equalTo("password")); + assertThat(request.getUsername(), equalTo("jsmith")); + assertThat(new String(request.getPassword()), equalTo("top secret password")); + assertThat(request.getScope(), nullValue()); + assertThat(request.getRefreshToken(), nullValue()); + assertThat(Strings.toString(request), equalTo("{" + + "\"grant_type\":\"password\"," + + "\"username\":\"jsmith\"," + + "\"password\":\"top secret password\"" + + "}" + )); + } + + public void testCreateTokenFromRefreshToken() { + final CreateTokenRequest request = CreateTokenRequest.refreshTokenGrant("9a7f41cf-9918-4d1f-bfaa-ad3f8f9f02b9"); + assertThat(request.getGrantType(), equalTo("refresh_token")); + assertThat(request.getRefreshToken(), equalTo("9a7f41cf-9918-4d1f-bfaa-ad3f8f9f02b9")); + assertThat(request.getScope(), nullValue()); + assertThat(request.getUsername(), nullValue()); + assertThat(request.getPassword(), nullValue()); + assertThat(Strings.toString(request), equalTo("{" + + "\"grant_type\":\"refresh_token\"," + + "\"refresh_token\":\"9a7f41cf-9918-4d1f-bfaa-ad3f8f9f02b9\"" + + "}" + )); + } + + public void testCreateTokenFromClientCredentials() { + final CreateTokenRequest request = CreateTokenRequest.clientCredentialsGrant(); + assertThat(request.getGrantType(), equalTo("client_credentials")); + assertThat(request.getScope(), nullValue()); + assertThat(request.getUsername(), nullValue()); + assertThat(request.getPassword(), nullValue()); + assertThat(request.getRefreshToken(), nullValue()); + assertThat(Strings.toString(request), equalTo("{\"grant_type\":\"client_credentials\"}")); + } + + public void testEqualsAndHashCode() { + final String grantType = randomAlphaOfLength(8); + final String scope = randomBoolean() ? null : randomAlphaOfLength(6); + final String username = randomBoolean() ? null : randomAlphaOfLengthBetween(4, 10); + final char[] password = randomBoolean() ? null : randomAlphaOfLengthBetween(8, 12).toCharArray(); + final String refreshToken = randomBoolean() ? 
null : randomAlphaOfLengthBetween(12, 24); + final CreateTokenRequest request = new CreateTokenRequest(grantType, scope, username, password, refreshToken); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(request, + r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), r.getRefreshToken()), + this::mutate); + } + + private CreateTokenRequest mutate(CreateTokenRequest req) { + switch (randomIntBetween(1, 5)) { + case 1: + return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken()); + case 2: + return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken()); + case 3: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken()); + case 4: + final char[] password = {'p'}; + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken()); + case 5: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r"); + } + throw new IllegalStateException("Bad random number"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenResponseTests.java new file mode 100644 index 0000000000000..f99ea668665dd --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/CreateTokenResponseTests.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class CreateTokenResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + final String accessToken = randomAlphaOfLengthBetween(12, 24); + final TimeValue expiresIn = TimeValue.timeValueSeconds(randomIntBetween(30, 10_000)); + final String refreshToken = randomBoolean() ? null : randomAlphaOfLengthBetween(12, 24); + final String scope = randomBoolean() ? 
null : randomAlphaOfLength(4); + final String type = randomAlphaOfLength(6); + + final XContentType xContentType = randomFrom(XContentType.values()); + final XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + builder.startObject() + .field("access_token", accessToken) + .field("type", type) + .field("expires_in", expiresIn.seconds()); + if (refreshToken != null || randomBoolean()) { + builder.field("refresh_token", refreshToken); + } + if (scope != null || randomBoolean()) { + builder.field("scope", scope); + } + builder.endObject(); + BytesReference xContent = BytesReference.bytes(builder); + + final CreateTokenResponse response = CreateTokenResponse.fromXContent(createParser(xContentType.xContent(), xContent)); + assertThat(response.getAccessToken(), equalTo(accessToken)); + assertThat(response.getRefreshToken(), equalTo(refreshToken)); + assertThat(response.getScope(), equalTo(scope)); + assertThat(response.getType(), equalTo(type)); + assertThat(response.getExpiresIn(), equalTo(expiresIn)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java new file mode 100644 index 0000000000000..29bc7812f5b7e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class ExpressionRoleMappingTests extends ESTestCase { + + public void testExpressionRoleMappingParser() throws IOException { + final String json = + "{\n" + + " \"enabled\" : true,\n" + + " \"roles\" : [\n" + + " \"superuser\"\n" + + " ],\n" + + " \"rules\" : {\n" + + " \"field\" : {\n" + + " \"realm.name\" : \"kerb1\"\n" + + " }\n" + + " },\n" + + " \"metadata\" : { }\n" + + " }"; + final ExpressionRoleMapping expressionRoleMapping = ExpressionRoleMapping.PARSER.parse(XContentType.JSON.xContent().createParser( + new NamedXContentRegistry(Collections.emptyList()), new DeprecationHandler() { + @Override + public void usedDeprecatedName(String usedName, String modernName) { + } + + @Override + public void usedDeprecatedField(String usedName, String replacedWith) { + } + }, json), "example-role-mapping"); + final ExpressionRoleMapping expectedRoleMapping = new ExpressionRoleMapping("example-role-mapping", FieldRoleMapperExpression + .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, true); + assertThat(expressionRoleMapping, equalTo(expectedRoleMapping)); + } + + public void testEqualsHashCode() { + final ExpressionRoleMapping expressionRoleMapping = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression + .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, true); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { + return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), + original.isEnabled()); + }); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { + return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), + original.isEnabled()); + }, ExpressionRoleMappingTests::mutateTestItem); + } + + private static ExpressionRoleMapping mutateTestItem(ExpressionRoleMapping original) { + ExpressionRoleMapping mutated = null; + switch (randomIntBetween(0, 4)) { + case 0: + mutated = new ExpressionRoleMapping("namechanged", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections + .singletonList("superuser"), null, true); + break; + case 1: + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("changed", "changed"), Collections + .singletonList("superuser"), null, true); + break; + case 2: + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections + .singletonList("changed"), null, true); + break; + case 3: + Map<String, Object> metadata = new HashMap<>(); + metadata.put("a", "b"); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections + .singletonList("superuser"), metadata, true); + break; + case 4: + mutated = new
ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections + .singletonList("superuser"), null, false); + break; + } + return mutated; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsRequestTests.java new file mode 100644 index 0000000000000..8acd8ba17de07 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsRequestTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; + +public class GetRoleMappingsRequestTests extends ESTestCase { + + public void testGetRoleMappingsRequest() { + int noOfRoleMappingNames = randomIntBetween(0, 2); + final String[] roleMappingNames = randomArray(noOfRoleMappingNames, noOfRoleMappingNames, String[]::new, () -> randomAlphaOfLength( + 5)); + final GetRoleMappingsRequest getRoleMappingsRequest = new GetRoleMappingsRequest(roleMappingNames); + assertThat(getRoleMappingsRequest.getRoleMappingNames().size(), is(noOfRoleMappingNames)); + assertThat(getRoleMappingsRequest.getRoleMappingNames(), containsInAnyOrder(roleMappingNames)); + } + + public void testEqualsHashCode() { + int noOfRoleMappingNames = randomIntBetween(0, 2); + final String[] roleMappingNames = randomArray(noOfRoleMappingNames, String[]::new, () -> randomAlphaOfLength(5)); + final GetRoleMappingsRequest getRoleMappingsRequest = new GetRoleMappingsRequest(roleMappingNames); + assertNotNull(getRoleMappingsRequest); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(getRoleMappingsRequest, (original) -> { + return new GetRoleMappingsRequest(original.getRoleMappingNames().toArray(new String[0])); + }); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(getRoleMappingsRequest, (original) -> { + return new GetRoleMappingsRequest(original.getRoleMappingNames().toArray(new String[0])); + }, GetRoleMappingsRequestTests::mutateTestItem); + } + + private static GetRoleMappingsRequest mutateTestItem(GetRoleMappingsRequest original) { + return new GetRoleMappingsRequest(randomAlphaOfLength(8)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java new file mode 100644 index 0000000000000..b612c9ead28a5 --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class GetRoleMappingsResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + final String json = "{\n" + + " \"kerberosmapping\" : {\n" + + " \"enabled\" : true,\n" + + " \"roles\" : [\n" + + " \"superuser\"\n" + + " ],\n" + + " \"rules\" : {\n" + + " \"field\" : {\n" + + " \"realm.name\" : \"kerb1\"\n" + + " }\n" + + " },\n" + + " \"metadata\" : { }\n" + + " },\n" + + " \"ldapmapping\" : {\n" + + " \"enabled\" : false,\n" + + " \"roles\" : [\n" + + " \"monitoring\"\n" + + " ],\n" + + " \"rules\" : {\n" + + " \"field\" : {\n" + + " \"groups\" : \"cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local\"\n" + + " }\n" + + " },\n" + + " \"metadata\" : { }\n" + + " }\n" + + "}"; + final GetRoleMappingsResponse response = GetRoleMappingsResponse.fromXContent(XContentType.JSON.xContent().createParser( + new NamedXContentRegistry(Collections.emptyList()), new DeprecationHandler() { + @Override + public void usedDeprecatedName(String usedName, String modernName) { + } + + @Override + public void usedDeprecatedField(String usedName, String replacedWith) { + } + }, json)); + final List<ExpressionRoleMapping> expectedRoleMappingsList = new ArrayList<>(); + expectedRoleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", + "kerb1"), Collections.singletonList("superuser"), null, true)); + expectedRoleMappingsList.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + final GetRoleMappingsResponse expectedResponse = new GetRoleMappingsResponse(expectedRoleMappingsList); + assertThat(response, equalTo(expectedResponse)); + } + + public void testEqualsHashCode() { + final List<ExpressionRoleMapping> roleMappingsList = new ArrayList<>(); + roleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", + "kerb1"), Collections.singletonList("superuser"), null,
true)); + final GetRoleMappingsResponse response = new GetRoleMappingsResponse(roleMappingsList); + assertNotNull(response); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, (original) -> { + return new GetRoleMappingsResponse(original.getMappings()); + }); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, (original) -> { + return new GetRoleMappingsResponse(original.getMappings()); + }, GetRoleMappingsResponseTests::mutateTestItem); + } + + private static GetRoleMappingsResponse mutateTestItem(GetRoleMappingsResponse original) { + GetRoleMappingsResponse mutated = null; + switch(randomIntBetween(0, 1)) { + case 0: + final List<ExpressionRoleMapping> roleMappingsList1 = new ArrayList<>(); + roleMappingsList1.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + mutated = new GetRoleMappingsResponse(roleMappingsList1); + break; + case 1: + final List<ExpressionRoleMapping> roleMappingsList2 = new ArrayList<>(); + ExpressionRoleMapping originalRoleMapping = original.getMappings().get(0); + roleMappingsList2.add(new ExpressionRoleMapping(originalRoleMapping.getName(), FieldRoleMapperExpression.ofGroups( + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), + originalRoleMapping.getRoles(), originalRoleMapping.getMetadata(), !originalRoleMapping.isEnabled())); + mutated = new GetRoleMappingsResponse(roleMappingsList2); + break; + } + return mutated; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenRequestTests.java new file mode 100644 index 0000000000000..ed84e3e43aded --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenRequestTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class InvalidateTokenRequestTests extends ESTestCase { + + public void testInvalidateAccessToken() { + String token = "Tf01rrAymdUjxMY4VlG3gV3gsFFUWxVVPrztX+4uhe0="; + final InvalidateTokenRequest request = InvalidateTokenRequest.accessToken(token); + assertThat(request.getAccessToken(), equalTo(token)); + assertThat(request.getRefreshToken(), nullValue()); + assertThat(Strings.toString(request), equalTo("{" + + "\"token\":\"Tf01rrAymdUjxMY4VlG3gV3gsFFUWxVVPrztX+4uhe0=\"" + + "}" + )); + } + + public void testInvalidateRefreshToken() { + String token = "4rE0YPT/oHODS83TbTtYmuh8"; + final InvalidateTokenRequest request = InvalidateTokenRequest.refreshToken(token); + assertThat(request.getAccessToken(), nullValue()); + assertThat(request.getRefreshToken(), equalTo(token)); + assertThat(Strings.toString(request), equalTo("{" + + "\"refresh_token\":\"4rE0YPT/oHODS83TbTtYmuh8\"" + + "}" + )); + } + + public void testEqualsAndHashCode() { + final String token = randomAlphaOfLength(8); + final boolean accessToken = randomBoolean(); + final InvalidateTokenRequest request = accessToken ? InvalidateTokenRequest.accessToken(token) + : InvalidateTokenRequest.refreshToken(token); + final EqualsHashCodeTestUtils.MutateFunction<InvalidateTokenRequest> mutate = r -> { + if (randomBoolean()) { + return accessToken ? InvalidateTokenRequest.refreshToken(token) : InvalidateTokenRequest.accessToken(token); + } else { + return accessToken ? InvalidateTokenRequest.accessToken(randomAlphaOfLength(10)) + : InvalidateTokenRequest.refreshToken(randomAlphaOfLength(10)); + } + }; + EqualsHashCodeTestUtils.checkEqualsAndHashCode(request, + r -> new InvalidateTokenRequest(r.getAccessToken(), r.getRefreshToken()), mutate); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenResponseTests.java new file mode 100644 index 0000000000000..9a0c30d43af66 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/InvalidateTokenResponseTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.io.IOException; + +public class InvalidateTokenResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + final boolean created = randomBoolean(); + + final XContentType xContentType = randomFrom(XContentType.values()); + final XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + builder.startObject() + .field("created", created) + .endObject(); + BytesReference xContent = BytesReference.bytes(builder); + + try (XContentParser parser = createParser(xContentType.xContent(), xContent)) { + final InvalidateTokenResponse response = InvalidateTokenResponse.fromXContent(parser); + assertThat(response.isCreated(), Matchers.equalTo(created)); + } + + } + +} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeleteWatchResponseTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeleteWatchResponseTests.java index 1dbc4cec32128..3017b188292c0 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/DeleteWatchResponseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/PutWatchResponseTests.java similarity index 96% rename from x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/PutWatchResponseTests.java index d0aadef161175..e82ccd11cb2cb 100644 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/PutWatchResponseTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.protocol.xpack.watcher; +package org.elasticsearch.client.watcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/WatchRequestValidationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/WatchRequestValidationTests.java index d75e36f7a3664..1fea3bccb62a7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/WatchRequestValidationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/WatchRequestValidationTests.java @@ -19,19 +19,15 @@ package org.elasticsearch.client.watcher; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.client.ValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; -import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.test.ESTestCase; import java.util.Optional; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; public class WatchRequestValidationTests extends ESTestCase { @@ -61,38 +57,38 @@ public void testAcknowledgeWatchNullActionId() { } public void testDeleteWatchInvalidWatchId() { - ActionRequestValidationException e = new DeleteWatchRequest("id with whitespaces").validate(); - assertThat(e, is(notNullValue())); - assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new DeleteWatchRequest("id with whitespaces")); + assertThat(exception.getMessage(), is("watch id contains whitespace")); } public void testDeleteWatchNullId() { - ActionRequestValidationException e = new DeleteWatchRequest(null).validate(); - assertThat(e, is(notNullValue())); - assertThat(e.validationErrors(), hasItem("watch id is missing")); + final NullPointerException exception = expectThrows(NullPointerException.class, + () -> new DeleteWatchRequest(null)); + assertThat(exception.getMessage(), is("watch id is missing")); } public void testPutWatchInvalidWatchId() { - ActionRequestValidationException e = new PutWatchRequest("id with whitespaces", BytesArray.EMPTY, XContentType.JSON).validate(); - assertThat(e, is(notNullValue())); - assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new PutWatchRequest("id with whitespaces", BytesArray.EMPTY, XContentType.JSON)); + assertThat(exception.getMessage(), is("watch id contains whitespace")); } public void testPutWatchNullId() { - ActionRequestValidationException e = new PutWatchRequest(null, BytesArray.EMPTY, XContentType.JSON).validate(); - assertThat(e, is(notNullValue())); - assertThat(e.validationErrors(), hasItem("watch id is missing")); + final NullPointerException exception = expectThrows(NullPointerException.class, + () -> new PutWatchRequest(null, BytesArray.EMPTY, XContentType.JSON)); + assertThat(exception.getMessage(), is("watch id is missing")); } public void testPutWatchSourceNull() { - ActionRequestValidationException e = new PutWatchRequest("foo", null, XContentType.JSON).validate(); - assertThat(e, is(notNullValue())); - 
assertThat(e.validationErrors(), hasItem("watch source is missing")); + final NullPointerException exception = expectThrows(NullPointerException.class, + () -> new PutWatchRequest("foo", null, XContentType.JSON)); + assertThat(exception.getMessage(), is("watch source is missing")); } public void testPutWatchContentNull() { - ActionRequestValidationException e = new PutWatchRequest("foo", BytesArray.EMPTY, null).validate(); - assertThat(e, is(notNullValue())); - assertThat(e.validationErrors(), hasItem("request body is missing")); + final NullPointerException exception = expectThrows(NullPointerException.class, + () -> new PutWatchRequest("foo", BytesArray.EMPTY, null)); + assertThat(exception.getMessage(), is("request body is missing")); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java new file mode 100644 index 0000000000000..702c4bef64bd2 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.xpack; + +import org.elasticsearch.client.license.LicenseStatus; +import org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo; +import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo; +import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; + +public class XPackInfoResponseTests extends AbstractXContentTestCase<XPackInfoResponse> { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { + return XPackInfoResponse.fromXContent(parser); + } + + protected Predicate<String> getRandomFieldsExcludeFilter() { + return path -> path.equals("features") + || (path.startsWith("features") && path.endsWith("native_code_info")); + } + + protected ToXContent.Params getToXContentParams() { + Map<String, String> params = new HashMap<>(); + if (randomBoolean()) { + params.put("human", randomBoolean() ?
"true" : "false"); + } + if (randomBoolean()) { + params.put("categories", "_none"); + } + return new ToXContent.MapParams(params); + } + + protected XPackInfoResponse createTestInstance() { + return new XPackInfoResponse( + randomBoolean() ? null : randomBuildInfo(), + randomBoolean() ? null : randomLicenseInfo(), + randomBoolean() ? null : randomFeatureSetsInfo()); + } + + private BuildInfo randomBuildInfo() { + return new BuildInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(15)); + } + + private LicenseInfo randomLicenseInfo() { + return new LicenseInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(4), + randomAlphaOfLength(5), + randomFrom(LicenseStatus.values()), + randomLong()); + } + + private FeatureSetsInfo randomFeatureSetsInfo() { + int size = between(0, 10); + Set featureSets = new HashSet<>(size); + while (featureSets.size() < size) { + featureSets.add(randomFeatureSet()); + } + return new FeatureSetsInfo(featureSets); + } + + private FeatureSet randomFeatureSet() { + return new FeatureSet( + randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(20), + randomBoolean(), + randomBoolean(), + randomNativeCodeInfo()); + } + + private Map randomNativeCodeInfo() { + if (randomBoolean()) { + return null; + } + int size = between(0, 10); + Map nativeCodeInfo = new HashMap<>(size); + while (nativeCodeInfo.size() < size) { + nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + return nativeCodeInfo; + } +} diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl index 4ea7e124598ec..cf19a1cd9ddf5 100755 --- a/dev-tools/es_release_notes.pl +++ b/dev-tools/es_release_notes.pl @@ -32,7 +32,7 @@ ">enhancement", ">bug", ">regression", ">upgrade" ); my %Ignore = map { $_ => 1 } - ( ">non-issue", ">refactoring", ">docs", ">test", ">test-failure", ":Core/Build" ); + ( ">non-issue", ">refactoring", ">docs", ">test", ">test-failure", ":Core/Build", "backport" ); my %Group_Labels = ( '>breaking' => 'Breaking changes', diff --git a/distribution/bwc/maintenance-bugfix-snapshot/build.gradle b/distribution/bwc/bugfix/build.gradle similarity index 100% rename from distribution/bwc/maintenance-bugfix-snapshot/build.gradle rename to distribution/bwc/bugfix/build.gradle diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index a44e670542bcd..0b59749f9534f 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -17,236 +17,225 @@ * under the License. */ - - import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionCollection import java.nio.charset.StandardCharsets import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + /** - * This is a dummy project which does a local checkout of the previous - * wire compat version's branch, and builds a snapshot. This allows backcompat - * tests to test against the next unreleased version, closest to this version, - * without relying on snapshots. + * We want to be able to do BWC tests for unreleased versions without relying on and waiting for snapshots. + * For this we need to check out and build the unreleased versions. + * Since These depend on the current version, we can't name the Gradle projects statically, and don't know what the + * unreleased versions are when Gradle projects are set up, so we use "build-unreleased-version-*" as placeholders + * and configure them to build various versions here. 
*/ -subprojects { - - Version bwcVersion = bwcVersions.getSnapshotForProject(project.name) - if (bwcVersion == null) { - // this project wont do anything - return - } - - String bwcBranch - if (project.name == 'next-minor-snapshot') { - // this is always a .x series - bwcBranch = "${bwcVersion.major}.x" - } else { - bwcBranch = "${bwcVersion.major}.${bwcVersion.minor}" - } - - apply plugin: 'distribution' - // Not published so no need to assemble - assemble.enabled = false - assemble.dependsOn.remove('buildBwcVersion') - - File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}") - - final String remote = System.getProperty("tests.bwc.remote", "elastic") - - final boolean gitFetchLatest - final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") - if ("true".equals(gitFetchLatestProperty)) { - gitFetchLatest = true - } else if ("false".equals(gitFetchLatestProperty)) { - gitFetchLatest = false - } else { - throw new GradleException("tests.bwc.git_fetch_latest must be [true] or [false] but was [" + gitFetchLatestProperty + "]") - } - - task createClone(type: LoggedExec) { - onlyIf { checkoutDir.exists() == false } - commandLine = ['git', 'clone', rootDir, checkoutDir] - } - - task findRemote(type: LoggedExec) { - dependsOn createClone - workingDir = checkoutDir - commandLine = ['git', 'remote', '-v'] - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - project.ext.remoteExists = false - output.toString('UTF-8').eachLine { - if (it.contains("${remote}\t")) { - project.ext.remoteExists = true - } - } - } - } - - task addRemote(type: LoggedExec) { - dependsOn findRemote - onlyIf { project.ext.remoteExists == false } - workingDir = checkoutDir - commandLine = ['git', 'remote', 'add', "${remote}", "https://github.com/${remote}/elasticsearch.git"] - } - - task fetchLatest(type: LoggedExec) { - onlyIf { project.gradle.startParameter.isOffline() == false && gitFetchLatest } - dependsOn addRemote - workingDir = checkoutDir - commandLine = ['git', 'fetch', '--all'] - } - - String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" - task checkoutBwcBranch(type: LoggedExec) { - String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) - dependsOn fetchLatest - workingDir = checkoutDir - commandLine = ['git', 'checkout', refspec] - doFirst { - println "Checking out elasticsearch ${refspec} for branch ${bwcBranch}" - } - } - - File buildMetadataFile = project.file("build/${project.name}/build_metadata") - task writeBuildMetadata(type: LoggedExec) { - dependsOn checkoutBwcBranch - workingDir = checkoutDir - commandLine = ['git', 'rev-parse', 'HEAD'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.mkdir(buildMetadataFile.parent) - String commit = output.toString('UTF-8') - buildMetadataFile.setText("${buildMetadataKey}=${commit}", 'UTF-8') - println "Checked out elasticsearch commit ${commit}" +bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> project("${project.path}:${unreleasedVersion.gradleProjectName}") { + Version bwcVersion = unreleasedVersion.version + String bwcBranch = unreleasedVersion.branch + apply plugin: 'distribution' + // Not published so no need 
to assemble + assemble.enabled = false + assemble.dependsOn.remove('buildBwcVersion') + + File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}") + + final String remote = System.getProperty("tests.bwc.remote", "elastic") + + final boolean gitFetchLatest + final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") + if ("true".equals(gitFetchLatestProperty)) { + gitFetchLatest = true + } else if ("false".equals(gitFetchLatestProperty)) { + gitFetchLatest = false + } else { + throw new GradleException("tests.bwc.git_fetch_latest must be [true] or [false] but was [" + gitFetchLatestProperty + "]") } - } - - List artifactFiles = [] - List projectDirs = [] - for (String project : ['zip', 'deb', 'rpm']) { - String baseDir = "distribution" - if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += project == 'zip' ? '/archives' : '/packages' - // add oss variant first - projectDirs.add("${baseDir}/oss-${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}.${project}")) + + task createClone(type: LoggedExec) { + onlyIf { checkoutDir.exists() == false } + commandLine = ['git', 'clone', rootDir, checkoutDir] } - projectDirs.add("${baseDir}/${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}.${project}")) - } - - task buildBwcVersion(type: Exec) { - dependsOn checkoutBwcBranch, writeBuildMetadata - workingDir = checkoutDir - doFirst { - // Execution time so that the checkouts are available - List lines = file("$checkoutDir/.ci/java-versions.properties").readLines() - environment( - 'JAVA_HOME', - getJavaHome(it, Integer.parseInt( - lines - .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) - .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) - .join("!!") - )) - ) - environment( - 'RUNTIME_JAVA_HOME', - getJavaHome(it, Integer.parseInt( - lines - .findAll({ it.startsWith("ES_RUNTIME_JAVA=java") }) - .collect({ it.replace("ES_RUNTIME_JAVA=java", "").trim() }) - .join("!!") - )) - ) + + task findRemote(type: LoggedExec) { + dependsOn createClone + workingDir = checkoutDir + commandLine = ['git', 'remote', '-v'] + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + project.ext.remoteExists = false + output.toString('UTF-8').eachLine { + if (it.contains("${remote}\t")) { + project.ext.remoteExists = true + } + } + } } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable 'cmd' - args '/C', 'call', new File(checkoutDir, 'gradlew').toString() - } else { - executable new File(checkoutDir, 'gradlew').toString() + task addRemote(type: LoggedExec) { + dependsOn findRemote + onlyIf { project.ext.remoteExists == false } + workingDir = checkoutDir + commandLine = ['git', 'remote', 'add', "${remote}", "https://github.com/${remote}/elasticsearch.git"] } - if (gradle.startParameter.isOffline()) { - args "--offline" + + task fetchLatest(type: LoggedExec) { + onlyIf { project.gradle.startParameter.isOffline() == false && gitFetchLatest } + dependsOn addRemote + workingDir = checkoutDir + commandLine = ['git', 'fetch', '--all'] } - for (String dir : projectDirs) { - args ":${dir.replace('/', ':')}:assemble" + + String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" + task checkoutBwcBranch(type: LoggedExec) { + String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) + dependsOn fetchLatest + 
workingDir = checkoutDir + commandLine = ['git', 'checkout', refspec] + doFirst { + println "Checking out elasticsearch ${refspec} for branch ${bwcBranch}" + } } - args "-Dbuild.snapshot=true" - final LogLevel logLevel = gradle.startParameter.logLevel - if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + + File buildMetadataFile = project.file("build/${project.name}/build_metadata") + task writeBuildMetadata(type: LoggedExec) { + dependsOn checkoutBwcBranch + workingDir = checkoutDir + commandLine = ['git', 'rev-parse', 'HEAD'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.mkdir(buildMetadataFile.parent) + String commit = output.toString('UTF-8') + buildMetadataFile.setText("${buildMetadataKey}=${commit}", 'UTF-8') + println "Checked out elasticsearch commit ${commit}" + } } - final String showStacktraceName = gradle.startParameter.showStacktrace.name() - assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) - if (showStacktraceName.equals("ALWAYS")) { - args "--stacktrace" - } else if (showStacktraceName.equals("ALWAYS_FULL")) { - args "--full-stacktrace" + + List artifactFiles = [] + List projectDirs = [] + for (String project : ['zip', 'deb', 'rpm']) { + String baseDir = "distribution" + if (bwcVersion.onOrAfter('6.3.0')) { + baseDir += project == 'zip' ? '/archives' : '/packages' + // add oss variant first + projectDirs.add("${baseDir}/oss-${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT.${project}")) + } + projectDirs.add("${baseDir}/${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${project}")) } - standardOutput = new IndentingOutputStream(System.out) - errorOutput = new IndentingOutputStream(System.err) - doLast { - List missing = artifactFiles.grep { file -> - false == file.exists() - } - if (false == missing.empty) { - throw new InvalidUserDataException( - "Building bwc version didn't generate expected files ${missing}") - } + + task buildBwcVersion(type: Exec) { + dependsOn checkoutBwcBranch, writeBuildMetadata + workingDir = checkoutDir + doFirst { + // Execution time so that the checkouts are available + List lines = file("${checkoutDir}/.ci/java-versions.properties").readLines() + environment( + 'JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) + .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) + .join("!!") + )) + ) + environment( + 'RUNTIME_JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_RUNTIME_JAVA=java") }) + .collect({ it.replace("ES_RUNTIME_JAVA=java", "").trim() }) + .join("!!") + )) + ) + } + + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'cmd' + args '/C', 'call', new File(checkoutDir, 'gradlew').toString() + } else { + executable new File(checkoutDir, 'gradlew').toString() + } + if (gradle.startParameter.isOffline()) { + args "--offline" + } + for (String dir : projectDirs) { + args ":${dir.replace('/', ':')}:assemble" + } + args "-Dbuild.snapshot=true" + final LogLevel logLevel = gradle.startParameter.logLevel + if 
([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if (showStacktraceName.equals("ALWAYS")) { + args "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + args "--full-stacktrace" + } + standardOutput = new IndentingOutputStream(System.out, bwcVersion) + errorOutput = new IndentingOutputStream(System.err, bwcVersion) + doLast { + List missing = artifactFiles.grep { file -> + false == file.exists() + } + if (false == missing.empty) { + throw new InvalidUserDataException( + "Building ${bwcVersion} didn't generate expected files ${missing}") + } + } + } + + if (gradle.startParameter.taskNames == ["assemble"]) { + // Gradle needs the `artifacts` declaration, including `builtBy` below, to make project dependencies on this + // project work, but it will also trigger the build of these for the `assemble` task. + // Since these are only used for testing, we don't want to assemble them if `assemble` is the single command being + // run. + logger.info("Skipping BWC builds since `assemble` is the only task name provided on the command line") + } else { + artifacts { + for (File artifactFile : artifactFiles) { + String artifactName = artifactFile.name.contains('oss') ?
'elasticsearch-oss' : 'elasticsearch' + String suffix = artifactFile.toString()[-3..-1] + 'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion + } + } + } - } - } -} +}} class IndentingOutputStream extends OutputStream { - public static final byte[] INDENT = " [bwc] ".getBytes(StandardCharsets.UTF_8) - private final OutputStream delegate + public final byte[] indent + private final OutputStream delegate - public IndentingOutputStream(OutputStream delegate) { - this.delegate = delegate - } + public IndentingOutputStream(OutputStream delegate, Object version) { + this.delegate = delegate + indent = " [${version}] ".getBytes(StandardCharsets.UTF_8) + } - @Override - public void write(int b) { - write([b] as int[], 0, 1) - } + @Override + public void write(int b) { + write([b] as int[], 0, 1) + } - public void write(int[] bytes, int offset, int length) { - for (int i = 0; i < bytes.length; i++) { - delegate.write(bytes[i]) - if (bytes[i] == '\n') { - delegate.write(INDENT) - } + public void write(int[] bytes, int offset, int length) { + // honor offset and length so that only the requested slice of the buffer is written + for (int i = offset; i < offset + length; i++) { + delegate.write(bytes[i]) + if (bytes[i] == '\n') { + delegate.write(indent) + } + } } - } -} +} \ No newline at end of file diff --git a/distribution/bwc/next-bugfix-snapshot/build.gradle b/distribution/bwc/maintenance/build.gradle similarity index 100% rename from distribution/bwc/next-bugfix-snapshot/build.gradle rename to distribution/bwc/maintenance/build.gradle diff --git a/distribution/bwc/next-minor-snapshot/build.gradle b/distribution/bwc/minor/build.gradle similarity index 100% rename from distribution/bwc/next-minor-snapshot/build.gradle rename to distribution/bwc/minor/build.gradle diff --git a/distribution/bwc/staged-minor-snapshot/build.gradle b/distribution/bwc/staged/build.gradle similarity index 100% rename from distribution/bwc/staged-minor-snapshot/build.gradle rename to distribution/bwc/staged/build.gradle diff --git a/distribution/src/bin/elasticsearch-service-mgr.exe b/distribution/src/bin/elasticsearch-service-mgr.exe index 730240403a7da..e5d4b55d91628 100644 Binary files a/distribution/src/bin/elasticsearch-service-mgr.exe and b/distribution/src/bin/elasticsearch-service-mgr.exe differ diff --git a/distribution/src/bin/elasticsearch-service-x64.exe b/distribution/src/bin/elasticsearch-service-x64.exe index dab7def75583f..acd94f2507615 100644 Binary files a/distribution/src/bin/elasticsearch-service-x64.exe and b/distribution/src/bin/elasticsearch-service-x64.exe differ diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index f0303323d855f..7f7636eb85998 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -54,6 +54,11 @@ ifeval::["{release-state}"!="unreleased"] :rank-eval-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version} endif::[] +:javadoc-client: {rest-high-level-client-javadoc}/org/elasticsearch/client +:javadoc-xpack: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack +:javadoc-license: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/license +:javadoc-watcher: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/watcher + /////// Shared attribute values are pulled from elastic/docs /////// diff --git a/docs/build.gradle b/docs/build.gradle index 99f82d95b585f..377ee7af0c53c 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1082,6 +1082,34 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale ]} '''
+// used by median absolute deviation aggregation +buildRestTests.setups['reviews'] = ''' + - do: + indices.create: + index: reviews + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + product: + type: keyword + rating: + type: long + - do: + bulk: + index: reviews + type: _doc + refresh: true + body: | + {"index": {"_id": "1"}} + {"product": "widget-foo", "rating": 1} + {"index": {"_id": "2"}} + {"product": "widget-foo", "rating": 5} +''' + buildRestTests.setups['remote_cluster'] = buildRestTests.setups['host'] + ''' - do: cluster.put_settings: @@ -1100,3 +1128,33 @@ buildRestTests.setups['remote_cluster_and_leader_index'] = buildRestTests.setups index.number_of_shards: 1 index.soft_deletes.enabled: true ''' + +buildRestTests.setups['seats'] = ''' + - do: + indices.create: + index: seats + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + theatre: + type: keyword + cost: + type: long + - do: + bulk: + index: seats + type: _doc + refresh: true + body: | + {"index":{}} + {"theatre": "Skyline", "cost": 1} + {"index":{}} + {"theatre": "Graye", "cost": 5} + {"index":{}} + {"theatre": "Graye", "cost": 8} + {"index":{}} + {"theatre": "Skyline", "cost": 10}''' \ No newline at end of file diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 03c6ae719e5bd..1c2882d9c07e7 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -165,3 +165,26 @@ client.admin().indices().prepareRefresh().get(); client.prepareSearch().get(); -------------------------------------------------- + +[[java-docs-bulk-global-parameters]] +==== Global Parameters + +Global parameters can be specified on the BulkRequest as well as the BulkProcessor, similar to the REST API. These global + parameters serve as defaults and can be overridden by local parameters specified on each sub request. Some parameters + (index and type) have to be set before any sub request is added, and must be specified when the BulkRequest or + BulkProcessor is created. Others (pipeline and routing) are optional and can be specified at any point before the bulk is sent. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{hlrc-tests}/BulkProcessorIT.java[bulk-processor-mix-parameters] +-------------------------------------------------- +<1> global parameters from the BulkRequest will be applied on a sub request +<2> local pipeline parameter on a sub request will override global parameters from BulkRequest + + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{hlrc-tests}/BulkRequestWithGlobalParametersIT.java[bulk-request-mix-pipeline] +-------------------------------------------------- +<1> local pipeline parameter on a sub request will override global pipeline from the BulkRequest +<2> global parameter from the BulkRequest will be applied on a sub request diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index 5c3a94d57f41c..72b18d49646e2 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -131,6 +131,7 @@ and add it as a dependency.
As an example, we will use the `slf4j-simple` logger -------------------------------------------------- :client-tests: {docdir}/../../server/src/test/java/org/elasticsearch/client/documentation +:hlrc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client :client-reindex-tests: {docdir}/../../modules/reindex/src/test/java/org/elasticsearch/client/documentation diff --git a/docs/java-rest/high-level/document/bulk.asciidoc b/docs/java-rest/high-level/document/bulk.asciidoc index 8f0b890dda1e7..d794779435af3 100644 --- a/docs/java-rest/high-level/document/bulk.asciidoc +++ b/docs/java-rest/high-level/document/bulk.asciidoc @@ -1,38 +1,45 @@ -[[java-rest-high-document-bulk]] +-- +:api: bulk +:request: BulkRequest +:response: BulkResponse +-- + +[id="{upid}-{api}"] === Bulk API -NOTE: The Java High Level REST Client provides the <> to assist with bulk requests +NOTE: The Java High Level REST Client provides the +<<{upid}-{api}-processor>> to assist with bulk requests. -[[java-rest-high-document-bulk-request]] +[id="{upid}-{api}-request"] ==== Bulk Request -A `BulkRequest` can be used to execute multiple index, update and/or delete +A +{request}+ can be used to execute multiple index, update and/or delete operations using a single request. It requires at least one operation to be added to the Bulk request: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request] +include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Creates the `BulkRequest` -<2> Adds a first `IndexRequest` to the Bulk request. See <> -for more information on how to build `IndexRequest`. +<1> Creates the +{request}+ +<2> Adds a first `IndexRequest` to the Bulk request. See <<{upid}-index>> for +more information on how to build `IndexRequest`. <3> Adds a second `IndexRequest` <4> Adds a third `IndexRequest` -WARNING: The Bulk API supports only documents encoded in JSON or SMILE. Providing documents - in any other format will result in an error. +WARNING: The Bulk API supports only documents encoded in JSON or SMILE. +Providing documents in any other format will result in an error. -And different operation types can be added to the same `BulkRequest`: +And different operation types can be added to the same +{request}+: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-with-mixed-operations] +include-tagged::{doc-tests-file}[{api}-request-with-mixed-operations] -------------------------------------------------- -<1> Adds a `DeleteRequest` to the `BulkRequest`. See <> +<1> Adds a `DeleteRequest` to the `BulkRequest`. See <<{upid}-delete>> for more information on how to build `DeleteRequest`. -<2> Adds an `UpdateRequest` to the `BulkRequest`. See <> +<2> Adds an `UpdateRequest` to the `BulkRequest`. See <<{upid}-update>> for more information on how to build `UpdateRequest`. 
<3> Adds an `IndexRequest` using the SMILE format @@ -41,102 +48,85 @@ The following arguments can optionally be provided: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-timeout] +include-tagged::{doc-tests-file}[{api}-request-timeout] -------------------------------------------------- <1> Timeout to wait for the bulk request to be performed as a `TimeValue` <2> Timeout to wait for the bulk request to be performed as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-refresh] +include-tagged::{doc-tests-file}[{api}-request-refresh] -------------------------------------------------- <1> Refresh policy as a `WriteRequest.RefreshPolicy` instance <2> Refresh policy as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-active-shards] +include-tagged::{doc-tests-file}[{api}-request-active-shards] -------------------------------------------------- <1> Sets the number of shard copies that must be active before proceeding with the index/update/delete operations. -<2> Number of shard copies provided as a `ActiveShardCount`: can be `ActiveShardCount.ALL`, -`ActiveShardCount.ONE` or `ActiveShardCount.DEFAULT` (default) - - -[[java-rest-high-document-bulk-sync]] -==== Synchronous Execution +<2> Number of shard copies provided as an `ActiveShardCount`: can be +`ActiveShardCount.ALL`, `ActiveShardCount.ONE` or +`ActiveShardCount.DEFAULT` (default) ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute] +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-pipeline] -------------------------------------------------- - -[[java-rest-high-document-bulk-async]] -==== Asynchronous Execution - -The asynchronous execution of a bulk request requires both the `BulkRequest` -instance and an `ActionListener` instance to be passed to the asynchronous -method: +<1> Global pipelineId used on all sub requests, unless overridden on a sub request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute-async] +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-routing] -------------------------------------------------- -<1> The `BulkRequest` to execute and the `ActionListener` to use when -the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. - -A typical listener for `BulkResponse` looks like: +<1> Global routingId used on all sub requests, unless overridden on a sub request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-execute-listener] +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-index-type] -------------------------------------------------- <1> Called when the execution is successfully completed.
The response is -provided as an argument and contains a list of individual results for each -operation that was executed. Note that one or more operations might have -failed while the others have been successfully executed. -<2> Called when the whole `BulkRequest` fails. In this case the raised -exception is provided as an argument and no operation has been executed. +<1> A bulk request with global index and type used on all sub requests, unless overridden on a sub request. +Both parameters are @Nullable and can only be set during BulkRequest creation. -[[java-rest-high-document-bulk-response]] +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] ==== Bulk Response -The returned `BulkResponse` contains information about the executed operations and +The returned +{response}+ contains information about the executed operations and allows to iterate over each result as follows: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-response] +include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Iterate over the results of all operations -<2> Retrieve the response of the operation (successful or not), can be `IndexResponse`, -`UpdateResponse` or `DeleteResponse` which can all be seen as `DocWriteResponse` instances +<2> Retrieve the response of the operation (successful or not), can be +`IndexResponse`, `UpdateResponse` or `DeleteResponse` which can all be seen as +`DocWriteResponse` instances <3> Handle the response of an index operation <4> Handle the response of a update operation <5> Handle the response of a delete operation -The Bulk response provides a method to quickly check if one or more operation has failed: +The Bulk response provides a method to quickly check if one or more operations +have failed: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-has-failures] +include-tagged::{doc-tests-file}[{api}-has-failures] -------------------------------------------------- <1> This method returns `true` if at least one operation failed -In such situation it is necessary to iterate over all operation results in order to check - if the operation failed, and if so, retrieve the corresponding failure: +In such a situation it is necessary to iterate over all operation results in order +to check if the operation failed, and if so, retrieve the corresponding failure: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-errors] +include-tagged::{doc-tests-file}[{api}-errors] -------------------------------------------------- <1> Indicate if a given operation failed <2> Retrieve the failure of the failed operation -[[java-rest-high-document-bulk-processor]] +[id="{upid}-{api}-processor"] ==== Bulk Processor The `BulkProcessor` simplifies the usage of the Bulk API by providing @@ -146,29 +136,30 @@ transparently executed as they are added to the processor.
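For orientation, the following condensed sketch shows how these pieces typically fit together. It is illustrative only: the `client` variable, the index name, and the tuning values are assumptions rather than part of this change, and the compile-tested snippets remain the include-tagged listings referenced in this section.

["source","java"]
--------------------------------------------------
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

public class BulkProcessorSketch {

    void bulkIndex(RestHighLevelClient client) throws InterruptedException {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                // inspect request.numberOfActions() before the bulk is sent
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                // check response.hasFailures() after a successful round trip
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                // the whole BulkRequest failed, for example because of a connection error
            }
        };

        BulkProcessor bulkProcessor = BulkProcessor.builder(
                (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                listener)
            .setBulkActions(500) // assumed tuning value: flush every 500 sub requests
            .build();

        // requests are buffered and flushed in bulk behind the scenes
        bulkProcessor.add(new IndexRequest("posts", "_doc", "1").source(XContentType.JSON, "field", "value"));

        // flush outstanding requests and wait for their responses before shutting down
        bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
    }
}
--------------------------------------------------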
In order to execute the requests, the `BulkProcessor` requires the following components: -`RestHighLevelClient`:: This client is used to execute the `BulkRequest` +`RestHighLevelClient`:: This client is used to execute the +{request}+ and to retrieve the `BulkResponse` `BulkProcessor.Listener`:: This listener is called before and after -every `BulkRequest` execution or when a `BulkRequest` failed +every +{request}+ execution or when a +{request}+ failed -Then the `BulkProcessor.builder` method can be used to build a new `BulkProcessor`: +Then the `BulkProcessor.builder` method can be used to build a new +`BulkProcessor`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-init] +include-tagged::{doc-tests-file}[{api}-processor-init] -------------------------------------------------- <1> Create the `BulkProcessor.Listener` -<2> This method is called before each execution of a `BulkRequest` -<3> This method is called after each execution of a `BulkRequest` -<4> This method is called when a `BulkRequest` failed +<2> This method is called before each execution of a +{request}+ +<3> This method is called after each execution of a +{request}+ +<4> This method is called when a +{request}+ failed <5> Create the `BulkProcessor` by calling the `build()` method from the `BulkProcessor.Builder`. The `RestHighLevelClient.bulkAsync()` -method will be used to execute the `BulkRequest` under the hood. +method will be used to execute the +{request}+ under the hood. -The `BulkProcessor.Builder` provides methods to configure how the `BulkProcessor` -should handle requests execution: +The `BulkProcessor.Builder` provides methods to configure how the +`BulkProcessor` should handle requests execution: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-options] +include-tagged::{doc-tests-file}[{api}-processor-options] -------------------------------------------------- <1> Set when to flush a new bulk request based on the number of actions currently added (defaults to 1000, use -1 to disable it) @@ -186,32 +177,32 @@ for more options. Once the `BulkProcessor` is created requests can be added to it: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-add] +include-tagged::{doc-tests-file}[{api}-processor-add] -------------------------------------------------- The requests will be executed by the `BulkProcessor`, which takes care of calling the `BulkProcessor.Listener` for every bulk request. 
-The listener provides methods to access to the `BulkRequest` and the `BulkResponse`: +The listener provides methods to access the +{request}+ and the +{response}+: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-listener] +include-tagged::{doc-tests-file}[{api}-processor-listener] -------------------------------------------------- -<1> Called before each execution of a `BulkRequest`, this method allows -to know the number of operations that are going to be executed within the `BulkRequest` -<2> Called after each execution of a `BulkRequest`, this method allows -to know if the `BulkResponse` contains errors -<3> Called if the `BulkRequest` failed, this method allows to know +<1> Called before each execution of a +{request}+, this method allows to know +the number of operations that are going to be executed within the +{request}+ +<2> Called after each execution of a +{request}+, this method allows to know if +the +{response}+ contains errors +<3> Called if the +{request}+ failed, this method allows to know the failure Once all requests have been added to the `BulkProcessor`, its instance needs to be closed using one of the two available closing methods. -The `awaitClose()` method can be used to wait until all requests have been processed - or the specified waiting time elapses: +The `awaitClose()` method can be used to wait until all requests have been +processed or the specified waiting time elapses: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-await] +include-tagged::{doc-tests-file}[{api}-processor-await] -------------------------------------------------- <1> The method returns `true` if all bulk requests completed and `false` if the waiting time elapsed before all the bulk requests completed The `close()` method can be used to immediately close the `BulkProcessor`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-processor-close] +include-tagged::{doc-tests-file}[{api}-processor-close] -------------------------------------------------- -Both methods flush the requests added to the processor before closing the processor -and also forbid any new request to be added to it. - +Both methods flush the requests added to the processor before closing the +processor and also forbid any new request to be added to it. diff --git a/docs/java-rest/high-level/document/delete-by-query.asciidoc b/docs/java-rest/high-level/document/delete-by-query.asciidoc index 3a4c8a15deae6..37be8e2f3f4c8 100644 --- a/docs/java-rest/high-level/document/delete-by-query.asciidoc +++ b/docs/java-rest/high-level/document/delete-by-query.asciidoc @@ -1,26 +1,33 @@ -[[java-rest-high-document-delete-by-query]] +-- +:api: delete-by-query +:request: DeleteByQueryRequest +:response: DeleteByQueryResponse +-- + +[id="{upid}-{api}"] === Delete By Query API -[[java-rest-high-document-delete-by-query-request]] +[id="{upid}-{api}-request"] ==== Delete By Query Request -A `DeleteByQueryRequest` can be used to delete documents from an index. It requires an existing index (or a set of indices) -on which deletion is to be performed. +A +{request}+ can be used to delete documents from an index.
It requires an +existing index (or a set of indices) on which deletion is to be performed. -The simplest form of a `DeleteByQueryRequest` looks like: +The simplest form of a +{request}+ looks like this and deletes all documents +in an index: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request] +include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Creates the `DeleteByQueryRequest` on a set of indices. +<1> Creates the +{request}+ on a set of indices. -By default version conflicts abort the `DeleteByQueryRequest` process but you can just count them by settings it to -`proceed` in the request body +By default version conflicts abort the +{request}+ process but you can just +count them with this: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-conflicts] +include-tagged::{doc-tests-file}[{api}-request-conflicts] -------------------------------------------------- <1> Set `proceed` on version conflict @@ -28,7 +35,7 @@ You can limit the documents by adding a query. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-query] +include-tagged::{doc-tests-file}[{api}-request-query] -------------------------------------------------- <1> Only delete documents which have field `user` set to `kimchy` @@ -36,32 +43,33 @@ It’s also possible to limit the number of processed documents by setting size. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-size] +include-tagged::{doc-tests-file}[{api}-request-size] -------------------------------------------------- <1> Only delete 10 documents -By default `DeleteByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`. +By default +{request}+ uses batches of 1000. You can change the batch size +with `setBatchSize`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-scrollSize] +include-tagged::{doc-tests-file}[{api}-request-scrollSize] -------------------------------------------------- <1> Use batches of 100 documents -`DeleteByQueryRequest` also helps in automatically parallelizing using `sliced-scroll` to -slice on `_uid`. Use `setSlices` to specify the number of slices to use. ++{request}+ can also be parallelized using `sliced-scroll` with `setSlices`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-slices] +include-tagged::{doc-tests-file}[{api}-request-slices] -------------------------------------------------- <1> set number of slices to use -`DeleteByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive. ++{request}+ uses the `scroll` parameter to control how long it keeps the +"search context" alive.
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-scroll] +include-tagged::{doc-tests-file}[{api}-request-scroll] -------------------------------------------------- <1> set scroll time @@ -70,7 +78,7 @@ that routing value. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-routing] +include-tagged::{doc-tests-file}[{api}-request-routing] -------------------------------------------------- <1> set routing @@ -80,72 +88,33 @@ In addition to the options above the following arguments can optionally be also ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-timeout] +include-tagged::{doc-tests-file}[{api}-request-timeout] -------------------------------------------------- <1> Timeout to wait for the delete by query request to be performed as a `TimeValue` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-refresh] +include-tagged::{doc-tests-file}[{api}-request-refresh] -------------------------------------------------- <1> Refresh index after calling delete by query ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-request-indicesOptions] +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] -------------------------------------------------- <1> Set indices options +include::../execution.asciidoc[] -[[java-rest-high-document-delete-by-query-sync]] -==== Synchronous Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute] --------------------------------------------------- - -[[java-rest-high-document-delete-by-query-async]] -==== Asynchronous Execution - -The asynchronous execution of an delete by query request requires both the `DeleteByQueryRequest` -instance and an `ActionListener` instance to be passed to the asynchronous -method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute-async] --------------------------------------------------- -<1> The `DeleteByQueryRequest` to execute and the `ActionListener` to use when -the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. - -A typical listener for `BulkByScrollResponse` looks like: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-execute-listener] --------------------------------------------------- -<1> Called when the execution is successfully completed. The response is -provided as an argument and contains a list of individual results for each -operation that was executed. 
Note that one or more operations might have -failed while the others have been successfully executed. -<2> Called when the whole `DeleteByQueryRequest` fails. In this case the raised -exception is provided as an argument and no operation has been executed. - -[[java-rest-high-document-delete-by-query-execute-listener-response]] +[id="{upid}-{api}-response"] ==== Delete By Query Response -The returned `BulkByScrollResponse` contains information about the executed operations and - allows to iterate over each result as follows: +The returned +{response}+ contains information about the executed operations and +allows to iterate over each result as follows: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-by-query-response] +include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Get total time taken <2> Check if the request timed out diff --git a/docs/java-rest/high-level/document/multi-get.asciidoc b/docs/java-rest/high-level/document/multi-get.asciidoc index 723cd0fba8645..30b7d37313e1c 100644 --- a/docs/java-rest/high-level/document/multi-get.asciidoc +++ b/docs/java-rest/high-level/document/multi-get.asciidoc @@ -1,18 +1,24 @@ -[[java-rest-high-document-multi-get]] +-- +:api: multi-get +:request: MultiGetRequest +:response: MultiGetResponse +-- + +[id="{upid}-{api}"] === Multi-Get API The `multiGet` API executes multiple <> requests in a single http request in parallel. -[[java-rest-high-document-mulit-get-request]] +[id="{upid}-{api}-request"] ==== Multi-Get Request -A `MultiGetRequest` is built empty and you add `MultiGetRequest.Item`s to -configure what to fetch: +A +{request}+ is built empty and you add `MultiGetRequest.Item`s to configure +what to fetch: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request] +include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Index <2> Type @@ -27,25 +33,25 @@ You can set most of these on the `Item`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-no-source] +include-tagged::{doc-tests-file}[{api}-request-no-source] -------------------------------------------------- <1> Disable source retrieval, enabled by default ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-source-include] +include-tagged::{doc-tests-file}[{api}-request-source-include] -------------------------------------------------- <1> Configure source inclusion for specific fields ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-source-exclude] +include-tagged::{doc-tests-file}[{api}-request-source-exclude] -------------------------------------------------- <1> Configure source exclusion for specific fields ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-stored] +include-tagged::{doc-tests-file}[{api}-request-stored] 
-------------------------------------------------- <1> Configure retrieval for specific stored fields (requires fields to be stored separately in the mappings) @@ -54,7 +60,7 @@ separately in the mappings) ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-item-extras] +include-tagged::{doc-tests-file}[{api}-request-item-extras] -------------------------------------------------- <1> Routing value <2> Version @@ -68,56 +74,18 @@ not on any items: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-top-level-extras] +include-tagged::{doc-tests-file}[{api}-request-top-level-extras] -------------------------------------------------- <1> Preference value <2> Set realtime flag to `false` (`true` by default) <3> Perform a refresh before retrieving the document (`false` by default) -[[java-rest-high-document-multi-get-sync]] -==== Synchronous Execution - -After building the `MultiGetRequest` you can execute it synchronously with -`multiGet`: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-execute] --------------------------------------------------- - -[[java-rest-high-document-multi-get-async]] -==== Asynchronous Execution - -The asynchronous execution of a multi get request requires both the -`MultiGetRequest` instance and an `ActionListener` instance to be passed to -the asynchronous method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-execute-async] --------------------------------------------------- -<1> The `MultiGetRequest` to execute and the `ActionListener` to use when -the execution completes. - -The asynchronous method does not block and returns immediately. Once the -request completed the `ActionListener` is called back using the `onResponse` -method if the execution successfully completed or using the `onFailure` method -if it failed. - -A typical listener for `MultiGetResponse` looks like: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-execute-listener] --------------------------------------------------- -<1> Called when the execution is successfully completed. The response is -provided as an argument. -<2> Called in case of failure. The raised exception is provided as an argument. +include::../execution.asciidoc[] -[[java-rest-high-document-multi-get-response]] +[id="{upid}-{api}-response"] ==== Multi Get Response -The returned `MultiGetResponse` contains a list of `MultiGetItemResponse`s in +The returned +{response}+ contains a list of `MultiGetItemResponse`s in `getResponses` in the same order that they were requested. `MultiGetItemResponse` contains *either* a <> if the get succeeded @@ -126,7 +94,7 @@ normal `GetResponse`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-response] +include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> `getFailure` returns null because there isn't a failure. 
<2> `getResponse` returns the `GetResponse`.
@@ -143,7 +111,7 @@ When one of the subrequests as performed against an index that does not exist
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-indexnotfound]
+include-tagged::{doc-tests-file}[{api}-indexnotfound]
--------------------------------------------------
<1> `getResponse` is null.
<2> `getFailure` isn't and contains an `Exception`.
@@ -157,7 +125,7 @@ document has a different version number, a version conflict is raised:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-conflict]
+include-tagged::{doc-tests-file}[{api}-conflict]
--------------------------------------------------
<1> `getResponse` is null.
<2> `getFailure` isn't and contains an `Exception`.
diff --git a/docs/java-rest/high-level/document/reindex.asciidoc b/docs/java-rest/high-level/document/reindex.asciidoc
index b6d98b42dc509..2482467410c96 100644
--- a/docs/java-rest/high-level/document/reindex.asciidoc
+++ b/docs/java-rest/high-level/document/reindex.asciidoc
@@ -1,22 +1,29 @@
-[[java-rest-high-document-reindex]]
+--
+:api: reindex
+:request: ReindexRequest
+:response: BulkByScrollResponse
+--
+
+[id="{upid}-{api}"]
=== Reindex API
-[[java-rest-high-document-reindex-request]]
+[id="{upid}-{api}-request"]
==== Reindex Request
-A `ReindexRequest` can be used to copy documents from one or more indexes into a destination index.
+A +{request}+ can be used to copy documents from one or more indexes into a
+destination index.
It requires an existing source index and a target index which may or may not exist pre-request. Reindex does not attempt
to set up the destination index. It does not copy the settings of the source index. You should set up the destination
index prior to running a _reindex action, including setting up mappings, shard counts, replicas, etc.
-The simplest form of a `ReindexRequest` looks like follows:
+The simplest form of a +{request}+ looks like this:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request]
+include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
-<1> Creates the `ReindexRequest`
+<1> Creates the +{request}+
<2> Adds a list of sources to copy from
<3> Adds the destination index
@@ -28,7 +35,7 @@ source index.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-versionType]
+include-tagged::{doc-tests-file}[{api}-request-versionType]
--------------------------------------------------
<1> Set the versionType to `EXTERNAL`
@@ -37,16 +44,16 @@ documents will cause a version conflict. The default `opType` is `index`.
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-opType] +include-tagged::{doc-tests-file}[{api}-request-opType] -------------------------------------------------- <1> Set the opType to `create` -By default version conflicts abort the `_reindex` process but you can just count them by settings it to `proceed` -in the request body +By default version conflicts abort the `_reindex` process but you can just count +them instead with: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-conflicts] +include-tagged::{doc-tests-file}[{api}-request-conflicts] -------------------------------------------------- <1> Set `proceed` on version conflict @@ -54,7 +61,7 @@ You can limit the documents by adding a type to the source or by adding a query. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-typeOrQuery] +include-tagged::{doc-tests-file}[{api}-request-typeOrQuery] -------------------------------------------------- <1> Only copy `doc` type <2> Only copy documents which have field `user` set to `kimchy` @@ -63,7 +70,7 @@ It’s also possible to limit the number of processed documents by setting size. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-size] +include-tagged::{doc-tests-file}[{api}-request-size] -------------------------------------------------- <1> Only copy 10 documents @@ -71,7 +78,7 @@ By default `_reindex` uses batches of 1000. You can change the batch size with ` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sourceSize] +include-tagged::{doc-tests-file}[{api}-request-sourceSize] -------------------------------------------------- <1> Use batches of 100 documents @@ -79,7 +86,7 @@ Reindex can also use the ingest feature by specifying a `pipeline`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-pipeline] +include-tagged::{doc-tests-file}[{api}-request-pipeline] -------------------------------------------------- <1> set pipeline to `my_pipeline` @@ -88,21 +95,21 @@ selective query to size and sort. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-sort] +include-tagged::{doc-tests-file}[{api}-request-sort] -------------------------------------------------- <1> add descending sort to`field1` <2> add ascending sort to `field2` -`ReindexRequest` also supports a `script` that modifies the document. It allows you to also change the document's -metadata. The following example illustrates that. ++{request} also supports a `script` that modifies the document. It allows you to +also change the document's metadata. The following example illustrates that. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-script] +include-tagged::{doc-tests-file}[{api}-request-script] -------------------------------------------------- <1> `setScript` to increment the `likes` field on all documents with user `kimchy`. -`ReindexRequest` supports reindexing from a remote Elasticsearch cluster. When using a remote cluster the query should be ++{request}+ supports reindexing from a remote Elasticsearch cluster. When using a remote cluster the query should be specified inside the `RemoteInfo` object and not using `setSourceQuery`. If both the remote info and the source query are set it results in a validation error during the request. The reason for this is that the remote Elasticsearch may not understand queries built by the modern query builders. The remote cluster support works all the way back to Elasticsearch @@ -111,23 +118,24 @@ in JSON. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-remote] +include-tagged::{doc-tests-file}[{api}-request-remote] -------------------------------------------------- <1> set remote elastic cluster -`ReindexRequest` also helps in automatically parallelizing using `sliced-scroll` to ++{request}+ also helps in automatically parallelizing using `sliced-scroll` to slice on `_uid`. Use `setSlices` to specify the number of slices to use. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-slices] +include-tagged::{doc-tests-file}[{api}-request-slices] -------------------------------------------------- <1> set number of slices to use -`ReindexRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive. ++{request}+ uses the `scroll` parameter to control how long it keeps the +"search context" alive. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-scroll] +include-tagged::{doc-tests-file}[{api}-request-scroll] -------------------------------------------------- <1> set scroll time @@ -137,66 +145,27 @@ In addition to the options above the following arguments can optionally be also ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-timeout] +include-tagged::{doc-tests-file}[{api}-request-timeout] -------------------------------------------------- <1> Timeout to wait for the reindex request to be performed as a `TimeValue` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-request-refresh] +include-tagged::{doc-tests-file}[{api}-request-refresh] -------------------------------------------------- <1> Refresh index after calling reindex +include::../execution.asciidoc[] -[[java-rest-high-document-reindex-sync]] -==== Synchronous Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute] --------------------------------------------------- - -[[java-rest-high-document-reindex-async]] -==== Asynchronous Execution - -The asynchronous execution of a reindex request requires both the `ReindexRequest` -instance and an `ActionListener` instance to be passed to the asynchronous -method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-async] --------------------------------------------------- -<1> The `ReindexRequest` to execute and the `ActionListener` to use when -the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. - -A typical listener for `BulkByScrollResponse` looks like: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-execute-listener] --------------------------------------------------- -<1> Called when the execution is successfully completed. The response is -provided as an argument and contains a list of individual results for each -operation that was executed. Note that one or more operations might have -failed while the others have been successfully executed. -<2> Called when the whole `ReindexRequest` fails. In this case the raised -exception is provided as an argument and no operation has been executed. 
- -[[java-rest-high-document-reindex-response]] +[id="{upid}-{api}-response"] ==== Reindex Response -The returned `BulkByScrollResponse` contains information about the executed operations and - allows to iterate over each result as follows: +The returned +{response}+ contains information about the executed operations and +allows to iterate over each result as follows: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[reindex-response] +include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Get total time taken <2> Check if the request timed out diff --git a/docs/java-rest/high-level/document/rethrottle.asciidoc b/docs/java-rest/high-level/document/rethrottle.asciidoc index 9f6fd69dbcd49..cb606521a1d52 100644 --- a/docs/java-rest/high-level/document/rethrottle.asciidoc +++ b/docs/java-rest/high-level/document/rethrottle.asciidoc @@ -1,10 +1,16 @@ -[[java-rest-high-document-rethrottle]] +-- +:api: rethrottle +:request: RethrottleRequest +:response: ListTasksResponse +-- + +[id="{upid}-{api}"] === Rethrottle API -[[java-rest-high-document-rethrottle-request]] +[id="{upid}-{api}-request"] ==== Rethrottle Request -A `RethrottleRequest` can be used to change the current throttling on a running +A +{request}+ can be used to change the current throttling on a running reindex, update-by-query or delete-by-query task or to disable throttling of the task entirely. It requires the task Id of the task to change. @@ -13,16 +19,16 @@ task using the following: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-disable-request] +include-tagged::{doc-tests-file}[{api}-disable-request] -------------------------------------------------- -<1> Create a `RethrottleRequest` that disables throttling for a specific task id +<1> Create a +{request}+ that disables throttling for a specific task id By providing a `requestsPerSecond` argument, the request will change the existing task throttling to the specified value: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request] +include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Request to change the throttling of a task to 100 requests per second @@ -32,22 +38,22 @@ should be rethrottled: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-execution] +include-tagged::{doc-tests-file}[{api}-request-execution] -------------------------------------------------- <1> Execute reindex rethrottling request <2> The same for update-by-query <3> The same for delete-by-query -[[java-rest-high-document-rethrottle-async]] +[id="{upid}-{api}-async"] ==== Asynchronous Execution -The asynchronous execution of a rethrottle request requires both the `RethrottleRequest` +The asynchronous execution of a rethrottle request requires both the +{request}+ instance and an `ActionListener` instance to be passed to the asynchronous method: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-execute-async] 
+include-tagged::{doc-tests-file}[{api}-execute-async]
--------------------------------------------------
<1> Execute reindex rethrottling asynchronously
<2> The same for update-by-query
<3> The same for delete-by-query
@@ -60,14 +66,14 @@ it failed.
A typical listener looks like this:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-async-listener]
+include-tagged::{doc-tests-file}[{api}-request-async-listener]
--------------------------------------------------
<1> Code executed when the request is successfully completed
<2> Code executed when the request fails with an exception
-[[java-rest-high-document-retrottle-response]]
+[id="{upid}-{api}-response"]
==== Rethrottle Response
-Rethrottling returns the task that has been rethrottled in the form of a
-`ListTasksResponse`. The structure of this response object is described in detail
+Rethrottling returns the task that has been rethrottled in the form of a
++{response}+. The structure of this response object is described in detail
in <>.
diff --git a/docs/java-rest/high-level/document/term-vectors.asciidoc b/docs/java-rest/high-level/document/term-vectors.asciidoc
index e739e37732fd4..e8d4a25a2cac0 100644
--- a/docs/java-rest/high-level/document/term-vectors.asciidoc
+++ b/docs/java-rest/high-level/document/term-vectors.asciidoc
@@ -62,9 +62,9 @@ include::../execution.asciidoc[]
[id="{upid}-{api}-response"]
-==== TermVectorsResponse
+==== Term Vectors Response
-The `TermVectorsResponse` contains the following information:
++{response}+ contains the following information:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
@@ -77,7 +77,7 @@ include-tagged::{doc-tests-file}[{api}-response]
===== Inspecting Term Vectors
-If `TermVectorsResponse` contains non-null list of term vectors,
+If +{response}+ contains a non-null list of term vectors,
more information about each term vector can be obtained using the following:
["source","java",subs="attributes,callouts,macros"]
diff --git a/docs/java-rest/high-level/document/update-by-query.asciidoc b/docs/java-rest/high-level/document/update-by-query.asciidoc
index 5c7e4f5d3b072..fdf50148df4c8 100644
--- a/docs/java-rest/high-level/document/update-by-query.asciidoc
+++ b/docs/java-rest/high-level/document/update-by-query.asciidoc
@@ -1,27 +1,34 @@
-[[java-rest-high-document-update-by-query]]
+--
+:api: update-by-query
+:request: UpdateByQueryRequest
+:response: BulkByScrollResponse
+--
+
+[id="{upid}-{api}"]
=== Update By Query API
-[[java-rest-high-document-update-by-query-request]]
+[id="{upid}-{api}-request"]
==== Update By Query Request
-A `UpdateByQueryRequest` can be used to update documents in an index.
+A +{request}+ can be used to update documents in an index.
-It requires an existing index (or a set of indices) on which the update is to be performed.
+It requires an existing index (or a set of indices) on which the update is to
+be performed.
-The simplest form of a `UpdateByQueryRequest` looks like follows:
+The simplest form of a +{request}+ looks like this:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request]
+include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
-<1> Creates the `UpdateByQueryRequest` on a set of indices.
+<1> Creates the +{request}+ on a set of indices.
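+For orientation, a minimal hand-written sketch of building and running such a
+request might look like the following. This is not taken from the tagged test
+sources; the `client` instance and the `source1` index name are assumptions:
+
+["source","java"]
+--------------------------------------------------
+// Sketch only: assumes an already-initialized RestHighLevelClient named `client`
+// and an existing index named `source1`.
+UpdateByQueryRequest request = new UpdateByQueryRequest("source1");
+// Synchronous execution; the returned BulkByScrollResponse summarizes the updates.
+BulkByScrollResponse response = client.updateByQuery(request, RequestOptions.DEFAULT);
+--------------------------------------------------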
-By default version conflicts abort the `UpdateByQueryRequest` process but you can just count them by settings it to -`proceed` in the request body +By default version conflicts abort the +{request}+ process but you can just +count them instead with: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-conflicts] +include-tagged::{doc-tests-file}[{api}-request-conflicts] -------------------------------------------------- <1> Set `proceed` on version conflict @@ -29,7 +36,7 @@ You can limit the documents by adding a query. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-query] +include-tagged::{doc-tests-file}[{api}-request-query] -------------------------------------------------- <1> Only copy documents which have field `user` set to `kimchy` @@ -37,15 +44,16 @@ It’s also possible to limit the number of processed documents by setting size. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-size] +include-tagged::{doc-tests-file}[{api}-request-size] -------------------------------------------------- <1> Only copy 10 documents -By default `UpdateByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`. +By default +{request}+ uses batches of 1000. You can change the batch size with +`setBatchSize`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scrollSize] +include-tagged::{doc-tests-file}[{api}-request-scrollSize] -------------------------------------------------- <1> Use batches of 100 documents @@ -53,24 +61,23 @@ Update by query can also use the ingest feature by specifying a `pipeline`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-pipeline] +include-tagged::{doc-tests-file}[{api}-request-pipeline] -------------------------------------------------- <1> set pipeline to `my_pipeline` -`UpdateByQueryRequest` also supports a `script` that modifies the document. The following example illustrates that. ++{request}+ also supports a `script` that modifies the document: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-script] +include-tagged::{doc-tests-file}[{api}-request-script] -------------------------------------------------- <1> `setScript` to increment the `likes` field on all documents with user `kimchy`. -`UpdateByQueryRequest` also helps in automatically parallelizing using `sliced-scroll` to -slice on `_uid`. Use `setSlices` to specify the number of slices to use. 
++{request}+ can be parallelized using `sliced-scroll` with `setSlices`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-slices] +include-tagged::{doc-tests-file}[{api}-request-slices] -------------------------------------------------- <1> set number of slices to use @@ -78,7 +85,7 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-sli ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scroll] +include-tagged::{doc-tests-file}[{api}-request-scroll] -------------------------------------------------- <1> set scroll time @@ -87,7 +94,7 @@ that routing value. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-routing] +include-tagged::{doc-tests-file}[{api}-request-routing] -------------------------------------------------- <1> set routing @@ -97,72 +104,33 @@ In addition to the options above the following arguments can optionally be also ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-timeout] +include-tagged::{doc-tests-file}[{api}-request-timeout] -------------------------------------------------- <1> Timeout to wait for the update by query request to be performed as a `TimeValue` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-refresh] +include-tagged::{doc-tests-file}[{api}-request-refresh] -------------------------------------------------- <1> Refresh index after calling update by query ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-indicesOptions] +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] -------------------------------------------------- <1> Set indices options +include::../execution.asciidoc[] -[[java-rest-high-document-update-by-query-sync]] -==== Synchronous Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute] --------------------------------------------------- - -[[java-rest-high-document-update-by-query-async]] -==== Asynchronous Execution - -The asynchronous execution of an update by query request requires both the `UpdateByQueryRequest` -instance and an `ActionListener` instance to be passed to the asynchronous -method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-async] --------------------------------------------------- -<1> The `UpdateByQueryRequest` to execute and the `ActionListener` to use when -the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. 
-
-A typical listener for `BulkByScrollResponse` looks like:
-
-["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-listener]
---------------------------------------------------
-<1> Called when the execution is successfully completed. The response is
-provided as an argument and contains a list of individual results for each
-operation that was executed. Note that one or more operations might have
-failed while the others have been successfully executed.
-<2> Called when the whole `UpdateByQueryRequest` fails. In this case the raised
-exception is provided as an argument and no operation has been executed.
-
-[[java-rest-high-document-update-by-query-execute-listener-response]]
+[id="{upid}-{api}-response"]
==== Update By Query Response
-The returned `BulkByScrollResponse` contains information about the executed operations and
- allows to iterate over each result as follows:
+The returned +{response}+ contains information about the executed operations and
+allows to iterate over each result as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-response]
+include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Get total time taken
<2> Check if the request timed out
diff --git a/docs/java-rest/high-level/ml/put-filter.asciidoc b/docs/java-rest/high-level/ml/put-filter.asciidoc
new file mode 100644
index 0000000000000..2582e7715ab59
--- /dev/null
+++ b/docs/java-rest/high-level/ml/put-filter.asciidoc
@@ -0,0 +1,53 @@
+--
+:api: put-filter
+:request: PutFilterRequest
+:response: PutFilterResponse
+--
+[id="{upid}-{api}"]
+=== Put Filter API
+
+The Put Filter API can be used to create a new {ml} filter
+in the cluster. The API accepts a +{request}+ object
+as a request and returns a +{response}+.
+
+[id="{upid}-{api}-request"]
+==== Put Filter Request
+
+A +{request}+ requires the following argument:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> The configuration of the {ml} filter to create as an `MlFilter`
+
+[id="{upid}-{api}-config"]
+==== Filter Configuration
+
+The `MlFilter` object contains all the details about the {ml} filter
+configuration.
+
+An `MlFilter` contains the following arguments:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-config]
+--------------------------------------------------
+<1> Required, the filter ID
+<2> Optional, the filter description
+<3> Optional, the items of the filter. A wildcard * can be used at the beginning or the end of an item.
+Up to 10000 items are allowed in each filter.
+
+include::../execution.asciidoc[]
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ contains the full representation of
+the new {ml} filter if it has been successfully created.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The newly created `MlFilter` diff --git a/docs/java-rest/high-level/ml/update-datafeed.asciidoc b/docs/java-rest/high-level/ml/update-datafeed.asciidoc new file mode 100644 index 0000000000000..86e3a4de336ec --- /dev/null +++ b/docs/java-rest/high-level/ml/update-datafeed.asciidoc @@ -0,0 +1,58 @@ +-- +:api: update-datafeed +:request: UpdateDatafeedRequest +:response: PutDatafeedResponse +-- +[id="{upid}-{api}"] +=== Update Datafeed API + +The Update Datafeed API can be used to update a {ml} datafeed +in the cluster. The API accepts a +{request}+ object +as a request and returns a +{response}+. + +[id="{upid}-{api}-request"] +==== Update Datafeed Request + +A +{request}+ requires the following argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The updated configuration of the {ml} datafeed + +[id="{upid}-{api}-config"] +==== Updated Datafeed Arguments + +A `DatafeedUpdate` requires an existing non-null `datafeedId` and +allows updating various settings. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-config] +-------------------------------------------------- +<1> Mandatory, non-null `datafeedId` referencing an existing {ml} datafeed +<2> Optional, set the datafeed Aggregations for data gathering +<3> Optional, the indices that contain the data to retrieve and feed into the job +<4> Optional, specifies how data searches are split into time chunks. +<5> Optional, the interval at which scheduled queries are made while the datafeed runs in real time. +<6> Optional, a query to filter the search results by. Defaults to the `match_all` query. +<7> Optional, the time interval behind real time that data is queried. +<8> Optional, allows the use of script fields. +<9> Optional, the `size` parameter used in the searches. +<10> Optional, the `jobId` that references the job that the datafeed should be associated with +after the update. + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ returns the full representation of +the updated {ml} datafeed if it has been successfully updated. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The updated datafeed diff --git a/docs/java-rest/high-level/rollup/start_job.asciidoc b/docs/java-rest/high-level/rollup/start_job.asciidoc new file mode 100644 index 0000000000000..6d760dc0b33e6 --- /dev/null +++ b/docs/java-rest/high-level/rollup/start_job.asciidoc @@ -0,0 +1,34 @@ +-- +:api: rollup-start-job +:request: StartRollupJobRequest +:response: StartRollupJobResponse +-- + +[id="{upid}-{api}"] +=== Start Rollup Job API + +[id="{upid}-{api}-request"] +==== Request + +The Start Rollup Job API allows you to start a job by ID. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The ID of the job to start. 
+ +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the start command was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the start job request was received. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/search/count.asciidoc b/docs/java-rest/high-level/search/count.asciidoc new file mode 100644 index 0000000000000..f70e1e1fd4d22 --- /dev/null +++ b/docs/java-rest/high-level/search/count.asciidoc @@ -0,0 +1,114 @@ +-- +:api: count +:request: CountRequest +:response: CountResponse +-- +[id="{upid}-{api}"] + +=== Count API + +[id="{upid}-{api}-request"] + +==== Count Request + +The +{request}+ is used to execute a query and get the number of matches for the query. The query to use in +{request}+ can be +set in similar way as query in `SearchRequest` using `SearchSourceBuilder`. + +In its most basic form, we can add a query to the request: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-basic] +-------------------------------------------------- + +<1> Creates the +{request}+. Without arguments this runs against all indices. +<2> Most search parameters are added to the `SearchSourceBuilder`. +<3> Add a `match_all` query to the `SearchSourceBuilder`. +<4> Add the `SearchSourceBuilder` to the +{request}+. + +[[java-rest-high-count-request-optional]] +===== Count Request optional arguments + +Let's first look at some of the optional arguments of a +{request}+: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indices-types] +-------------------------------------------------- +<1> Restricts the request to an index +<2> Limits the request to a type + +There are a couple of other interesting optional parameters: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-routing] +-------------------------------------------------- +<1> Set a routing parameter + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-preference] +-------------------------------------------------- +<1> Use the preference parameter e.g. to execute the search to prefer local shards. The default is to randomize across shards. + +===== Using the SearchSourceBuilder in CountRequest + +Both in search and count API calls, most options controlling the search behavior can be set on the `SearchSourceBuilder`, +which contains more or less the equivalent of the options in the search request body of the Rest API. 
+
+Here are a few examples of some common options:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-source-basics]
+--------------------------------------------------
+<1> Create a `SearchSourceBuilder` with default options.
+<2> Set the query. Can be any type of `QueryBuilder`
+
+After this, the `SearchSourceBuilder` only needs to be added to the
++{request}+:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-source-setter]
+--------------------------------------------------
+
+Note the subtle difference when using `SearchSourceBuilder` in `SearchRequest` and using `SearchSourceBuilder` in +{request}+ - using
+`SearchSourceBuilder` in `SearchRequest` one can use `SearchSourceBuilder.size()` and `SearchSourceBuilder.from()` methods to set the
+number of search hits to return, and the starting index. In +{request}+ we're interested in the total number of matches and these methods
+have no meaning.
+
+The <> page gives a list of all available search queries with
+their corresponding `QueryBuilder` objects and `QueryBuilders` helper methods.
+
+include::../execution.asciidoc[]
+
+[id="{upid}-{api}-response"]
+==== CountResponse
+
+The +{response}+ that is returned by executing the count API call provides the total count of hits and details about the count execution
+itself, like the HTTP status code, or whether the request terminated early:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response-1]
+--------------------------------------------------
+
+The response also provides information about the execution on the
+shard level by offering statistics about the total number of shards that were
+affected by the underlying search, and the successful vs. unsuccessful shards. Possible
+failures can also be handled by iterating over an array of
+`ShardSearchFailures` like in the following example:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response-2]
+--------------------------------------------------
+
diff --git a/docs/java-rest/high-level/security/authenticate.asciidoc b/docs/java-rest/high-level/security/authenticate.asciidoc
new file mode 100644
index 0000000000000..e50c64bf9d0f5
--- /dev/null
+++ b/docs/java-rest/high-level/security/authenticate.asciidoc
@@ -0,0 +1,66 @@
+
+--
+:api: authenticate
+:response: AuthenticateResponse
+--
+
+[id="{upid}-{api}"]
+=== Authenticate API
+
+[id="{upid}-{api}-sync"]
+==== Execution
+
+Authenticating and retrieving information about a user can be performed
+using the `security().authenticate()` method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-execute]
+--------------------------------------------------
+
+This method does not require a request object. The client waits for the
++{response}+ to be returned before continuing with code execution.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ contains two fields. Firstly, the `user` field,
+accessed with `getUser`, contains all the information about this
+authenticated user. The other field, `enabled`, tells whether this user is actually
+usable or has been temporarily deactivated.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> `getUser` retrieves the `User` instance containing the information, +see {javadoc-client}/security/user/User.html. +<2> `enabled` tells if this user is usable or is deactivated. + +[id="{upid}-{api}-async"] +==== Asynchronous Execution + +This request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-async] +-------------------------------------------------- +<1> The `ActionListener` to use when the execution completes. This method does +not require a request object. + +The asynchronous method does not block and returns immediately. Once the request +has completed the `ActionListener` is called back using the `onResponse` method +if the execution completed successfully or using the `onFailure` method if +it failed. + +A typical listener for a +{response}+ looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-listener] +-------------------------------------------------- +<1> Called when the execution completed successfully. The response is +provided as an argument. +<2> Called in case of a failure. The exception is provided as an argument. + diff --git a/docs/java-rest/high-level/security/create-token.asciidoc b/docs/java-rest/high-level/security/create-token.asciidoc new file mode 100644 index 0000000000000..33e55d4ed582b --- /dev/null +++ b/docs/java-rest/high-level/security/create-token.asciidoc @@ -0,0 +1,85 @@ +[[java-rest-high-security-create-token]] +=== Create Token API + +[[java-rest-high-security-create-token-request]] +==== Request +The `CreateTokenRequest` supports three different OAuth2 _grant types_: + +===== Password Grants + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-password-request] +-------------------------------------------------- + +===== Refresh Token Grants +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-refresh-request] +-------------------------------------------------- + +===== Client Credential Grants +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-client-credentials-request] +-------------------------------------------------- + +[[java-rest-high-security-create-token-execution]] +==== Execution + +Creating a OAuth2 security token can be performed by passing the appropriate request to the + `security().createToken()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-execute] +-------------------------------------------------- + +[[java-rest-high-security-create-token-response]] +==== Response + +The returned `CreateTokenResponse` contains the following properties: + +`accessToken`:: This is the newly created access token. + It can be used to authenticate to the Elasticsearch cluster. 
+`type`:: The type of the token; this is always `"Bearer"`.
+`expiresIn`:: The length of time until the token will expire.
+ The token will be considered invalid after that time.
+`scope`:: The scope of the token. May be `null`.
+`refreshToken`:: A secondary "refresh" token that may be used to extend
+ the life of an access token. May be `null`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-response]
+--------------------------------------------------
+<1> The `accessToken` can be used to authenticate to Elasticsearch.
+<2> The `refreshToken` can be used to create a new `CreateTokenRequest` with a `refresh_token` grant.
+
+[[java-rest-high-security-create-token-async]]
+==== Asynchronous Execution
+
+This request can be executed asynchronously using the `security().createTokenAsync()`
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-execute-async]
+--------------------------------------------------
+<1> The `CreateTokenRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once the request
+has completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for a `CreateTokenResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[create-token-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of failure.
The raised exception is provided as an argument
\ No newline at end of file
diff --git a/docs/java-rest/high-level/security/get-role-mappings.asciidoc b/docs/java-rest/high-level/security/get-role-mappings.asciidoc
new file mode 100644
index 0000000000000..cc58d0980c3e7
--- /dev/null
+++ b/docs/java-rest/high-level/security/get-role-mappings.asciidoc
@@ -0,0 +1,67 @@
+[[java-rest-high-security-get-role-mappings]]
+=== Get Role Mappings API
+
+[[java-rest-high-security-get-role-mappings-execution]]
+==== Execution
+
+Retrieving a role mapping can be performed using the `security().getRoleMappings()`
+method and by setting the role mapping name on `GetRoleMappingsRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-execute]
+--------------------------------------------------
+
+Retrieving multiple role mappings can be performed using the `security().getRoleMappings()`
+method and by setting the role mapping names on `GetRoleMappingsRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-list-execute]
+--------------------------------------------------
+
+Retrieving all role mappings can be performed using the `security().getRoleMappings()`
+method and with no role mapping name on `GetRoleMappingsRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-all-execute]
+--------------------------------------------------
+
+[[java-rest-high-security-get-role-mappings-response]]
+==== Response
+
+The returned `GetRoleMappingsResponse` contains the list of role mapping(s).
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-response]
+--------------------------------------------------
+
+[[java-rest-high-security-get-role-mappings-async]]
+==== Asynchronous Execution
+
+This request can be executed asynchronously using the `security().getRoleMappingsAsync()`
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-execute-async]
+--------------------------------------------------
+<1> The `GetRoleMappingsRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once the request
+has completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for a `GetRoleMappingsResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SecurityDocumentationIT.java[get-role-mappings-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of failure.
The raised exception is provided as an argument \ No newline at end of file diff --git a/docs/java-rest/high-level/security/invalidate-token.asciidoc b/docs/java-rest/high-level/security/invalidate-token.asciidoc new file mode 100644 index 0000000000000..ecb3fedb56f0a --- /dev/null +++ b/docs/java-rest/high-level/security/invalidate-token.asciidoc @@ -0,0 +1,39 @@ +-- +:api: invalidate-token +:request: InvalidateTokenRequest +:response: InvalidateTokenResponse +-- + +[id="{upid}-{api}"] +=== Invalidate Token API + +[id="{upid}-{api}-request"] +==== Invalidate Token Request +The +{request}+ supports invalidating either an _access token_ or a _refresh token_ + +===== Access Token +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[invalidate-access-token-request] +-------------------------------------------------- + +===== Refresh Token +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[invalidate-refresh-token-request] +-------------------------------------------------- + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Invalidate Token Response + +The returned +{response}+ contains a single property: + +`created`:: Whether the invalidation record was newly created (`true`), + or if the token had already been invalidated (`false`). + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index f5aecdc1af107..7a2a86a8390ea 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -54,6 +54,7 @@ The Java High Level REST Client supports the following Search APIs: * <<{upid}-field-caps>> * <<{upid}-rank-eval>> * <<{upid}-explain>> +* <<{upid}-count>> include::search/search.asciidoc[] include::search/scroll.asciidoc[] @@ -63,6 +64,7 @@ include::search/multi-search-template.asciidoc[] include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] include::search/explain.asciidoc[] +include::search/count.asciidoc[] == Miscellaneous APIs @@ -240,6 +242,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-update-job>> * <<{upid}-get-job-stats>> * <<{upid}-put-datafeed>> +* <<{upid}-update-datafeed>> * <<{upid}-get-datafeed>> * <<{upid}-delete-datafeed>> * <<{upid}-preview-datafeed>> @@ -257,6 +260,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-get-calendars>> * <<{upid}-put-calendar>> * <<{upid}-delete-calendar>> +* <<{upid}-put-filter>> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -266,6 +270,7 @@ include::ml/close-job.asciidoc[] include::ml/update-job.asciidoc[] include::ml/flush-job.asciidoc[] include::ml/put-datafeed.asciidoc[] +include::ml/update-datafeed.asciidoc[] include::ml/get-datafeed.asciidoc[] include::ml/delete-datafeed.asciidoc[] include::ml/preview-datafeed.asciidoc[] @@ -284,6 +289,7 @@ include::ml/get-categories.asciidoc[] include::ml/get-calendars.asciidoc[] include::ml/put-calendar.asciidoc[] include::ml/delete-calendar.asciidoc[] +include::ml/put-filter.asciidoc[] == Migration APIs @@ -301,11 +307,13 @@ 
include::migration/get-assistance.asciidoc[] The Java High Level REST Client supports the following Rollup APIs: * <> +* <<{upid}-rollup-start-job>> * <<{upid}-rollup-delete-job>> * <> * <<{upid}-x-pack-rollup-get-rollup-caps>> include::rollup/put_job.asciidoc[] +include::rollup/start_job.asciidoc[] include::rollup/delete_job.asciidoc[] include::rollup/get_job.asciidoc[] include::rollup/get_rollup_caps.asciidoc[] @@ -323,9 +331,13 @@ The Java High Level REST Client supports the following Security APIs: * <> * <> * <<{upid}-clear-roles-cache>> +* <<{upid}-authenticate>> * <> * <> +* <> * <> +* <> +* <<{upid}-invalidate-token>> include::security/put-user.asciidoc[] include::security/enable-user.asciidoc[] @@ -333,9 +345,13 @@ include::security/disable-user.asciidoc[] include::security/change-password.asciidoc[] include::security/delete-role.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/authenticate.asciidoc[] include::security/get-certificates.asciidoc[] include::security/put-role-mapping.asciidoc[] +include::security/get-role-mappings.asciidoc[] include::security/delete-role-mapping.asciidoc[] +include::security/create-token.asciidoc[] +include::security/invalidate-token.asciidoc[] == Watcher APIs @@ -378,4 +394,4 @@ don't leak into the rest of the documentation. :response!: :doc-tests-file!: :upid!: --- \ No newline at end of file +-- diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index cc7bc752ec6d9..d3f598525a8b7 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -44,7 +44,7 @@ specialized code may define new ways to use a Painless script. | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Metric aggregation reduce | <> | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] -| Bucket aggregation | <> +| Bucket script aggregation | <> | {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] | Watcher condition | <> | {xpack-ref}/condition-script.html[Elasticsearch Documentation] diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc index a71fde0be32a0..df0c6f71e7798 100644 --- a/docs/painless/painless-contexts/index.asciidoc +++ b/docs/painless/painless-contexts/index.asciidoc @@ -28,7 +28,7 @@ include::painless-metric-agg-combine-context.asciidoc[] include::painless-metric-agg-reduce-context.asciidoc[] -include::painless-bucket-agg-context.asciidoc[] +include::painless-bucket-script-agg-context.asciidoc[] include::painless-analysis-predicate-context.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc deleted file mode 100644 index 3bb4cae3d3bab..0000000000000 --- a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -[[painless-bucket-agg-context]] -=== Bucket aggregation context - -Use a Painless script in an -{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation] -to calculate a value as a result in a bucket. - -*Variables* - -`params` (`Map`, read-only):: - User-defined parameters passed in as part of the query. The parameters - include values defined as part of the `buckets_path`. - -*Return* - -numeric:: - The calculated value as the result. 
- -*API* - -The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc new file mode 100644 index 0000000000000..5a5306016945d --- /dev/null +++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc @@ -0,0 +1,86 @@ +[[painless-bucket-script-agg-context]] +=== Bucket script aggregation context + +Use a Painless script in an +{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[`bucket_script` pipeline aggregation] +to calculate a value as a result in a bucket. + +==== Variables + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. The parameters + include values defined as part of the `buckets_path`. + +==== Return + +numeric:: + The calculated value as the result. + +==== API + +The standard <> is available. + +==== Example + +To run this example, first follow the steps in <>. + +The Painless context in a `bucket_script` aggregation provides a `params` map. This map contains both +user-specified custom values, as well as the values from other aggregations specified in the `buckets_path` +property. + +This example takes the values from the `min` and `max` aggregations, calculates the difference, +and adds the user-specified `base_cost` to the result: + +[source,Painless] +-------------------------------------------------- +(params.max - params.min) + params.base_cost +-------------------------------------------------- + +Note that the values are extracted from the `params` map. In context, the aggregation looks like this: + +[source,js] +-------------------------------------------------- +GET /seats/_search +{ + "size": 0, + "aggs": { + "theatres": { + "terms": { + "field": "theatre", + "size": 10 + }, + "aggs": { + "min_cost": { + "min": { + "field": "cost" + } + }, + "max_cost": { + "max": { + "field": "cost" + } + }, + "spread_plus_base": { + "bucket_script": { + "buckets_path": { <1> + "min": "min_cost", + "max": "max_cost" + }, + "script": { + "params": { + "base_cost": 5 <2> + }, + "source": "(params.max - params.min) + params.base_cost" + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:seats] +<1> The `buckets_path` points to two aggregations (`min_cost`, `max_cost`) and adds `min`/`max` variables +to the `params` map +<2> The user-specified `base_cost` is also added to the script's `params` map \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc index a6a67e22a5bd9..469f425d1d89f 100644 --- a/docs/painless/painless-contexts/painless-context-examples.asciidoc +++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc @@ -46,7 +46,7 @@ the request URL.
PUT /seats { "mappings": { - "_doc": { + "seat": { "properties": { "theatre": { "type": "keyword" }, "play": { "type": "text" }, diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 35265140533ed..ef5744f7dff18 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -38,7 +38,7 @@ normalization can be specified with the `name` parameter, which accepts `nfc`, convert `nfc` to `nfd` or `nfkc` to `nfkd` respectively: Which letters are normalized can be controlled by specifying the -`unicodeSetFilter` parameter, which accepts a +`unicode_set_filter` parameter, which accepts a http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet]. Here are two examples, the default usage and a customised character filter: @@ -194,7 +194,7 @@ with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf` (default). Which letters are normalized can be controlled by specifying the -`unicodeSetFilter` parameter, which accepts a +`unicode_set_filter` parameter, which accepts a http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet]. You should probably prefer the <>. @@ -273,7 +273,7 @@ The ICU folding token filter already does Unicode normalization, so there is no need to use Normalize character or token filter as well. Which letters are folded can be controlled by specifying the -`unicodeSetFilter` parameter, which accepts a +`unicode_set_filter` parameter, which accepts a http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet]. The following example exempts Swedish characters from folding. It is important @@ -300,7 +300,7 @@ PUT icu_sample "filter": { "swedish_folding": { "type": "icu_folding", - "unicodeSetFilter": "[^åäöÅÄÖ]" + "unicode_set_filter": "[^åäöÅÄÖ]" } } } diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 9a4e90bebd714..d444170801833 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -32,7 +32,7 @@ The GCS repository plugin adds support for using Google Cloud Storage service as The following plugin has been contributed by our community: -* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by Wikimedia Foundation) +* https://github.com/BigDataBoutique/elasticsearch-repository-swift[Openstack Swift] (by Wikimedia Foundation and BigData Boutique) include::repository-azure.asciidoc[] diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 1d185e80f4f96..514528da0d0bd 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -1,12 +1,129 @@ [[search-aggregations-bucket-datehistogram-aggregation]] === Date Histogram Aggregation -A multi-bucket aggregation similar to the <> except it can -only be applied on date values. Since dates are represented in Elasticsearch internally as long values, it is possible -to use the normal `histogram` on dates as well, though accuracy will be compromised. The reason for this is in the fact -that time based intervals are not fixed (think of leap years and on the number of days in a month). For this reason, -we need special support for time based data. From a functionality perspective, this histogram supports the same features -as the normal <>. 
The main difference is that the interval can be specified by date/time expressions. +This multi-bucket aggregation is similar to the normal +<>, but it can +only be used with date values. Because dates are represented internally in +Elasticsearch as long values, it is possible, but not as accurate, to use the +normal `histogram` on dates as well. The main difference between the two APIs is +that here the interval can be specified using date/time expressions. Time-based +data requires special support because time-based intervals are not always a +fixed length. + +==== Setting intervals + +There seems to be no limit to the creativity we humans apply to setting our +clocks and calendars. We've invented leap years and leap seconds, standard and +daylight savings times, and timezone offsets of 30 or 45 minutes rather than a +full hour. While these creations help keep us in sync with the cosmos and our +environment, they can make specifying time intervals accurately a real challenge. +The only universal truth our researchers have yet to disprove is that a +millisecond is always the same duration, and a second is always 1000 milliseconds. +Beyond that, things get complicated. + +Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you +are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are +_fixed-length intervals_. + +For example, a specification of 1 day (1d) from now is a calendar interval that +means "at +this exact time tomorrow" no matter the length of the day. A change to or from +daylight savings time that results in a 23 or 25 hour day is compensated for and the +specification of "this exact time tomorrow" is maintained. But if you specify 2 or +more days, each day must be of the same fixed duration (24 hours). In this case, if +the specified interval includes the change to or from daylight savings time, the +interval will end an hour sooner or later than you expect. + +There are similar differences to consider when you specify single versus multiple +minutes or hours. Multiple time periods longer than a day are not supported. + +Here are the valid time specifications and their meanings: + +milliseconds (ms) :: +Fixed length interval; supports multiples. + +seconds (s) :: +1000 milliseconds; fixed length interval (except for the last second of a +minute that contains a leap-second, which is 2000ms long); supports multiples. + +minutes (m) :: +All minutes begin at 00 seconds. + +* One minute (1m) is the interval between 00 seconds of the first minute and 00 +seconds of the following minute in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the +hour is the same at the start and end. +* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds +each. + +hours (h) :: +All hours begin at 00 minutes and 00 seconds. + +* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 +minutes of the following hour in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the hour +is the same at the start and end. +* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds +each. + +days (d) :: +All days begin at the earliest possible time, which is usually 00:00:00 +(midnight).
+ +* One day (1d) is the interval between the start of the day and the start +of the following day in the specified timezone, compensating for any intervening +time changes. +* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000 +milliseconds each. + +weeks (w) :: + +* One week (1w) is the interval between the start day_of_week:hour:minute:second +and the same day of the week and time of the following week in the specified +timezone. +* Multiple weeks (__n__w) are not supported. + +months (M) :: + +* One month (1M) is the interval between the start day of the month and time of +day and the same day of the month and time of the following month in the specified +timezone, so that the day of the month and time of day are the same at the start +and end. +* Multiple months (__n__M) are not supported. + +quarters (q) :: + +* One quarter (1q) is the interval between the start day of the month and +time of day and the same day of the month and time of day three months later, +so that the day of the month and time of day are the same at the start and end. + +* Multiple quarters (__n__q) are not supported. + +years (y) :: + +* One year (1y) is the interval between the start day of the month and time of +day and the same day of the month and time of day the following year in the +specified timezone, so that the date and time are the same at the start and end. + +* Multiple years (__n__y) are not supported. + +NOTE: +In all cases, when the specified end time does not exist, the actual end time is +the closest available time after the specified end. + +Widely distributed applications must also consider vagaries such as countries that +start and stop daylight savings time at 12:01 A.M. and so end up with one minute of +Sunday followed by an additional 59 minutes of Saturday once a year, and countries +that decide to move across the international date line. Situations like +that can make irregular timezone offsets seem easy. + +As always, rigorous testing, especially around time-change events, will ensure +that your time interval specification is +what you intend it to be. + +WARNING: +To avoid unexpected results, all connected servers and clients must sync to a +reliable network time service. + +==== Examples Requesting bucket intervals of a month. [source,js] -------------------------------------------------- POST /sales/_search?size=0 { "aggs" : { "sales_over_time" : { "date_histogram" : { "field" : "date", "interval" : "month" } } } } -------------------------------------------------- // CONSOLE // TEST[setup:sales] -Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`), -`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`) - -Time values can also be specified via abbreviations supported by <> parsing. -Note that fractional time values are not supported, but you can address this by shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger than -days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not). +You can also specify time values using abbreviations supported by +<> parsing. +Note that fractional time values are not supported, but you can address this by +shifting to another +time unit (e.g., `1.5h` could instead be specified as `90m`). [source,js] -------------------------------------------------- POST /sales/_search?size=0 { "aggs" : { "sales_over_time" : { "date_histogram" : { "field" : "date", "interval" : "90m" } } } } -------------------------------------------------- // CONSOLE // TEST[setup:sales] -==== Keys +===== Keys Internally, a date is represented as a 64 bit number representing a timestamp -in milliseconds-since-the-epoch. These timestamps are returned as the bucket -++key++s.
The `key_as_string` is the same timestamp converted to a formatted -date string using the format specified with the `format` parameter: +in milliseconds-since-the-epoch (01/01/1970 midnight UTC). These timestamps are +returned as the ++key++ name of the bucket. The `key_as_string` is the same +timestamp converted to a formatted +date string using the `format` parameter specification: -TIP: If no `format` is specified, then it will use the first date -<> specified in the field mapping. +TIP: If you don't specify `format`, the first date +<> specified in the field mapping is used. [source,js] -------------------------------------------------- POST /sales/_search?size=0 { "aggs" : { "sales_over_time" : { "date_histogram" : { "field" : "date", "interval" : "1M", "format" : "yyyy-MM-dd" <1> } } } } -------------------------------------------------- // CONSOLE // TEST[setup:sales] <1> Supports expressive date <> Response: [source,js] -------------------------------------------------- { ... "aggregations": { "sales_over_time": { "buckets": [ { "key_as_string": "2015-01-01", "key": 1420070400000, "doc_count": 3 }, { "key_as_string": "2015-02-01", "key": 1422748800000, "doc_count": 2 }, { "key_as_string": "2015-03-01", "key": 1425168000000, "doc_count": 2 } ] } } } -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -==== Time Zone +===== Timezone Date-times are stored in Elasticsearch in UTC. By default, all bucketing and -rounding is also done in UTC. The `time_zone` parameter can be used to indicate -that bucketing should use a different time zone. +rounding is also done in UTC. Use the `time_zone` parameter to indicate +that bucketing should use a different timezone. -Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or -`-08:00`) or as a timezone id, an identifier used in the TZ database like -`America/Los_Angeles`. +You can specify timezones as either an ISO 8601 UTC offset (e.g. `+01:00` or +`-08:00`) or as a timezone ID as specified in the IANA timezone database, +such as `America/Los_Angeles`. Consider the following example: [source,js] --------------------------------- PUT my_index/_doc/1?refresh { "date": "2015-10-01T00:30:00Z" } PUT my_index/_doc/2?refresh { "date": "2015-10-01T01:30:00Z" } GET my_index/_search?size=0 { "aggs": { "by_day": { "date_histogram": { "field": "date", "interval": "day" } } } } --------------------------------- // CONSOLE -UTC is used if no time zone is specified, which would result in both of these +If you don't specify a timezone, UTC is used. This would result in both of these documents being placed into the same day bucket, which starts at midnight UTC on 1 October 2015: [source,js] --------------------------------- { ... "aggregations": { "by_day": { "buckets": [ { "key_as_string": "2015-10-01T00:00:00.000Z", "key": 1443657600000, "doc_count": 2 } ] } } } --------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before -midnight UTC: +If you specify a `time_zone` of `-01:00`, midnight in that timezone is one hour +before midnight UTC: [source,js] --------------------------------- GET my_index/_search?size=0 { "aggs": { "by_day": { "date_histogram": { "field": "date", "interval": "day", "time_zone": "-01:00" } } } } --------------------------------- // CONSOLE // TEST[continued] Now the first document falls into the bucket for 30 September 2015, while the second document falls into the bucket for 1 October 2015: [source,js] --------------------------------- { ... "aggregations": { "by_day": { "buckets": [ { "key_as_string": "2015-09-30T00:00:00.000-01:00", <1> "key": 1443574800000, "doc_count": 1 }, { "key_as_string": "2015-10-01T00:00:00.000-01:00", <1> "key": 1443661200000, "doc_count": 1 } ] } } } --------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] <1> The `key_as_string` value represents midnight on each day - in the specified time zone. + in the specified timezone. WARNING: When using time zones that follow DST (daylight savings time) changes, buckets close to the moment when those changes happen can have slightly different -sizes than would be expected from the used `interval`. +sizes than you would expect from the used `interval`. For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am, -clocks were turned forward 1 hour to 3am local time. When using `day` as `interval`, +clocks were turned forward 1 hour to 3am local time. If you use `day` as `interval`, the bucket covering that day will only hold data for 23 hours instead of the usual -24 hours for other buckets.
The same is true for shorter intervals, like 12h, +where you'll have only an 11h bucket on the morning of 27 March when the DST shift happens. +===== Offset -==== Offset - -The `offset` parameter is used to change the start value of each bucket by the +Use the `offset` parameter to change the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration, such as `1h` for an hour, or `1d` for a day. See <> for more possible time duration options. -For instance, when using an interval of `day`, each bucket runs from midnight -to midnight. Setting the `offset` parameter to `+6h` would change each bucket +For example, when using an interval of `day`, each bucket runs from midnight +to midnight. Setting the `offset` parameter to `+6h` changes each bucket to run from 6am to 6am: [source,js] ----------------------------- PUT my_index/_doc/1?refresh { "date": "2015-10-01T05:30:00Z" } PUT my_index/_doc/2?refresh { "date": "2015-10-01T06:30:00Z" } GET my_index/_search?size=0 { "aggs": { "by_day": { "date_histogram": { "field": "date", "interval": "day", "offset": "+6h" } } } } ----------------------------- // CONSOLE Instead of a single bucket starting at midnight, the above request groups the documents into buckets starting at 6am: [source,js] ----------------------------- { ... "aggregations": { "by_day": { "buckets": [ { "key_as_string": "2015-09-30T06:00:00.000Z", "key": 1443592800000, "doc_count": 1 }, { "key_as_string": "2015-10-01T06:00:00.000Z", "key": 1443679200000, "doc_count": 1 } ] } } } ----------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -NOTE: The start `offset` of each bucket is calculated after the `time_zone` +NOTE: The start `offset` of each bucket is calculated after `time_zone` adjustments have been made. -==== Keyed Response +===== Keyed Response -Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array: +Setting the `keyed` flag to `true` associates a unique string key with each +bucket and returns the ranges as a hash rather than an array: [source,js] -------------------------------------------------- POST /sales/_search?size=0 { "aggs" : { "sales_over_time" : { "date_histogram" : { "field" : "date", "interval" : "1M", "format" : "yyyy-MM-dd", "keyed": true } } } } -------------------------------------------------- // CONSOLE // TEST[setup:sales] Response: [source,js] -------------------------------------------------- { ... "aggregations": { "sales_over_time": { "buckets": { "2015-01-01": { "key_as_string": "2015-01-01", "key": 1420070400000, "doc_count": 3 }, "2015-02-01": { "key_as_string": "2015-02-01", "key": 1422748800000, "doc_count": 2 }, "2015-03-01": { "key_as_string": "2015-03-01", "key": 1425168000000, "doc_count": 2 } } } } } -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -==== Scripts +===== Scripts -Like with the normal <>, both document level scripts and -value level scripts are supported. It is also possible to control the order of the returned buckets using the `order` -settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets between the first -bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds` -setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to -do that please refer to the explanation <>). +As with the normal <>, +both document-level scripts and +value-level scripts are supported. You can control the order of the returned +buckets using the `order` +settings and filter the returned buckets based on a `min_doc_count` setting +(by default all buckets between the first +bucket that matches documents and the last one are returned). This histogram +also supports the `extended_bounds` +setting, which enables extending the bounds of the histogram beyond the data +itself. For more information, see +<>. -==== Missing value +===== Missing value -The `missing` parameter defines how documents that are missing a value should be treated. -By default they will be ignored but it is also possible to treat them as if they -had a value. +The `missing` parameter defines how to treat documents that are missing a value. +By default, they are ignored, but it is also possible to treat them as if they +had a value.
[source,js] -------------------------------------------------- POST /sales/_search?size=0 { @@ -391,20 +512,22 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. +<1> Documents without a value in the `publish_date` field will fall into the +same bucket as documents that have the value `2000-01-01`. -==== Order +===== Order -By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using -the `order` setting. Supports the same `order` functionality as the <>. +By default the returned buckets are sorted by their `key` ascending, but you can +control the order using +the `order` setting. This setting supports the same `order` functionality as +<>. deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] -==== Use of a script to aggregate by day of the week +===== Using a script to aggregate by day of the week -There are some cases where date histogram can't help us, like for example, when we need -to aggregate the results by day of the week. -In this case to overcome the problem, we can use a script that returns the day of the week: +When you need to aggregate the results by day of the week, use a script that +returns the day of the week: [source,js] @@ -452,5 +575,5 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -The response will contain all the buckets having as key the relative day of -the week: 1 for Monday, 2 for Tuesday... 7 for Sunday. +The response will contain all the buckets having the relative day of +the week as key: 1 for Monday, 2 for Tuesday... 7 for Sunday. diff --git a/docs/reference/aggregations/metrics.asciidoc b/docs/reference/aggregations/metrics.asciidoc index 96597564dac2d..1d3daa2cecae5 100644 --- a/docs/reference/aggregations/metrics.asciidoc +++ b/docs/reference/aggregations/metrics.asciidoc @@ -41,6 +41,8 @@ include::metrics/tophits-aggregation.asciidoc[] include::metrics/valuecount-aggregation.asciidoc[] +include::metrics/median-absolute-deviation-aggregation.asciidoc[] + diff --git a/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc b/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc new file mode 100644 index 0000000000000..2e88b12d92cd1 --- /dev/null +++ b/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc @@ -0,0 +1,189 @@ +[[search-aggregations-metrics-median-absolute-deviation-aggregation]] +=== Median Absolute Deviation Aggregation + +This `single-value` aggregation approximates the https://en.wikipedia.org/wiki/Median_absolute_deviation[median absolute deviation] +of its search results. + +Median absolute deviation is a measure of variability. It is a robust +statistic, meaning that it is useful for describing data that may have +outliers, or may not be normally distributed. For such data it can be more +descriptive than standard deviation. + +It is calculated as the median of each data point's deviation from the median +of the entire sample. That is, for a random variable X, the median absolute +deviation is median(|median(X) - X~i~|). + +==== Example + +Assume our data represents product reviews on a one to five star scale.
+Such reviews are usually summarized as a mean, which is easily understandable +but doesn't describe the reviews' variability. Estimating the median absolute +deviation can provide insight into how much reviews vary from one another. + +In this example we have a product which has an average rating of +3 stars. Let's look at its ratings' median absolute deviation to determine +how much they vary. + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_average": { + "avg": { + "field": "rating" + } + }, + "review_variability": { + "median_absolute_deviation": { + "field": "rating" <1> + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] +<1> `rating` must be a numeric field + +The resulting median absolute deviation of `2` tells us that there is a fair +amount of variability in the ratings. Reviewers must have diverse opinions about +this product. + +[source,js] +--------------------------------------------------------- +{ + ... + "aggregations": { + "review_average": { + "value": 3.0 + }, + "review_variability": { + "value": 2.0 + } + } +} +--------------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] + +==== Approximation + +The naive implementation of calculating median absolute deviation stores the +entire sample in memory, so this aggregation instead calculates an +approximation. It uses the https://github.com/tdunning/t-digest[TDigest data structure] +to approximate the sample median and the median of deviations from the sample +median. For more about the approximation characteristics of TDigests, see +<>. + +The tradeoff between resource usage and accuracy of a TDigest's quantile +approximation, and therefore the accuracy of this aggregation's approximation +of median absolute deviation, is controlled by the `compression` parameter. A +higher `compression` setting provides a more accurate approximation at the +cost of higher memory usage. For more about the characteristics of the TDigest +`compression` parameter see +<>. + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "field": "rating", + "compression": 100 + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] + +The default `compression` value for this aggregation is `1000`. At this +compression level this aggregation is usually within 5% of the exact result, +but observed performance will depend on the sample data. + +==== Script + +This metric aggregation supports scripting. In our example above, product +reviews are on a scale of one to five. If we wanted to modify them to a scale +of one to ten, we can use scripting.
+ +To provide an inline script: + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "script": { + "lang": "painless", + "source": "doc['rating'].value * params.scaleFactor", + "params": { + "scaleFactor": 2 + } + } + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] + +To provide a stored script: + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "script": { + "id": "my_script", + "params": { + "field": "rating" + } + } + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews,stored_example_script] + +==== Missing value + +The `missing` parameter defines how documents that are missing a value should be +treated. By default they will be ignored but it is also possible to treat them +as if they had a value. + +Let's be optimistic and assume some reviewers loved the product so much that +they forgot to give it a rating. We'll assign them five stars. + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "field": "rating", + "missing": 5 + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index c4857699f9805..69c9d50690196 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -15,8 +15,8 @@ POST ledger/_search?size=0 "aggs": { "profit": { "scripted_metric": { - "init_script" : "state.transactions = []", - "map_script" : "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)", <1> + "init_script" : "state.transactions = []", <1> + "map_script" : "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)", "combine_script" : "double profit = 0; for (t in state.transactions) { profit += t } return profit", "reduce_script" : "double profit = 0; for (a in states) { profit += a } return profit" } @@ -27,7 +27,7 @@ POST ledger/_search?size=0 // CONSOLE // TEST[setup:ledger] -<1> `map_script` is the only required parameter +<1> `init_script` is an optional parameter; all other scripts are required. The above aggregation demonstrates how one would use the script aggregation to compute the total profit from sale and cost transactions. @@ -121,22 +121,22 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is the only required script. If no combine_script is specified, the resulting state +map_script:: Executed once per document collected. This is a required script. If no combine_script is specified, the resulting state needs to be stored in the `state` object. + In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field is added to the transactions array.
If the value of the type field is not 'sale' the negated value of the amount field is added to transactions. -combine_script:: Executed once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from - each shard. If a combine_script is not provided the combine phase will return the aggregation variable. +combine_script:: Executed once on each shard after document collection is complete. This is a required script. Allows the aggregation to + consolidate the state returned from each shard. + In the above example, the `combine_script` iterates through all the stored transactions, summing the values in the `profit` variable and finally returns `profit`. -reduce_script:: Executed once on the coordinating node after all shards have returned their results. The script is provided with access to a - variable `states` which is an array of the result of the combine_script on each shard. If a reduce_script is not provided - the reduce phase will return the `states` variable. +reduce_script:: Executed once on the coordinating node after all shards have returned their results. This is a required script. The + script is provided with access to a variable `states` which is an array of the result of the combine_script on each + shard. + In the above example, the `reduce_script` iterates through the `profit` returned by each shard summing the values before returning the final combined profit which will be returned in the response of the aggregation. diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc deleted file mode 100644 index 85c6775af1cab..0000000000000 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc +++ /dev/null @@ -1,46 +0,0 @@ -[role="xpack"] -[testenv="platinum"] -[[ccr-get-auto-follow-stats]] -=== Get Auto-Follow Stats API -++++ -Get Auto-Follow Stats -++++ - -Get auto-follow stats. - -==== Description - -This API gets stats about auto-follow patterns. - -==== Request - -[source,js] --------------------------------------------------- -GET /_ccr/auto_follow/stats --------------------------------------------------- -// CONSOLE -// TEST - -==== Example - -This example retrieves stats about auto-follow patterns: - -[source,js] --------------------------------------------------- -GET /_ccr/auto_follow/stats --------------------------------------------------- -// CONSOLE -// TEST - -The API returns the following result: - -[source,js] --------------------------------------------------- -{ - "number_of_successful_follow_indices" : 16, - "number_of_failed_follow_indices" : 0, - "number_of_failed_remote_cluster_state_requests" : 0, - "recent_auto_follow_errors" : [ ] -} --------------------------------------------------- -// TESTRESPONSE[s/"number_of_successful_follow_indices" : 16/"number_of_successful_follow_indices" : $body.number_of_successful_follow_indices/] diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index d4a45bab6ed61..6411766350d34 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -5,6 +5,12 @@ You can use the following APIs to perform {ccr} operations. +[float] +[[ccr-api-top-level]] +=== Top-Level + +* <> + [float] [[ccr-api-follow]] === Follow @@ -22,7 +28,9 @@ You can use the following APIs to perform {ccr} operations. 
* <> * <> * <> -* <> + +// top-level +include::get-ccr-stats.asciidoc[] // follow include::follow/put-follow.asciidoc[] @@ -35,4 +43,3 @@ include::follow/get-follow-stats.asciidoc[] // auto-follow include::auto-follow/put-auto-follow-pattern.asciidoc[] include::auto-follow/delete-auto-follow-pattern.asciidoc[] include::auto-follow/get-auto-follow-pattern.asciidoc[] -include::auto-follow/get-auto-follow-stats.asciidoc[] diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index efbaeecb712d5..0efa156b95a49 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -38,12 +38,6 @@ POST /follower_index/_ccr/pause_follow ////////////////////////// -[source,js] -------------------------------------------------- -GET /_ccr/stats -------------------------------------------------- -// CONSOLE - [source,js] -------------------------------------------------- GET //_ccr/stats -------------------------------------------------- @@ -186,7 +180,7 @@ This example retrieves follower stats: [source,js] -------------------------------------------------- -GET /_ccr/stats +GET /follower_index/_ccr/stats -------------------------------------------------- // CONSOLE diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc new file mode 100644 index 0000000000000..9229e4c9406c5 --- /dev/null +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -0,0 +1,21 @@ +[role="xpack"] +[testenv="platinum"] +[[ccr-get-stats]] +=== Get Cross-Cluster Replication Stats API +++++ +Get CCR Stats +++++ + +Get {ccr} stats. + +==== Description + +This API gets {ccr} stats. + +==== Request + +[source,js] +-------------------------------------------------- +GET /_ccr/stats +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 33246b57aa4c5..d21d84ba8f230 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -11,6 +11,8 @@ GET /_cluster/settings -------------------------------------------------- // CONSOLE +By default, this API call only returns settings that have been explicitly defined, but can also <>. + Updates to settings can be persistent, meaning they apply across restarts, or transient, where they don't survive a full cluster restart. Here is an example of a persistent update: diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 93a365623fce7..39ac9b134e3c9 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -147,7 +147,7 @@ and `scroll`. Sending the `refresh` will refresh all shards involved in the delete by query once the request completes. This is different than the Delete API's `refresh` parameter which causes just the shard that received the delete request -to be refreshed. +to be refreshed. Also unlike the Delete API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` @@ -277,7 +277,7 @@ The number of requests per second effectively executed during the delete by quer `throttled_until_millis`:: -This field should always be equal to zero in a delete by query response.
It only +has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index c85e638558197..ec6ef28534fd6 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -73,14 +73,14 @@ GET twitter/_doc/0?_source=false // CONSOLE // TEST[setup:twitter] -If you only need one or two fields from the complete `_source`, you can use the `_source_include` -& `_source_exclude` parameters to include or filter out that parts you need. This can be especially helpful +If you only need one or two fields from the complete `_source`, you can use the `_source_includes` +& `_source_excludes` parameters to include or filter out the parts you need. This can be especially helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list of fields or wildcard expressions. Example: [source,js] -------------------------------------------------- -GET twitter/_doc/0?_source_include=*.id&_source_exclude=entities +GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- // CONSOLE // TEST[setup:twitter] @@ -232,7 +232,7 @@ You can also use the same source filtering parameters to control which parts of [source,js] -------------------------------------------------- -GET twitter/_doc/1/_source?_source_include=*.id&_source_exclude=entities' +GET twitter/_doc/1/_source?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- // CONSOLE // TEST[continued] diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index e8b81b6ae37d9..6d50a6a643a89 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -89,7 +89,7 @@ GET /test/_doc/_mget By default, the `_source` field will be returned for every document (if stored). Similar to the <> API, you can retrieve only parts of the `_source` (or not at all) by using the `_source` parameter. You can also use -the url parameters `_source`,`_source_include` & `_source_exclude` to specify defaults, +the url parameters `_source`,`_source_includes` & `_source_excludes` to specify defaults, which will be used when there are no per-document instructions. For example: diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 6a9d89b9242c4..5a5004f17aea4 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1,6 +1,9 @@ [[docs-reindex]] == Reindex API +IMPORTANT: Reindex requires <> to be enabled for +all documents in the source index. + IMPORTANT: Reindex does not attempt to set up the destination index. It does not copy the settings of the source index. You should set up the destination index prior to running a `_reindex` action, including setting up mappings, shard @@ -161,13 +164,7 @@ POST _reindex `index` and `type` in `source` can both be lists, allowing you to copy from lots of sources in one request. This will copy documents from the `_doc` and -`post` types in the `twitter` and `blog` index. The copied documents would include the -`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more -specific parameters, you can use `query`.
- -The Reindex API makes no effort to handle ID collisions. For such issues, the target index -will remain valid, but it's not easy to predict which document will survive because -the iteration order isn't well defined. +`post` types in the `twitter` and `blog` indices. [source,js] -------------------------------------------------- POST _reindex { "source": { "index": ["twitter", "blog"], "type": ["_doc", "post"] }, "dest": { - "index": "all_together" + "index": "all_together", + "type": "_doc" } } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\nPUT blog\n/] +// TEST[setup:twitter] +// TEST[s/^/PUT blog\/post\/post1?refresh\n{"test": "foo"}\n/] + +NOTE: The Reindex API makes no effort to handle ID collisions, so the last +document written will "win", but the order isn't usually predictable, so it is +not a good idea to rely on this behavior. Instead, make sure that IDs are unique +using a script. It's also possible to limit the number of processed documents by setting `size`. This will only copy a single document from `twitter` to @@ -527,7 +531,8 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, Sending the `refresh` url parameter will cause all indexes to which the request wrote to be refreshed. This is different than the Index API's `refresh` -parameter which causes just the shard that received the new data to be refreshed. +parameter which causes just the shard that received the new data to be +refreshed. Also unlike the Index API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` @@ -667,7 +672,7 @@ The number of requests per second effectively executed during the reindex. `throttled_until_millis`:: -This field should always be equal to zero in a `_delete_by_query` response. It only +This field should always be equal to zero in a `_reindex` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index bef531d8de2c4..fab21aa570f2e 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -203,8 +203,9 @@ also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeo and `scroll`. Sending the `refresh` will update all shards in the index being updated when -the request completes. This is different than the Index API's `refresh` +the request completes. This is different than the Update API's `refresh` parameter which causes just the shard that received the new data to be indexed. +Also unlike the Update API it does not support `wait_for`.
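For example, a minimal `_update_by_query` request that uses the `refresh` parameter might look like the following sketch (it assumes the `twitter` index used elsewhere in these docs; `conflicts=proceed` simply keeps version conflicts from aborting the request):

[source,js]
--------------------------------------------------
POST twitter/_update_by_query?refresh&conflicts=proceed
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]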
If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` @@ -236,7 +237,7 @@ batch size is `1000`, so if the `requests_per_second` is set to `500`: [source,txt] -------------------------------------------------- target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - delete_time = 2 seconds - .5 seconds = 1.5 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- Since the batch is issued as a single `_bulk` request large batch sizes will @@ -331,7 +332,7 @@ The number of requests per second effectively executed during the update by quer `throttled_until_millis`:: -This field should always be equal to zero in a delete by query response. It only +This field should always be equal to zero in an `_update_by_query` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc new file mode 100644 index 0000000000000..3c8b6c397c07f --- /dev/null +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-delete-lifecycle]] +=== Delete Lifecycle Policy API +++++ +Delete Policy +++++ + +Deletes an existing lifecycle policy. + +==== Request + +`DELETE _ilm/policy/<policy>` + +==== Description + +Deletes an existing lifecycle policy. + +==== Path Parameters + +`policy` (required):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + DELETE operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
+ + +==== Examples + +The following example deletes an existing policy named `my_policy`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +DELETE _ilm/policy/my_policy +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc new file mode 100644 index 0000000000000..95daf0bda1f6b --- /dev/null +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -0,0 +1,296 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-explain]] +=== Explain Lifecycle API +++++ +Explain Lifecycle +++++ + +Shows the current lifecycle status for an index. + +==== Request + +`GET <index>/_ilm/explain` + +==== Description + +This API returns information relating to the current lifecycle state of an +index. This includes information such as the currently executing phase, action, +and step, and the timestamp when the index entered each. It also shows the +definition of the current phase that is being run and, in the event that there +has been a failure, information regarding the failure. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
+ + +==== Examples + +The following example retrieves the lifecycle state for the index `my_index`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy", + "index.number_of_replicas": 0 + } +} + +GET /_cluster/health?wait_for_status=green&timeout=10s +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +When the index is first taken over by ILM you will see a response like the following: + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, <1> + "policy": "my_policy", <2> + "lifecycle_date_millis": 1538475653281, <3> + "phase": "new", <4> + "phase_time_millis": 1538475653317, <5> + "action": "complete", <6> + "action_time_millis": 1538475653317, <7> + "step": "complete", <8> + "step_time_millis": 1538475653317 <9> + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] +// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] +// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] +// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] +<1> Shows if the index is being managed by ILM. If the index is not managed by +ILM the other fields will not be shown +<2> The name of the policy which ILM is using for this index +<3> The timestamp used for the `min_age` +<4> The current phase +<5> The timestamp for when the index entered the current phase +<6> The current action +<7> The timestamp for when the index entered the current action +<8> The current step +<9> The timestamp for when the index entered the current step + +When the policy is running on the index the response will contain a +`phase_execution` object that describes the exact phase that is being run. +Changes to the underlying policy will not affect this index until the current +phase definition has been completely executed. 
+ +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000069": { + "index": "test-000069", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date_millis": 1538475653281, + "lifecycle_date": "2018-10-15T13:45:21.981Z", + "phase": "hot", + "phase_time_millis": 1538475653317, + "phase_time": "2018-10-15T13:45:22.577Z", + "action": "rollover", + "action_time_millis": 1538475653317, + "action_time": "2018-10-15T13:45:22.577Z", + "step": "attempt_rollover", + "step_time_millis": 1538475653317, + "step_time": "2018-10-15T13:45:22.577Z", + "phase_execution": { <1> + "policy": "my_lifecycle3", <2> + "phase_definition": { <3> + "min_age": "0ms", + "actions": { + "rollover": { + "max_age": "30s" + } + } + }, + "version": 3, <4> + "modified_date": "2018-10-15T13:21:41.576Z", <5> + "modified_date_in_millis": 1539609701576 <6> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> The phase execution information for this index in its current phase +<2> The policy that this phase definition was loaded from +<3> The phase definition itself. This is the JSON for the phase loaded from the +policy at the time the index entered the current phase +<4> The version of the policy at the time the phase definition was loaded +<5> The last modified date of the policy at the time the phase definition was loaded +<6> The last modified epoch time of the policy at the time the phase definition was loaded + + +If the policy is waiting for a step to complete for the index, the response will contain step information such as: + +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000020": { + "index": "test-000020", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date_millis": 1538475653281, + "lifecycle_date": "2018-10-15T13:45:21.981Z", + "phase": "warm", + "phase_time_millis": 1538475653317, + "phase_time": "2018-10-15T13:45:22.577Z", + "action": "allocate", + "action_time_millis": 1538475653317, + "action_time": "2018-10-15T13:45:22.577Z", + "step": "check-allocation", + "step_time_millis": 1538475653317, + "step_time": "2018-10-15T13:45:22.577Z", + "step_info": { <1> + "message": "Waiting for all shard copies to be active", + "shards_left_to_allocate": -1, + "all_shards_active": false, + "actual_replicas": 2 + }, + "phase_execution": { + "policy": "my_lifecycle3", + "phase_definition": { + "min_age": "0ms", + "actions": { + "allocate": { + "number_of_replicas": 2, + "include": { + "box_type": "warm" + }, + "exclude": {}, + "require": {} + }, + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "version": 2, + "modified_date": "2018-10-15T13:20:02.489Z", + "modified_date_in_millis": 1539609602489 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> `step_info` shows information about what ILM is waiting for on this index. +In this case we are waiting for all shard copies of the index to be active. + +If the index is in the ERROR step, something has gone wrong when executing a +step in the policy, and it will need to be investigated and resolved for the index +to make progress. To help determine how to resolve the error, the explain response +will show the step that failed in `failed_step`, and the information on the error +that occurred in `step_info`.
+ +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000056": { + "index": "test-000056", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date_millis": 1538475653281, + "lifecycle_date": "2018-10-15T13:45:21.981Z", + "phase": "hot", + "phase_time_millis": 1538475653317, + "phase_time": "2018-10-15T13:45:22.577Z", + "action": "rollover", + "action_time_millis": 1538475653317, + "action_time": "2018-10-15T13:45:22.577Z", + "step": "ERROR", + "step_time_millis": 1538475653317, + "step_time": "2018-10-15T13:45:22.577Z", + "failed_step": "attempt_rollover", <1> + "step_info": { <2> + "type": "resource_already_exists_exception", + "reason": "index [test-000057/H7lF9n36Rzqa-KfKcnGQMg] already exists", + "index_uuid": "H7lF9n36Rzqa-KfKcnGQMg", + "index": "test-000057" + }, + "phase_execution": { + "policy": "my_lifecycle3", + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_age": "30s" + } + } + }, + "version": 3, + "modified_date": "2018-10-15T13:21:41.576Z", + "modified_date_in_millis": 1539609701576 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> The step that caused an error +<2> Information on the error that occurred. In this case the next index already +existed when the rollover operation was performed diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc new file mode 100644 index 0000000000000..dbc8a572903b3 --- /dev/null +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -0,0 +1,115 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-get-lifecycle]] +=== Get Lifecycle Policy API +++++ +Get Policy +++++ + +Retrieves an existing policy + +==== Request + +`GET _ilm/policy` +`GET _ilm/policy/` + +==== Description + +This API returns a policy definition along with some of its metadata like +its last modified date and version. If no path parameters are provided, then +all the policies defined will be returned. + +==== Path Parameters + +`policy` (optional):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. 
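+
+For example, a request that overrides the default master node timeout might
+look like the following sketch (the endpoint and parameter are documented
+above; the policy name and timeout value here are illustrative):
+
+[source,js]
+--------------------------------------------------
+GET _ilm/policy/my_policy?master_timeout=45s
+--------------------------------------------------
+// NOTCONSOLE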
+
+
+==== Examples
+
+The following example retrieves the policy named `my_policy`:
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+GET _ilm/policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "my_policy": {
+    "version": 1, <1>
+    "modified_date": 82392349, <2>
+    "policy": {
+      "phases": {
+        "warm": {
+          "min_age": "10d",
+          "actions": {
+            "forcemerge": {
+              "max_num_segments": 1
+            }
+          }
+        },
+        "delete": {
+          "min_age": "30d",
+          "actions": {
+            "delete": {}
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/]
+<1> The version of the policy. This is increased whenever the policy is updated
+<2> The timestamp when this policy was last modified
diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc
new file mode 100644
index 0000000000000..8f5d2289ff2ea
--- /dev/null
+++ b/docs/reference/ilm/apis/get-status.asciidoc
@@ -0,0 +1,55 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-get-status]]
+=== Get ILM Status API
+++++
+Get ILM Status
+++++
+
+Gets the current status for ILM.
+
+==== Request
+
+`GET /_ilm/status`
+
+==== Description
+
+This API will return the current status of the ILM plugin. The response contains
+an `operation_mode` field, which shows whether the ILM plugin is `RUNNING`, `STOPPING`,
+or `STOPPED`. This `operation_mode` is controlled by the <>
+and <> APIs.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  get operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example retrieves the current status of the ILM plugin.
+
+[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc
new file mode 100644
index 0000000000000..49c7d2155d516
--- /dev/null
+++ b/docs/reference/ilm/apis/ilm-api.asciidoc
@@ -0,0 +1,42 @@
+[[index-lifecycle-management-api]]
+== Index Lifecycle Management API
+
+You can use the following APIs to manage policies on indices.
+
+[float]
+[[ilm-api-policy-endpoint]]
+=== Policy Management APIs
+
+* <>
+* <>
+* <>
+
+[float]
+[[ilm-api-index-endpoint]]
+=== Index Management APIs
+
+* <>
+* <>
+
+[float]
+[[ilm-api-management-endpoint]]
+=== Operation Management APIs
+
+* <>
+* <>
+* <>
+* <>
+
+
+include::put-lifecycle.asciidoc[]
+include::get-lifecycle.asciidoc[]
+include::delete-lifecycle.asciidoc[]
+
+include::move-to-step.asciidoc[]
+include::remove-policy.asciidoc[]
+include::retry-policy.asciidoc[]
+
+include::get-status.asciidoc[]
+include::explain.asciidoc[]
+include::start.asciidoc[]
+include::stop.asciidoc[]
diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc
new file mode 100644
index 0000000000000..c34b800856c10
--- /dev/null
+++ b/docs/reference/ilm/apis/move-to-step.asciidoc
@@ -0,0 +1,121 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-move-to-step]]
+=== Move To Step API
+++++
+Move To Step
+++++
+
+Moves a managed index into a specific execution step of its policy.
+
+==== Request
+
+`POST _ilm/move/`
+
+==== Description
+
+WARNING: This is an expert API that may lead to unintended data loss. When used,
+an index's policy will begin executing at the specified step. It will execute
+the specified step even if that step has already been executed. Since this is a
+potentially dangerous action, specifying both the current step and the next
+step to move to is required in the body of the request.
+
+This API changes the current step for the specified index to the step supplied
+in the body of the request.
+
+==== Path Parameters
+
+`index` (required)::
+  (string) Identifier for the index.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  move operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example moves the index `my_index` from the initial step to the
+forcemerge step:
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST _ilm/move/my_index
+{
+  "current_step": { <1>
+    "phase": "new",
+    "action": "complete",
+    "name": "complete"
+  },
+  "next_step": { <2>
+    "phase": "warm",
+    "action": "forcemerge",
+    "name": "forcemerge"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> The step that the index is currently expected to be executing
+<2> The step that the index should move to when executing this request
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
+
+NOTE: An error will be returned if the index is no longer executing the step
+specified in `current_step`. This is so the index is not moved from an
+unexpected step into the `next_step`.
diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc
new file mode 100644
index 0000000000000..36650078db652
--- /dev/null
+++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc
@@ -0,0 +1,82 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-put-lifecycle]]
+=== Put Lifecycle Policy API
+++++
+Put Policy
+++++
+
+Creates or updates an ILM policy.
+
+==== Request
+
+`PUT _ilm/policy/`
+
+==== Description
+
+This API creates a new lifecycle policy, or updates an existing one with the same
+identifier. Each call will replace the existing policy and increment the `version`
+associated with the policy.
+
+NOTE: The `version` is only for informational purposes. Only the latest version
+of the policy is stored.
+
+==== Path Parameters
+
+`policy` (required)::
+  (string) Identifier for the policy.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  PUT operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example creates a new policy named `my_policy`:
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+----
+{
+  "acknowledged": true
+}
+----
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/apis/remove-policy.asciidoc b/docs/reference/ilm/apis/remove-policy.asciidoc
new file mode 100644
index 0000000000000..f37ac9a715d28
--- /dev/null
+++ b/docs/reference/ilm/apis/remove-policy.asciidoc
@@ -0,0 +1,98 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-remove-policy]]
+=== Remove Policy On Index API
+++++
+Remove Policy From Index
+++++
+
+Removes the assigned policy from the specified index.
+
+==== Request
+
+`POST /_ilm/remove`
+
+==== Description
+
+This action removes a policy from managing an index. It is effectively the same as setting an index's
+`index.lifecycle.name` setting to `null`.
+
+==== Path Parameters
+
+`index` (required)::
+  (string) Identifier for the index.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the completion of the
+  operation. When this period of time elapses, the API fails and returns
+  an error. The default value is `30s`. For more information about time units,
+  see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example removes the policy `my_policy` from the index `my_index`.
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST my_index/_ilm/remove
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "has_failures" : false,
+  "failed_indexes" : []
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc
new file mode 100644
index 0000000000000..7c81f9423ef12
--- /dev/null
+++ b/docs/reference/ilm/apis/retry-policy.asciidoc
@@ -0,0 +1,59 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-retry-policy]]
+=== Retry Policy Execution API
+++++
+Retry Policy Execution
+++++
+
+Retries executing the policy for an index which has errored.
+
+==== Request
+
+`POST /_ilm/retry`
+
+==== Description
+
+This API will re-run a policy that is currently in the ERROR step.
It will set the +policy back to the step where the error occurred and attempt to re-execute it. +Information on whether an index is in the ERROR step can be obtained from the +<> + +==== Path Parameters + +`index` (required):: + (string) Identifier for the indices to retry in comma-separated format. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + retry operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example retries the policy for index `my_index`. + +[source,js] +-------------------------------------------------- +POST my_index/_ilm/retry +-------------------------------------------------- +// NOTCONSOLE + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc new file mode 100644 index 0000000000000..073a584e4d872 --- /dev/null +++ b/docs/reference/ilm/apis/start.asciidoc @@ -0,0 +1,90 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-start]] +=== Start ILM API +++++ +Start ILM +++++ + +Start the ILM plugin + +==== Request + +`POST /_ilm/start` + +==== Description + +This API will start the ILM plugin if it is currently stopped. ILM is started +by default when the cluster is formed so this API is only needed if ILM has +been stopped using the <>. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + start operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example starts the ILM plugin. 
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+
+POST _ilm/stop
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST _ilm/start
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc
new file mode 100644
index 0000000000000..cdc038adabcfc
--- /dev/null
+++ b/docs/reference/ilm/apis/stop.asciidoc
@@ -0,0 +1,101 @@
+[role="xpack"]
+[testenv="basic"]
+[[ilm-stop]]
+=== Stop ILM API
+++++
+Stop ILM
+++++
+
+Stop the ILM plugin.
+
+==== Request
+
+`POST /_ilm/stop`
+
+==== Description
+
+This API will stop the ILM plugin. This can be used during periods where
+maintenance is required and ILM should not perform any actions on any indices.
+The API will return as soon as the stop request has been acknowledged, but the
+plugin may not stop immediately; it may need to wait for some operations
+to finish before it is stopped. Progress can be seen using the
+<> API.
+
+==== Request Parameters
+
+`timeout`::
+  (time units) Specifies the period of time to wait for the response. When this
+  period of time elapses, the API fails and returns an error. The default value
+  is `30s`. For more information about time units, see <>.
+
+`master_timeout`::
+  (time units) Specifies the period of time to wait for the connection with master.
+  When this period of time elapses, the API fails and returns an error.
+  The default value is `30s`. For more information about time units, see <>.
+
+
+==== Examples
+
+The following example stops the ILM plugin.
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST _ilm/stop
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+If the request does not encounter errors, you receive the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST _ilm/start
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
diff --git a/docs/reference/ilm/get-index-lifecycle-information.asciidoc b/docs/reference/ilm/get-index-lifecycle-information.asciidoc
new file mode 100644
index 0000000000000..3d5dc8a172010
--- /dev/null
+++ b/docs/reference/ilm/get-index-lifecycle-information.asciidoc
@@ -0,0 +1,11 @@
+[role="xpack"]
+[[get-index-lifecycle-information]]
+== Get index lifecycle information
+
+Execution Model
+Discuss how actions are actually split up into discrete steps and how you can see more information about where an index is within a policy (info and all)
+Talk about the jump-to-step API
+Error Handling
+Show error in explain API
+Demonstrate the retry API
+Show how to get a sense of progress for things like the allocate step
diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc
new file mode 100644
index 0000000000000..ad3596e92a1ad
--- /dev/null
+++ b/docs/reference/ilm/getting-started-ilm.asciidoc
@@ -0,0 +1,15 @@
+[role="xpack"]
+[[getting-started-index-lifecycle-management]]
+== Getting started with {ilm}
+
+Create a policy that rolls over after 1 day and deletes an index after 30 days
+
+Show create policy API req/res
+
+Show assign policy to index API req/res
+
+Show both the API and how it is done with `index.lifecycle.name` using the
+create-index API
+
+Show explain API to show current state, but ignore the "step" related info,
+only focus on managed/phase/action
diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc
new file mode 100644
index 0000000000000..d85f92fb1c28a
--- /dev/null
+++ b/docs/reference/ilm/index.asciidoc
@@ -0,0 +1,62 @@
+[role="xpack"]
+[testenv="basic"]
+[[index-lifecycle-management]]
+= Managing Indices
+
+:ilm: index lifecycle management
+:ILM: Index lifecycle management
+[partintro]
+--
+The <> enable you to automate how you
+want to manage your indices over time. Rather than simply performing management
+actions on your indices on a set schedule, you can base actions on other factors
+such as shard size and performance requirements.
+
+You control how indices are handled as they age by attaching a
+lifecycle policy to the index template used to create them. You can update
+the policy to modify the lifecycle of both new and existing indices.
+
+For time series indices, there are four stages in the index lifecycle:
+
+* Hot--the index is actively being updated and queried.
+* Warm--the index is no longer being updated, but is still being queried.
+* Cold--the index is no longer being updated and is seldom queried. The
+information still needs to be searchable, but it's okay if those queries are
+slower.
+* Delete--the index is no longer needed and can safely be deleted.
+
+The lifecycle policy governs how the index transitions through these stages and
+the actions that are performed on the index at each stage. The policy can
+specify:
+
+* The maximum size or age at which you want to roll over to a new index.
+* The point at which the index is no longer being updated and the number of
+primary shards can be reduced.
+* When to force a merge to permanently delete documents marked for deletion.
+* The point at which the index can be moved to less performant hardware.
+* The point at which the availability is not as critical and the number of
+replicas can be reduced.
+* When the index can be safely deleted.
+
+For example, if you are indexing metrics data from a fleet of ATMs into
+Elasticsearch, you might define a policy that says:
+
+. When the index reaches 5GB, roll over to a new index.
+. Move the old index into the warm stage, mark it read only, and shrink it down
+to a single shard.
+. After 7 days, move the index into the cold stage and move it to less expensive
+hardware.
+. Delete the index once the required 30-day retention period is reached.
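+
+Expressed as an {ilm} policy, that strategy might look something like the
+following sketch. The action names are real {ilm} actions, but the attribute
+used to identify cold hardware (`box_type` here) is an assumption that depends
+on how your nodes are tagged:
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/atm_metrics_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "5GB" <1>
+          }
+        }
+      },
+      "warm": {
+        "actions": {
+          "readonly": {}, <2>
+          "shrink": {
+            "number_of_shards": 1
+          }
+        }
+      },
+      "cold": {
+        "min_age": "7d", <3>
+        "actions": {
+          "allocate": {
+            "require": {
+              "box_type": "cold"
+            }
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d", <4>
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+<1> Roll over when the index reaches 5GB.
+<2> Mark the old index read only and shrink it down to a single shard.
+<3> After 7 days, move the index onto nodes tagged as cold hardware.
+<4> Delete the index once the 30-day retention period is reached.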
+--
+
+include::getting-started-ilm.asciidoc[]
+
+include::using-policies-rollover.asciidoc[]
+
+include::set-up-lifecycle-policy.asciidoc[]
+
+include::update-lifecycle-policy.asciidoc[]
+
+include::get-index-lifecycle-information.asciidoc[]
+
+include::start-stop-ilm.asciidoc[]
diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
new file mode 100644
index 0000000000000..7f5bb84c598a4
--- /dev/null
+++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -0,0 +1,112 @@
+[role="xpack"]
+[testenv="basic"]
+[[set-up-lifecycle-policy]]
+== Set up {ilm} policy
+
+In order for an index to use an {ilm} policy to manage its lifecycle, we must
+first define a lifecycle policy for it to use. The following request creates a
+policy called `my_policy` in Elasticsearch, which we can later use to manage
+our indices.
+
+[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB" <1>
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {} <2>
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+<1> Roll over the index when it reaches 25GB in size
+<2> Delete the index when it is 30 days old
+
+{ilm} will manage an index using the policy defined in the
+`index.lifecycle.name` index setting. If this setting does not exist in the
+settings for a particular index, {ilm} will not manage that index.
+
+To set the policy for an index, there are two options:
+
+1. Apply the policy to an index template and bootstrap creating the first index
+2. Apply the policy to a new index in a create index request
+
+=== Applying a policy to an index template
+
+The `index.lifecycle.name` setting can be set in an index template so that it
+is automatically applied to indices matching the template's index pattern:
+
+[source,js]
+-----------------------
+PUT _template/my_template
+{
+  "index_patterns": ["test-*"], <1>
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1,
+    "index.lifecycle.name": "my_policy", <2>
+    "index.lifecycle.rollover_alias": "test-alias"
+  }
+}
+-----------------------
+// CONSOLE
+<1> This template will be applied to all indices which have a name starting
+with `test-`
+<2> The template will set the policy to be used to `my_policy`
+
+Now that a policy exists and is used in an index template, we can create an
+initial index which will be managed by our policy:
+
+[source,js]
+-----------------------
+PUT test-000001
+{
+  "aliases": {
+    "test-alias":{
+      "is_write_index": true <1>
+    }
+  }
+}
+-----------------------
+// CONSOLE
+<1> Set this initial index to be the write index for this alias.
+
+We can now write data to the `test-alias` alias. Because we have a rollover
+action defined in our policy, {ilm} will automatically create a new index and
+roll the alias over to use the new index when the index grows larger than 25GB.
+
+=== Applying a policy to a create index request
+
+The `index.lifecycle.name` setting can be set on an individual create index
+request, so that {ilm} immediately starts managing the index:
+
+[source,js]
+-----------------------
+PUT test-index
+{
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1,
+    "index.lifecycle.name": "my_policy"
+  }
+}
+-----------------------
+// CONSOLE
+
+IMPORTANT: It is not recommended to use the create index API with a policy that
+defines a rollover action. If you do so, the new index created by the rollover
+will not carry forward the policy. Always use index templates to define
+policies with rollover actions.
+
diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc
new file mode 100644
index 0000000000000..938b97d44721f
--- /dev/null
+++ b/docs/reference/ilm/start-stop-ilm.asciidoc
@@ -0,0 +1,168 @@
+[role="xpack"]
+[testenv="basic"]
+[[start-stop-ilm]]
+== Start and stop {ilm}
+
+All indices that are managed by ILM will continue to execute
+their policies. There may be times when this is not desired on certain
+indices, or maybe even on all the indices in a cluster. For example,
+there may be scheduled maintenance windows when cluster topology
+changes are desired that could impact running ILM actions. For this reason,
+ILM has two ways to disable operations.
+
+ILM runs by default. To see the current operating status of ILM, use the
+<>.
+ +//// +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy" + } +} +-------------------------------------------------- +// CONSOLE +//// + +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "RUNNING" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +The operating modes of ILM: + + +.ILM Operating Modes +|=== +|Name |Description +|RUNNING |Normal operation where all policies are executed as normal +|STOPPING|ILM has received a request to stop but is still processing some policies +|STOPPED |This represents a state where no policies are executed +|=== + +=== Stopping ILM + +The ILM service can be paused such that no further steps will be executed +using the <>. + +[source,js] +-------------------------------------------------- +POST _ilm/stop +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +When stopped, all further policy actions will be halted. This will +be reflected in the Status API + +//// +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "STOPPING" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +The ILM service will then, asynchronously, run all policies to a point +where it is safe to stop. After ILM verifies that it is safe, it will +move to the `STOPPED` mode. + +//// +[source,js] +-------------------------------------------------- +PUT trigger_ilm_cs_action + +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "operation_mode": "STOPPED" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE + +=== Starting ILM + +To start ILM and continue executing policies, use the <>. + + +[source,js] +-------------------------------------------------- +POST _ilm/start +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +//// +[source,js] +-------------------------------------------------- +GET _ilm/status +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +The Start API will send a request to the ILM service to immediately begin +normal operations. 
+
+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE
diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc
new file mode 100644
index 0000000000000..334b5a953fd0e
--- /dev/null
+++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc
@@ -0,0 +1,432 @@
+[role="xpack"]
+[testenv="basic"]
+[[update-lifecycle-policy]]
+== Update lifecycle policy
+
+Updating existing ILM policies is useful to fix mistakes or change
+strategies for newly created indices. It is possible to update policy definitions
+and an index's `index.lifecycle.name` setting independently. To prevent a phase
+definition from being modified while it is being executed on an index, each index
+will keep the version of the phase definition it began executing with until that
+phase completes.
+
+There are three scenarios for examining the behavior of updating policies and
+their effects on policy execution on indices.
+
+=== Updates to policies not managing indices
+
+Updating a policy that is not referenced by any indices has no immediate effect.
+When an index is later assigned to the policy, it will use the latest version of
+that policy.
+
+To show this, let's create a policy `my_policy`.
+
+[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+
+This newly defined policy will be created and assigned a version equal
+to 1. Since we haven't assigned any indices to this policy, any updates that
+occur will be reflected completely on indices that are newly set to be managed
+by this policy.
+
+Updating the delete phase's minimum age can be done in an update request.
+
+[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d", <1>
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]
+<1> Update `min_age` to 10 days
+
+//////////
+[source,js]
+--------------------------------------------------
+GET _ilm/policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+//////////
+
+When we get the policy, we will see it reflect our latest changes, but
+with its version bumped to 2.
+
+[source,js]
+--------------------------------------------------
+{
+  "my_policy": {
+    "version": 2, <1>
+    "modified_date": 82392349, <2>
+    "policy": {
+      "phases": {
+        "hot": {
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_size": "25gb"
+            }
+          }
+        },
+        "delete": {
+          "min_age": "10d",
+          "actions": {
+            "delete": {}
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/]
+<1> The updated version value
+<2> The timestamp when this policy was updated last.
+
+Afterwards, any indices set to `my_policy` will execute against version 2 of
+the policy.
+
+=== Updates to executing policies
+
+Indices preserve the phase definition from the latest policy version that existed
+at the time they entered that phase.
Changes to the currently-executing phase within policy updates will
+not be reflected during execution. This means that updates to the `hot` phase, for example, will not affect
+indices that are currently executing the corresponding `hot` phase.
+
+Let's say we have an index `my_index` managed by the `my_executing_policy` definition below.
+
+[source,js]
+------------------------
+PUT _ilm/policy/my_executing_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_docs": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+
+////
+[source,js]
+------------------------
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_executing_policy"
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]
+////
+
+The <> is useful to introspect managed indices to see which phase definition they are currently executing.
+Using this API, we can find out that `my_index` is currently attempting to be rolled over.
+
+[source,js]
+--------------------------------------------------
+GET my_index/_ilm/explain
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "my_index": {
+      "index": "my_index",
+      "managed": true,
+      "policy": "my_executing_policy",
+      "lifecycle_date_millis": 1538475653281,
+      "phase": "hot",
+      "phase_time_millis": 1538475653317,
+      "action": "rollover",
+      "action_time_millis": 1538475653317,
+      "step": "attempt_rollover",
+      "step_time_millis": 1538475653317,
+      "phase_execution": {
+        "policy": "my_executing_policy",
+        "modified_date_in_millis": 1538475653317,
+        "version": 1,
+        "phase_definition": {
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_docs": 1
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/]
+// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/]
+// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/]
+// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/]
+// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/]
+
+Let's update `my_executing_policy` to remove the rollover action and, instead, go directly into a newly introduced `warm` phase.
+
+[source,js]
+------------------------
+PUT _ilm/policy/my_executing_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "1d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]
+
+Now, version 2 of this policy has no `hot` phase, but if we run the Explain API again, we will see that nothing has changed.
+The index `my_index` is still executing version 1 of the policy.
+ +//// +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date_millis": 1538475653281, + "phase": "hot", + "phase_time_millis": 1538475653317, + "action": "rollover", + "action_time_millis": 1538475653317, + "step": "attempt_rollover", + "step_time_millis": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 1, + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_docs": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] +// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] +// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] +// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] +// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/] + +After indexing one document into `my_index` so that rollover succeeds and moves onto the next phase, we will notice something new. The +index will move into the next phase in the updated version 2 of its policy. + +//// +[source,js] +-------------------------------------------------- +PUT my_index/_doc/1 +{ + "foo": "bar" +} + +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date_millis": 1538475653281, + "phase": "warm", + "phase_time_millis": 1538475653317, + "action": "forcemerge", + "action_time_millis": 1538475653317, + "step": "forcemerge", + "step_time_millis": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 2, <1> + "phase_definition": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:There is no way to force the index to move to the next step in a timely manner] +<1> The index has moved to using version 2 of the policy + +`my_index` will move to the next phase in the latest policy definition, which is the newly added `warm` phase. + +=== Switching policies for an index + +Setting `index.lifecycle.name` to a different policy behaves much like a policy update, but instead of just +switching to a different version, it switches to a different policy. + +After setting a policy for an index, we can switch out `my_policy` with +`my_other_policy` by just updating the index's `index.lifecycle.name` +setting to the new policy. After completing its currently executed phase, +it will move on to the next phase in `my_other_policy`. 
So if it was in the
+`hot` phase before, it will move to the `delete` phase once the `hot` phase has concluded.
+
+////
+[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT _ilm/policy/my_other_policy
+{
+  "policy": {
+    "phases": {
+      "delete": {
+        "min_age": "1d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+------------------------
+// CONSOLE
+
+////
+
+[source,js]
+--------------------------------------------------
+PUT my_index/_settings
+{
+  "lifecycle.name": "my_other_policy"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The change to the new policy will not happen immediately. The currently executing phase
+of the existing policy for `my_index` will continue to execute until it completes. Once
+completed, `my_index` will move to being managed by `my_other_policy`.
diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc
new file mode 100644
index 0000000000000..f7982af4fec81
--- /dev/null
+++ b/docs/reference/ilm/using-policies-rollover.asciidoc
@@ -0,0 +1,117 @@
+[role="xpack"]
+[testenv="basic"]
+[[using-policies-rollover]]
+== Using policies to manage index rollover
+
+The rollover action enables you to automatically roll over to a new index based
+on the index size, document count, or age. When a rollover is triggered, a new
+index is created, the write alias is updated to point to the new index, and all
+subsequent updates are written to the new index.
+
+Rolling over to a new index based on size, document count, or age is preferable
+to time-based rollovers. Rolling over at an arbitrary time often results in
+many small indices, which can have a negative impact on performance and
+resource usage.
+
+You control when the rollover action is triggered by specifying one or more
+rollover parameters. The rollover is performed once any of the criteria are
+met. Because the criteria are checked periodically, the index might grow
+slightly beyond the specified threshold. To control how often the criteria are
+checked, specify the `indices.lifecycle.poll_interval` cluster setting.
+
+The rollover action takes the following parameters:
+
+.`rollover` Action Parameters
+|===
+|Name |Description
+|max_size |The maximum estimated size the index is allowed to grow
+to. Defaults to `null`. Optional.
+|max_docs |The maximum number of documents the index should
+contain. Defaults to `null`. Optional.
+|max_age |The maximum age of the index. Defaults to `null`. Optional.
+|===
+
+These parameters are used to determine when the index is considered "full" and
+a rollover should be performed. Where multiple criteria are defined, the
+rollover operation will be performed once any of the criteria are met.
+
+The following request defines a policy with a rollover action that triggers
+when the index size reaches 25GB. The old index is subsequently deleted after
+30 days.
+
+NOTE: Once an index rolls over, {ilm} uses the timestamp of the rollover
+operation rather than the index creation time to evaluate when to move the
+index to the next phase. For indices that have rolled over, the `min_age`
+criteria specified for a phase is relative to the rollover time.
In +this example, that means the index will be deleted 30 days after rollover, not +30 days from when the index was created. + +[source,js] +-------------------------------------------------- +PUT /_ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +To use an {ilm} policy, you need to specify it in the index template used to +create the indices. For example, the following template associates `my_policy` +with indices created from the template `my_template`. + +[source,js] +----------------------- +PUT _template/my_template +{ + "index_patterns": ["test-*"], <1> + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1, + "index.lifecycle.name": "my_policy", <2> + "index.lifecycle.rollover_alias": "test-alias" <3> + } +} +----------------------- +// CONSOLE +<1> Template applies to all indices with the prefix test- +<2> Associates my_policy with all indices created with this template +<3> Rolls over the write alias test when the rollover action is triggered + +To be able to start using the policy for these `test-*` indexes we need to +bootstrap the process by creating the first index. + +[source,js] +----------------------- +PUT test-000001 <1> +{ + "aliases": { + "test-alias":{ + "is_write_index": true <2> + } + } +} +----------------------- +// CONSOLE +<1> Creates the index called test-000001. The rollover action increments the +suffix number for each subsequent index. +<2> Designates this index as the write index for this alias. + +When the rollover is performed, the newly-created index is set as the write +index for the rolled over alias. Documents sent to the alias are indexed into +the new index, enabling indexing to continue uninterrupted. diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index dd841fccda761..48cb7700a52ea 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -55,7 +55,7 @@ include::index-modules.asciidoc[] include::ingest.asciidoc[] -include::ccr/index.asciidoc[] +include::ilm/index.asciidoc[] include::sql/index.asciidoc[] diff --git a/docs/reference/ingest/ingest-node-common-processor.asciidoc b/docs/reference/ingest/ingest-node-common-processor.asciidoc new file mode 100644 index 0000000000000..dcf8b63630b4b --- /dev/null +++ b/docs/reference/ingest/ingest-node-common-processor.asciidoc @@ -0,0 +1,5 @@ +| `if` | no | - | Conditionally execute this processor. +| `on_failure` | no | - | Handle failures for this processor. See <>. +| `ignore_failure` | no | `false` | Ignore failures for this processor. See <>. +| `tag` | no | - | An identifier for this processor. Useful for debugging and metrics. +// TODO: See <>. <-- for the if description once PR 35044 is merged \ No newline at end of file diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index eeb914facc2c6..070892d6f02e7 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -764,6 +764,12 @@ A node will not start if either of these plugins are not available. The <> can be used to fetch ingest usage statistics, globally and on a per pipeline basis. Useful to find out which pipelines are used the most or spent the most time on preprocessing. 
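+
+For example, a minimal request for the ingest statistics of every node might
+look like the following sketch (the `ingest` metric is one of the standard
+node stats metric filters):
+
+[source,js]
+--------------------------------------------------
+GET _nodes/stats/ingest
+--------------------------------------------------
+// NOTCONSOLE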
+[float] +=== Ingest Processor Plugins + +Additional ingest processors can be implemented and installed as Elasticsearch {plugins}/intro.html[plugins]. +See {plugins}/ingest.html[Ingest plugins] for information about the available ingest plugins. + [[append-processor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array. @@ -778,6 +784,7 @@ Accepts a single value or an array of values. | Name | Required | Default | Description | `field` | yes | - | The field to be appended to. Supports <>. | `value` | yes | - | The value to be appended. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -806,6 +813,7 @@ the field is not a supported format or resultant value exceeds 2^63. | `field` | yes | - | The field to convert | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -844,6 +852,7 @@ still be updated with the unconverted field value. | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `type` | yes | - | The type to convert the existing value to | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -876,6 +885,7 @@ in the same order they were defined as part of the processor definition. | `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. | `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field: @@ -1065,6 +1075,7 @@ understands this to mean `2016-04-01` as is explained in the <>. +include::ingest-node-common-processor.asciidoc[] |====== [[dissect-processor]] @@ -1134,7 +1145,7 @@ See <> for more information. | `pattern` | yes | - | The pattern to apply to the field | `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields. | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -| ` +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1162,7 +1173,6 @@ modifiers. | `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> | `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> | `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> -| ` |====== [[dissect-modifier-skip-right-padding]] @@ -1265,6 +1275,14 @@ Reference key modifier example Drops the document without raising any errors. This is useful to prevent the document from getting indexed based on some condition. 
+[[drop-options]] +.Drop Options +[options="header"] +|====== +| Name | Required | Default | Description +include::ingest-node-common-processor.asciidoc[] +|====== + [source,js] -------------------------------------------------- { @@ -1289,6 +1307,7 @@ Otherwise these <> can't be accessed by any | Name | Required | Default | Description | `field` | yes | - | The field to expand into an object field | `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1406,6 +1425,7 @@ to the requester. |====== | Name | Required | Default | Description | `message` | yes | - | The error message thrown by the processor. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1452,6 +1472,7 @@ then it aborts the execution and leaves the array unmodified. | `field` | yes | - | The array field | `processor` | yes | - | The processor to execute against each field | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Assume the following document: @@ -1645,6 +1666,7 @@ Grok expression. | `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition. | `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched. | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Here is an example of using the provided patterns to extract out and name structured fields from a string field in @@ -1919,6 +1941,7 @@ If the field is not a string, the processor will throw an exception. | `replacement` | yes | - | The string to replace the matching patterns with | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1946,6 +1969,7 @@ Throws an error when the field is not an array. | `field` | yes | - | The field to be separated | `separator` | yes | - | The separator character | `target_field` | no | `field` | The field to assign the joined value to, by default `field` is updated in-place +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1971,6 +1995,7 @@ Converts a JSON string into a structured JSON object. | `field` | yes | - | The field to be parsed | `target_field` | no | `field` | The field to insert the converted structured object into | `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. +include::ingest-node-common-processor.asciidoc[] |====== All JSON-supported types will be parsed (null, boolean, number, array, object, string). 
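+
+For example, a minimal sketch of a processor that parses a JSON string from
+one field into a structured object under another (the field names
+`string_source` and `json_target` here are illustrative):
+
+[source,js]
+--------------------------------------------------
+{
+  "json": {
+    "field": "string_source",
+    "target_field": "json_target"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE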
@@ -2082,6 +2107,7 @@ For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED` | `trim_key` | no | `null` | String of characters to trim from extracted keys | `trim_value` | no | `null` | String of characters to trim from extracted values | `strip_brackets` | no | `false` | If `true` strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values +include::ingest-node-common-processor.asciidoc[] |====== @@ -2097,6 +2123,7 @@ Converts a string to its lowercase equivalent. | `field` | yes | - | The field to make lowercase | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2119,6 +2146,7 @@ Executes another pipeline. |====== | Name | Required | Default | Description | `name` | yes | - | The name of the pipeline to execute +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2234,6 +2262,7 @@ Removes existing fields. If one field doesn't exist, an exception will be thrown | Name | Required | Default | Description | `field` | yes | - | Fields to be removed. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Here is an example to remove a single field: @@ -2272,6 +2301,7 @@ Renames an existing field. If the field doesn't exist or the new name is already | `field` | yes | - | The field to be renamed. Supports <>. | `target_field` | yes | - | The new name of the field. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2305,6 +2335,7 @@ caching see <>. | `id` | no | - | The stored script id to refer to | `source` | no | - | An inline script to be executed | `params` | no | - | Script Parameters +include::ingest-node-common-processor.asciidoc[] |====== One of `id` or `source` options must be provided in order to properly reference a script to execute. @@ -2401,6 +2432,7 @@ its value will be replaced with the provided one. | `field` | yes | - | The field to insert, upsert, or update. Supports <>. | `value` | yes | - | The value to be set for the field. Supports <>. | `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2414,6 +2446,43 @@ its value will be replaced with the provided one. -------------------------------------------------- // NOTCONSOLE + +[[ingest-node-set-security-user-processor]] +=== Set Security User Processor +Sets user-related details (such as `username`, `roles`, `email`, `full_name` +and `metadata` ) from the current +authenticated user to the current document by pre-processing the ingest. + +IMPORTANT: Requires an authenticated user for the index request. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into. 
+| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user-related properties are added to the `field`. +include::ingest-node-common-processor.asciidoc[] +|====== + +The following example adds all user details for the current authenticated user +to the `user` field for all documents that are processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + + [[split-processor]] === Split Processor Splits a field into an array using a separator character. Only works on string fields. @@ -2427,6 +2496,7 @@ Splits a field into an array using a separator character. Only works on string f | `separator` | yes | - | A regex which matches the separator, e.g. `,` or `\s+` | `target_field` | no | `field` | The field to assign the split value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2455,6 +2525,7 @@ Throws an error when the field is not an array. | `field` | yes | - | The field to be sorted | `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`. | `target_field` | no | `field` | The field to assign the sorted value to, by default `field` is updated in-place +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2482,6 +2553,7 @@ NOTE: This only works on leading and trailing whitespace. | `field` | yes | - | The string-valued field to trim whitespace from | `target_field` | no | `field` | The field to assign the trimmed value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2506,6 +2578,7 @@ Converts a string to its uppercase equivalent. | `field` | yes | - | The field to make uppercase | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2530,6 +2603,7 @@ URL-decodes a string | `field` | yes | - | The field to decode | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index fe7c6881a064f..fe28e77cd6322 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -49,11 +49,9 @@ GET _search <4> <3> This document will be indexed, but without indexing the `message` field. <4> Search returns both documents, but only the first is present in the terms aggregation. -TIP: The `ignore_above` setting is allowed to have different settings for -fields of the same name in the same index.
Its value can be updated on +TIP: The `ignore_above` setting can be updated on existing fields using the <>. - This option is also useful for protecting against Lucene's term byte-length limit of `32766`. diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 6779ab472ea36..988411246c42a 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -55,7 +55,6 @@ PUT my_index/_doc/match_value Fields referred to in a percolator query must *already* exist in the mapping associated with the index used for percolation. In order to make sure these fields exist, add or update a mapping via the <> or <> APIs. -Fields referred in a percolator query may exist in any type of the index containing the `percolator` field type. ===================================== diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 71a8e1aa0150c..34720bb182d4d 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -102,3 +102,8 @@ status 200 - OK is now returned instead at all times. The Put User API response was changed in 6.5.0 to add the `created` field outside of the user object where it previously had been. In 7.0.0 the user object has been removed in favor of the top level `created` field. + +[float] +==== Source filtering url parameters `_source_include` and `_source_exclude` have been removed + +These url parameters were deprecated in 6.x and have now been removed. Use `_source_includes` and `_source_excludes` instead. diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index afb2251fb1bcd..efd2d2c271a8d 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -106,6 +106,14 @@ To safeguard against this, the maximum length of regex that can be used in a Regexp Query request has been limited to 1000. This default maximum can be changed for a particular index with the index setting `index.max_regex_length`. +[float] +==== Limiting the number of auto-expanded fields + +Executing queries that use automatic expansion of fields (e.g. `query_string`, `simple_query_string` +or `multi_match`) can have performance issues for indices with a large number of fields. +To safeguard against this, a hard limit of 1024 fields has been introduced for queries +using the "all fields" mode ("default_field": "*") or other fieldname expansions (e.g. "foo*"). + [float] ==== Invalid `_search` request body diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 85648da4f0d25..7eab006efdc30 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -66,3 +66,33 @@ used. Therefore, these settings have been renamed from `search.remote.*` to in the cluster state, or set on dynamic settings updates, we will automatically upgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback settings will be removed in 8.0.0. + +[float] +[[include-realm-type-in-setting]] +==== Security realms settings + +The settings for all security realms must now include the realm type as part +of the setting name, and the explicit `type` setting has been removed.
+ +A realm that was previously configured as: +[source,yaml] +-------------------------------------------------- +xpack.security.authc.realms: + ldap1: + type: ldap + order: 1 + url: "ldaps://ldap.example.com/" +-------------------------------------------------- + +Must be migrated to: +[source,yaml] +-------------------------------------------------- +xpack.security.authc.realms: + ldap.ldap1: + order: 1 + url: "ldaps://ldap.example.com/" +-------------------------------------------------- + +Any realm-specific secure settings that have been stored in the elasticsearch +keystore (such as ldap bind passwords, or passwords for ssl keys) must be updated +in a similar way. diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index 5bd32750685a9..737df6e0cea21 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -74,7 +74,7 @@ chosen. structure finder produced its result. The default value is `false`. `format`:: - (string) The high level structure of the file. Valid values are `json`, `xml`, + (string) The high level structure of the file. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. If this parameter is not specified, the structure finder chooses one. @@ -259,7 +259,7 @@ If the request does not encounter errors, you receive the following result: "sample_start" : "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n", <3> "charset" : "UTF-8", <4> "has_byte_order_marker" : false, <5> - "format" : "json", <6> + "format" : "ndjson", <6> "need_client_timezone" : false, <7> "mappings" : { <8> "author" : { @@ -473,14 +473,14 @@ If the request does not encounter errors, you receive the following result: <1> `num_lines_analyzed` indicates how many lines of the file were analyzed. <2> `num_messages_analyzed` indicates how many distinct messages the lines contained. - For ND-JSON, this value is the same as `num_lines_analyzed`. For other file + For NDJSON, this value is the same as `num_lines_analyzed`. For other file formats, messages can span several lines. <3> `sample_start` reproduces the first two messages in the file verbatim. This may help to diagnose parse errors or accidental uploads of the wrong file. <4> `charset` indicates the character encoding used to parse the file. <5> For UTF character encodings, `has_byte_order_marker` indicates whether the file begins with a byte order marker. -<6> `format` is one of `json`, `xml`, `delimited` or `semi_structured_text`. +<6> `format` is one of `ndjson`, `xml`, `delimited` or `semi_structured_text`. <7> If a timestamp format is detected that does not include a timezone, `need_client_timezone` will be `true`. The server that parses the file must therefore be told the correct timezone by the client. diff --git a/docs/reference/modules/indices/query_cache.asciidoc b/docs/reference/modules/indices/query_cache.asciidoc index f6cdf71925a94..aaa1ab1742841 100644 --- a/docs/reference/modules/indices/query_cache.asciidoc +++ b/docs/reference/modules/indices/query_cache.asciidoc @@ -5,6 +5,7 @@ The query cache is responsible for caching the results of queries. There is one queries cache per node that is shared by all shards.
The cache implements an LRU eviction policy: when a cache becomes full, the least recently used data is evicted to make way for new data. +It is not possible to look at the contents being cached. The query cache only caches queries which are being used in a filter context. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 81d882f5f0eb6..4bf3073abd359 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -152,6 +152,15 @@ PUT _cluster/settings by default, but they can selectively be made optional by setting this setting to `true`. +`cluster.remote.${cluster_alias}.transport.ping_schedule`:: + + Sets the time interval between regular application-level ping messages that + are sent to ensure that transport connections to nodes belonging to remote + clusters are kept alive. If set to `-1`, application-level ping messages to + this remote cluster are not sent. If unset, application-level ping messages + are sent according to the global `transport.ping_schedule` setting, which + defaults to `-1`, meaning that pings are not sent. + [float] [[retrieve-remote-clusters-info]] === Retrieving remote clusters info diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 725808e193640..86202a98dd537 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -204,7 +204,7 @@ you can change this behavior by using the `script.cache.expire` setting. You can configure the size of this cache by using the `script.cache.max_size` setting. By default, the cache size is `100`. -NOTE: The size of stored scripts is limited to 65,535 bytes. This can be +NOTE: The size of scripts is limited to 65,535 bytes. This can be changed by setting `script.max_size_in_bytes` to increase that soft limit, but if scripts are really large then a <> should be considered. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 23051f622f755..48ae41ded2a78 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -112,8 +112,8 @@ which returns: ----------------------------------- // TESTRESPONSE -To retrieve information about multiple repositories, specify a -a comma-delimited list of repositories. You can also use the * wildcard when +To retrieve information about multiple repositories, specify a comma-delimited +list of repositories. You can also use the * wildcard when specifying repository names. For example, the following request retrieves information about all of the snapshot repositories that start with `repo` or contain `backup`: diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 257181f70c507..c1bc83230e597 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -46,9 +46,9 @@ between all nodes. Defaults to `false`. |`transport.ping_schedule` | Schedule a regular application-level ping message to ensure that transport connections between nodes are kept alive. Defaults to -`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable to -correctly configure TCP keep-alives instead of using this feature, because TCP -keep-alives apply to all kinds of long-lived connection and not just to +`5s` in the transport client and `-1` (disabled) elsewhere.
It is preferable +to correctly configure TCP keep-alives instead of using this feature, because +TCP keep-alives apply to all kinds of long-lived connections and not just to transport connections. |======================================================================= diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 3d4a37861f1ae..34cbee9c1699f 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -3,7 +3,9 @@ [[configuring-metricbeat]] === Monitoring {es} with {metricbeat} -beta[] In 6.5 and later, you can use {metricbeat} to collect data about {es} +beta[] + +In 6.5 and later, you can use {metricbeat} to collect data about {es} and ship it to the monitoring cluster, rather than routing it through exporters as described in <>. diff --git a/docs/reference/monitoring/images/metricbeat.png b/docs/reference/monitoring/images/metricbeat.png index bf6434dc4b40c..f74f856653043 100644 Binary files a/docs/reference/monitoring/images/metricbeat.png and b/docs/reference/monitoring/images/metricbeat.png differ diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 4b4a82594a110..7c5ca95623e83 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -208,10 +208,10 @@ not. The number value is of type float. [[function-random]] ==== Random -The `random_score` generates scores that are uniformly distributed in [0, 1[. -By default, it uses the internal Lucene doc ids as a source of randomness, -which is very efficient but unfortunately not reproducible since documents might -be renumbered by merges. +The `random_score` generates scores that are uniformly distributed from 0 up to +but not including 1. By default, it uses the internal Lucene doc ids as a +source of randomness, which is very efficient but unfortunately not +reproducible since documents might be renumbered by merges. In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. The final score will then be computed based on this seed, the diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 189f135fa3bc7..c33b227824bdf 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -1,7 +1,7 @@ [[query-dsl-geo-polygon-query]] === Geo Polygon Query -A query allowing to include hits that only fall within a polygon of +A query returning hits that only fall within a polygon of points. Here is an example: [source,js] @@ -17,9 +17,9 @@ GET /_search "geo_polygon" : { "person.location" : { "points" : [ - {"lat" : 40, "lon" : -70}, - {"lat" : 30, "lon" : -80}, - {"lat" : 20, "lon" : -90} + {"lat" : 40, "lon" : -70}, + {"lat" : 30, "lon" : -80}, + {"lat" : 20, "lon" : -90} ] } } @@ -49,7 +49,9 @@ or longitude, or `STRICT` (default is `STRICT`). [float] ===== Lat Long as Array -Format in `[lon, lat]`, note, the order of lon/lat here in order to +Format as `[lon, lat]` + +Note: the order of lon/lat here must conform with http://geojson.org/[GeoJSON]. 
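As a sketch of the array format, reusing the coordinates from the lat/lon example above (note that `lon` comes first, per GeoJSON):

[source,js]
--------------------------------------------------
GET /_search
{
    "query": {
        "geo_polygon" : {
            "person.location" : {
                "points" : [
                    [-70, 40],
                    [-80, 30],
                    [-90, 20]
                ]
            }
        }
    }
}
--------------------------------------------------
// NOTCONSOLE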
[source,js] diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 0d2661c37b862..1999bd26e0ce7 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -118,6 +118,10 @@ The above request will yield the following response: <2> The `_percolator_document_slot` field indicates which document has matched with this query. Useful when percolating multiple documents simultaneously. +TIP: To provide a simple example, this documentation uses one index `my-index` for both the percolate queries and documents. +This set-up can work well when there are just a few percolate queries registered. However, with heavier usage it is recommended +to store queries and documents in separate indices. Please see <> for more details. + [float] ==== Parameters @@ -643,6 +647,7 @@ The above search request returns a response similar to this: query with `_name` parameter set to `query1`. [float] +[[how-it-works]] ==== How it Works Under the Hood When indexing a document into an index that has the <> mapping configured, the query @@ -679,3 +684,11 @@ GET /_search NOTE: The above example assumes that there is a `query` field of type `percolator` in the mappings. + +Given the design of percolation, it often makes sense to use separate indices for the percolate queries and documents +being percolated, as opposed to a single index as we do in the examples. There are a few benefits to this approach: + +- Because percolate queries contain a different set of fields from the percolated documents, using two separate indices +allows for fields to be stored in a denser, more efficient way. +- Percolate queries do not scale in the same way as other queries, so percolation performance may benefit from using +a different index configuration, like the number of primary shards. diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index 620a175ff39a5..c5087d52f905e 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -139,3 +139,26 @@ GET _search // CONSOLE <1> This date will be converted to `2014-12-31T23:00:00 UTC`. <2> `now` is not affected by the `time_zone` parameter (dates must be stored as UTC). + +[[querying-range-fields]] +==== Querying range fields + +`range` queries can be used on fields of type <>, allowing you to +match a range specified in the query with a range field value in the document. +The `relation` parameter controls how these two ranges are matched: + +[horizontal] +`WITHIN`:: + + Matches documents whose range field is entirely within the query's range. + +`CONTAINS`:: + + Matches documents whose range field entirely contains the query's range. + +`INTERSECTS`:: + + Matches documents whose range field intersects the query's range. + This is the default value when querying range fields. + +For examples, see <> mapping type. diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index eb1924d2452c2..802caa2b1e026 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -16,6 +16,8 @@ Cross-Cluster-Search:: Rest API:: * The Clear Cache API only supports `POST` as HTTP method +* `CircuitBreakingException` was previously mapped to HTTP status code 503 and is now + mapped to HTTP status code 429.
Aggregations:: * The Percentiles and PercentileRanks aggregations now return `null` in the REST response, diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 5aef27e127500..eedc2dfa1f51f 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -10,6 +10,7 @@ directly to configure and access {xpack} features. * <> * <> * <> +* <> * <> * <> * <> @@ -22,6 +23,7 @@ directly to configure and access {xpack} features. include::info.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] +include::{es-repo-dir}/ilm/apis/ilm-api.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 125a7c3d1272e..d7e9e5202b111 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -68,6 +68,11 @@ Example response: "available" : true, "enabled" : true }, + "ilm" : { + "description" : "Index lifecycle management for the Elastic Stack", + "available" : true, + "enabled" : true + }, "logstash" : { "description" : "Logstash management component for X-Pack", "available" : true, diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 8e7fc69a00a6b..e2252a772184d 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -30,6 +30,7 @@ Rules for the `index` parameter: or using `_all`, is not permitted - Multiple non-rollup indices may be specified - Only one rollup index may be specified. If more than one are supplied an exception will be thrown +- Index patterns may be used, but if they match more than one rollup index an exception will be thrown. ==== Request Body diff --git a/docs/reference/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc index b61d1a743880f..c8a736450bde0 100644 --- a/docs/reference/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -21,6 +21,7 @@ follows: or using `_all`, is not permitted - Multiple non-rollup indices may be specified - Only one rollup index may be specified. If more than one are supplied an exception will be thrown +- Index patterns may be used, but if they match more than one rollup index an exception will be thrown. This limitation is driven by the logic that decides which jobs are the "best" for any given query. If you have ten jobs stored in a single index, which cover the source data with varying degrees of completeness and different intervals, the query needs to determine which set diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 399ff5b92f5c6..9ab8eb45b021d 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -122,7 +122,7 @@ This will yield the same result as the previous request. `_source`:: Set to `true` to retrieve the `_source` of the document explained. 
You can also - retrieve part of the document by using `_source_include` & `_source_exclude` (see <> for more details) + retrieve part of the document by using `_source_includes` & `_source_excludes` (see <> for more details) `stored_fields`:: Allows to control which stored fields to return as part of the diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 279bc0c0384c1..bfc50e774bff8 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -83,7 +83,7 @@ providing text to a numeric field) to be ignored. Defaults to false. hits was computed. |`_source`|Set to `false` to disable retrieval of the `_source` field. You can also retrieve -part of the document by using `_source_include` & `_source_exclude` (see the <> +part of the document by using `_source_includes` & `_source_excludes` (see the <> documentation for more details) |`stored_fields` |The selective stored fields of the document to return for each hit, diff --git a/docs/reference/security/securing-communications/tls-ad.asciidoc b/docs/reference/security/securing-communications/tls-ad.asciidoc index 2421925f08ebe..cd0395b6725f2 100644 --- a/docs/reference/security/securing-communications/tls-ad.asciidoc +++ b/docs/reference/security/securing-communications/tls-ad.asciidoc @@ -32,12 +32,12 @@ xpack: authc: realms: active_directory: - type: active_directory - order: 0 - domain_name: ad.example.com - url: ldaps://ad.example.com:636 - ssl: - certificate_authorities: [ "ES_PATH_CONF/cacert.pem" ] + ad_realm: + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + ssl: + certificate_authorities: [ "ES_PATH_CONF/cacert.pem" ] -------------------------------------------------- The CA cert must be a PEM encoded certificate. diff --git a/docs/reference/security/securing-communications/tls-ldap.asciidoc b/docs/reference/security/securing-communications/tls-ldap.asciidoc index 1ffc667cd33c1..2d7b2546becb7 100644 --- a/docs/reference/security/securing-communications/tls-ldap.asciidoc +++ b/docs/reference/security/securing-communications/tls-ldap.asciidoc @@ -24,12 +24,12 @@ xpack: security: authc: realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - ssl: - certificate_authorities: [ "ES_PATH_CONF/cacert.pem" ] + ldap: + ldap1: + order: 0 + url: "ldaps://ldap.example.com:636" + ssl: + certificate_authorities: [ "ES_PATH_CONF/cacert.pem" ] -------------------------------------------------- The CA certificate must be PEM encoded. @@ -52,4 +52,4 @@ NOTE: By default, when you configure {security} to connect to an LDAP server configuration do not match, {security} does not allow a connection to the LDAP server. This is done to protect against man-in-the-middle attacks. If necessary, you can disable this behavior by setting the - `ssl.verification_mode` property to `certificate`. \ No newline at end of file + `ssl.verification_mode` property to `certificate`. diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index ec661a1f30c9f..69045dca0a2db 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -22,14 +22,13 @@ file named `_audit.log` on each node. You can also specify `index`, which puts the auditing events in an {es} index that is prefixed with `.security_audit_log`. The index can reside on the same cluster or a separate cluster.
- ++ For backwards compatibility reasons, if you use the logfile output type, a `_access.log` file is also created. It contains the same information, but it uses the older (pre-6.5.0) formatting style. If the backwards compatible format is not required, it should be disabled. To do that, change its logger level to `off` in the `log4j2.properties` file. For more information, see <>. - + -- TIP: If the index is unavailable, it is possible for auditing events to diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index f75bf3a9f2986..5d4f9519006ae 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -77,11 +77,11 @@ opening spend more time in the `opening` state. Defaults to `2`. These settings are for advanced use cases; the default values are generally sufficient: -`xpack.ml.max_anomaly_records`:: (<>) +`xpack.ml.max_anomaly_records` (<>):: The maximum number of records that are output per bucket. The default value is `500`. -`xpack.ml.max_lazy_ml_nodes`:: (<>) +`xpack.ml.max_lazy_ml_nodes` (<>):: The number of lazily spun up Machine Learning nodes. Useful in situations where ML nodes are not desired until the first Machine Learning Job is opened. It defaults to `0` and has a maximum acceptable value of `3`. diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 92c51772720c9..56bfaf5d4f863 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -43,14 +43,14 @@ to `true`. Its default value is `false`. The `xpack.monitoring.collection` settings control how data is collected from your Elasticsearch nodes. -`xpack.monitoring.collection.enabled`:: (<>) +`xpack.monitoring.collection.enabled` (<>):: added[6.3.0] Set to `true` to enable the collection of monitoring data. When this setting is `false` (default), {es} monitoring data is not collected and all monitoring data from other sources such as {kib}, Beats, and Logstash is ignored. -`xpack.monitoring.collection.interval`:: (<>) +`xpack.monitoring.collection.interval` (<>):: Setting to `-1` to disable data collection is no longer supported beginning with 7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to @@ -60,7 +60,7 @@ Controls how often data samples are collected. Defaults to `10s`. If you modify the collection interval, set the `xpack.monitoring.min_interval_seconds` option in `kibana.yml` to the same value. -`xpack.monitoring.elasticsearch.collection.enabled`:: (<>) +`xpack.monitoring.elasticsearch.collection.enabled` (<>):: Controls whether statistics about your {es} cluster should be collected. Defaults to `true`. This is different from xpack.monitoring.collection.enabled, which allows you to enable or disable @@ -72,7 +72,7 @@ to pass through this cluster. Sets the timeout for collecting the cluster statistics. Defaults to `10s`. -`xpack.monitoring.collection.indices`:: (<>) +`xpack.monitoring.collection.indices` (<>):: Controls which indices Monitoring collects data from. Defaults to all indices. Specify the index names as a comma-separated list, for example `test1,test2,test3`. 
Names can include wildcards, for diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 8d5c832adcc86..c8e5895625c30 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -42,9 +42,10 @@ recommend that you explicitly add this setting to avoid confusion. A comma-separated list of settings that are omitted from the results of the <>. You can use wildcards to include multiple settings in the list. For example, the following value hides all the -settings for the ad1 realm: `xpack.security.authc.realms.ad1.*`. The API already -omits all `ssl` settings, `bind_dn`, and `bind_password` due to the -sensitive nature of the information. +settings for the ad1 active_directory realm: +`xpack.security.authc.realms.active_directory.ad1.*`. +The API already omits all `ssl` settings, `bind_dn`, and `bind_password` due to +the sensitive nature of the information. `xpack.security.fips_mode.enabled`:: Enables fips mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`. @@ -149,18 +150,15 @@ namespace in `elasticsearch.yml`. For example: ---------------------------------------- xpack.security.authc.realms: - realm1: - type: native + native.realm1: order: 0 ... - realm2: - type: ldap + ldap.realm2: order: 1 ... - realm3: - type: active_directory + active_directory.realm3: order: 2 ... ... diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index f0e5cfc71c999..03f98fd38acf0 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -193,7 +193,7 @@ Elasticsearch installs system call filters of various flavors depending on the operating system (e.g., seccomp on Linux). These system call filters are installed to prevent the ability to execute system calls related to forking as a defense mechanism against arbitrary code -execution attacks on Elasticsearch The system call filter check ensures +execution attacks on Elasticsearch. The system call filter check ensures that if system call filters are enabled, then they were successfully installed. 
To pass the system call filter check you must either fix any configuration errors on your system that prevented system call filters diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index d702245273028..31a4c6df9e0ea 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -3,10 +3,10 @@ [[xpack-sql]] = SQL access -:sql-tests: {xes-repo-dir}/../../qa/sql +:sql-tests: {xes-repo-dir}/../../plugin/sql/qa :sql-specs: {sql-tests}/src/main/resources -:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc -:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/qa/sql/security +:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc +:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/sql/qa/security :es-sql: Elasticsearch SQL [partintro] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java similarity index 50% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java index 67ef405238aba..8a70f9cb70474 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java @@ -1,48 +1,61 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.watcher.support.xcontent; -import org.elasticsearch.common.Strings; +package org.elasticsearch.common.xcontent; import java.lang.reflect.Array; import java.util.List; import java.util.Map; -public class ObjectPath { +/** + * Helper class to navigate nested objects using dot notation + */ +public final class ObjectPath { + + private static final String[] EMPTY_ARRAY = new String[0]; private ObjectPath() { } + /** + * Return the value within a given object at the specified path, or + * {@code null} if the path does not exist + */ + @SuppressWarnings("unchecked") public static <T> T eval(String path, Object object) { return (T) evalContext(path, object); } private static Object evalContext(String path, Object ctx) { final String[] parts; - if (path == null || path.isEmpty()) parts = Strings.EMPTY_ARRAY; + if (path == null || path.isEmpty()) parts = EMPTY_ARRAY; else parts = path.split("\\."); - StringBuilder resolved = new StringBuilder(); for (String part : parts) { if (ctx == null) { return null; } if (ctx instanceof Map) { ctx = ((Map) ctx).get(part); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } else if (ctx instanceof List) { try { int index = Integer.parseInt(part); ctx = ((List) ctx).get(index); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } catch (NumberFormatException nfe) { return null; } @@ -50,10 +63,6 @@ private static Object evalContext(String path, Object ctx) { try { int index = Integer.parseInt(part); ctx = Array.get(ctx, index); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } catch (NumberFormatException nfe) { return null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java similarity index 60% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java index da8ac3ef9d8f1..14a9f5be24b28 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java @@ -1,20 +1,34 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
*/ -package org.elasticsearch.xpack.core.watcher.common.xcontent; -import org.elasticsearch.common.xcontent.XContentParser; +package org.elasticsearch.common.xcontent; import java.io.IOException; -public class XContentUtils { +public final class XContentUtils { private XContentUtils() { } - // TODO open this up in core + /** + * Convert a {@link XContentParser.Token} to a value + */ public static Object readValue(XContentParser parser, XContentParser.Token token) throws IOException { if (token == XContentParser.Token.VALUE_NULL) { return null; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java similarity index 63% rename from x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java index f89552a637726..52e9723743b44 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java @@ -1,12 +1,25 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.xpack.watcher.support.xcontent; + +package org.elasticsearch.common.xcontent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; import java.util.ArrayList; import java.util.Arrays; @@ -18,23 +31,23 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.is; -public class MapPathTests extends ESTestCase { - public void testEval() throws Exception { +public class ObjectPathTests extends ESTestCase { + public void testEval() { Map map = singletonMap("key", "value"); assertThat(ObjectPath.eval("key", map), is((Object) "value")); assertThat(ObjectPath.eval("key1", map), nullValue()); } - public void testEvalList() throws Exception { - List list = Arrays.asList(1, 2, 3, 4); + public void testEvalList() { + List list = Arrays.asList(1, 2, 3, 4); Map map = singletonMap("key", list); int index = randomInt(3); assertThat(ObjectPath.eval("key." 
+ index, map), is(list.get(index))); } - public void testEvalArray() throws Exception { + public void testEvalArray() { int[] array = new int[] { 1, 2, 3, 4 }; Map map = singletonMap("key", array); @@ -42,13 +55,13 @@ public void testEvalArray() throws Exception { assertThat(((Number) ObjectPath.eval("key." + index, map)).intValue(), is(array[index])); } - public void testEvalMap() throws Exception { + public void testEvalMap() { Map map = singletonMap("a", singletonMap("b", "val")); assertThat(ObjectPath.eval("a.b", map), is((Object) "val")); } - public void testEvalMixed() throws Exception { + public void testEvalMixed() { Map map = new HashMap<>(); Map mapA = new HashMap<>(); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index aaca4f9b1860f..75e0087831a62 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -44,7 +44,6 @@ import org.apache.lucene.analysis.core.DecimalDigitFilter; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.LetterTokenizer; -import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.apache.lucene.analysis.core.UpperCaseFilter; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.cz.CzechAnalyzer; @@ -308,7 +307,8 @@ public Map> getTokenizers() { tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); - tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); + // TODO deprecate and remove in API + tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("pattern", PatternTokenizerFactory::new); @@ -503,7 +503,8 @@ public List getPreConfiguredTokenizers() { () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null)); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null)); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null)); - tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() { + // TODO deprecate and remove in API + tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() { @Override public String name() { return "lowercase"; diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java index 051c5bf80c524..69acb411d105f 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.IndexSettings; public class LegacyDelimitedPayloadTokenFilterFactory extends 
DelimitedPayloadTokenFilterFactory { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java new file mode 100644 index 0000000000000..3f11c52858aa4 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CharacterUtils; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.util.CharTokenizer; + +import java.io.IOException; + +@Deprecated +class XLowerCaseTokenizer extends Tokenizer { + + private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0; + + private static final int IO_BUFFER_SIZE = 4096; + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + + private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE); + + @Override + public final boolean incrementToken() throws IOException { + clearAttributes(); + int length = 0; + int start = -1; // this variable is always initialized + int end = -1; + char[] buffer = termAtt.buffer(); + while (true) { + if (bufferIndex >= dataLen) { + offset += dataLen; + CharacterUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils + if (ioBuffer.getLength() == 0) { + dataLen = 0; // so next offset += dataLen won't decrement offset + if (length > 0) { + break; + } else { + finalOffset = correctOffset(offset); + return false; + } + } + dataLen = ioBuffer.getLength(); + bufferIndex = 0; + } + // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone + final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength()); + final int charCount = Character.charCount(c); + bufferIndex += charCount; + + if (Character.isLetter(c)) { // if it's a token char + if (length == 0) { // start of token + assert start == -1; + start = offset + bufferIndex - charCount; + end = start; + } else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds + buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer + } + end += charCount; + length += 
Character.toChars(Character.toLowerCase(c), buffer, length); // buffer it, normalized + int maxTokenLen = CharTokenizer.DEFAULT_MAX_WORD_LEN; + if (length >= maxTokenLen) { // buffer overflow! make sure to check for >= surrogate pair could break == test + break; + } + } else if (length > 0) { // at non-Letter w/ chars + break; // return 'em + } + } + + termAtt.setLength(length); + assert start != -1; + offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end)); + return true; + + } + + @Override + public final void end() throws IOException { + super.end(); + // set final offset + offsetAtt.setOffset(finalOffset, finalOffset); + } + + @Override + public void reset() throws IOException { + super.reset(); + bufferIndex = 0; + offset = 0; + dataLen = 0; + finalOffset = 0; + ioBuffer.reset(); // make sure to reset the IO buffer!! + } + +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java similarity index 71% rename from modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java index 8c913a33cfe4c..4cd5b07fe484a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java @@ -20,26 +20,21 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; -import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent { +@Deprecated +// NORELEASE we should prevent the usage on indices created after 7.0 in order to be able to remove in 8 +public class XLowerCaseTokenizerFactory extends AbstractTokenizerFactory { - LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + public XLowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, settings); } @Override public Tokenizer create() { - return new LowerCaseTokenizer(); - } - - @Override - public Object getMultiTermComponent() { - return this; + return new XLowerCaseTokenizer(); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index f9fca66cc54a1..99e882c622085 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -48,7 +48,7 @@ protected Map> getTokenizers() { tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class); tokenizers.put("classic", ClassicTokenizerFactory.class); tokenizers.put("letter", LetterTokenizerFactory.class); - tokenizers.put("lowercase", 
LowerCaseTokenizerFactory.class); + // tokenizers.put("lowercase", XLowerCaseTokenizerFactory.class); tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class); tokenizers.put("pattern", PatternTokenizerFactory.class); tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class); @@ -223,7 +223,7 @@ protected Map> getPreConfiguredTokenFilters() { protected Map> getPreConfiguredTokenizers() { Map> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers()); tokenizers.put("keyword", null); - tokenizers.put("lowercase", null); + tokenizers.put("lowercase", Void.class); tokenizers.put("classic", null); tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class); tokenizers.put("path_hierarchy", null); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java index 0698f6ed0a6c9..f4215af8390dd 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java @@ -52,28 +52,30 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { map = ingestDocument.getSourceAndMetadata(); } - if (ingestDocument.hasField(path)) { - Object value = map.remove(field); - ingestDocument.appendFieldValue(path, value); - } else { - // check whether we actually can expand the field in question into an object field. - // part of the path may already exist and if part of it would be a value field (string, integer etc.) - // then we can't override it with an object field and we should fail with a good reason. - // IngestDocument#setFieldValue(...) would fail too, but the error isn't very understandable - for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) { - String partialPath = path.substring(0, index); - if (ingestDocument.hasField(partialPath)) { - Object val = ingestDocument.getFieldValue(partialPath, Object.class); - if ((val instanceof Map) == false) { - throw new IllegalArgumentException("cannot expend [" + path + "], because [" + partialPath + + if (map.containsKey(field)) { + if (ingestDocument.hasField(path)) { + Object value = map.remove(field); + ingestDocument.appendFieldValue(path, value); + } else { + // check whether we actually can expand the field in question into an object field. + // part of the path may already exist and if part of it would be a value field (string, integer etc.) + // then we can't override it with an object field and we should fail with a good reason. + // IngestDocument#setFieldValue(...) 
would fail too, but the error isn't very understandable + for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) { + String partialPath = path.substring(0, index); + if (ingestDocument.hasField(partialPath)) { + Object val = ingestDocument.getFieldValue(partialPath, Object.class); + if ((val instanceof Map) == false) { + throw new IllegalArgumentException("cannot expand [" + path + "], because [" + partialPath + "] is not an object field, but a value field"); + } + } else { + break; } - } else { - break; } + Object value = map.remove(field); + ingestDocument.setFieldValue(path, value); } - Object value = map.remove(field); - ingestDocument.setFieldValue(path, value); } return ingestDocument; } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 2fae5d77bcce3..e73049aecbced 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -114,8 +114,8 @@ public void writeTo(StreamOutput out) throws IOException { public static class TransportAction extends HandledTransportAction { @Inject - public TransportAction(Settings settings, TransportService transportService, ActionFilters actionFilters) { - super(settings, NAME, transportService, actionFilters, Request::new); + public TransportAction(TransportService transportService, ActionFilters actionFilters) { + super(NAME, transportService, actionFilters, Request::new); } @Override diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java index fde7f0c9b8a02..d6a207b859eb0 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java @@ -143,4 +143,38 @@ public void testEscapeFields_path() throws Exception { assertThat(document.getFieldValue("field.foo.bar.baz", String.class), equalTo("value")); } + + public void testEscapeFields_doNothingIfFieldNotInSourceDoc() throws Exception { + //asking to expand a (literal) field that is not present in the source document + Map source = new HashMap<>(); + source.put("foo.bar", "baz1"); + IngestDocument document = new IngestDocument(source, Collections.emptyMap()); + //abc.def does not exist in source, so don't mutate document + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, "abc.def"); + processor.execute(document); + //hasField returns false since it requires the expanded form, which is not expanded since we did not ask for it to be + assertFalse(document.hasField("foo.bar")); + //nothing has changed + assertEquals(document.getSourceAndMetadata().get("foo.bar"), "baz1"); + //abc.def is not found anywhere + assertFalse(document.hasField("abc.def")); + assertFalse(document.getSourceAndMetadata().containsKey("abc")); + assertFalse(document.getSourceAndMetadata().containsKey("abc.def")); + + //asking to expand a (literal) field that does not exist, but the nested field does exist + source = new HashMap<>(); + Map inner = new HashMap<>(); + inner.put("bar", "baz1"); + source.put("foo", inner); + document = new IngestDocument(source,
+ document = new IngestDocument(source, Collections.emptyMap()); + //foo.bar, the literal value (as opposed to nested value) does not exist in source, so don't mutate document + processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + //hasField returns true because the nested/expanded form exists in the source document + assertTrue(document.hasField("foo.bar")); + //nothing changed + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1")); + } + } diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..b536c887eab0c --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +8db13c6e146c851614c9f862f1eac67431f9b509 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 4904c89e62f89..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc072b68aac06a2fb9569ab7adce05302f130948 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java index fd80c56cdbe8a..cd0b09eca8c3a 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java @@ -31,6 +31,6 @@ public class ExpressionPlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { - return new ExpressionScriptEngine(settings); + return new ExpressionScriptEngine(); } } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 150bca60273e9..e53d0ec3e8c9c 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -28,8 +28,6 @@ import org.apache.lucene.search.SortField; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -63,14 +61,10 @@ * * Only contexts returning numeric types or {@link Object} are supported.
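 * For example, a number sort script returning a {@code double} is supported, whereas a context whose scripts must return a {@code String} is not.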
*/ -public class ExpressionScriptEngine extends AbstractComponent implements ScriptEngine { +public class ExpressionScriptEngine implements ScriptEngine { public static final String NAME = "expression"; - public ExpressionScriptEngine(Settings settings) { - super(settings); - } - @Override public String getType() { return NAME; diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java index b1872b30f1f17..205e638314fe3 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.expression; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -64,7 +63,7 @@ public void setUp() throws Exception { when(fieldData.getFieldName()).thenReturn("field"); when(fieldData.load(anyObject())).thenReturn(atomicFieldData); - service = new ExpressionScriptEngine(Settings.EMPTY); + service = new ExpressionScriptEngine(); lookup = new SearchLookup(mapperService, ignored -> fieldData, null); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java index 301fd2d4db70c..e6bd503bfabe1 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.text.ParseException; import java.util.Collections; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -63,7 +62,7 @@ public void setUp() throws Exception { when(fieldData.getFieldName()).thenReturn("field"); when(fieldData.load(anyObject())).thenReturn(atomicFieldData); - service = new ExpressionScriptEngine(Settings.EMPTY); + service = new ExpressionScriptEngine(); lookup = new SearchLookup(mapperService, ignored -> fieldData, null); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java index c7eae2446a6c0..137f8e058cd85 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.text.ParseException; import java.util.Collections; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ 
-63,7 +62,7 @@ public void setUp() throws Exception { when(fieldData.getFieldName()).thenReturn("field"); when(fieldData.load(anyObject())).thenReturn(atomicFieldData); - service = new ExpressionScriptEngine(Settings.EMPTY); + service = new ExpressionScriptEngine(); lookup = new SearchLookup(mapperService, ignored -> fieldData, null); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index eea9e31d4a79d..a685c3ba5ba7c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -45,7 +45,7 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); /** * Add a search template request to execute. Note, the order is important, the search response will be returned in the diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index 6e0baed9be879..7da78a449d70a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; @@ -45,10 +44,9 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction) SearchTemplateRequest::new); + super(SearchTemplateAction.NAME, transportService, actionFilters, (Supplier) SearchTemplateRequest::new); this.scriptService = scriptService; this.xContentRegistry = xContentRegistry; this.client = client; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 81cc802916d4e..0ea3d4af81f79 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -48,11 +48,6 @@ */ final class Compiler { - /** - * The maximum number of characters allowed in the script source. - */ - static final int MAXIMUM_SOURCE_LENGTH = 16384; - /** * Define the class with lowest privileges. 
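 * (i.e. the compiled script classes receive no permissions beyond the defaults)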
*/ @@ -212,12 +207,6 @@ private static void addFactoryMethod(Map> additionalClasses, Cl * @return An executable script that implements both a specified interface and is a subclass of {@link PainlessScript} */ Constructor compile(Loader loader, MainMethodReserved reserved, String name, String source, CompilerSettings settings) { - if (source.length() > MAXIMUM_SOURCE_LENGTH) { - throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH + - " characters. The passed in script is " + source.length() + " characters. Consider using a" + - " plugin if a script longer than this length is a requirement."); - } - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass); SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup, null); @@ -248,12 +237,6 @@ Constructor compile(Loader loader, MainMethodReserved reserved, String name, * @return The bytes for compilation. */ byte[] compile(String name, String source, CompilerSettings settings, Printer debugStream) { - if (source.length() > MAXIMUM_SOURCE_LENGTH) { - throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH + - " characters. The passed in script is " + source.length() + " characters. Consider using a" + - " plugin if a script longer than this length is a requirement."); - } - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass); SSource root = Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), name, source, settings, painlessLookup, debugStream); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java index 2c60136209ca7..765e5c836e051 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -464,10 +464,10 @@ public static class TransportAction extends TransportSingleShardAction, List> contexts) { - super(settings); - defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); Map, Compiler> contextsToCompilers = new HashMap<>(); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 32d74d0837ccd..d1db6606c86aa 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; -import java.util.Arrays; import java.util.Collections; import static java.util.Collections.emptyMap; @@ -200,21 +199,6 @@ public void testLoopLimits() { "The maximum number of statements that can be executed in a loop has been reached.")); } - public void testSourceLimits() { - final char[] tooManyChars = new char[Compiler.MAXIMUM_SOURCE_LENGTH + 1]; - Arrays.fill(tooManyChars, '0'); - - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { - exec(new String(tooManyChars)); - }); - assertTrue(expected.getMessage().contains("Scripts may be no longer than")); - - final char[] exactlyAtLimit = new char[Compiler.MAXIMUM_SOURCE_LENGTH]; 
- Arrays.fill(exactlyAtLimit, '0'); - // ok - assertEquals(0, exec(new String(exactlyAtLimit))); - } - public void testIllegalDynamicMethod() { IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec("def x = 'test'; return x.getClass().toString()"); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index d95c9899c89ad..67e0fad53ec49 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -75,7 +75,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; @@ -149,7 +149,7 @@ public void testNoDocValues() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); } @@ -173,7 +173,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -202,7 +202,7 @@ public void testCoerce() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -317,7 +317,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(25, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 4b46537bb1650..581c6fd494286 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -327,7 
+327,7 @@ Tuple, Map>> extractTermsAndRanges(IndexRead extractedTerms.add(builder.toBytesRef()); } } - if (info.getPointDimensionCount() == 1) { // not != 0 because range fields are not supported + if (info.getPointIndexDimensionCount() == 1) { // not != 0 because range fields are not supported PointValues values = reader.getPointValues(info.name); List encodedPointValues = new ArrayList<>(); encodedPointValues.add(values.getMinPackedValue().clone()); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 44823f9aa012b..0338e0fba91df 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.search.SearchHit; @@ -56,8 +55,8 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase { private final HighlightPhase highlightPhase; - PercolatorHighlightSubFetchPhase(Settings settings, Map highlighters) { - this.highlightPhase = new HighlightPhase(settings, highlighters); + PercolatorHighlightSubFetchPhase(Map highlighters) { + this.highlightPhase = new HighlightPhase(highlighters); } boolean hitsExecutionNeeded(SearchContext context) { // for testing diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index 9c8b6e8c67fcc..863b46b54ac30 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -20,7 +20,6 @@ package org.elasticsearch.percolator; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; @@ -35,13 +34,6 @@ import static java.util.Collections.singletonMap; public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { - - private final Settings settings; - - public PercolatorPlugin(Settings settings) { - this.settings = settings; - } - @Override public List> getQueries() { return singletonList(new QuerySpec<>(PercolateQueryBuilder.NAME, PercolateQueryBuilder::new, PercolateQueryBuilder::fromXContent)); @@ -51,7 +43,7 @@ public List> getQueries() { public List getFetchSubPhases(FetchPhaseConstructionContext context) { return Arrays.asList( new PercolatorMatchedSlotSubFetchPhase(), - new PercolatorHighlightSubFetchPhase(settings, context.getHighlighters()) + new PercolatorHighlightSubFetchPhase(context.getHighlighters()) ); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 3d9a8fb8ebb08..07f47df41e60d 100644 --- 
a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -38,7 +38,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MultiDocValues; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PostingsEnum; @@ -1090,7 +1090,7 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string"); logger.error("controlTopDocs.scoreDocs[{}].query_to_string={}", i, queryToString); - TermsEnum tenum = MultiFields.getFields(shardSearcher.getIndexReader()).terms(fieldType.queryTermsField.name()).iterator(); + TermsEnum tenum = MultiTerms.getTerms(shardSearcher.getIndexReader(), fieldType.queryTermsField.name()).iterator(); StringBuilder builder = new StringBuilder(); for (BytesRef term = tenum.next(); term != null; term = tenum.next()) { PostingsEnum penum = tenum.postings(null); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index e5f2160cfcaab..291a42c14665f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; @@ -47,8 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testHitsExecutionNeeded() { PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery()); - PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, - emptyMap()); + PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(emptyMap()); SearchContext searchContext = Mockito.mock(SearchContext.class); Mockito.when(searchContext.highlight()).thenReturn(new SearchContextHighlight(Collections.emptyList())); Mockito.when(searchContext.query()).thenReturn(new MatchAllDocsQuery()); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index 2e9e57d2c7361..b3c8f434f2612 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -73,10 +72,9 @@ public class TransportRankEvalAction extends HandledTransportAction) RankEvalRequest::new); this.scriptService = scriptService; this.namedXContentRegistry = namedXContentRegistry; diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index cdad280fd9a29..cc5b554e39e93 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -286,7 +286,7 @@ public void testIndicesOptions() { // test that ignore_unavailable=true works but returns one result less assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); - request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); @@ -294,37 +294,37 @@ public void testIndicesOptions() { // test that ignore_unavailable=false or default settings throw an IndexClosedException assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); - request.indicesOptions(IndicesOptions.fromParameters(null, "false", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, "false", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); // test expand_wildcards request = new RankEvalRequest(task, new String[] { "tes*" }); - request.indicesOptions(IndicesOptions.fromParameters("none", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters("none", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters("open", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters("open", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + 
request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); // test allow_no_indices request = new RankEvalRequest(task, new String[] { "bad*" }); - request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexNotFoundException.class)); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java index 10e3611b30d31..1a16c311fcf3a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java @@ -59,7 +59,8 @@ protected RankEvalRequest createTestInstance() { } RankEvalRequest rankEvalRequest = new RankEvalRequest(RankEvalSpecTests.createTestItem(), indices); IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean()); rankEvalRequest.indicesOptions(indicesOptions); return rankEvalRequest; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index 3223ec0266d53..c12f923aa6089 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -63,7 +64,7 @@ public class RankEvalResponseTests extends ESTestCase { private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES)), - new CircuitBreakingException("Data too large", 123, 456), + new CircuitBreakingException("Data too 
large", 123, 456, CircuitBreaker.Durability.PERMANENT), new SearchParseException(new TestSearchContext(null), "Parse failure", new XContentLocation(12, 98)), new IllegalArgumentException("Closed resource", new RuntimeException("Resource")), new SearchPhaseExecutionException("search", "all shards failed", diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java index 706f2c0b8f8f1..9bda99b6e3943 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -43,9 +42,9 @@ public class TransportDeleteByQueryAction extends HandledTransportAction) DeleteByQueryRequest::new); this.threadPool = threadPool; this.client = client; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 60baaa2324299..0acc9a7b37dbc 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -37,11 +37,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.client.RestClientBuilder; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; @@ -50,22 +45,27 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -105,7 +105,7 @@ public class TransportReindexAction extends HandledTransportAction)ReindexRequest::new); + super(ReindexAction.NAME, transportService, actionFilters, (Writeable.Reader)ReindexRequest::new); this.threadPool = threadPool; this.clusterService = clusterService; this.scriptService = scriptService; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java index 3c60361d7a197..99469df3db6f5 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportRethrottleAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -43,9 +42,9 @@ public class TransportRethrottleAction extends TransportTasksAction) UpdateByQueryRequest::new); this.threadPool = threadPool; this.client = client; diff --git a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index b5960592508bf..4ba814e4238df 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -23,33 +23,78 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequestBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; +import org.elasticsearch.index.reindex.CancelTests; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder; import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.index.reindex.ReindexRequestBuilder; import org.elasticsearch.index.reindex.RethrottleAction; import org.elasticsearch.index.reindex.RethrottleRequestBuilder; import org.elasticsearch.index.reindex.UpdateByQueryAction; import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder; +import org.elasticsearch.index.shard.IndexingOperationListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; 
import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; +import org.junit.Before; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; public class ReindexDocumentationIT extends ESIntegTestCase { + // Semaphore used to allow & block indexing operations during the test + private static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); + private static final String INDEX_NAME = "source_index"; + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.singletonList(ReindexPlugin.class); + } + + @Before + public void setup() { + client().admin().indices().prepareCreate(INDEX_NAME).get(); + } + @SuppressWarnings("unused") - public void reindex() { + public void testReindex() { Client client = client(); // tag::reindex1 - BulkByScrollResponse response = new ReindexRequestBuilder(client, ReindexAction.INSTANCE) + BulkByScrollResponse response = + new ReindexRequestBuilder(client, ReindexAction.INSTANCE) + .source("source_index") .destination("target_index") .filter(QueryBuilders.matchQuery("category", "xzy")) // <1> .get(); @@ -57,44 +102,58 @@ public void reindex() { } @SuppressWarnings("unused") - public void updateByQuery() { + public void testUpdateByQuery() { Client client = client(); + client.admin().indices().prepareCreate("foo").get(); + client.admin().indices().prepareCreate("bar").get(); + client.admin().indices().preparePutMapping(INDEX_NAME).setType("_doc").setSource("cat", "type=keyword").get(); { // tag::update-by-query - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index").abortOnVersionConflict(false); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query } { // tag::update-by-query-filter - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") .filter(QueryBuilders.termQuery("level", "awesome")) .size(1000) - .script(new Script(ScriptType.INLINE, "ctx._source.awesome = 'absolutely'", "painless", Collections.emptyMap())); + .script(new Script(ScriptType.INLINE, + "ctx._source.awesome = 'absolutely'", + "painless", + Collections.emptyMap())); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-filter } { // tag::update-by-query-size - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") - .source().setSize(500); + .source() + .setSize(500); BulkByScrollResponse response = 
updateByQuery.get(); // end::update-by-query-size } { // tag::update-by-query-sort - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); - updateByQuery.source("source_index").size(100) - .source().addSort("cat", SortOrder.DESC); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + updateByQuery.source("source_index") + .size(100) + .source() + .addSort("cat", SortOrder.DESC); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-sort } { // tag::update-by-query-script - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") .script(new Script( ScriptType.INLINE, @@ -111,53 +170,67 @@ public void updateByQuery() { } { // tag::update-by-query-multi-index - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("foo", "bar").source().setTypes("a", "b"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-multi-index } { // tag::update-by-query-routing - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source().setRouting("cat"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-routing } { // tag::update-by-query-pipeline - UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); + UpdateByQueryRequestBuilder updateByQuery = + new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.setPipeline("hurray"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-pipeline } + } + + public void testTasks() throws InterruptedException { + final Client client = client(); + final ReindexRequestBuilder builder = reindexAndPartiallyBlock(); + { // tag::update-by-query-list-tasks ListTasksResponse tasksList = client.admin().cluster().prepareListTasks() .setActions(UpdateByQueryAction.NAME).setDetailed(true).get(); for (TaskInfo info: tasksList.getTasks()) { TaskId taskId = info.getTaskId(); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) info.getStatus(); + BulkByScrollTask.Status status = + (BulkByScrollTask.Status) info.getStatus(); // do stuff } // end::update-by-query-list-tasks } + + TaskInfo mainTask = CancelTests.findTaskToCancel(ReindexAction.NAME, builder.request().getSlices()); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.getStatus(); + assertNull(status.getReasonCancelled()); + TaskId taskId = mainTask.getTaskId(); { - TaskId taskId = null; // tag::update-by-query-get-task GetTaskResponse get = client.admin().cluster().prepareGetTask(taskId).get(); // end::update-by-query-get-task } { - TaskId taskId = null; // tag::update-by-query-cancel-task // Cancel all update-by-query requests - client.admin().cluster().prepareCancelTasks().setActions(UpdateByQueryAction.NAME).get().getTasks(); + client.admin().cluster().prepareCancelTasks() + 
.setActions(UpdateByQueryAction.NAME).get().getTasks(); // Cancel a specific update-by-query request - client.admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks(); + client.admin().cluster().prepareCancelTasks() + .setTaskId(taskId).get().getTasks(); // end::update-by-query-cancel-task } { - TaskId taskId = null; // tag::update-by-query-rethrottle new RethrottleRequestBuilder(client, RethrottleAction.INSTANCE) .setTaskId(taskId) @@ -165,13 +238,19 @@ public void updateByQuery() { .get(); // end::update-by-query-rethrottle } + + // unblocking the blocked update + ALLOWED_OPERATIONS.release(builder.request().getSlices()); } @SuppressWarnings("unused") - public void deleteByQuery() { + public void testDeleteByQuery() { Client client = client(); + client.admin().indices().prepareCreate("persons").get(); + // tag::delete-by-query-sync - BulkByScrollResponse response = new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + BulkByScrollResponse response = + new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) .filter(QueryBuilders.matchQuery("gender", "male")) // <1> .source("persons") // <2> .get(); // <3> @@ -195,4 +274,76 @@ public void onFailure(Exception e) { // end::delete-by-query-async } + /** + * Similar to what CancelTests does: blocks some operations so that tasks can be caught in a running state + * @see CancelTests#testCancel(String, AbstractBulkByScrollRequestBuilder, CancelTests.CancelAssertion, Matcher) + */ + private ReindexRequestBuilder reindexAndPartiallyBlock() throws InterruptedException { + final Client client = client(); + final int numDocs = randomIntBetween(10, 100); + ALLOWED_OPERATIONS.release(numDocs); + + indexRandom(true, false, true, IntStream.range(0, numDocs) + .mapToObj(i -> client().prepareIndex(INDEX_NAME, "_doc", Integer.toString(i)).setSource("n", Integer.toString(i))) + .collect(Collectors.toList())); + + // Check that all documents have been indexed and correctly counted + assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numDocs); + assertThat(ALLOWED_OPERATIONS.drainPermits(), equalTo(0)); + + ReindexRequestBuilder builder = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source(INDEX_NAME) + .destination("target_index", "_doc"); + // Scroll by 1 so that cancellation is easier to control + builder.source().setSize(1); + + int numModifiedDocs = randomIntBetween(builder.request().getSlices() * 2, numDocs); + // choose to modify only some of the docs - the rest remain blocked + ALLOWED_OPERATIONS.release(numModifiedDocs - builder.request().getSlices()); + + // Now execute the reindex action...
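+ // The slice workers will consume the permits released above and then block inside + // BlockingOperationListener#preCheck until testTasks() releases one permit per slice.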
+ builder.execute(); + + // 10 seconds is usually fine but on heavily loaded machines this can take a while + assertTrue("updates blocked", awaitBusy( + () -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0, + 1, TimeUnit.MINUTES)); + return builder; + } + + public static class ReindexCancellationPlugin extends Plugin { + + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.addIndexOperationListener(new BlockingOperationListener()); + } + } + + public static class BlockingOperationListener implements IndexingOperationListener { + + @Override + public Engine.Index preIndex(ShardId shardId, Engine.Index index) { + return preCheck(index, index.type()); + } + + @Override + public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { + return preCheck(delete, delete.type()); + } + + private T preCheck(T operation, String type) { + if (("_doc".equals(type) == false) || (operation.origin() != Engine.Operation.Origin.PRIMARY)) { + return operation; + } + + try { + if (ALLOWED_OPERATIONS.tryAcquire(30, TimeUnit.SECONDS)) { + return operation; + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new IllegalStateException("Something went wrong"); + } + } + } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 6b7b21a55148d..6d6ae01f0626c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; @@ -26,7 +27,6 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.engine.Engine; @@ -195,7 +195,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder assertion.assertThat(response, numDocs, numModifiedDocs); } - private TaskInfo findTaskToCancel(String actionName, int workerCount) { + public static TaskInfo findTaskToCancel(String actionName, int workerCount) { ListTasksResponse tasks; long start = System.nanoTime(); do { @@ -298,7 +298,7 @@ public void onIndexModule(IndexModule indexModule) { } public static class BlockingOperationListener implements IndexingOperationListener { - private static final Logger log = Loggers.getLogger(CancelTests.class); + private static final Logger log = LogManager.getLogger(CancelTests.class); @Override public Engine.Index preIndex(ShardId shardId, Engine.Index index) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java index 4784d7f5fe546..19c5739bbc6ce 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java @@ 
-53,7 +53,7 @@ public class ReindexSourceTargetValidationTests extends ESTestCase { .put(index("baz"), true) .put(index("source", "source_multi"), true) .put(index("source2", "source_multi"), true)).build(); - private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY); + private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(); private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), INDEX_NAME_EXPRESSION_RESOLVER); diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index 158ecff9b2b4e..a7042b8bfee2b 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -34,7 +33,7 @@ /** * Read-only URL-based blob store */ -public class URLBlobStore extends AbstractComponent implements BlobStore { +public class URLBlobStore implements BlobStore { private final URL path; @@ -53,7 +52,6 @@ public class URLBlobStore extends AbstractComponent implements BlobStore { * @param path base URL */ public URLBlobStore(Settings settings, URL path) { - super(settings); this.path = path; this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.uri.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 98b8c0a1945a5..8f8ae805fd1e8 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -82,21 +82,21 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); - if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) { + if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); } this.environment = environment; - supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings); - urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{}); + supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings()); + urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{}); basePath = BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) - ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); + ? 
URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings()); } @Override protected BlobStore createBlobStore() { URL normalizedURL = checkURL(url); - return new URLBlobStore(settings, normalizedURL); + return new URLBlobStore(environment.settings(), normalizedURL); } // only use for testing diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java index 939d5540ecfdf..17e21ce468b4c 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java @@ -48,7 +48,6 @@ public final class Netty4CorsConfig { private final long maxAge; private final Set allowedRequestMethods; private final Set allowedRequestHeaders; - private final boolean allowNullOrigin; private final Map> preflightHeaders; private final boolean shortCircuit; @@ -61,7 +60,6 @@ public final class Netty4CorsConfig { maxAge = builder.maxAge; allowedRequestMethods = builder.requestMethods; allowedRequestHeaders = builder.requestHeaders; - allowNullOrigin = builder.allowNullOrigin; preflightHeaders = builder.preflightHeaders; shortCircuit = builder.shortCircuit; } @@ -108,19 +106,6 @@ public boolean isOriginAllowed(final String origin) { return false; } - /** - * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded - * from the local file system. - * - * If isNullOriginAllowed is true then the server will response with the wildcard for the - * the CORS response header 'Access-Control-Allow-Origin'. - * - * @return {@code true} if a 'null' origin should be supported. - */ - public boolean isNullOriginAllowed() { - return allowNullOrigin; - } - /** * Determines if credentials are supported for CORS requests. * diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfigBuilder.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfigBuilder.java index 16513c57bb337..3e87d948dabf4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfigBuilder.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfigBuilder.java @@ -74,7 +74,6 @@ public static Netty4CorsConfigBuilder forOrigins(final String... 
origins) { Optional<Set<String>> origins; Optional<Pattern> pattern; final boolean anyOrigin; - boolean allowNullOrigin; boolean enabled = true; boolean allowCredentials; long maxAge; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java index 78ea9decd1dd1..5f7baffc86aa6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java @@ -167,11 +167,6 @@ private void setPreflightHeaders(final HttpResponse response) { private boolean setOrigin(final HttpResponse response) { final String origin = request.headers().get(HttpHeaderNames.ORIGIN); if (!Strings.isNullOrEmpty(origin)) { - if ("null".equals(origin) && config.isNullOriginAllowed()) { - setAnyOrigin(response); - return true; - } - if (config.isAnyOriginSupported()) { if (config.isCredentialsAllowed()) { echoRequestOrigin(response); @@ -201,10 +196,6 @@ private boolean validateOrigin() { return true; } - if ("null".equals(origin) && config.isNullOriginAllowed()) { - return true; - } - // if the origin is the same as the host of the request, then allow if (isSameOrigin(origin, request.headers().get(HttpHeaderNames.HOST))) { return true; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index 91bbe1c1a9b3c..38527151695d8 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -20,9 +20,9 @@ package org.elasticsearch.transport.netty4; import io.netty.util.internal.logging.AbstractInternalLogger; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.logging.Loggers; @SuppressLoggerChecks(reason = "safely delegates to logger") class Netty4InternalESLogger extends AbstractInternalLogger { @@ -31,7 +31,7 @@ class Netty4InternalESLogger extends AbstractInternalLogger { Netty4InternalESLogger(final String name) { super(name); - this.logger = Loggers.getLogger(name); + this.logger = LogManager.getLogger(name); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java index d6132b26b0899..bee98362e0c1e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java @@ -20,8 +20,10 @@ package org.elasticsearch.transport.netty4; import io.netty.channel.Channel; +import io.netty.channel.ChannelException; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPromise; +import java.io.IOException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; @@ -71,8 +73,14 @@ public void addCloseListener(ActionListener<Void> listener) { } @Override - public void setSoLinger(int value) { 
- channel.config().setOption(ChannelOption.SO_LINGER, value); + public void setSoLinger(int value) throws IOException { + if (channel.isOpen()) { + try { + channel.config().setOption(ChannelOption.SO_LINGER, value); + } catch (ChannelException e) { + throw new IOException(e); + } + } } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 009a75b3e3301..a4e5731cd6226 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -37,12 +37,10 @@ import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.network.NetworkService; @@ -59,8 +57,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; import java.util.Map; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -101,8 +97,9 @@ public class Netty4Transport extends TcpTransport { private final int workerCount; private final ByteSizeValue receivePredictorMin; private final ByteSizeValue receivePredictorMax; - private volatile Bootstrap clientBootstrap; private final Map serverBootstraps = newConcurrentMap(); + private volatile Bootstrap clientBootstrap; + private volatile NioEventLoopGroup eventLoopGroup; public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { @@ -125,10 +122,12 @@ public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService protected void doStart() { boolean success = false; try { - clientBootstrap = createClientBootstrap(); + ThreadFactory threadFactory = daemonThreadFactory(settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX); + eventLoopGroup = new NioEventLoopGroup(workerCount, threadFactory); + clientBootstrap = createClientBootstrap(eventLoopGroup); if (NetworkService.NETWORK_SERVER.get(settings)) { for (ProfileSettings profileSettings : profileSettings) { - createServerBootstrap(profileSettings); + createServerBootstrap(profileSettings, eventLoopGroup); bindServer(profileSettings); } } @@ -141,9 +140,9 @@ protected void doStart() { } } - private Bootstrap createClientBootstrap() { + private Bootstrap createClientBootstrap(NioEventLoopGroup eventLoopGroup) { final Bootstrap bootstrap = new Bootstrap(); - bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX))); + bootstrap.group(eventLoopGroup); bootstrap.channel(NioSocketChannel.class); bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); @@ -167,7 +166,7 @@ private Bootstrap createClientBootstrap() { return bootstrap; } - private void createServerBootstrap(ProfileSettings profileSettings) { + 
private void createServerBootstrap(ProfileSettings profileSettings, NioEventLoopGroup eventLoopGroup) { String name = profileSettings.profileName; if (logger.isDebugEnabled()) { logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " @@ -176,12 +175,9 @@ private void createServerBootstrap(ProfileSettings profileSettings) { receivePredictorMin, receivePredictorMax); } - - final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name); - final ServerBootstrap serverBootstrap = new ServerBootstrap(); - serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory)); + serverBootstrap.group(eventLoopGroup); serverBootstrap.channel(NioServerSocketChannel.class); serverBootstrap.childHandler(getServerChannelInitializer(name)); @@ -274,25 +270,14 @@ long failedPingCount() { @SuppressForbidden(reason = "debug") protected void stopInternal() { Releasables.close(() -> { - final List>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size()); - for (final Map.Entry entry : serverBootstraps.entrySet()) { - serverBootstrapCloseFutures.add( - Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS))); + Future shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (shutdownFuture.isSuccess() == false) { + logger.warn("Error closing netty event loop group", shutdownFuture.cause()); } - for (final Tuple> future : serverBootstrapCloseFutures) { - future.v2().awaitUninterruptibly(); - if (!future.v2().isSuccess()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause()); - } - } - serverBootstraps.clear(); - if (clientBootstrap != null) { - clientBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly(); - clientBootstrap = null; - } + serverBootstraps.clear(); + clientBootstrap = null; }); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index ad81f5b3063f5..52732d5bc1df4 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -96,7 +96,7 @@ public void testLimitsInFlightRequests() throws Exception { Collection multipleResponses = nettyHttpClient.post(transportAddress.address(), requests); try { assertThat(multipleResponses, hasSize(requests.length)); - assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.SERVICE_UNAVAILABLE); + assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.TOO_MANY_REQUESTS); } finally { multipleResponses.forEach(ReferenceCounted::release); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index abe02cdf4c1cd..200c9aa4bbeb4 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -20,6 +20,7 @@ package 
org.elasticsearch.transport.netty4; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.common.logging.Loggers; @@ -37,14 +38,14 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); - Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.addAppender(LogManager.getLogger(ESLoggingHandler.class), appender); + Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); - Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.removeAppender(LogManager.getLogger(ESLoggingHandler.class), appender); + Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender); appender.stop(); super.tearDown(); } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 315dcc5f6cb1b..90132e2c58fcd 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -39,3 +39,4 @@ dependencies { dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' } + diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..65e5ca3382240 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b474e1a2d7f0172338a08f159849a6c491781d70 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index abc772945b1b4..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -429eb7e780c5a6e5200041a1f5b98bccd2623aaf \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java index 7059d93ffc5bf..1997c589bc378 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java @@ -50,7 +50,7 @@ public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory imp public IcuFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(ICU_FOLDING_NORMALIZER, settings); + this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, ICU_FOLDING_NORMALIZER, settings); } @Override diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java 
b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java index e43e163e1a034..86490ff486ecf 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java @@ -49,7 +49,7 @@ public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment e } Normalizer2 normalizer = Normalizer2.getInstance( null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE); - this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizer, settings); + this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, normalizer, settings); } @Override diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java index 1ef09f86052bd..73bf92ee872a5 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java @@ -23,7 +23,10 @@ import com.ibm.icu.text.Normalizer2; import com.ibm.icu.text.UnicodeSet; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,13 +39,16 @@ */ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { + private static final DeprecationLogger deprecationLogger = + new DeprecationLogger(LogManager.getLogger(IcuNormalizerTokenFilterFactory.class)); + private final Normalizer2 normalizer; public IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); String method = settings.get("name", "nfkc_cf"); Normalizer2 normalizer = Normalizer2.getInstance(null, method, Normalizer2.Mode.COMPOSE); - this.normalizer = wrapWithUnicodeSetFilter(normalizer, settings); + this.normalizer = wrapWithUnicodeSetFilter(indexSettings, normalizer, settings); } @Override @@ -55,8 +61,17 @@ public Object getMultiTermComponent() { return this; } - static Normalizer2 wrapWithUnicodeSetFilter(final Normalizer2 normalizer, Settings settings) { + static Normalizer2 wrapWithUnicodeSetFilter(final IndexSettings indexSettings, + final Normalizer2 normalizer, + final Settings settings) { String unicodeSetFilter = settings.get("unicodeSetFilter"); + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (unicodeSetFilter != null) { + deprecationLogger.deprecated("[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]"); + } else { + unicodeSetFilter = settings.get("unicode_set_filter"); + } + } if (unicodeSetFilter != null) { UnicodeSet unicodeSet = new UnicodeSet(unicodeSetFilter); diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml index c9ff2b2fb6463..5cdfcde72b020 100644 --- 
a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml @@ -46,7 +46,7 @@ - match: { tokens.1.token: bar } - match: { tokens.2.token: resume } --- -"Normalization with a UnicodeSet Filter": +"Normalization with unicode_set_filter": - do: indices.create: index: test @@ -57,14 +57,14 @@ char_filter: charfilter_icu_normalizer: type: icu_normalizer - unicodeSetFilter: "[^ß]" + unicode_set_filter: "[^ß]" filter: tokenfilter_icu_normalizer: type: icu_normalizer - unicodeSetFilter: "[^ßB]" + unicode_set_filter: "[^ßB]" tokenfilter_icu_folding: type: icu_folding - unicodeSetFilter: "[^â]" + unicode_set_filter: "[^â]" - do: indices.analyze: index: test @@ -77,6 +77,7 @@ - match: { tokens.1.token: föo } - match: { tokens.2.token: bâr } - match: { tokens.3.token: ruß } + - do: indices.analyze: index: test @@ -89,6 +90,7 @@ - match: { tokens.1.token: föo } - match: { tokens.2.token: Bâr } - match: { tokens.3.token: ruß } + - do: indices.analyze: index: test @@ -101,3 +103,46 @@ - match: { tokens.1.token: foo } - match: { tokens.2.token: bâr } - match: { tokens.3.token: russ } + +--- +"Normalization with deprecated unicodeSetFilter": + - skip: + version: " - 6.99.99" + reason: unicodeSetFilter deprecated in 7.0.0, replaced by unicode_set_filter + features: "warnings" + + - do: + warnings: + - "[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]" + indices.create: + index: test + body: + settings: + index: + analysis: + char_filter: + charfilter_icu_normalizer: + type: icu_normalizer + unicodeSetFilter: "[^ß]" + filter: + tokenfilter_icu_normalizer: + type: icu_normalizer + unicodeSetFilter: "[^ßB]" + tokenfilter_icu_folding: + type: icu_folding + unicodeSetFilter: "[^â]" + - do: + warnings: + - "[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]" + indices.analyze: + index: test + body: + char_filter: ["charfilter_icu_normalizer"] + tokenizer: standard + text: charfilter Föo Bâr Ruß + - length: { tokens: 4 } + - match: { tokens.0.token: charfilter } + - match: { tokens.1.token: föo } + - match: { tokens.2.token: bâr } + - match: { tokens.3.token: ruß } + diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..51fb0eebff73c --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +fc547e69837bcb808f1782bfa35490645bab9cae \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index e103c8c0c7c41..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -837fca1b1d7ca1dc002e53171801526644e52818 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3389dc2f73ea1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ 
+e08961a2ec9414947693659ff79bb7e21a410298 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index b7a23ee518fcb..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dde903172ade259cb26cbe320c25bc1d1356f89 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..b0854f657867a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +09280919225656c7ce2a14af29666a02bd86c540 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 08b07e7c2f498..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6ca20e96a989e6e6706b8b7b8ad8c82d2a03576 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..00860c9fc832e --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +880f10393cdefff7575fbf5b2ced890666ec81dc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 3f6fed19af1aa..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c96a2f25dea18b383423a41aca296734353d4bbd \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..4818fd1665f27 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b41451a9d4e30b8a9a14ccdd7553e5796f77cf44 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 5dc03672c8753..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09363c5ce111d024a6da22a5ea8dbaf54d91dbd0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1b4f444999f58 --- 
/dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +145fd2c803d682c2cb2d78e6e350e09a09a09ea0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index e940b50d640e1..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13c3840d49480014118de99ef6e07a9e55c50172 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index 1a9265de2a72f..987942ef4f031 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -93,6 +93,7 @@ public static Deployment fromString(String string) { } } + private final Settings settings; private final AzureComputeService azureComputeService; private TransportService transportService; private NetworkService networkService; @@ -107,7 +108,7 @@ public static Deployment fromString(String string) { public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, TransportService transportService, NetworkService networkService) { - super(settings); + this.settings = settings; this.azureComputeService = azureComputeService; this.transportService = transportService; this.networkService = networkService; diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 4255fc210fb6c..795db2846cef3 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -19,11 +19,11 @@ package org.elasticsearch.plugin.discovery.azure.classic; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -43,7 +43,7 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public static final String AZURE = "azure"; protected final Settings settings; - private static final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); + private static final Logger logger = LogManager.getLogger(AzureDiscoveryPlugin.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public AzureDiscoveryPlugin(Settings settings) { diff --git 
a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index c5a5f1df98ee2..35c2e7336a7d6 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -24,10 +24,10 @@ import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; @@ -243,7 +243,7 @@ public static void startHttpd() throws Exception { responseBody.write(responseAsBytes); responseBody.close(); } catch (XMLStreamException e) { - Loggers.getLogger(AzureDiscoveryClusterFormationTests.class).error("Failed serializing XML", e); + LogManager.getLogger(AzureDiscoveryClusterFormationTests.class).error("Failed serializing XML", e); throw new RuntimeException(e); } }); diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index e32ba6948d62d..b08561ffde0ed 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -48,9 +48,30 @@ bundlePlugin { } } +task writeTestJavaPolicy { + doLast { + final File tmp = file("${buildDir}/tmp") + if (tmp.exists() == false && tmp.mkdirs() == false) { + throw new GradleException("failed to create temporary directory [${tmp}]") + } + final File javaPolicy = file("${tmp}/java.policy") + javaPolicy.write( + [ + "grant {", + " permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "};" + ].join("\n")) + } +} + test { + dependsOn writeTestJavaPolicy // this is needed for insecure plugins, remove if possible! 
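(As a hedged illustration of what the policy written above enables: a test running under the security manager can then set the SDK's metadata endpoint override from a privileged block, which is exactly the write permission the generated `java.policy` grants. The `SDKGlobalConfiguration` constant is the AWS SDK's name for the `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` property; the helper class and method names here are hypothetical.)

--------------------------------------------------
import static com.amazonaws.SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY;

import java.security.AccessController;
import java.security.PrivilegedAction;

// Hypothetical helper: repoint the AWS SDK's EC2 metadata client at a local
// fixture. This single property write is what the generated java.policy permits.
final class MetadataEndpointOverride {
    static void pointMetadataServiceAt(final String endpoint) {
        AccessController.doPrivileged((PrivilegedAction<String>) () ->
            System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, endpoint));
    }
}
--------------------------------------------------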
systemProperty 'tests.artifact', project.name + + // this is needed to manipulate com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property + // the alternative would be to disable the security manager entirely with `systemProperty 'tests.security.manager', 'false'` + systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" } check { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 90fac9e80cd78..898a31192ffb0 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -55,7 +55,10 @@ integTestCluster { keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key' keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' setting 'discovery.zen.hosts_provider', 'ec2' + setting 'network.host', '_ec2_' setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}" + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", "http://${-> ec2Fixture.addressAndPort}" + unicastTransportUri = { seedNode, node, ant -> return null } waitCondition = { node, ant -> diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java index 0cf4cbdeadb34..6027bd861590e 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.ec2; import org.apache.http.NameValuePair; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URLEncodedUtils; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.rest.RestStatus; @@ -60,7 +62,7 @@ public static void main(String[] args) throws Exception { @Override protected Response handle(final Request request) throws IOException { - if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) { + if ("/".equals(request.getPath()) && (HttpPost.METHOD_NAME.equals(request.getMethod()))) { final String userAgent = request.getHeader("User-Agent"); if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { // Simulate an EC2 DescribeInstancesResponse @@ -74,6 +76,9 @@ protected Response handle(final Request request) throws IOException { return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody); } } + if ("/latest/meta-data/local-ipv4".equals(request.getPath()) && (HttpGet.METHOD_NAME.equals(request.getMethod()))) { + return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, "127.0.0.1".getBytes(UTF_8)); + } return null; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index a65500d9e2289..6d677d03c3909 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.LazyInitializable; import java.util.Random; @@ -41,15 +40,9 @@ class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { - public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; - private final AtomicReference> lazyClientReference = new AtomicReference<>(); - AwsEc2ServiceImpl(Settings settings) { - super(settings); - } - private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) { final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); final ClientConfiguration configuration = buildConfiguration(logger, clientSettings); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 8f5037042986b..2817c1c3b60bf 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -69,7 +69,6 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos private final TransportAddressesCache dynamicHosts; AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) { - super(settings); this.transportService = transportService; this.awsEc2Service = awsEc2Service; diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index 084814e13d873..3de5b2ccaf913 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -24,9 +24,9 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.BasicSessionCredentials; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -75,7 +75,7 @@ final class Ec2ClientSettings { static final Setting READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); - private static final Logger logger = Loggers.getLogger(Ec2ClientSettings.class); + private static final Logger logger = LogManager.getLogger(Ec2ClientSettings.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index bb757dc05adba..6400d10dff021 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -19,11 +19,12 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.EC2MetadataUtils; import com.amazonaws.util.json.Jackson; 
+import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -52,7 +53,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReloadablePlugin { - private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); + private static Logger logger = LogManager.getLogger(Ec2DiscoveryPlugin.class); public static final String EC2 = "ec2"; static { @@ -78,7 +79,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Reloa protected final AwsEc2Service ec2Service; public Ec2DiscoveryPlugin(Settings settings) { - this(settings, new AwsEc2ServiceImpl(settings)); + this(settings, new AwsEc2ServiceImpl()); } protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { @@ -91,7 +92,7 @@ protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { @Override public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { logger.debug("Register _ec2_, _ec2:xxx_ network names"); - return new Ec2NameResolver(settings); + return new Ec2NameResolver(); } @Override @@ -129,7 +130,8 @@ public Settings additionalSettings() { final Settings.Builder builder = Settings.builder(); // Adds a node attribute for the ec2 availability zone - final String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; + final String azMetadataUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + + "/latest/meta-data/placement/availability-zone"; builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl)); return builder.build(); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java index 92bd01dd9aec7..e9dd3a10e4cc3 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java @@ -19,11 +19,11 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.EC2MetadataUtils; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkService.CustomNameResolver; -import org.elasticsearch.common.settings.Settings; import java.io.BufferedReader; import java.io.IOException; @@ -79,13 +79,6 @@ private enum Ec2HostnameType { } } - /** - * Construct a {@link CustomNameResolver}. - */ - Ec2NameResolver(Settings settings) { - super(settings); - } - /** * @param type the ec2 hostname type to discover. * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained. 
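(Both call sites above now build metadata URLs from `EC2MetadataUtils.getHostAddressForEC2MetadataService()`, which returns the default `http://169.254.169.254` unless the endpoint-override system property is set; that is what lets the test fixtures intercept the lookups. A minimal sketch of the resulting resolution flow, with error handling and privileged wrappers omitted, and the class name hypothetical:)

--------------------------------------------------
import com.amazonaws.util.EC2MetadataUtils;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Illustrative only: fetch one metadata path and turn it into an address,
// mirroring what resolve(Ec2HostnameType) does for each _ec2:..._ name.
final class Ec2ResolveSketch {
    static InetAddress resolveLocalIpv4() throws IOException {
        final URL url = new URL(EC2MetadataUtils.getHostAddressForEC2MetadataService()
            + "/latest/meta-data/local-ipv4");
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
            return InetAddress.getByName(reader.readLine());
        }
    }
}
--------------------------------------------------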
@@ -94,7 +87,7 @@ private enum Ec2HostnameType { @SuppressForbidden(reason = "We call getInputStream in doPrivileged and provide SocketPermission") public InetAddress[] resolve(Ec2HostnameType type) throws IOException { InputStream in = null; - String metadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name; + String metadataUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + "/latest/meta-data/" + type.ec2Name; try { URL url = new URL(metadataUrl); logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java index 0596dd697b2eb..e44087f941349 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java @@ -24,8 +24,6 @@ import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.Tag; -import org.elasticsearch.common.settings.Settings; - import java.util.List; public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { @@ -33,8 +31,7 @@ public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { private final int nodes; private final List> tagsList; - public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) { - super(settings); + public AwsEc2ServiceMock(int nodes, List> tagsList) { this.nodes = nodes; this.tagsList = tagsList; } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java index a92bd243bc9b7..bc45a95c2f309 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java @@ -32,7 +32,7 @@ public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin { } public Ec2DiscoveryPluginMock(Settings settings, int nodes, List> tagsList) { - super(settings, new AwsEc2ServiceMock(settings, nodes, tagsList)); + super(settings, new AwsEc2ServiceMock(nodes, tagsList)); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 295df0c818a91..aa619409c16eb 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -298,7 +298,7 @@ abstract class DummyEc2HostProvider extends AwsEc2UnicastHostsProvider { } public void testGetNodeListEmptyCache() throws Exception { - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); + AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(1, null); DummyEc2HostProvider provider = new DummyEc2HostProvider(Settings.EMPTY, transportService, awsEc2Service) { @Override protected List fetchDynamicNodes() { diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java index 52bf7e67b0d7d..dedf56b836eb3 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java +++ 
b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java @@ -19,35 +19,98 @@ package org.elasticsearch.discovery.ec2; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; +import java.io.OutputStream; import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Arrays; import java.util.Collections; +import java.util.function.BiConsumer; +import static com.amazonaws.SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; /** * Test for EC2 network.host settings. + *

+ * Warning: This test doesn't assert that the exceptions are thrown. + * They aren't. */ +@SuppressForbidden(reason = "use http server") public class Ec2NetworkTests extends ESTestCase { + + private static HttpServer httpServer; + + @BeforeClass + public static void startHttp() throws Exception { + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + + BiConsumer registerContext = (path, v) ->{ + final byte[] message = v.getBytes(UTF_8); + httpServer.createContext(path, (s) -> { + s.sendResponseHeaders(RestStatus.OK.getStatus(), message.length); + OutputStream responseBody = s.getResponseBody(); + responseBody.write(message); + responseBody.close(); + }); + }; + registerContext.accept("/latest/meta-data/local-ipv4","127.0.0.1"); + registerContext.accept("/latest/meta-data/public-ipv4","165.168.10.2"); + registerContext.accept("/latest/meta-data/public-hostname","165.168.10.3"); + registerContext.accept("/latest/meta-data/local-hostname","10.10.10.5"); + + httpServer.start(); + } + + @Before + public void setup() { + // redirect EC2 metadata service to httpServer + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, + "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort())); + } + + @AfterClass + public static void stopHttp() { + httpServer.stop(0); + httpServer = null; + } + /** * Test for network.host: _ec2_ */ public void testNetworkHostEc2() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2_") - .build(); + resolveEc2("_ec2_", InetAddress.getByName("127.0.0.1")); + } + + /** + * Test for network.host: _ec2_ + */ + public void testNetworkHostUnableToResolveEc2() { + // redirect EC2 metadata service to unknown location + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, + "http://127.0.0.1/")); - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. try { - networkService.resolveBindHostAddresses(null); + resolveEc2("_ec2_", (InetAddress[]) null); } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); + assertThat(e.getMessage(), + equalTo("IOException caught when fetching InetAddress from [http://127.0.0.1//latest/meta-data/local-ipv4]")); } } @@ -55,102 +118,58 @@ public void testNetworkHostEc2() throws IOException { * Test for network.host: _ec2:publicIp_ */ public void testNetworkHostEc2PublicIp() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicIp_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. 
- try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-ipv4")); - } + resolveEc2("_ec2:publicIp_", InetAddress.getByName("165.168.10.2")); } /** * Test for network.host: _ec2:privateIp_ */ public void testNetworkHostEc2PrivateIp() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateIp_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); - } + resolveEc2("_ec2:privateIp_", InetAddress.getByName("127.0.0.1")); } /** * Test for network.host: _ec2:privateIpv4_ */ public void testNetworkHostEc2PrivateIpv4() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateIpv4_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); - } + resolveEc2("_ec2:privateIpv4_", InetAddress.getByName("127.0.0.1")); } /** * Test for network.host: _ec2:privateDns_ */ public void testNetworkHostEc2PrivateDns() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateDns_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-hostname")); - } + resolveEc2("_ec2:privateDns_", InetAddress.getByName("10.10.10.5")); } /** * Test for network.host: _ec2:publicIpv4_ */ public void testNetworkHostEc2PublicIpv4() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicIpv4_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-ipv4")); - } + resolveEc2("_ec2:publicIpv4_", InetAddress.getByName("165.168.10.2")); } /** * Test for network.host: _ec2:publicDns_ */ public void testNetworkHostEc2PublicDns() throws IOException { + resolveEc2("_ec2:publicDns_", InetAddress.getByName("165.168.10.3")); + } + + private InetAddress[] resolveEc2(String host, InetAddress ... expected) throws IOException { Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicDns_") - .build(); + .put("network.host", host) + .build(); - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. 
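(The mock that these removed TODO comments asked for is what the new `@BeforeClass` setup above provides: one canned response per metadata path. Reduced to plain JDK types for illustration — the test itself goes through `MockHttpServer` — the pattern looks roughly like this; the class name is hypothetical:)

--------------------------------------------------
import com.sun.net.httpserver.HttpServer;

import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

// Sketch: serve a fixed body for one EC2 metadata path on an ephemeral port.
final class MetadataFixtureSketch {
    static HttpServer start() throws Exception {
        final HttpServer server = HttpServer.create(new InetSocketAddress("127.0.0.1", 0), 0);
        final byte[] body = "127.0.0.1".getBytes(StandardCharsets.UTF_8);
        server.createContext("/latest/meta-data/local-ipv4", exchange -> {
            exchange.sendResponseHeaders(200, body.length);
            try (OutputStream out = exchange.getResponseBody()) {
                out.write(body);
            }
        });
        server.start();
        return server; // callers point the endpoint override at server.getAddress()
    }
}
--------------------------------------------------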
- try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-hostname")); + NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver())); + + InetAddress[] addresses = networkService.resolveBindHostAddresses( + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); + if (expected == null) { + fail("We should get an IOException, resolved addressed:" + Arrays.toString(addresses)); } + assertThat(addresses, arrayContaining(expected)); + return addresses; } /** @@ -158,11 +177,7 @@ public void testNetworkHostEc2PublicDns() throws IOException { * network.host: _local_ */ public void testNetworkHostCoreLocal() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_local_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); + NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver())); InetAddress[] addresses = networkService.resolveBindHostAddresses(null); assertThat(addresses, arrayContaining(networkService.resolveBindHostAddresses(new String[] { "_local_" }))); } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index aab6e0c74ecdb..116bf1842d065 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -94,6 +94,7 @@ public Collection instances() { return instances; } + private final Settings settings; private Compute client; private TimeValue refreshInterval = null; private long lastRefresh; @@ -107,7 +108,7 @@ public Collection instances() { private final boolean validateCerts; public GceInstancesServiceImpl(Settings settings) { - super(settings); + this.settings = settings; this.validateCerts = GCE_VALIDATE_CERTIFICATES.get(settings); this.project = resolveProject(); this.zones = resolveZones(); diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java index c736862d426de..ca25fde742907 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java @@ -44,11 +44,14 @@ public class GceMetadataService extends AbstractLifecycleComponent { public static final Setting GCE_HOST = new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Setting.Property.NodeScope); + private final Settings settings; + /** Global instance of the HTTP transport. 
*/ private HttpTransport gceHttpTransport; public GceMetadataService(Settings settings) { super(settings); + this.settings = settings; } protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException { diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java index 71e9fbc7804df..064fe606244ee 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java @@ -19,9 +19,9 @@ package org.elasticsearch.cloud.gce; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; public class GceModule extends AbstractModule { @@ -29,7 +29,7 @@ public class GceModule extends AbstractModule { static Class computeServiceImpl = GceInstancesServiceImpl.class; protected final Settings settings; - protected final Logger logger = Loggers.getLogger(GceModule.class); + protected final Logger logger = LogManager.getLogger(GceModule.class); public GceModule(Settings settings) { this.settings = settings; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java index 46c4ac7bac547..e53a1e241bb1d 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java @@ -22,9 +22,7 @@ import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.util.Access; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkService.CustomNameResolver; -import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.net.InetAddress; @@ -39,7 +37,7 @@ *

 * _gce:hostname_
 */ -public class GceNameResolver extends AbstractComponent implements CustomNameResolver { +public class GceNameResolver implements CustomNameResolver { private final GceMetadataService gceMetadataService; @@ -73,8 +71,7 @@ private enum GceAddressResolverType { /** * Construct a {@link CustomNameResolver}. */ - public GceNameResolver(Settings settings, GceMetadataService gceMetadataService) { - super(settings); + public GceNameResolver(GceMetadataService gceMetadataService) { this.gceMetadataService = gceMetadataService; } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 36f8aa36b34d0..2d1bb07b23909 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -58,6 +58,7 @@ static final class Status { private static final String TERMINATED = "TERMINATED"; } + private final Settings settings; private final GceInstancesService gceInstancesService; private TransportService transportService; private NetworkService networkService; @@ -73,7 +74,7 @@ static final class Status { public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstancesService, TransportService transportService, NetworkService networkService) { - super(settings); + this.settings = settings; this.gceInstancesService = gceInstancesService; this.transportService = transportService; this.networkService = networkService; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 418e3fffe2463..9aef304e08fcc 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -21,19 +21,19 @@ import com.google.api.client.http.HttpHeaders; import com.google.api.client.util.ClassInfo; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.cloud.gce.util.Access; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.gce.GceUnicastHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -57,7 +57,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close public static final String GCE = "gce"; protected final Settings settings; - private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); + private static final Logger logger = LogManager.getLogger(GceDiscoveryPlugin.class); // stashed
when created in order to properly close private final SetOnce gceInstancesService = new SetOnce<>(); @@ -95,7 +95,7 @@ public Map> getZenHostsProviders(Transpor @Override public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { logger.debug("Register _gce_, _gce:xxx network names"); - return new GceNameResolver(settings, new GceMetadataService(settings)); + return new GceNameResolver(new GceMetadataService(settings)); } @Override diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java index 3a34e3629db80..03525eb266ba7 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java @@ -26,18 +26,18 @@ import com.google.api.client.testing.http.MockHttpTransport; import com.google.api.client.testing.http.MockLowLevelHttpRequest; import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.io.InputStream; import java.net.URL; public class GceMockUtils { - protected static final Logger logger = Loggers.getLogger(GceMockUtils.class); + protected static final Logger logger = LogManager.getLogger(GceMockUtils.class); public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/"; diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java index 1fe1297904bda..94f2959917d5b 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java @@ -107,7 +107,7 @@ private void resolveGce(String gceNetworkSetting, InetAddress[] expected) throws .build(); GceMetadataServiceMock mock = new GceMetadataServiceMock(nodeSettings); - NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(nodeSettings, mock))); + NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(mock))); try { InetAddress[] addresses = networkService.resolveBindHostAddresses( NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java index b420e8d0a1198..a185e038582e0 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java @@ -19,11 +19,11 @@ package org.elasticsearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.CheckedSupplier; -import 
org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.Closeable; import java.io.IOException; @@ -34,7 +34,7 @@ */ final class DatabaseReaderLazyLoader implements Closeable { - private static final Logger LOGGER = Loggers.getLogger(DatabaseReaderLazyLoader.class); + private static final Logger LOGGER = LogManager.getLogger(DatabaseReaderLazyLoader.class); private final String databaseFileName; private final CheckedSupplier loader; diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index c9448dd88e756..79fefbc64d407 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -20,12 +20,9 @@ package org.elasticsearch.index.mapper.annotatedtext; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.Analyzer.TokenStreamComponents; import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; @@ -69,20 +66,21 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. - * + * * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + * * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points * in the token stream. - * This code is largely a copy of TextFieldMapper which is less than ideal - + * This code is largely a copy of TextFieldMapper which is less than ideal - * my attempts to subclass TextFieldMapper failed but we can revisit this. 
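 *
 * Illustrative example (hypothetical values): indexing
 * "New mayor is [John Smith](John%20Smith)" emits the plain tokens of
 * "New mayor is John Smith" plus an injected "John Smith" annotation token
 * that shares the position of the first covered text token and carries the
 * offsets of the whole annotated phrase.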
**/ public class AnnotatedTextFieldMapper extends FieldMapper { @@ -100,7 +98,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER; - + public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; @@ -118,7 +116,7 @@ public Builder positionIncrementGap(int positionIncrementGap) { this.positionIncrementGap = positionIncrementGap; return this; } - + @Override public Builder docValues(boolean docValues) { if (docValues) { @@ -141,8 +139,8 @@ public AnnotatedTextFieldMapper build(BuilderContext context) { fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(), positionIncrementGap)); fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap)); } else { - //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory - // does to splice in new default of posIncGap=100 by wrapping the analyzer + //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory + // does to splice in new default of posIncGap=100 by wrapping the analyzer if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { int overrideInc = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(), overrideInc)); @@ -162,7 +160,7 @@ public static class TypeParser implements Mapper.TypeParser { public Mapper.Builder parse( String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { AnnotatedTextFieldMapper.Builder builder = new AnnotatedTextFieldMapper.Builder(fieldName); - + builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer()); builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer()); @@ -181,7 +179,7 @@ public Mapper.Builder annotations; - + // Format is markdown-like syntax for URLs eg: // "New mayor is [John Smith](type=person&value=John%20Smith) " - static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); - + static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); + public static AnnotatedText parse (String textPlusMarkup) { List annotations =new ArrayList<>(); - Matcher m = markdownPattern.matcher(textPlusMarkup); + Matcher m = markdownPattern.matcher(textPlusMarkup); int lastPos = 0; StringBuilder sb = new StringBuilder(); while(m.find()){ if(m.start() > lastPos){ sb.append(textPlusMarkup.substring(lastPos, m.start())); } - + int startOffset = sb.length(); int endOffset = sb.length() + m.group(1).length(); sb.append(m.group(1)); lastPos = m.end(); - + String[] pairs = m.group(2).split("&"); String value = null; for (String pair : pairs) { String[] kv = pair.split("="); try { - if(kv.length == 2){ + if(kv.length == 2){ throw new ElasticsearchParseException("key=value pairs are not supported in annotations"); } if(kv.length == 1) { @@ -230,9 +228,9 @@ public static AnnotatedText parse (String textPlusMarkup) { } } catch (UnsupportedEncodingException uee){ throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", uee); - } - } - } + } + } + } if(lastPos < textPlusMarkup.length()){ sb.append(textPlusMarkup.substring(lastPos)); } @@ 
-242,13 +240,13 @@ public static AnnotatedText parse (String textPlusMarkup) { protected AnnotatedText(String textMinusMarkup, String textPlusMarkup, List annotations) { this.textMinusMarkup = textMinusMarkup; this.textPlusMarkup = textPlusMarkup; - this.annotations = annotations; + this.annotations = annotations; } - + public static final class AnnotationToken { public final int offset; public final int endOffset; - + public final String value; public AnnotationToken(int offset, int endOffset, String value) { this.offset = offset; @@ -259,12 +257,12 @@ public AnnotationToken(int offset, int endOffset, String value) { public String toString() { return value +" ("+offset+" - "+endOffset+")"; } - + public boolean intersects(int start, int end) { return (start <= offset && end >= offset) || (start <= endOffset && end >= endOffset) || (start >= offset && end <= endOffset); } - + @Override public int hashCode() { final int prime = 31; @@ -274,7 +272,7 @@ public int hashCode() { result = prime * result + Objects.hashCode(value); return result; } - + @Override public boolean equals(Object obj) { if (this == obj) @@ -287,16 +285,16 @@ public boolean equals(Object obj) { return Objects.equals(endOffset, other.endOffset) && Objects.equals(offset, other.offset) && Objects.equals(value, other.value); } - + } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(textMinusMarkup); sb.append("\n"); annotations.forEach(a -> { - sb.append(a); + sb.append(a); sb.append("\n"); }); return sb.toString(); @@ -308,10 +306,10 @@ public int numAnnotations() { public AnnotationToken getAnnotation(int index) { return annotations.get(index); - } + } } - - // A utility class for use with highlighters where the content being highlighted + + // A utility class for use with highlighters where the content being highlighted // needs plain text format for highlighting but marked-up format for token discovery. // The class takes markedup format field values and returns plain text versions. 
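// A rough sketch of the markup round trip (hypothetical values, assuming the
// bare-value form accepted by AnnotatedText.parse above):
//   AnnotatedText at = AnnotatedText.parse("New mayor is [John Smith](John%20Smith)");
//   at.textMinusMarkup;     // "New mayor is John Smith" - what the highlighter sees
//   at.getAnnotation(0);    // "John Smith (13 - 23)" - offsets into the plain text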
// When asked to tokenize plain-text versions by the highlighter it tokenizes the @@ -330,7 +328,7 @@ public void init(String[] markedUpFieldValues) { annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]); } } - + public String [] getPlainTextValuesForHighlighter(){ String [] result = new String[annotations.length]; for (int i = 0; i < annotations.length; i++) { @@ -338,127 +336,75 @@ public void init(String[] markedUpFieldValues) { } return result; } - + public AnnotationToken[] getIntersectingAnnotations(int start, int end) { List intersectingAnnotations = new ArrayList<>(); int fieldValueOffset =0; for (AnnotatedText fieldValueAnnotations : this.annotations) { //This is called from a highlighter where all of the field values are concatenated - // so each annotation offset will need to be adjusted so that it takes into account + // so each annotation offset will need to be adjusted so that it takes into account // the previous values AND the MULTIVAL delimiter for (AnnotationToken token : fieldValueAnnotations.annotations) { if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) { - intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, + intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, token.endOffset + fieldValueOffset, token.value)); } - } + } //add 1 for the fieldvalue separator character fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; } return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); - } - + } + @Override public Analyzer getWrappedAnalyzer(String fieldName) { return delegate; - } - + } + @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - if(components instanceof AnnotatedHighlighterTokenStreamComponents){ - // already wrapped. - return components; - } AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); - return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, this.annotations); - } - } - private static final class AnnotatedHighlighterTokenStreamComponents extends TokenStreamComponents{ - - private AnnotationsInjector annotationsInjector; - private AnnotatedText[] annotations; - int readerNum = 0; - - AnnotatedHighlighterTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsFilter, - AnnotatedText[] annotations) { - super(source, annotationsFilter); - this.annotationsInjector = annotationsFilter; - this.annotations = annotations; + AtomicInteger readerNum = new AtomicInteger(0); + return new TokenStreamComponents(r -> { + String plainText = readToString(r); + AnnotatedText at = this.annotations[readerNum.getAndIncrement()]; + assert at.textMinusMarkup.equals(plainText); + injector.setAnnotations(at); + components.getSource().accept(new StringReader(at.textMinusMarkup)); + }, injector); } + } - @Override - protected void setReader(Reader reader) { - String plainText = readToString(reader); - AnnotatedText at = this.annotations[readerNum++]; - assert at.textMinusMarkup.equals(plainText); - // This code is reliant on the behaviour of highlighter logic - it - // takes plain text multi-value fields and then calls the same analyzer - // for each field value in turn. 
This class has cached the annotations - // associated with each plain-text value and are arranged in the same order - annotationsInjector.setAnnotations(at); - super.setReader(new StringReader(at.textMinusMarkup)); - } - - } - - public static final class AnnotationAnalyzerWrapper extends AnalyzerWrapper { - private final Analyzer delegate; - public AnnotationAnalyzerWrapper (Analyzer delegate) { + public AnnotationAnalyzerWrapper(Analyzer delegate) { super(delegate.getReuseStrategy()); this.delegate = delegate; } - /** - * Wraps {@link StandardAnalyzer}. - */ - public AnnotationAnalyzerWrapper() { - this(new StandardAnalyzer()); - } - - @Override public Analyzer getWrappedAnalyzer(String fieldName) { return delegate; - } + } @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - if(components instanceof AnnotatedTokenStreamComponents){ - // already wrapped. + if (components.getTokenStream() instanceof AnnotationsInjector) { + // already wrapped return components; } AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); - return new AnnotatedTokenStreamComponents(components.getTokenizer(), injector); - } - } - - - //This Analyzer is not "wrappable" because of a limitation in Lucene https://issues.apache.org/jira/browse/LUCENE-8352 - private static final class AnnotatedTokenStreamComponents extends TokenStreamComponents{ - private AnnotationsInjector annotationsInjector; - - AnnotatedTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsInjector) { - super(source, annotationsInjector); - this.annotationsInjector = annotationsInjector; - } - - @Override - protected void setReader(Reader reader) { - // Sneaky code to change the content downstream components will parse. - // Replace the marked-up content Reader with a plain text Reader and prime the - // annotations injector with the AnnotatedTokens that need to be injected - // as plain-text parsing progresses. 
- AnnotatedText annotations = AnnotatedText.parse(readToString(reader)); - annotationsInjector.setAnnotations(annotations); - super.setReader(new StringReader(annotations.textMinusMarkup)); + return new TokenStreamComponents(r -> { + AnnotatedText annotations = AnnotatedText.parse(readToString(r)); + injector.setAnnotations(annotations); + components.getSource().accept(new StringReader(annotations.textMinusMarkup)); + }, injector); } } - - static String readToString(Reader reader) { + + static String readToString(Reader reader) { char[] arr = new char[8 * 1024]; StringBuilder buffer = new StringBuilder(); int numCharsRead; @@ -467,15 +413,15 @@ static String readToString(Reader reader) { buffer.append(arr, 0, numCharsRead); } reader.close(); - return buffer.toString(); + return buffer.toString(); } catch (IOException e) { throw new UncheckedIOException("IO Error reading field content", e); } - } + } + - public static final class AnnotationsInjector extends TokenFilter { - + private AnnotatedText annotatedText; AnnotatedText.AnnotationToken nextAnnotationForInjection = null; private int currentAnnotationIndex = 0; @@ -502,8 +448,8 @@ public void setAnnotations(AnnotatedText annotatedText) { nextAnnotationForInjection = null; } } - - + + @Override public void reset() throws IOException { @@ -512,7 +458,7 @@ public void reset() throws IOException { inputExhausted = false; super.reset(); } - + // Abstracts if we are pulling from some pre-cached buffer of // text tokens or directly from the wrapped TokenStream private boolean internalNextToken() throws IOException{ @@ -524,7 +470,7 @@ private boolean internalNextToken() throws IOException{ pendingStates.clear(); } return true; - } + } if(inputExhausted) { return false; } @@ -579,28 +525,28 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th posLenAtt.setPositionLength(annotationPosLen); textOffsetAtt.setOffset(nextAnnotationForInjection.offset, nextAnnotationForInjection.endOffset); setType(nextAnnotationForInjection); - + // We may have multiple annotations at this location - stack them up final int annotationOffset = nextAnnotationForInjection.offset; final AnnotatedText.AnnotationToken firstAnnotationAtThisPos = nextAnnotationForInjection; while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) { - + setType(nextAnnotationForInjection); termAtt.resizeBuffer(nextAnnotationForInjection.value.length()); termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length()); - + if (nextAnnotationForInjection == firstAnnotationAtThisPos) { posAtt.setPositionIncrement(firstSpannedTextPosInc); //Put at the head of the queue of tokens to be emitted - pendingStates.add(0, captureState()); + pendingStates.add(0, captureState()); } else { - posAtt.setPositionIncrement(0); + posAtt.setPositionIncrement(0); //Put after the head of the queue of tokens to be emitted - pendingStates.add(1, captureState()); + pendingStates.add(1, captureState()); } - - + + // Flag the inject annotation as null to prevent re-injection. 
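// (Stacked annotations at the same offset are queued behind the first one with
// a position increment of zero, so they are emitted synonym-style at a single
// token position.)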
currentAnnotationIndex++; if (currentAnnotationIndex < annotatedText.numAnnotations()) { @@ -614,7 +560,7 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th } } - + public static final class AnnotatedTextFieldType extends StringFieldType { @@ -625,7 +571,7 @@ public AnnotatedTextFieldType() { protected AnnotatedTextFieldType(AnnotatedTextFieldType ref) { super(ref); } - + @Override public void setIndexAnalyzer(NamedAnalyzer delegate) { if(delegate.analyzer() instanceof AnnotationAnalyzerWrapper){ @@ -655,7 +601,7 @@ public Query existsQuery(QueryShardContext context) { return new NormsFieldExistsQuery(name()); } } - + @Override public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePosIncrements) throws IOException { PhraseQuery.Builder builder = new PhraseQuery.Builder(); @@ -678,7 +624,7 @@ public Query phraseQuery(String field, TokenStream stream, int slop, boolean ena return builder.build(); } - + @Override public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { @@ -713,12 +659,12 @@ public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolea mpqb.add(multiTerms.toArray(new Term[0])); } return mpqb.build(); - } + } } - + private int positionIncrementGap; protected AnnotatedTextFieldMapper(String simpleName, AnnotatedTextFieldType fieldType, MappedFieldType defaultFieldType, - int positionIncrementGap, + int positionIncrementGap, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); assert fieldType.tokenized(); @@ -774,6 +720,6 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { builder.field("position_increment_gap", positionIncrementGap); - } + } } } diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java index 2fcf917ab1d79..ca29521802fe2 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java @@ -57,7 +57,7 @@ import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; import static org.hamcrest.CoreMatchers.equalTo; -public class AnnotatedTextHighlighterTests extends ESTestCase { +public class AnnotatedTextHighlighterTests extends ESTestCase { private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, Query query, Locale locale, BreakIterator breakIterator, diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 6566063d220d3..dc14373026430 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -63,7 +63,7 @@ public void testSizeEnabled() throws Exception { boolean points = false; for (IndexableField field : doc.rootDoc().getFields("_size")) { stored |= field.fieldType().stored(); - points |= 
field.fieldType().pointDimensionCount() > 0; + points |= field.fieldType().pointIndexDimensionCount() > 0; } assertTrue(stored); assertTrue(points); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 5d5330e8cb563..b4322119dafa7 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -21,12 +21,12 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.io.InputStream; @@ -37,7 +37,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { - private final Logger logger = Loggers.getLogger(AzureBlobContainer.class); + private final Logger logger = LogManager.getLogger(AzureBlobContainer.class); private final AzureBlobStore blobStore; private final String keyPath; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index f4bc362e53602..6268a3372843e 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; @@ -47,9 +46,8 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService service) + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) throws URISyntaxException, StorageException { - super(settings); this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 0797c78af33bb..d11c8ee81d547 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -125,7 +125,7 @@ protected BlobStore getBlobStore() { */ @Override protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { - final AzureBlobStore blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); + final 
AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 72b62a930aeee..c6e8335bd5a6d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -59,6 +59,7 @@ public List> getSettings() { AzureStorageSettings.KEY_SETTING, AzureStorageSettings.ENDPOINT_SUFFIX_SETTING, AzureStorageSettings.TIMEOUT_SETTING, + AzureStorageSettings.MAX_RETRIES_SETTING, AzureStorageSettings.PROXY_TYPE_SETTING, AzureStorageSettings.PROXY_HOST_SETTING, AzureStorageSettings.PROXY_PORT_SETTING diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 75bcc3e9c1edb..ec461cf38f294 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -70,7 +70,6 @@ public class AzureStorageService extends AbstractComponent { volatile Map storageSettings = emptyMap(); public AzureStorageService(Settings settings) { - super(settings); // eagerly load client settings so that secure settings are read final Map clientsSettings = AzureStorageSettings.load(settings); refreshAndClearCache(clientsSettings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index c4e4c1439e45f..1c90f97a43728 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -54,7 +54,7 @@ final class AzureStorageSettings { key -> SecureSetting.secureString(key, null)); /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). 
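 * Made public so the repository plugin can expose it through AzureRepositoryPlugin#getSettings for registration and validation.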
*/ - private static final Setting MAX_RETRIES_SETTING = + public static final Setting MAX_RETRIES_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "max_retries", (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope), ACCOUNT_SETTING, KEY_SETTING); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index 10deeb4676fd3..a06dd7c3f28b1 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -34,7 +34,7 @@ protected BlobStore newBlobStore() throws IOException { try { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, Settings.EMPTY, client); + return new AzureBlobStore(repositoryMetaData, client); } catch (URISyntaxException | StorageException e) { throw new IOException(e); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 025ee45b9c3a0..9a0c9039d089c 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -34,7 +34,7 @@ protected BlobStore newBlobStore() throws IOException { try { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, Settings.EMPTY, client); + return new AzureBlobStore(repositoryMetaData, client); } catch (URISyntaxException | StorageException e) { throw new IOException(e); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 3b3793f22ba04..f7b49bd24adf6 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -25,9 +25,11 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -35,6 +37,7 @@ import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.Map; import static org.elasticsearch.repositories.azure.AzureStorageService.blobNameFromUri; @@ -60,10 +63,24 @@ public void testReadSecuredSettings() { assertThat(loadedSettings.get("azure3").getEndpointSuffix(), 
equalTo("my_endpoint_suffix")); } + private AzureRepositoryPlugin pluginWithSettingsValidation(Settings settings) { + final AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings); + new SettingsModule(settings, plugin.getSettings(), Collections.emptyList(), Collections.emptySet()); + return plugin; + } + + private AzureStorageService storageServiceWithSettingsValidation(Settings settings) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + return plugin.azureStoreService; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + public void testCreateClientWithEndpointSuffix() throws IOException { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); @@ -85,7 +102,7 @@ public void testReinitClientSettings() throws IOException { secureSettings2.setString("azure.client.azure3.account", "myaccount23"); secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); @@ -117,7 +134,7 @@ public void testReinitClientEmptySettings() throws IOException { secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -141,7 +158,7 @@ public void testReinitClientWrongSettings() throws IOException { secureSettings2.setString("azure.client.azure1.account", "myaccount1"); // missing key final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -154,7 +171,7 @@ public void testReinitClientWrongSettings() throws IOException { } public void testGetSelectedClientNonExisting() { - final AzureStorageService azureStorageService = new 
AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } @@ -164,7 +181,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); @@ -172,13 +189,13 @@ public void testGetSelectedClientDefaultTimeout() { } public void testGetSelectedClientNoTimeout() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -190,7 +207,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -200,7 +217,7 @@ public void testNoProxy() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -213,7 +230,7 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -233,7 +250,7 @@ public void 
testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -252,7 +269,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -267,7 +284,7 @@ public void testProxyNoHost() { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -278,7 +295,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -289,7 +306,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -301,7 +318,7 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml index c7b8949a11335..bb8f148fc8ab1 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -11,7 +11,7 @@ setup: settings: bucket: ${bucket} client: "integration_test" - base_path: ${base_path} + base_path: "${base_path}" --- "Snapshot/Restore with repository-gcs": @@ -23,7 +23,7 @@ setup: - 
match: { repository.settings.bucket : ${bucket} } - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.base_path : "${base_path}" } # Index documents - do: diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 1e94467f5a57e..7894f9fc7df63 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.Streams; import java.io.ByteArrayOutputStream; @@ -69,8 +68,7 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore private final String clientName; private final GoogleCloudStorageService storageService; - GoogleCloudStorageBlobStore(Settings settings, String bucketName, String clientName, GoogleCloudStorageService storageService) { - super(settings); + GoogleCloudStorageBlobStore(String bucketName, String clientName, GoogleCloudStorageService storageService) { this.bucketName = bucketName; this.clientName = clientName; this.storageService = storageService; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 12e7fd26ff565..3186d2547a327 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -38,14 +38,14 @@ public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin final GoogleCloudStorageService storageService; public GoogleCloudStoragePlugin(final Settings settings) { - this.storageService = createStorageService(settings); + this.storageService = createStorageService(); // eagerly load client settings so that secure settings are readable (not closed) reload(settings); } // overridable for tests - protected GoogleCloudStorageService createStorageService(Settings settings) { - return new GoogleCloudStorageService(settings); + protected GoogleCloudStorageService createStorageService() { + return new GoogleCloudStorageService(); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index fe6c8889bd238..508f6b8cdc4ee 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; 
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -55,6 +56,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic); static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); + private final Settings settings; private final GoogleCloudStorageService storageService; private final BlobPath basePath; private final boolean compress; @@ -66,6 +68,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService) { super(metadata, environment.settings(), namedXContentRegistry); + this.settings = environment.settings(); this.storageService = storageService; String basePath = BASE_PATH.get(metadata.settings()); @@ -88,7 +91,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { @Override protected GoogleCloudStorageBlobStore createBlobStore() { - return new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucket, clientName, storageService); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index b24674da174c3..b38957651df10 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LazyInitializable; @@ -54,10 +53,6 @@ public class GoogleCloudStorageService extends AbstractComponent { */ private final AtomicReference>> clientsCache = new AtomicReference<>(emptyMap()); - public GoogleCloudStorageService(final Settings settings) { - super(settings); - } - /** * Refreshes the client settings and clears the client cache. 
Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 0cc1243f28311..2f23011d4d9b7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.gcs; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.util.Locale; @@ -42,6 +41,6 @@ protected BlobStore newBlobStore() { } catch (final Exception e) { throw new RuntimeException(e); } - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 6d5c1bbf85310..db166a228b576 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -79,17 +79,12 @@ public MockGoogleCloudStoragePlugin(final Settings settings) { } @Override - protected GoogleCloudStorageService createStorageService(Settings settings) { - return new MockGoogleCloudStorageService(settings); + protected GoogleCloudStorageService createStorageService() { + return new MockGoogleCloudStorageService(); } } public static class MockGoogleCloudStorageService extends GoogleCloudStorageService { - - MockGoogleCloudStorageService(Settings settings) { - super(settings); - } - @Override public Storage client(String clientName) { return new MockStorage(BUCKET, blobs); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 4634bd3274a70..e2adfed94bbc9 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.gcs; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.util.Locale; @@ -42,6 +41,6 @@ protected BlobStore newBlobStore() { } catch (final Exception e) { throw new RuntimeException(e); } - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java 
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 0130d2c576cd5..1552282941904 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -63,7 +63,7 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - final GoogleCloudStorageService service = new GoogleCloudStorageService(settings); + final GoogleCloudStorageService service = new GoogleCloudStorageService(); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.client("another_client")); assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index 93ac0550fbc5c..10a06ec2c3092 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -18,15 +18,6 @@ */ package org.elasticsearch.repositories.hdfs; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.net.InetAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Locale; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; @@ -36,13 +27,13 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -50,9 +41,18 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Locale; + public final class HdfsRepository extends BlobStoreRepository { - private static final Logger LOGGER = Loggers.getLogger(HdfsRepository.class); + private static final Logger LOGGER = LogManager.getLogger(HdfsRepository.class); private static final String CONF_SECURITY_PRINCIPAL = "security.principal"; diff --git 
a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 7715c7086a67b..e1ffc7a22d44c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; @@ -55,9 +54,8 @@ class S3BlobStore extends AbstractComponent implements BlobStore { private final StorageClass storageClass; - S3BlobStore(Settings settings, S3Service service, String clientName, String bucket, boolean serverSideEncryption, + S3BlobStore(S3Service service, String clientName, String bucket, boolean serverSideEncryption, ByteSizeValue bufferSize, String cannedACL, String storageClass) { - super(settings); this.service = service; this.clientName = clientName; this.bucket = bucket; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index d884ab0cb97ef..e0e34e40f3cf8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -20,10 +20,13 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.auth.BasicAWSCredentials; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -51,6 +54,8 @@ * */ class S3Repository extends BlobStoreRepository { + private static final Logger logger = LogManager.getLogger(S3Repository.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); static final String TYPE = "s3"; @@ -143,6 +148,8 @@ class S3Repository extends BlobStoreRepository { */ static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); + private final Settings settings; + private final S3Service service; private final String bucket; @@ -173,6 +180,7 @@ class S3Repository extends BlobStoreRepository { final NamedXContentRegistry namedXContentRegistry, final S3Service service) { super(metadata, settings, namedXContentRegistry); + this.settings = settings; this.service = service; // Parse and validate the user's S3 Storage Class setting @@ -237,7 +245,7 @@ class S3Repository extends BlobStoreRepository { protected S3BlobStore createBlobStore() { if (reference != null) { assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name(); - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) { + return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, 
cannedACL, storageClass) { @Override public AmazonS3Reference clientReference() { if (reference.tryIncRef()) { @@ -248,7 +256,7 @@ public AmazonS3Reference clientReference() { } }; } else { - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index da3219f2aef08..a2f9da5f846ef 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -64,7 +64,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo protected final S3Service service; public S3RepositoryPlugin(final Settings settings) { - this(settings, new S3Service(settings)); + this(settings, new S3Service()); } S3RepositoryPlugin(final Settings settings, final S3Service service) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index a431f4da1fdf8..95313f906563d 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import java.io.Closeable; import java.io.IOException; @@ -47,10 +46,6 @@ class S3Service extends AbstractComponent implements Closeable { private volatile Map clientsCache = emptyMap(); private volatile Map clientsSettings = emptyMap(); - S3Service(Settings settings) { - super(settings); - } - /** * Refreshes the settings for the AmazonS3 clients and clears the cache of * existing clients. New clients will be built using these new settings.
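The repository-s3 hunks follow the same pattern: `S3Service` and `S3BlobStore` lose their `Settings` constructor parameters. A sketch of the resulting construction, using the exact argument order shown in `S3Repository.createBlobStore()` above; the client name, bucket, and option values are illustrative:

----------------------------------
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class S3WiringSketch {
    public static void main(String[] args) {
        // The no-arg constructor replaces S3Service(Settings), removed above.
        S3Service service = new S3Service();

        S3BlobStore blobStore = new S3BlobStore(
            service,
            "default",                              // client name (illustrative)
            "my-bucket",                            // bucket (illustrative)
            false,                                  // serverSideEncryption
            new ByteSizeValue(5, ByteSizeUnit.MB),  // bufferSize
            "private",                              // cannedACL
            "standard");                            // storageClass
    }
}
----------------------------------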
Old diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 1c3c47943a06e..ec5d5578a03a2 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -68,11 +68,6 @@ public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws A } static final class ProxyS3Service extends S3Service { - - ProxyS3Service(Settings settings) { - super(settings); - } - @Override AmazonS3 buildClient(final S3ClientSettings clientSettings) { final AmazonS3 client = super.buildClient(clientSettings); @@ -82,7 +77,7 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { } ProxyS3RepositoryPlugin(Settings settings) { - super(settings, new ProxyS3Service(settings)); + super(settings, new ProxyS3Service()); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 3f75ae94aa950..b4c2f81a3f8b8 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -118,7 +118,7 @@ public TestS3RepositoryPlugin(final Settings settings) { @Override public Map getRepositories(final Environment env, final NamedXContentRegistry registry) { return Collections.singletonMap(S3Repository.TYPE, - (metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service(env.settings()) { + (metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service() { @Override AmazonS3 buildClient(S3ClientSettings clientSettings) { return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index 55df03ff34a3f..a44ad706b2361 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -24,7 +24,6 @@ import com.amazonaws.services.s3.model.StorageClass; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreTestCase; @@ -117,13 +116,13 @@ public static S3BlobStore randomMockS3BlobStore() { final String theClientName = randomAlphaOfLength(4); final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); - final S3Service service = new S3Service(Settings.EMPTY) { + final S3Service service = new S3Service() { @Override public synchronized AmazonS3Reference client(String clientName) { assert theClientName.equals(clientName); return new AmazonS3Reference(client); } }; - return new S3BlobStore(Settings.EMPTY, service, theClientName, bucket, 
serverSideEncryption, bufferSize, cannedACL, storageClass); + return new S3BlobStore(service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index b76af23402c05..ecfa8e8d97dc1 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -57,10 +57,6 @@ public void shutdown() { } private static class DummyS3Service extends S3Service { - DummyS3Service() { - super(Settings.EMPTY); - } - @Override public AmazonS3Reference client(String clientName) { return new AmazonS3Reference(new DummyS3Client()); diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 39ce992b7a58e..66f38ca043776 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${permanent_bucket} client: integration_test_permanent - base_path: ${permanent_base_path} + base_path: "${permanent_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_permanent.settings.bucket : ${permanent_bucket} } - match: { repository_permanent.settings.client : "integration_test_permanent" } - - match: { repository_permanent.settings.base_path : ${permanent_base_path} } + - match: { repository_permanent.settings.base_path : "${permanent_base_path}" } - match: { repository_permanent.settings.canned_acl : "private" } - match: { repository_permanent.settings.storage_class : "standard" } - is_false: repository_permanent.settings.access_key diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 497d85db752db..3f5685aa561fe 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${temporary_bucket} client: integration_test_temporary - base_path: ${temporary_base_path} + base_path: "${temporary_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_temporary.settings.bucket : ${temporary_bucket} } - match: { repository_temporary.settings.client : "integration_test_temporary" } - - match: { repository_temporary.settings.base_path : ${temporary_base_path} } + - match: { repository_temporary.settings.base_path : "${temporary_base_path}" } - match: { repository_temporary.settings.canned_acl : "private" } - match: { repository_temporary.settings.storage_class : "standard" } - is_false: repository_temporary.settings.access_key diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml 
b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index 2df3b8290a19b..d021df267934d 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${ec2_bucket} client: integration_test_ec2 - base_path: ${ec2_base_path} + base_path: "${ec2_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_ec2.settings.bucket : ${ec2_bucket} } - match: { repository_ec2.settings.client : "integration_test_ec2" } - - match: { repository_ec2.settings.base_path : ${ec2_base_path} } + - match: { repository_ec2.settings.base_path : "${ec2_base_path}" } - match: { repository_ec2.settings.canned_acl : "private" } - match: { repository_ec2.settings.storage_class : "standard" } - is_false: repository_ec2.settings.access_key diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 54929e6e3ad82..dec0476edc713 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${ecs_bucket} client: integration_test_ecs - base_path: ${ecs_base_path} + base_path: "${ecs_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_ecs.settings.bucket : ${ecs_bucket} } - match: { repository_ecs.settings.client : "integration_test_ecs" } - - match: { repository_ecs.settings.base_path : ${ecs_base_path} } + - match: { repository_ecs.settings.base_path : "${ecs_base_path}" } - match: { repository_ecs.settings.canned_acl : "private" } - match: { repository_ecs.settings.storage_class : "standard" } - is_false: repository_ecs.settings.access_key diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java index 9848c26022e37..1ffffdf0d315d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java @@ -48,7 +48,6 @@ public final class NioCorsConfig { private final long maxAge; private final Set allowedRequestMethods; private final Set allowedRequestHeaders; - private final boolean allowNullOrigin; private final Map> preflightHeaders; private final boolean shortCircuit; @@ -61,7 +60,6 @@ public final class NioCorsConfig { maxAge = builder.maxAge; allowedRequestMethods = builder.requestMethods; allowedRequestHeaders = builder.requestHeaders; - allowNullOrigin = builder.allowNullOrigin; preflightHeaders = builder.preflightHeaders; shortCircuit = builder.shortCircuit; } @@ -108,19 +106,6 @@ public boolean isOriginAllowed(final String origin) { return false; } - /** - * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded - * from the local file system. 
- * - * If isNullOriginAllowed is true then the server will response with the wildcard for the - * the CORS response header 'Access-Control-Allow-Origin'. - * - * @return {@code true} if a 'null' origin should be supported. - */ - public boolean isNullOriginAllowed() { - return allowNullOrigin; - } - /** * Determines if credentials are supported for CORS requests. * diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java index 333e4931aa1f1..62eda913b0ac7 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java @@ -49,19 +49,6 @@ public static NioCorsConfigBuilder forAnyOrigin() { return new NioCorsConfigBuilder(); } - /** - * Creates a {@link NioCorsConfigBuilder} instance with the specified origin. - * - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - public static NioCorsConfigBuilder forOrigin(final String origin) { - if ("*".equals(origin)) { - return new NioCorsConfigBuilder(); - } - return new NioCorsConfigBuilder(origin); - } - - /** * Create a {@link NioCorsConfigBuilder} instance with the specified pattern origin. * @@ -87,14 +74,12 @@ public static NioCorsConfigBuilder forOrigins(final String... origins) { Optional> origins; Optional pattern; final boolean anyOrigin; - boolean allowNullOrigin; boolean enabled = true; boolean allowCredentials; long maxAge; final Set requestMethods = new HashSet<>(); final Set requestHeaders = new HashSet<>(); final Map> preflightHeaders = new HashMap<>(); - private boolean noPreflightHeaders; boolean shortCircuit; /** @@ -130,18 +115,6 @@ public static NioCorsConfigBuilder forOrigins(final String... origins) { anyOrigin = false; } - /** - * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded - * from the local file system. Calling this method will enable a successful CORS response - * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'. - * - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - NioCorsConfigBuilder allowNullOrigin() { - allowNullOrigin = true; - return this; - } - /** * Disables CORS support. * @@ -219,71 +192,6 @@ public NioCorsConfigBuilder allowedRequestHeaders(final String... headers) { return this; } - /** - * Returns HTTP response headers that should be added to a CORS preflight response. - * - * An intermediary like a load balancer might require that a CORS preflight request - * have certain headers set. This enables such headers to be added. - * - * @param name the name of the HTTP header. - * @param values the values for the HTTP header. - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Object... values) { - if (values.length == 1) { - preflightHeaders.put(name, new ConstantValueGenerator(values[0])); - } else { - preflightResponseHeader(name, Arrays.asList(values)); - } - return this; - } - - /** - * Returns HTTP response headers that should be added to a CORS preflight response. - * - * An intermediary like a load balancer might require that a CORS preflight request - * have certain headers set. This enables such headers to be added. - * - * @param name the name of the HTTP header. 
- * @param value the values for the HTTP header. - * @param the type of values that the Iterable contains. - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Iterable value) { - preflightHeaders.put(name, new ConstantValueGenerator(value)); - return this; - } - - /** - * Returns HTTP response headers that should be added to a CORS preflight response. - * - * An intermediary like a load balancer might require that a CORS preflight request - * have certain headers set. This enables such headers to be added. - * - * Some values must be dynamically created when the HTTP response is created, for - * example the 'Date' response header. This can be accomplished by using a Callable - * which will have its 'call' method invoked when the HTTP response is created. - * - * @param name the name of the HTTP header. - * @param valueGenerator a Callable which will be invoked at HTTP response creation. - * @param the type of the value that the Callable can return. - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Callable valueGenerator) { - preflightHeaders.put(name, valueGenerator); - return this; - } - - /** - * Specifies that no preflight response headers should be added to a preflight response. - * - * @return {@link NioCorsConfigBuilder} to support method chaining. - */ - public NioCorsConfigBuilder noPreflightResponseHeaders() { - noPreflightHeaders = true; - return this; - } - /** * Specifies that a CORS request should be rejected if it's invalid before being * further processing. @@ -305,7 +213,7 @@ public NioCorsConfigBuilder shortCircuit() { * @return {@link NioCorsConfig} the configured CorsConfig instance. 
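A quick usage sketch for the slimmed-down CORS builder, restricted to the methods this patch leaves in place (`forOrigins`, `allowedRequestHeaders`, `shortCircuit`, `build`); the origins and header names are illustrative:

----------------------------------
public class CorsBuilderSketch {
    public static void main(String[] args) {
        // With no preflight headers configured explicitly, build() falls back
        // to the 'date' and 'content-length' defaults in the hunk below, now
        // that the noPreflightResponseHeaders() escape hatch is removed.
        NioCorsConfig config = NioCorsConfigBuilder
            .forOrigins("https://example.com", "https://example.org")
            .allowedRequestHeaders("X-Requested-With", "Content-Type")
            .shortCircuit()
            .build();

        // Check a request origin against the configured set.
        boolean allowed = config.isOriginAllowed("https://example.com");
    }
}
----------------------------------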
*/ public NioCorsConfig build() { - if (preflightHeaders.isEmpty() && !noPreflightHeaders) { + if (preflightHeaders.isEmpty()) { preflightHeaders.put("date", DateValueGenerator.INSTANCE); preflightHeaders.put("content-length", new ConstantValueGenerator("0")); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java index 5a9d114d67551..dfb531992f8ef 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java @@ -167,11 +167,6 @@ private void setPreflightHeaders(final HttpResponse response) { private boolean setOrigin(final HttpResponse response) { final String origin = request.headers().get(HttpHeaderNames.ORIGIN); if (!Strings.isNullOrEmpty(origin)) { - if ("null".equals(origin) && config.isNullOriginAllowed()) { - setAnyOrigin(response); - return true; - } - if (config.isAnyOriginSupported()) { if (config.isCredentialsAllowed()) { echoRequestOrigin(response); @@ -201,10 +196,6 @@ private boolean validateOrigin() { return true; } - if ("null".equals(origin) && config.isNullOriginAllowed()) { - return true; - } - // if the origin is the same as the host of the request, then allow if (isSameOrigin(origin, request.headers().get(HttpHeaderNames.HOST))) { return true; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 129f0ada77d5d..15f7d1e28943f 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -41,7 +41,6 @@ import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; -import org.elasticsearch.transport.Transports; import java.io.IOException; import java.net.InetSocketAddress; @@ -57,8 +56,6 @@ public class NioTransport extends TcpTransport { - private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; - public static final Setting NIO_WORKER_COUNT = new Setting<>("transport.nio.worker_count", (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), @@ -94,7 +91,7 @@ protected NioTcpChannel initiateChannel(DiscoveryNode node, ActionListener protected void doStart() { boolean success = false; try { - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), NioTransport.NIO_WORKER_COUNT.get(settings), (s) -> new EventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java index b29df77cae1bb..826bfd6585f42 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport.nio; import 
org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.NioIntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.common.logging.Loggers; @@ -37,12 +38,12 @@ public class NioTransportLoggingIT extends NioIntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.addAppender(LogManager.getLogger(TransportLogger.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(TransportLogger.class), appender); + Loggers.removeAppender(LogManager.getLogger(TransportLogger.class), appender); appender.stop(); super.tearDown(); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index bebdb320db4a1..8438c002c2a4e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -27,7 +27,6 @@ import org.apache.logging.log4j.core.appender.ConsoleAppender; import org.apache.logging.log4j.core.appender.CountingNoOpAppender; import org.apache.logging.log4j.core.config.Configurator; -import org.apache.logging.log4j.spi.ExtendedLogger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; import org.elasticsearch.cli.UserException; @@ -301,7 +300,7 @@ public void testPrefixLogger() throws IOException, IllegalAccessException, UserE setupLogging("prefix"); final String prefix = randomAlphaOfLength(16); - final Logger logger = new PrefixLogger((ExtendedLogger) LogManager.getLogger("prefix_test"), "prefix_test", prefix); + final Logger logger = new PrefixLogger(LogManager.getLogger("prefix_test"), prefix); logger.info("test"); logger.info("{}", "test"); final Exception e = new Exception("exception"); @@ -332,7 +331,7 @@ public void testPrefixLoggerMarkersCanBeCollected() throws IOException, UserExce final int prefixes = 1 << 19; // to ensure enough markers that the GC should collect some when we force a GC below for (int i = 0; i < prefixes; i++) { // this has the side effect of caching a marker with this prefix - new PrefixLogger((ExtendedLogger) LogManager.getLogger("prefix" + i), "prefix" + i, "prefix" + i); + new PrefixLogger(LogManager.getLogger("logger" + i), "prefix" + i); } System.gc(); // this will free the weakly referenced keys in the marker cache diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 8b305462e4dff..e812af9522b0e 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -98,7 +98,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsIndexCompatible) { + for (final def version : bwcVersions.unreleasedIndexCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index ac57d51def7c6..62de043b5cca5 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -65,7 +65,7 @@ test.enabled = false // no unit tests for rolling 
upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index bfd37863cc246..4e27511fe04fb 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -145,7 +145,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 19c0549d9d2ad..a0e33350417e8 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -182,7 +182,7 @@ private String getNodeId(Predicate versionPredicate) throws IOException return null; } - + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34950") public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 3e293f91ce12a..7408b96be1174 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -82,6 +82,9 @@ --- "Find a task result record from the old cluster": + - skip: + features: headers + - do: search: index: .tasks diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 81724bd72ab0a..04f01cf0f0e4c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -298,7 +298,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - loggingFilter.set(new LoggingFilter(clusterService.getSettings(), threadPool)); + loggingFilter.set(new LoggingFilter(threadPool)); return Collections.emptyList(); } @@ -313,8 +313,7 @@ public static class LoggingFilter extends ActionFilter.Simple { private final ThreadPool threadPool; - public LoggingFilter(Settings settings, ThreadPool pool) { - super(settings); + public LoggingFilter(ThreadPool pool) { this.threadPool = pool; } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java 
b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java index 11dcfd76fa5ac..27b2f18b0919f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java @@ -18,7 +18,10 @@ */ package org.elasticsearch.http; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,6 +45,8 @@ * and returns their values. */ public class TestDeprecationHeaderRestAction extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(TestDeprecationHeaderRestAction.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public static final Setting TEST_DEPRECATED_SETTING_TRUE1 = Setting.boolSetting("test.setting.deprecated.true1", true, @@ -53,7 +58,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { Setting.boolSetting("test.setting.not_deprecated", false, Setting.Property.NodeScope, Setting.Property.Dynamic); - private static final Map> SETTINGS; + private static final Map> SETTINGS_MAP; static { Map> settingsMap = new HashMap<>(3); @@ -62,14 +67,17 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { settingsMap.put(TEST_DEPRECATED_SETTING_TRUE2.getKey(), TEST_DEPRECATED_SETTING_TRUE2); settingsMap.put(TEST_NOT_DEPRECATED_SETTING.getKey(), TEST_NOT_DEPRECATED_SETTING); - SETTINGS = Collections.unmodifiableMap(settingsMap); + SETTINGS_MAP = Collections.unmodifiableMap(settingsMap); } public static final String DEPRECATED_ENDPOINT = "[/_test_cluster/deprecated_settings] exists for deprecated tests"; public static final String DEPRECATED_USAGE = "[deprecated_settings] usage is deprecated. 
use [settings] instead"; + private final Settings settings; + public TestDeprecationHeaderRestAction(Settings settings, RestController controller) { super(settings); + this.settings = settings; controller.registerAsDeprecatedHandler(RestRequest.Method.GET, "/_test_cluster/deprecated_settings", this, DEPRECATED_ENDPOINT, deprecationLogger); @@ -102,7 +110,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client builder.startObject().startArray("settings"); for (String setting : settings) { - builder.startObject().field(setting, SETTINGS.get(setting).getRaw(this.settings)).endObject(); + builder.startObject().field(setting, SETTINGS_MAP.get(setting).getRaw(this.settings)).endObject(); } builder.endArray().endObject(); channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 30c879ec6146e..46b5df8ae0c09 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -57,7 +57,7 @@ test.enabled = false task integTest { if (project.bwc_tests_enabled) { - final def version = bwcVersions.snapshotsIndexCompatible.first() + final def version = bwcVersions.unreleasedIndexCompatible.first() dependsOn "v${version}#bwcTest" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 61fd9ba3513ca..96c88ae933840 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -45,11 +45,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "Default list of fields to exclude from the returned _source field, can be overridden on each sub-request" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "Default list of fields to extract and return from the _source field, can be overridden on each sub-request" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 96fa4daf12b95..d14f4ab784a57 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -20,6 +20,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 4c7c3240dc29a..dfdc00680828f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -102,11 +102,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index daef35ce6d3b1..2a7a1ffc7286d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -51,11 +51,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 3e561a21146e6..f08aa8392521c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -47,11 +47,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 0f0d8c132b395..4e0c57850a891 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -69,11 +69,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 9f26ca565b293..65b0261985656 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -54,6 +54,14 @@ "type" : "list", "description" : "True or false to return the _source field or not, 
or a list of fields to return" }, + "_source_excludes": { + "type" : "list", + "description" : "A list of fields to exclude from the returned _source field" + }, + "_source_includes": { + "type" : "list", + "description" : "A list of fields to extract and return from the _source field" + }, "_source_exclude": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index 36065a13f6e99..f8d116abe87ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -47,11 +47,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json index 46f595b2186e0..62fbb59a4e451 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json @@ -40,11 +40,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json index 487beaba86520..0a566df35bbc2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json @@ -1,6 +1,6 @@ { "nodes.reload_secure_settings": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-reload-secure-settings.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings", "methods": ["POST"], "url": { "path": "/_nodes/reload_secure_settings", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 751c2797b9deb..a1fdf7dbd83c6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -58,6 +58,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" @@ -105,11 +109,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index a78295dd4f5a3..df1fc1b079923 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -20,6 +20,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index a63e248d00f6e..f1294e57cd30c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -34,11 +34,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 3e77f7cd145f5..427a7e04ad8fb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -106,11 +106,11 @@ "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" }, - "_source_exclude": { + "_source_excludes": { "type" : "list", "description" : "A list of fields to exclude from the returned _source field" }, - "_source_include": { + "_source_includes": { "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml index bf4bd0795740d..3811eb8a18cc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml @@ -73,7 +73,7 @@ bulk: include_type_name: false index: test_index - _source_include: foo + _source_includes: foo body: | { "update": { "_id": "test_id_3" } } { "doc": { "foo": "garply" } } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml index c852c376cc06f..3c8a86c13bdac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml @@ -66,7 +66,7 @@ bulk: index: test_index type: test_type - _source_include: foo + _source_includes: foo body: | { "update": { "_id": "test_id_3" } } { "doc": { "foo": "garply" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml index 04aac33094916..e13edf7be5046 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/20_source_filtering.yml @@ -27,18 +27,18 @@ - is_false: get._source.include.field2 - do: - explain: { index: test_1, type: test, id: 1, _source_include: include.field1, body: { query: { match_all: {}} } } + explain: { index: test_1, type: test, id: 1, _source_includes: include.field1, body: { query: { match_all: {}} } } - match: { get._source.include.field1: v1 } - is_false: get._source.include.field2 - do: - explain: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2", body: { query: { match_all: {}} } } + explain: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2", body: { query: { match_all: {}} } } - match: { get._source.include.field1: v1 } - match: { get._source.include.field2: v2 } - is_false: get._source.count - do: - explain: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2", body: { query: { match_all: {}} } } + explain: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2", body: { query: { match_all: {}} } } - match: { get._source.include.field1: v1 } - is_false: get._source.include.field2 - is_false: get._source.count diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml index 55520000e2f83..f9247d1076159 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml @@ -40,18 +40,18 @@ - is_false: _source.include.field2 - do: - get: { include_type_name: false, index: test_1, id: 1, _source_include: include.field1 } + get: { include_type_name: false, index: test_1, id: 1, _source_includes: include.field1 } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - do: - get: { include_type_name: false, index: test_1, id: 1, _source_include: "include.field1,include.field2" } + get: { include_type_name: false, index: test_1, id: 1, _source_includes: "include.field1,include.field2" } - match: { _source.include.field1: v1 } - match: { _source.include.field2: v2 } - is_false: _source.count - do: - get: { include_type_name: false, index: test_1, id: 1, _source_include: include, _source_exclude: "*.field2" } + get: { include_type_name: false, index: test_1, id: 1, _source_includes: include, _source_excludes: "*.field2" } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - is_false: _source.count diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml index c858886ca3df3..ca629cfa6aafe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/71_source_filtering_with_types.yml @@ -36,18 +36,18 @@ - is_false: _source.include.field2 - do: - get: { index: test_1, type: test, id: 1, _source_include: include.field1 } + get: { index: test_1, type: test, id: 1, _source_includes: include.field1 } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - do: - get: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2" } + get: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2" } - match: { _source.include.field1: v1 } - match: { _source.include.field2: v2 } - is_false: _source.count - do: - get: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2" } + get: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2" } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - is_false: _source.count diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml index cc168ac25d39f..c9cb2b00db038 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/70_source_filtering.yml @@ -9,18 +9,18 @@ body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - do: - get_source: { index: test_1, type: test, id: 1, _source_include: include.field1 } + get_source: { index: test_1, type: test, id: 1, _source_includes: include.field1 } - match: { include.field1: v1 } - is_false: include.field2 - do: - get_source: { index: test_1, type: test, id: 1, _source_include: "include.field1,include.field2" } + get_source: { index: test_1, type: test, id: 1, _source_includes: "include.field1,include.field2" } - match: { include.field1: v1 } - match: { include.field2: v2 } - is_false: count - do: - get_source: { index: test_1, type: test, id: 1, _source_include: include, _source_exclude: "*.field2" } + get_source: { index: test_1, type: test, id: 1, _source_includes: include, _source_excludes: "*.field2" } - match: { include.field1: v1 } - is_false: include.field2 - is_false: count diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml index a1c16b30408a4..4581e060b41a7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/70_source_filtering.yml @@ -100,7 +100,7 @@ setup: - do: mget: - _source_include: "include.field1,count" + _source_includes: "include.field1,count" index: test_1 body: { ids: [ 1,2 ] } - match: { docs.0._source: { include: { field1: v1 }, count: 1} } @@ -111,8 +111,8 @@ setup: - do: mget: - _source_include: include - _source_exclude: "*.field2" + _source_includes: include + _source_excludes: "*.field2" index: test_1 body: { ids: [ 1,2 ] } - match: { docs.0._source: { include: { field1: v1 } } } diff --git 
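Zooming out from the YAML updates above: the rest-api-spec changes rename `_source_include`/`_source_exclude` to `_source_includes`/`_source_excludes` across the bulk, search, get, and update-by-query families (get.json keeps the old singular forms listed alongside the new ones). A hedged sketch of the renamed parameters through the low-level REST client; the host, index, and field names are placeholders:

----------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class SourceFilteringSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/test_1/_search");
            // The plural parameter names introduced by this patch.
            request.addParameter("_source_includes", "include.field1");
            request.addParameter("_source_excludes", "*.field2");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----------------------------------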
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml new file mode 100644 index 0000000000000..b52027f98c83f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml @@ -0,0 +1,143 @@ +setup: + - skip: + version: " - 6.5.99" + reason: "added in 6.6.0" + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + int_field: + type: integer + double_field: + type: double + incomplete_field: + type: integer + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: _doc + - int_field: 100 + double_field: 100.0 + incomplete_field: 1000 + - index: + _index: test + _type: _doc + - int_field: 200 + double_field: 200.0 + incomplete_field: 2000 + - index: + _index: test + _type: _doc + - int_field: 300 + double_field: 300.0 + +--- +"basic test": + + - do: + search: + body: + aggs: + mad_int: + median_absolute_deviation: + field: int_field + mad_double: + median_absolute_deviation: + field: double_field + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + + - match: { aggregations.mad_int.value: 100 } + - match: { aggregations.mad_double.value: 100 } + +--- +"with setting compression": + + - do: + search: + body: + aggs: + mad_int: + median_absolute_deviation: + field: int_field + compression: 500 + mad_double: + median_absolute_deviation: + field: double_field + compression: 500 + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + + - match: { aggregations.mad_int.value: 100 } + - match: { aggregations.mad_double.value: 100 } + +--- +"no documents": + + - do: + search: + body: + query: + bool: + filter: + term: + non_existent_field: non_existent_value + aggs: + mad_no_docs: + median_absolute_deviation: + field: non_existent_field + + - match: { hits.total: 0 } + - length: { hits.hits: 0 } + + - match: { aggregations.mad_no_docs.value: null } + +--- +"missing value": + + - do: + search: + body: + aggs: + mad_missing: + median_absolute_deviation: + field: incomplete_field + missing: 3000 + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + + - match: { aggregations.mad_missing.value: 1000 } + +--- +"bad arguments": + + - do: + catch: /\[compression\] must be greater than 0. Found \[0.0\] in \[mad\]/ + search: + body: + aggs: + mad: + median_absolute_deviation: + field: int_field + compression: 0 + + - do: + catch: /\[compression\] must be greater than 0. 
Found \[-1.0\] in \[mad\]/ + search: + body: + aggs: + mad: + median_absolute_deviation: + field: int_field + compression: -1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 1b5f9856391b0..e5277c0edcbbb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -49,20 +49,20 @@ setup: - is_false: hits.hits.0._source.include.field2 --- -"_source include and _source in body": - - do: { search: { _source_include: include.field1, body: { _source: include.field2, query: { match_all: {} } } } } +"_source_includes and _source in body": + - do: { search: { _source_includes: include.field1, body: { _source: include.field2, query: { match_all: {} } } } } - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 --- -"_source_include": - - do: { search: { _source_include: include.field1, body: { query: { match_all: {} } } } } +"_source_includes": + - do: { search: { _source_includes: include.field1, body: { query: { match_all: {} } } } } - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 --- -"_source_exclude": - - do: { search: { _source_exclude: count, body: { query: { match_all: {} } } } } +"_source_excludes": + - do: { search: { _source_excludes: count, body: { query: { match_all: {} } } } } - match: { hits.hits.0._source.include: { field1 : v1 , field2: v2 }} - is_false: hits.hits.0._source.count diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3e54326a6c787 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +6bb87c96d76cdc70be77261d39376613b0a8860c \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 4d9522f10de5b..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dce55e44af096cb9029cb26d22a14d8a9c5223ce \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..187572e525147 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +1b29b3e3b080ec32073c007a1940e5aa7b195316 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index c86294acf5a3e..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1d941758dc91ea7c2d515dd97b5d9b23b0f1874 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..68553b80b1a1b --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ 
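The expected values in the new median_absolute_deviation test above follow directly from the definition: take the median of the sample, then the median of the absolute deviations from it. The aggregation approximates both medians with a t-digest, which is why `compression` must be strictly positive (hence the two "bad arguments" cases). A self-contained sketch of the exact computation for the tiny samples in the test:

-------------------------------------------------------
import java.util.Arrays;

public class MadByHand {

    // Exact MAD for a small sample; the aggregation only approximates this.
    static double mad(double... values) {
        double center = median(values.clone());
        double[] deviations = Arrays.stream(values).map(v -> Math.abs(v - center)).toArray();
        return median(deviations);
    }

    static double median(double[] v) {
        Arrays.sort(v);
        int n = v.length;
        return n % 2 == 1 ? v[n / 2] : (v[n / 2 - 1] + v[n / 2]) / 2.0;
    }

    public static void main(String[] args) {
        // "basic test": docs hold 100, 200, 300 -> median 200, deviations {100, 0, 100} -> 100.0
        System.out.println(mad(100, 200, 300));
        // "missing value": 1000, 2000 and one missing value filled with 3000 -> 1000.0
        System.out.println(mad(1000, 2000, 3000));
    }
}
-------------------------------------------------------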
-0,0 +1 @@ +3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 75200bc0c1525..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..75c05f55ed83b --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +c918cc5ac54e5a4dba4740e9e45a93ebd3c95c77 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index b1ae597fadfb7..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3870972c07d7fa41a3bc58eb65952da53a16a406 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..afd8b925614fe --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +6cff1fa9ac25c840589d9a39a42ed4629b594cf4 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 02935671ce899..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8f0b73cfd01fc48735f1e06f16f7ccb47fc183e \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..6b525fa5ea64b --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +2a843337e03493ab5f3498b5dd232fa9abb9e765 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index fdfab321a6791..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d253fae720355e2ff40d529d62c2b3de403d0d0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..9487a7fa579a0 --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +afda00bbee5fb8b4c36867eabb83267b3b2b8c10 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index d7c9cdf3e41d6..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9ca14bcda331a425d2d7c16022fdfd1c6942924 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 
b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3e6fe1ce378c4 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +a2d8bc6a0486cfa6b4de8c1103017b35c0193544 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 93ec704aeaeb0..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -200454bbfe5ec93d941d9a9d27703883122a4522 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..dbb72428046fd --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +79a3b80245a9cf00f24f5d6e298a8e1a887760f1 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index d57b6be7fbf31..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47915a125e54c845a4b540201cda88dc7612da08 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..db1d47c8307d0 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +37c9970ec38f64e7ccecbe17efbabdaabe8da2ea \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 0ed04b6f69b41..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5d49e1c6ee7550234539314e600e2893e13cb80 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..0e7ba7aeb9e94 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +7103c3482c728a9788922aa39e39a5ed2bdd3a11 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 41c6a4a243ed7..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68081b60905f1b53b3705b9cfa4403b8aba44352 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..bba0f7269e45e --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +89d389c1020fac58f462819ad822c9b09e52f563 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 63734717b2fbc..0000000000000 --- 
a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c99d56a453cecc7258300fd04b438713b944f1b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1d8884aa8f23d --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b62e34e522f3afa9c3f1655b97b995ff6ba2592d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 3fa056da3db0a..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2471966478f829b6455556346014f02ff59f50c0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1ff50782c1780 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +0c92f6b03eb226586b431a834dca90a1f2cd85b8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index bd3d2e719a0ae..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46e012be699251306ad13f4582c30d79cea4b307 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..dd4d9e0665e6c --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +3a659287ba728f7a0d81694ce32e9ef741a13c19 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 8a4fc23cfcdae..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dea19dd9e971d2a0171e7d78662f732b45148a27 \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 563414171e98f..a4791e85ef3ca 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -53,7 +53,7 @@ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight } @Override - void flatten(Query sourceQuery, IndexReader reader, Collection flatQueries, float boost) throws IOException { + protected void flatten(Query sourceQuery, IndexReader reader, Collection flatQueries, float boost) throws IOException { if (sourceQuery instanceof BoostQuery) { BoostQuery bq = (BoostQuery) sourceQuery; sourceQuery = bq.getQuery(); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0f636f76d8ae5..2e73d849a86ac 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ 
b/server/src/main/java/org/elasticsearch/Version.java @@ -105,10 +105,12 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_3_ID = 6040399; public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_4_ID = 6040499; + public static final Version V_6_4_4 = new Version(V_6_4_4_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; - public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); @@ -131,6 +133,8 @@ public static Version fromId(int id) { return V_6_6_0; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_4_ID: + return V_6_4_4; case V_6_4_3_ID: return V_6_4_3; case V_6_4_2_ID: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index cc1bb9bb17f44..4e589f613a6fb 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -384,7 +384,7 @@ public ActionModule(boolean transportClient, Settings settings, IndexNameExpress if (transportClient) { restController = null; } else { - restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService, usageService); + restController = new RestController(headers, restWrapper, nodeClient, circuitBreakerService, usageService); } } diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index b0d553534e44d..e2d01aad230bd 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -35,12 +35,25 @@ */ public interface DocWriteRequest extends IndicesRequest { + /** + * Set the index for this request + * @return the Request + */ + T index(String index); + /** * Get the index that this request operates on * @return the index */ String index(); + + /** + * Set the type for this request + * @return the Request + */ + T type(String type); + /** * Get the type that this request operates on * @return the type diff --git a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index 7d8dbd1f975bd..a4c3e17e80208 100644 --- a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -20,7 +20,6 @@ package org.elasticsearch.action; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -28,14 +27,13 
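The DocWriteRequest hunk above promotes the index and type setters into the interface, each returning the concrete request type. Assuming the `T index(String)` and `T type(String)` signatures shown, a hypothetical helper illustrates what the fluent return type buys: bulk items can now be retargeted generically, without casting to IndexRequest, UpdateRequest, or DeleteRequest:

-------------------------------------------------------
import org.elasticsearch.action.DocWriteRequest;

public class RetargetSketch {
    // Hypothetical helper: works for any concrete DocWriteRequest implementation.
    static <T extends DocWriteRequest<T>> T retarget(T request, String index, String type) {
        return request.index(index).type(type);
    }
}
-------------------------------------------------------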
@@ /** * A generic proxy that will execute the given action against a specific node. */ -public class TransportActionNodeProxy extends AbstractComponent { +public class TransportActionNodeProxy { private final TransportService transportService; private final Action action; private final TransportRequestOptions transportOptions; public TransportActionNodeProxy(Settings settings, Action action, TransportService transportService) { - super(settings); this.action = action; this.transportService = transportService; this.transportOptions = action.transportOptions(settings); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 6aaece4c986f2..e3ee0dd7b1524 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -31,16 +31,15 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.MoveDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -60,12 +59,12 @@ public class TransportClusterAllocationExplainAction private final GatewayAllocator gatewayAllocator; @Inject - public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportClusterAllocationExplainAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) { - super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterAllocationExplainRequest::new, indexNameExpressionResolver); this.clusterInfoService = clusterInfoService; this.allocationDeciders = allocationDeciders; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index 85ab7a1dbaace..c79aac2afaf1a 100644 --- 
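The hunk above is one instance of a pattern repeated throughout the rest of this diff: with AbstractComponent no longer holding a Settings field, the pass-through `Settings settings` constructor parameter and the `super(settings, ...)` call disappear. A self-contained stand-in sketch of the before/after shape (all names here are hypothetical, not Elasticsearch classes):

-------------------------------------------------------
class Component {                    // stand-in for the old AbstractComponent
    Component() {}                   // previously: Component(Settings settings)
}

class NodeProxy extends Component {  // stand-in for e.g. TransportActionNodeProxy
    private final String actionName;

    NodeProxy(String actionName) {   // previously: NodeProxy(Settings settings, String actionName)
        // previously also required: super(settings);
        this.actionName = actionName;
    }
}
-------------------------------------------------------

Settings that are still used for behaviour, such as `action.transportOptions(settings)` in TransportActionNodeProxy, continue to be passed in; only the boilerplate required by the base class goes away.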
a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -26,7 +26,8 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; -public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class ClusterHealthRequestBuilder + extends MasterNodeReadOperationRequestBuilder { public ClusterHealthRequestBuilder(ElasticsearchClient client, ClusterHealthAction action) { super(client, action, new ClusterHealthRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 255f70c56fe6b..1e8d2adbff829 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -25,10 +25,10 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.IndexNotFoundException; @@ -47,15 +46,16 @@ import java.util.function.Predicate; -public class TransportClusterHealthAction extends TransportMasterNodeReadAction { +public class TransportClusterHealthAction + extends TransportMasterNodeReadAction { private final GatewayAllocator gatewayAllocator; @Inject - public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportClusterHealthAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) { - super(settings, ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters, + super(ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters, ClusterHealthRequest::new, indexNameExpressionResolver); this.gatewayAllocator = gatewayAllocator; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 1709151e824d9..02a7cf3ebdde4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; -public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder { +public class NodesHotThreadsRequestBuilder + extends NodesOperationRequestBuilder { public NodesHotThreadsRequestBuilder(ElasticsearchClient client, NodesHotThreadsAction action) { super(client, action, new NodesHotThreadsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 1207300208f91..6321813f189fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -42,9 +41,9 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction { @Inject - public TransportNodesHotThreadsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public TransportNodesHotThreadsAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, NodesHotThreadsAction.NAME, threadPool, clusterService, transportService, actionFilters, + super(NodesHotThreadsAction.NAME, threadPool, clusterService, transportService, actionFilters, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeHotThreads.class); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index a422f33fd6efd..a1f9790af9351 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -43,9 +42,9 @@ public class TransportNodesInfoAction extends TransportNodesAction { +public class NodesStatsRequestBuilder + extends NodesOperationRequestBuilder { public NodesStatsRequestBuilder(ElasticsearchClient client, NodesStatsAction action) { super(client, action, new NodesStatsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index d9c382d25ad5e..1028da916a2c1 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -43,10 +42,9 @@ public class TransportNodesStatsAction extends TransportNodesAction { @@ -38,9 +37,9 @@ public final class TransportRemoteInfoAction extends HandledTransportAction) RemoteInfoRequest::new); this.remoteClusterService = searchTransportService.getRemoteClusterService(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index b6e8de24c5d7b..6b1fc0a8ed13e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -26,7 +26,8 @@ /** * Builder for unregister repository request */ -public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder { +public class DeleteRepositoryRequestBuilder + extends AcknowledgedRequestBuilder { /** * Constructs unregister repository request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index a39bb12e31e24..469c14f49bd40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -43,10 +42,11 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, + final ActionListener listener) { repositoriesService.unregisterRepository( new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name()) .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index e21aa19f7f849..d20915e617b3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -26,7 +26,8 @@ /** * Get repository request builder */ -public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class GetRepositoriesRequestBuilder + extends MasterNodeReadOperationRequestBuilder { /** * Creates new get repository request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index c7474fc28cc05..4b3ee1cd9251a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -49,9 +48,11 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadAction { @Inject - public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetRepositoriesRequest::new, indexNameExpressionResolver); + public TransportGetRepositoriesAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetRepositoriesRequest::new, indexNameExpressionResolver); } @Override @@ -70,7 +71,8 @@ protected ClusterBlockException checkBlock(GetRepositoriesRequest request, Clust } @Override - protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) { + protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, + final ActionListener listener) { MetaData metaData = state.metaData(); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index ea62bb4eee60a..9f17a6ac43a3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -30,7 +30,8 @@ /** * Register repository request builder */ -public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder { +public class PutRepositoryRequestBuilder + extends AcknowledgedRequestBuilder 
{ /** * Constructs register repository request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 6f0339f46eecf..a495ba72f35b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -43,10 +42,11 @@ public class TransportPutRepositoryAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final PutRepositoryRequest request, ClusterState state, + final ActionListener listener) { repositoriesService.registerRepository( new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 4614085f26e2b..19fa4cbde15ca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.threadpool.ThreadPool; @@ -43,10 +42,11 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, + final ActionListener listener) { repositoriesService.verifyRepository(request.name(), new ActionListener() { @Override public void onResponse(RepositoriesService.VerifyResponse verifyResponse) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 752ceff357a7d..c83443eab9a0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -25,7 +25,8 @@ /** * Builder for unregister repository request */ -public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder { +public class VerifyRepositoryRequestBuilder + extends MasterNodeOperationRequestBuilder { /** * Constructs unregister repository request builder diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 108ce586573d7..061ec41039b43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -43,9 +42,11 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, + final ActionListener listener) { ActionListener logWrapper = ActionListener.wrap( response -> { if (request.dryRun() == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 46ee53aaf97ab..b7aa57cd6e87a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -29,7 +29,8 @@ /** * Builder for a cluster update settings request */ -public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { +public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, ClusterUpdateSettingsAction action) { super(client, action, new ClusterUpdateSettingsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 8360797d66021..cf34094290a87 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -49,10 +48,10 @@ public class TransportClusterUpdateSettingsAction extends private final ClusterSettings clusterSettings; @Inject - public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportClusterUpdateSettingsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { - super(settings, ClusterUpdateSettingsAction.NAME, false, transportService, clusterService, threadPool, actionFilters, 
+ super(ClusterUpdateSettingsAction.NAME, false, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; this.clusterSettings = clusterSettings; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 92edcc5649631..df1028a32b977 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterSearchShardsRequestBuilder(ElasticsearchClient client, ClusterSearchShardsAction action) { super(client, action, new ClusterSearchShardsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index f4f36ca4d65e9..41dce3148c1df 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.search.internal.AliasFilter; @@ -52,10 +51,10 @@ public class TransportClusterSearchShardsAction extends private final IndicesService indicesService; @Inject - public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportClusterSearchShardsAction(TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterSearchShardsRequest::new, indexNameExpressionResolver); this.indicesService = indicesService; } @@ -97,8 +96,8 @@ protected void masterOperation(final ClusterSearchShardsRequest request, final C } Set nodeIds = new HashSet<>(); - GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, - routingMap, request.preference()); + GroupShardsIterator groupShardsIterator = clusterService.operationRouting() + .searchShards(clusterState, concreteIndices, routingMap, request.preference()); ShardRouting shard; ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 
0; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 4022d0497c018..909b0a6360bc3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -30,7 +30,8 @@ /** * Create snapshot request builder */ -public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { +public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs a new create snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index a7a5548552be2..b4320f5b4f72e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; @@ -42,10 +41,11 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, + final ActionListener listener) { final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); SnapshotsService.SnapshotRequest snapshotRequest = new SnapshotsService.SnapshotRequest(request.repository(), snapshotName, "create_snapshot [" + snapshotName + "]") diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 9b723de0e6caa..1e47160903c85 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -26,7 +26,8 @@ /** * Delete snapshot request builder */ -public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { +public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs delete snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index b3c9f089e6301..e53330349b3bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -29,7 +29,6 @@ import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -41,10 +40,11 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, + final ActionListener listener) { snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() { @Override public void onResponse() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 2115bd0bc3b81..052f8da0c7508 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -26,7 +26,8 @@ /** * Get snapshots request builder */ -public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder { +public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new get snapshot request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 2471f027e36b2..23ffbd0dd1e3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.snapshots.SnapshotId; @@ -56,10 +55,10 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction { +public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs new restore snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 32d4800676295..6ce7245503b24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; @@ -50,10 +49,11 
@@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final RestoreSnapshotRequest request, final ClusterState state, + final ActionListener listener) { RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(request.repository(), request.snapshot(), request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(), request.settings(), request.masterNodeTimeout(), request.includeGlobalState(), request.partial(), request.includeAliases(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 37d8ad04d0e7e..0424f858d3388 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -26,7 +26,8 @@ /** * Snapshots status request builder */ -public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder { +public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new snapshot status request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 79c7c776a196f..f5dc9a348640d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -28,15 +28,14 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -62,10 +61,10 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction() { @Override @@ -143,7 +144,8 @@ public void onFailure(Exception e) { } private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List currentSnapshotEntries, - TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException { + TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) + throws IOException { // First process snapshot that are currently processed List builder = new ArrayList<>(); Set currentSnapshotNames = new HashSet<>(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 524e167e3a265..35020556b1ed3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterStateRequestBuilder(ElasticsearchClient client, ClusterStateAction action) { super(client, action, new ClusterStateRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index b7ef075a59afa..743bec1886cab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -45,9 +44,11 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction { +public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder { public ClusterStatsRequestBuilder(ElasticsearchClient client, ClusterStatsAction action) { super(client, action, new ClusterStatsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 66b258670c128..a5c4adc53c42a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.seqno.SeqNoStats; @@ -53,17 +52,17 @@ public class TransportClusterStatsAction extends TransportNodesAction { private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store, - CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments); + CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, + CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments); private final NodeService nodeService; private final IndicesService indicesService; @Inject - public TransportClusterStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, NodeService nodeService, 
IndicesService indicesService, - ActionFilters actionFilters) { - super(settings, ClusterStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, + public TransportClusterStatsAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, + NodeService nodeService, IndicesService indicesService, ActionFilters actionFilters) { + super(ClusterStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, ClusterStatsRequest::new, ClusterStatsNodeRequest::new, ThreadPool.Names.MANAGEMENT, ClusterStatsNodeResponse.class); this.nodeService = nodeService; this.indicesService = indicesService; @@ -126,7 +125,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } - return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()])); + return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, + shardsStats.toArray(new ShardStats[shardsStats.size()])); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index c351ae4a6706d..085850f118777 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportDeleteStoredScriptAction extends TransportMasterNodeAction private final ScriptService scriptService; @Inject - public TransportDeleteStoredScriptAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, ScriptService scriptService) { - super(settings, DeleteStoredScriptAction.NAME, transportService, clusterService, threadPool, actionFilters, + public TransportDeleteStoredScriptAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ScriptService scriptService) { + super(DeleteStoredScriptAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteStoredScriptRequest::new); this.scriptService = scriptService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index 368e40b96b7b3..19e86d1b6722d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportGetStoredScriptAction extends TransportMasterNodeReadActio private final ScriptService scriptService; @Inject - public TransportGetStoredScriptAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetStoredScriptAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ScriptService scriptService) { - super(settings, GetStoredScriptAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetStoredScriptAction.NAME, transportService, clusterService, threadPool, actionFilters, GetStoredScriptRequest::new, indexNameExpressionResolver); this.scriptService = scriptService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index abeb5327c1af5..99fa3979d87d0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportPutStoredScriptAction extends TransportMasterNodeAction { +public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder { public PendingClusterTasksRequestBuilder(ElasticsearchClient client, PendingClusterTasksAction action) { super(client, action, new PendingClusterTasksRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 542b2dd8badc4..cb061a25363cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -29,20 +29,22 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.List; -public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction { +public class 
TransportPendingClusterTasksAction + extends TransportMasterNodeReadAction { private final ClusterService clusterService; @Inject - public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, PendingClusterTasksRequest::new, indexNameExpressionResolver); + public TransportPendingClusterTasksAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, + PendingClusterTasksRequest::new, indexNameExpressionResolver); this.clusterService = clusterService; } @@ -63,7 +65,8 @@ protected PendingClusterTasksResponse newResponse() { } @Override - protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) { + protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, + ActionListener listener) { logger.trace("fetching pending tasks from cluster service"); final List pendingTasks = clusterService.getMasterService().pendingTasks(); logger.trace("done fetching pending tasks from cluster service"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 22e8554ed6aa6..8d2ca9886d000 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -67,7 +67,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest { @Inject - public TransportAliasesExistAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportAliasesExistAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest::new, + super(AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index faa075afca8fa..d1a573e6da8d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction { 
@Inject - public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetAliasesAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest::new, + super(GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 786f8935dba12..d360bc45b8763 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -79,6 +79,7 @@ */ public class TransportAnalyzeAction extends TransportSingleShardAction { + private final Settings settings; private final IndicesService indicesService; private final Environment environment; @@ -86,8 +87,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction private String[] indices; // Delete index should work by default on both open and closed indices. - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true); + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); public DeleteIndexRequest() { } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index a7080209eca4d..50f65312afbf0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -51,11 +50,11 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction { @Inject - public TransportIndicesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportIndicesExistsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, + super(IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index 
223e738ad2068..1211e03ed7911 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction { @Inject - public TransportTypesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportTypesExistsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, TypesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, TypesExistsRequest::new, + super(TypesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, TypesExistsRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 35e19967a3e3a..0f37acf0ad325 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.TransportService; @@ -39,10 +38,10 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction { @Inject - public TransportFlushAction(Settings settings, ClusterService clusterService, TransportService transportService, + public TransportFlushAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportShardFlushAction replicatedFlushAction) { - super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, + super(FlushAction.NAME, FlushRequest::new, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java index 1ab46bfd926c6..6d393734608e3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java @@ -19,17 +19,16 @@ package org.elasticsearch.action.admin.indices.flush; -import java.util.function.Supplier; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import java.util.function.Supplier; + /** * Synced flush Action. */ @@ -38,9 +37,9 @@ public class TransportSyncedFlushAction extends HandledTransportAction) SyncedFlushRequest::new); this.syncedFlushService = syncedFlushService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 621e2b870e90d..a7d4fd8cf4c33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; @@ -49,10 +48,9 @@ public class TransportForceMergeAction private final IndicesService indicesService; @Inject - public TransportForceMergeAction(Settings settings, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, + public TransportForceMergeAction(ClusterService clusterService, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ForceMergeAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, + super(ForceMergeAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, ForceMergeRequest::new, ThreadPool.Names.FORCE_MERGE); this.indicesService = indicesService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 584ad0bc55ac9..8f32fcb155112 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -54,11 +54,11 @@ public class TransportGetIndexAction extends TransportClusterInfoAction { @Inject - public TransportRefreshAction(Settings settings, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, + public TransportRefreshAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportShardRefreshAction shardRefreshAction) { - super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters, + super(RefreshAction.NAME, RefreshRequest::new, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index 6efebde18f577..4a65427f34e17 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -71,6 +71,10 @@ public final String toString() { return "[" + name + ": " + value + "]"; } + public T value() { + return value; + } + /** * Holder for index stats used to evaluate conditions */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 48c9d46066034..1475e4bd42088 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -196,7 +196,7 @@ public boolean isDryRun() { return dryRun; } - Map> getConditions() { + public Map> getConditions() { return conditions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 2d699591192f1..4fb5b6a19f117 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -68,8 +68,8 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement RolloverResponse() { } - RolloverResponse(String oldIndex, String newIndex, Map conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { + public RolloverResponse(String oldIndex, String newIndex, Map conditionResults, + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged, shardsAcknowledged); this.oldIndex = oldIndex; this.newIndex = newIndex; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index ab05d0690031b..ce1f1dc240426 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -45,7 +45,6 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.threadpool.ThreadPool; @@ -73,16 +72,16 @@ public class TransportRolloverAction extends TransportMasterNodeAction { @@ -47,10 +47,10 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction { @Inject - public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetIndexTemplatesAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, 
threadPool, actionFilters, + super(GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexTemplatesRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index ae3a799453d67..0202d67814888 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -45,11 +45,11 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java
 * <ul>
- * <li>{@link #hasNext()} determines whether the progression has more elements. Return true for infinite progressions</li>
+ * <li>{@link #hasNext()} determines whether the progression has more elements. Return true for infinite progressions
+ * </li>
 * <li>{@link #next()} determines the next element in the progression, i.e. the next wait time period</li>
 * </ul>
 */
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index f8f9d154b14d6..c083c89567799 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -40,6 +40,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; +import java.util.function.Supplier; /** * A bulk processor is a thread safe bulk processing class, allowing to easily set when to "flush" a new bulk request @@ -88,6 +89,10 @@ public static class Builder { private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); + private String globalIndex; + private String globalType; + private String globalRouting; + private String globalPipeline; private Builder(BiConsumer> consumer, Listener listener, Scheduler scheduler, Runnable onClose) { @@ -136,6 +141,26 @@ public Builder setFlushInterval(TimeValue flushInterval) { return this; } + public Builder setGlobalIndex(String globalIndex) { + this.globalIndex = globalIndex; + return this; + } + + public Builder setGlobalType(String globalType) { + this.globalType = globalType; + return this; + } + + public Builder setGlobalRouting(String globalRouting) { + this.globalRouting = globalRouting; + return this; + } + + public Builder setGlobalPipeline(String globalPipeline) { + this.globalPipeline = globalPipeline; + return this; + } + /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). @@ -156,8 +181,14 @@ public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { * Builds a new bulk processor.
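For orientation: the new Builder knobs above feed a request supplier, so every BulkRequest the processor creates internally starts from the same global defaults. A minimal usage sketch — the consumer lambda, listener, and client wiring are illustrative placeholders, not part of this change:

----
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

class BulkProcessorGlobalDefaultsSketch {
    static BulkProcessor create(Client client) {
        return BulkProcessor.builder(
                // forward each generated bulk request to the client
                (request, bulkListener) -> client.bulk(request, bulkListener),
                new BulkProcessor.Listener() {
                    @Override public void beforeBulk(long id, BulkRequest request) {}
                    @Override public void afterBulk(long id, BulkRequest request, BulkResponse response) {}
                    @Override public void afterBulk(long id, BulkRequest request, Throwable failure) {}
                })
            .setGlobalIndex("logs")       // used when a sub-request has no index of its own
            .setGlobalType("_doc")        // used when a sub-request has no type of its own
            .setGlobalRouting("tenant-1") // default routing
            .setGlobalPipeline("redact")  // default ingest pipeline
            .build();
    }
}
----

Explicit per-request values still win; the globals only fill in blanks (see the valueOrDefault helper in BulkRequest below).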
*/ public BulkProcessor build() { - return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, bulkSize, flushInterval, - scheduler, onClose); + return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, + bulkSize, flushInterval, scheduler, onClose, createBulkRequestWithGlobalDefaults()); + } + + private Supplier createBulkRequestWithGlobalDefaults() { + return () -> new BulkRequest(globalIndex, globalType) + .pipeline(globalPipeline) + .routing(globalRouting); } } @@ -184,6 +215,7 @@ public static Builder builder(BiConsumer bulkRequestSupplier; private final BulkRequestHandler bulkRequestHandler; private final Runnable onClose; @@ -191,10 +223,11 @@ public static Builder builder(BiConsumer> consumer, BackoffPolicy backoffPolicy, Listener listener, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval, - Scheduler scheduler, Runnable onClose) { + Scheduler scheduler, Runnable onClose, Supplier bulkRequestSupplier) { this.bulkActions = bulkActions; this.bulkSize = bulkSize.getBytes(); - this.bulkRequest = new BulkRequest(); + this.bulkRequest = bulkRequestSupplier.get(); + this.bulkRequestSupplier = bulkRequestSupplier; this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, scheduler, concurrentRequests); // Start period flushing task after everything is setup this.cancellableFlushTask = startFlushTask(flushInterval, scheduler); @@ -217,12 +250,13 @@ public void close() { * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed. *

    * If concurrent requests are not enabled, returns {@code true} immediately. - * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then returns {@code true}, + * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then returns {@code true} * If the specified waiting time elapses before all bulk requests complete, {@code false} is returned. * * @param timeout The maximum time to wait for the bulk requests to complete * @param unit The time unit of the {@code timeout} argument - * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the bulk requests completed + * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the bulk requests + * completed * @throws InterruptedException If the current thread is interrupted */ public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { @@ -298,7 +332,8 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu * Adds the data from the bytes to be processed by the bulk processor */ public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultPipeline, @Nullable Object payload, XContentType xContentType) throws Exception { + @Nullable String defaultPipeline, @Nullable Object payload, + XContentType xContentType) throws Exception { bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true, xContentType); executeIfNeeded(); return this; @@ -333,7 +368,7 @@ private void execute() { final BulkRequest bulkRequest = this.bulkRequest; final long executionId = executionIdGen.incrementAndGet(); - this.bulkRequest = new BulkRequest(); + this.bulkRequest = bulkRequestSupplier.get(); this.bulkRequestHandler.execute(bulkRequest, executionId); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 6698aa4b62ab5..d2929a2dbc564 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -90,12 +90,21 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; + private String globalPipeline; + private String globalRouting; + private String globalIndex; + private String globalType; private long sizeInBytes = 0; public BulkRequest() { } + public BulkRequest(@Nullable String globalIndex, @Nullable String globalType) { + this.globalIndex = globalIndex; + this.globalType = globalType; + } + /** * Adds a list of requests to be executed. Either index or delete requests. 
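A small sketch of the precedence these globals are meant to have — explicit sub-request values win over the values supplied to this constructor; all names here are made up:

----
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;

class BulkRequestGlobalsSketch {
    static BulkRequest sketch() {
        BulkRequest bulk = new BulkRequest("logs", "_doc") // global index and type
            .pipeline("redact")    // fallback pipeline when parsing bulk payloads
            .routing("tenant-1");  // fallback routing when parsing bulk payloads
        // No index/type of its own: applyGlobalMandatoryParameters fills in "logs"/"_doc".
        bulk.add(new IndexRequest().id("1").source("field", "value"));
        // Explicit index: valueOrDefault keeps "metrics".
        bulk.add(new IndexRequest("metrics", "_doc", "2").source("field", "value"));
        return bulk;
    }
}
----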
*/ @@ -154,6 +163,8 @@ public BulkRequest add(IndexRequest request, @Nullable Object payload) { BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); // lack of source is validated in validate() method @@ -175,6 +186,8 @@ public BulkRequest add(UpdateRequest request, @Nullable Object payload) { BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); if (request.doc() != null) { @@ -199,6 +212,8 @@ public BulkRequest add(DeleteRequest request) { public BulkRequest add(DeleteRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); sizeInBytes += REQUEST_OVERHEAD; @@ -327,13 +342,13 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null String index = defaultIndex; String type = defaultType; String id = null; - String routing = defaultRouting; + String routing = valueOrDefault(defaultRouting, globalRouting); FetchSourceContext fetchSourceContext = defaultFetchSourceContext; String opType = null; long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; int retryOnConflict = 0; - String pipeline = defaultPipeline; + String pipeline = valueOrDefault(defaultPipeline, globalPipeline); // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) // or START_OBJECT which will have another set of parameters @@ -369,20 +384,23 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { - throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); + throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); - } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { + } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, + parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); + throw new IllegalArgumentException("Malformed action/metadata line [" + line + + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } } } else if (token != XContentParser.Token.END_OBJECT) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.START_OBJECT - + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); + throw 
new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); } if ("delete".equals(action)) { @@ -500,6 +518,15 @@ public final BulkRequest timeout(TimeValue timeout) { return this; } + public final BulkRequest pipeline(String globalPipeline) { + this.globalPipeline = globalPipeline; + return this; + } + + public final BulkRequest routing(String globalRouting){ + this.globalRouting = globalRouting; + return this; + } /** * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @@ -511,6 +538,14 @@ public TimeValue timeout() { return timeout; } + public String pipeline() { + return globalPipeline; + } + + public String routing() { + return globalRouting; + } + private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { @@ -576,4 +611,15 @@ public String getDescription() { return "requests[" + requests.size() + "], indices[" + Strings.collectionToDelimitedString(indices, ", ") + "]"; } + private void applyGlobalMandatoryParameters(DocWriteRequest request) { + request.index(valueOrDefault(request.index(), globalIndex)); + request.type(valueOrDefault(request.type(), globalType)); + } + + private static String valueOrDefault(String value, String globalDefault) { + if (Strings.isNullOrEmpty(value) && !Strings.isNullOrEmpty(globalDefault)) { + return globalDefault; + } + return value; + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index a577569476c85..fc91f4f907ee2 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -41,6 +41,10 @@ public class BulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { + public BulkRequestBuilder(ElasticsearchClient client, BulkAction action, @Nullable String globalIndex, @Nullable String globalType) { + super(client, action, new BulkRequest(globalIndex, globalType)); + } + public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { super(client, action, new BulkRequest()); } @@ -153,4 +157,14 @@ public final BulkRequestBuilder setTimeout(String timeout) { public int numberOfActions() { return request.numberOfActions(); } + + public BulkRequestBuilder pipeline(String globalPipeline) { + request.pipeline(globalPipeline); + return this; + } + + public BulkRequestBuilder routing(String globalRouting) { + request.routing(globalRouting); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 30bf2dc14773b..d474dcee3639b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -109,7 +109,8 @@ public String buildFailureMessage() { BulkItemResponse response = responses[i]; if (response.isFailed()) { sb.append("\n[").append(i) - .append("]: index [").append(response.getIndex()).append("], type [").append(response.getType()).append("], id [").append(response.getId()) + .append("]: index [").append(response.getIndex()).append("], type [") + .append(response.getType()).append("], id 
[").append(response.getId()) .append("], message [").append(response.getFailureMessage()).append("]"); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a3d7d50f3e22a..8e084c1ceacc1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -51,7 +51,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -99,21 +98,21 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, DocWriteRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, + String index, Exception e) { if (index.equals(request.index())) { - responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e))); + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), + request.id(), e))); return true; } return false; @@ -327,19 +328,22 @@ protected void doRun() throws Exception { indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); break; case UPDATE: - TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest); + TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), + (UpdateRequest) docWriteRequest); break; case DELETE: docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); // check if routing is required, if so, throw error if routing wasn't specified - if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), docWriteRequest.type())) { + if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), + docWriteRequest.type())) { throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id()); } break; default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); } } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), + docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); responses.set(i, bulkItemResponse); // make sure the request gets never processed again @@ -355,13 +359,15 @@ protected void doRun() throws Exception { continue; } String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + ShardId shardId = 
clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), + request.routing()).shardId(); List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); shardRequests.add(new BulkItemRequest(i, request)); } if (requestsByShard.isEmpty()) { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos))); return; } @@ -407,7 +413,8 @@ public void onFailure(Exception e) { } private void finishHim() { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos))); } }); } @@ -535,7 +542,8 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListen } else { long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); - ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); + ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, + listener); if (bulkRequest.requests().isEmpty()) { // at this stage, the transport bulk action can't deal with a bulk request with no requests, // so we stop and send an empty response back to the client. @@ -628,7 +636,8 @@ void markCurrentItemAsFailed(Exception e) { // 2) Add a bulk item failure for this request // 3) Continue with the next request in the bulk. 
failedSlots.set(currentSlot); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), + indexRequest.id(), e); itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure)); } @@ -641,7 +650,8 @@ static final class IngestBulkResponseListener implements ActionListener itemResponses; private final ActionListener actionListener; - IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List itemResponses, ActionListener actionListener) { + IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List itemResponses, + ActionListener actionListener) { this.ingestTookInMillis = ingestTookInMillis; this.itemResponses = itemResponses; this.actionListener = actionListener; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e6eab5e89146e..c65f516c39594 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -99,7 +99,7 @@ public TransportShardBulkAction(Settings settings, TransportService transportSer } @Override - protected TransportRequestOptions transportOptions() { + protected TransportRequestOptions transportOptions(Settings settings) { return BulkAction.INSTANCE.transportOptions(settings); } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 879e8e665cd44..165aa7afd9e2e 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -46,7 +46,8 @@ * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { +public class DeleteRequest extends ReplicatedWriteRequest + implements DocWriteRequest, CompositeIndicesRequest { private String type; private String id; @@ -89,7 +90,8 @@ public ActionRequestValidationException validate() { validationException = addValidationError("id is missing", validationException); } if (!versionType.validateVersionForWrites(version)) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); + validationException = addValidationError("illegal version value [" + version + "] for version type [" + + versionType.name() + "]", validationException); } if (versionType == VersionType.FORCE) { validationException = addValidationError("version type [force] may no longer be used", validationException); @@ -108,6 +110,7 @@ public String type() { /** * Sets the type of the document to delete. 
*/ + @Override public DeleteRequest type(String type) { this.type = type; return this; diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index c1695e13864cb..85fe196933bff 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetResult; @@ -59,10 +58,10 @@ public class TransportExplainAction extends TransportSingleShardAction listener) throws IOException { + protected void asyncShardOperation(ExplainRequest request, ShardId shardId, + ActionListener listener) throws IOException { IndexService indexService = searchService.getIndicesService().indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); indexShard.awaitShardSearchActive(b -> { @@ -129,7 +129,8 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId // Advantage is that we're not opening a second searcher to retrieve the _source. Also // because we are working in the same searcher in engineGetResult we can be sure that a // doc isn't deleted between the initial get and this call. - GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext()); + GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), + request.fetchSourceContext()); return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult); } else { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation); @@ -149,7 +150,8 @@ protected ExplainResponse newResponse() { @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { return clusterService.operationRouting().getShards( - clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference() + clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), + request.request().preference() ); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 64e411d0fe2f9..25b075d8d9076 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -51,11 +50,11 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction items, 
@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { + private static void parseDocuments(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, + @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { String currentFieldName = null; Token token; while ((token = parser.nextToken()) != Token.END_ARRAY) { @@ -499,13 +502,16 @@ private static void parseDocuments(XContentParser parser, List items, @Nul } } - public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting) throws IOException { + public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, + @Nullable String defaultRouting) throws IOException { Token token; while ((token = parser.nextToken()) != Token.END_ARRAY) { if (!token.isValue()) { throw new IllegalArgumentException("ids array element should only contain ids"); } - items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting)); + items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource) + .routing(defaultRouting)); } } diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 69753bdd9795e..cc3415f968d62 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.IndexShard; @@ -47,10 +46,10 @@ public class TransportGetAction extends TransportSingleShardAction { - + @Inject - public GetPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public GetPipelineTransportAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, + super(GetPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, GetPipelineRequest::new, indexNameExpressionResolver); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index e7686773a2378..97f13bf71d14c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -33,9 +33,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestInfo; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -48,11 +47,11 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction) SimulatePipelineRequest::new); this.ingestService = ingestService; this.executionService = new SimulateExecutionService(threadPool); diff --git a/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java b/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java index 62fb866ee1161..f6290f58a0be4 100644 --- a/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java +++ b/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java @@ -34,21 +34,22 @@ public class TransportMainAction extends HandledTransportAction { + private final String nodeName; private final ClusterService clusterService; @Inject public TransportMainAction(Settings settings, TransportService transportService, ActionFilters actionFilters, ClusterService clusterService) { - super(settings, MainAction.NAME, transportService, actionFilters, MainRequest::new); + super(MainAction.NAME, transportService, actionFilters, MainRequest::new); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); this.clusterService = clusterService; } @Override protected void doExecute(Task task, MainRequest request, ActionListener listener) { ClusterState clusterState = clusterService.state(); - assert Node.NODE_NAME_SETTING.exists(settings); listener.onResponse( - new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), + new MainResponse(nodeName, Version.CURRENT, clusterState.getClusterName(), clusterState.metaData().clusterUUID(), Build.CURRENT)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 056c4c29c7a3d..0cff8aadb5268 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -58,7 +58,7 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); /** * Add a search request to execute. 
Note, the order is important, the search response will be returned in the @@ -287,7 +287,7 @@ public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, } return output.toByteArray(); } - + public static void writeSearchRequestParams(SearchRequest request, XContentBuilder xContentBuilder) throws IOException { xContentBuilder.startObject(); if (request.indices() != null) { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 64d512f4be05f..ce43f47a4979b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -28,6 +28,7 @@ */ public class MultiSearchRequestBuilder extends ActionRequestBuilder { + public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction action) { super(client, action, new MultiSearchRequest()); } @@ -40,7 +41,8 @@ public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction a * will not be used (if set). */ public MultiSearchRequestBuilder add(SearchRequest request) { - if (request.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { + if (request.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() + && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { request.indicesOptions(request().indicesOptions()); } @@ -53,7 +55,8 @@ public MultiSearchRequestBuilder add(SearchRequest request) { * same order as the search requests. */ public MultiSearchRequestBuilder add(SearchRequestBuilder request) { - if (request.request().indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { + if (request.request().indicesOptions() == SearchRequest.DEFAULT_INDICES_OPTIONS + && request().indicesOptions() != SearchRequest.DEFAULT_INDICES_OPTIONS) { request.request().indicesOptions(request().indicesOptions()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 7d08c9f864e33..2c24d2852217e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -35,9 +35,7 @@ import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.elasticsearch.common.collect.HppcMaps; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -71,7 +69,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -public final class SearchPhaseController extends AbstractComponent { +public final class SearchPhaseController { private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; @@ -79,11 +77,9 @@ public final class SearchPhaseController extends AbstractComponent { /** * Constructor. 
- * @param settings Node settings * @param reduceContextFunction A function that builds a context for the reduce of an {@link InternalAggregation} */ - public SearchPhaseController(Settings settings, Function reduceContextFunction) { - super(settings); + public SearchPhaseController(Function reduceContextFunction) { this.reduceContextFunction = reduceContextFunction; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index dd7f687294383..f4b501b769796 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -88,7 +88,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest private String[] types = Strings.EMPTY_ARRAY; - public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed(); + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 302ed4ccbfec9..54d7aee8f0d62 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -26,11 +26,9 @@ import org.elasticsearch.action.support.HandledTransportAction.ChannelActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; @@ -66,7 +64,7 @@ * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. 
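SearchTransportService, whose declaration follows, is typical of the classes in this PR that stop extending AbstractComponent: once the constructor no longer threads Settings through super(), the only inherited facility left to replace is the logger, which a plain class can declare for itself. A minimal sketch of the resulting shape, assuming log4j2 (the logging API these files already use); the class name is a hypothetical stand-in, not anything in this patch:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class PlainComponentSketch {
    // Each former AbstractComponent subclass now owns its logger directly
    // instead of inheriting one alongside a mostly unused Settings field.
    private static final Logger logger = LogManager.getLogger(PlainComponentSketch.class);

    public void doWork() {
        logger.trace("doing work");
    }
}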
*/ -public class SearchTransportService extends AbstractComponent { +public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; @@ -84,9 +82,8 @@ public class SearchTransportService extends AbstractComponent { private final BiFunction responseWrapper; private final Map clientConnections = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - public SearchTransportService(Settings settings, TransportService transportService, + public SearchTransportService(TransportService transportService, BiFunction responseWrapper) { - super(settings); this.transportService = transportService; this.responseWrapper = responseWrapper; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index f1c9fd5c545fb..f61d268e551b4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -34,11 +33,9 @@ public class TransportClearScrollAction extends HandledTransportAction) SearchRequest::new); + super(SearchAction.NAME, transportService, actionFilters, (Writeable.Reader) SearchRequest::new); this.threadPool = threadPool; this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 70a50d44fb0e6..d39c7cc25b4bd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -40,10 +39,9 @@ public class TransportSearchScrollAction extends HandledTransportAction) SearchScrollRequest::new); this.clusterService = clusterService; this.searchTransportService = searchTransportService; diff --git a/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java b/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java index 3e12d0cc84223..c23fe476dcc16 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java +++ b/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java @@ -22,8 +22,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; /** @@ -47,12 +45,7 @@ void 
apply(Task * filter chain. This base class should serve any action filter implementations that doesn't require * to apply async filtering logic. */ - abstract class Simple extends AbstractComponent implements ActionFilter { - - protected Simple(Settings settings) { - super(settings); - } - + abstract class Simple implements ActionFilter { @Override public final void apply(Task task, String action, Request request, ActionListener listener, ActionFilterChain chain) { diff --git a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java index 30d6461ef614b..b87ff9f7ec3bd 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java +++ b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -42,8 +41,7 @@ public class ActiveShardsObserver extends AbstractComponent { private final ClusterService clusterService; private final ThreadPool threadPool; - public ActiveShardsObserver(final Settings settings, final ClusterService clusterService, final ThreadPool threadPool) { - super(settings); + public ActiveShardsObserver(final ClusterService clusterService, final ThreadPool threadPool) { this.clusterService = clusterService; this.threadPool = threadPool; } diff --git a/server/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java b/server/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java index c02de8410cf5c..ab37f81b076a1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java @@ -25,7 +25,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; -public abstract class DelegatingActionListener implements ActionListener { +public abstract class DelegatingActionListener + implements ActionListener { ActionListener delegatedActionListener; diff --git a/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 56d5bf206f370..583c34e09641a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.support; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -28,7 +27,7 @@ /** * Helper for dealing with destructive operations and wildcard usage. */ -public final class DestructiveOperations extends AbstractComponent { +public final class DestructiveOperations { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. 
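The DestructiveOperations constructor hunk that follows is the clearest instance of the pattern this PR applies throughout: rather than stashing the whole Settings object in a base class, a component reads the values it needs once in its constructor and, for dynamic settings, hands ClusterSettings a consumer so the cached field tracks later updates. A reduced sketch built from the pieces visible in this hunk (Setting.boolSetting, addSettingsUpdateConsumer, and the real setting name; the class body is trimmed):

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public final class DestructiveOperationsSketch {
    public static final Setting<Boolean> REQUIRES_NAME_SETTING =
        Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);

    private volatile boolean destructiveRequiresName;

    public DestructiveOperationsSketch(Settings settings, ClusterSettings clusterSettings) {
        this.destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);    // seed once at construction
        clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING,
            v -> this.destructiveRequiresName = v);                            // dynamic updates land here
    }

    public boolean requiresName() {
        return destructiveRequiresName;
    }
}

TransportMainAction earlier in this diff does the static half of the same idea: it captures node.name into a final field at construction, which is what makes the request-time assert on the inherited settings unnecessary.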
@@ -38,7 +37,6 @@ public final class DestructiveOperations extends AbstractComponent { private volatile boolean destructiveRequiresName; public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) { - super(settings); destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName); } diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 9de040a98b405..f1f4962851c99 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -41,27 +40,27 @@ */ public abstract class HandledTransportAction extends TransportAction { - protected HandledTransportAction(Settings settings, String actionName, TransportService transportService, + protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters, Supplier request) { - this(settings, actionName, true, transportService, actionFilters, request); + this(actionName, true, transportService, actionFilters, request); } - protected HandledTransportAction(Settings settings, String actionName, TransportService transportService, + protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters, Writeable.Reader requestReader) { - this(settings, actionName, true, transportService, actionFilters, requestReader); + this(actionName, true, transportService, actionFilters, requestReader); } - protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, + protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker, TransportService transportService, ActionFilters actionFilters, Supplier request) { - super(settings, actionName, actionFilters, transportService.getTaskManager()); + super(actionName, actionFilters, transportService.getTaskManager()); transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker, new TransportHandler()); } - protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, + protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker, TransportService transportService, ActionFilters actionFilters, Writeable.Reader requestReader) { - super(settings, actionName, actionFilters, transportService.getTaskManager()); + super(actionName, actionFilters, transportService.getTaskManager()); transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, false, canTripCircuitBreaker, requestReader, new TransportHandler()); } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index f2cf0b5444d52..9753a6f712b0d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -81,7 +81,8 @@ public enum Option { IGNORE_ALIASES, ALLOW_NO_INDICES, FORBID_ALIASES_TO_MULTIPLE_INDICES, - FORBID_CLOSED_INDICES; + FORBID_CLOSED_INDICES, + IGNORE_THROTTLED; public static final EnumSet

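The IGNORE_THROTTLED addition above is the one behavioral change in this stretch of the diff: it joins the Option enum, and search-style requests switch their default from strictExpandOpenAndForbidClosed() to strictExpandOpenAndForbidClosedIgnoreThrottled() (see the SearchRequest and MultiSearchRequest hunks earlier). A simplified model of how the EnumSet-backed presets compose, trimmed to three options for brevity:

import java.util.EnumSet;

final class OptionsSketch {
    enum Option { ALLOW_NO_INDICES, FORBID_CLOSED_INDICES, IGNORE_THROTTLED }

    // Each named preset is just an EnumSet; a new flag means a new enum member plus
    // a new preset, so existing presets keep their exact previous meaning.
    static final EnumSet<Option> STRICT_EXPAND_OPEN_FORBID_CLOSED =
        EnumSet.of(Option.ALLOW_NO_INDICES, Option.FORBID_CLOSED_INDICES);

    static final EnumSet<Option> STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED =
        EnumSet.of(Option.ALLOW_NO_INDICES, Option.FORBID_CLOSED_INDICES, Option.IGNORE_THROTTLED);

    public static void main(String[] args) {
        // A request that kept the default still points at the shared instance, which is
        // how MultiSearchRequestBuilder detects "caller never set options" by reference.
        EnumSet<Option> requestOptions = STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED;
        System.out.println(requestOptions == STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED); // true
        System.out.println(STRICT_EXPAND_OPEN_FORBID_CLOSED.contains(Option.IGNORE_THROTTLED));  // false
    }
}

This is also why the second MultiSearchRequestBuilder.add overload now compares against SearchRequest.DEFAULT_INDICES_OPTIONS rather than re-deriving the preset: the reference check has to compare against whichever instance actually is the current default.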
    * The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is * started in client mode (only connects, no bind). @@ -223,7 +224,8 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings transportService.start(); transportService.acceptIncomingRequests(); - ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy, namedWriteableRegistry); + ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy, + namedWriteableRegistry); resourcesToClose.clear(); return transportClient; } finally { diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 0cfc1f5004ce8..b36fe45599590 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -124,21 +124,20 @@ final class TransportClientNodesService extends AbstractComponent implements Clo TransportClientNodesService(Settings settings, TransportService transportService, ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) { - super(settings); this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.threadPool = threadPool; this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion(); - this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings); - this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis(); - this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings); + this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(settings); + this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(settings).millis(); + this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(settings); if (logger.isDebugEnabled()) { logger.debug("node_sampler_interval[{}]", nodesSamplerInterval); } - if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(this.settings)) { + if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) { this.nodesSampler = new SniffNodesSampler(); } else { this.nodesSampler = new SimpleNodeSampler(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 7996059a09908..d3739fd6cdcba 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -108,11 +108,11 @@ public class ClusterModule extends AbstractModule { public ClusterModule(Settings settings, ClusterService clusterService, List clusterPlugins, ClusterInfoService clusterInfoService) { this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); - this.allocationDeciders = new AllocationDeciders(settings, deciderList); + this.allocationDeciders = new AllocationDeciders(deciderList); this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins); this.clusterService = clusterService; - this.indexNameExpressionResolver = new IndexNameExpressionResolver(settings); - 
this.allocationService = new AllocationService(settings, allocationDeciders, shardsAllocator, clusterInfoService); + this.indexNameExpressionResolver = new IndexNameExpressionResolver(); + this.allocationService = new AllocationService(allocationDeciders, shardsAllocator, clusterInfoService); } public static List getNamedWriteables() { @@ -205,16 +205,16 @@ public static Collection createAllocationDeciders(Settings se List clusterPlugins) { // collect deciders by class so that we can detect duplicates Map deciders = new LinkedHashMap<>(); - addAllocationDecider(deciders, new MaxRetryAllocationDecider(settings)); - addAllocationDecider(deciders, new ResizeAllocationDecider(settings)); - addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider(settings)); - addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider(settings)); + addAllocationDecider(deciders, new MaxRetryAllocationDecider()); + addAllocationDecider(deciders, new ResizeAllocationDecider()); + addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider()); + addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider()); addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings)); - addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider(settings)); - addAllocationDecider(deciders, new RestoreInProgressAllocationDecider(settings)); + addAllocationDecider(deciders, new NodeVersionAllocationDecider()); + addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider()); + addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings)); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index d191bc0175606..2da0ff9286f01 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -138,7 +138,8 @@ public void waitForNextChange(Listener listener, Predicate statePr timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS; if (timeoutTimeLeftMS <= 0L) { // things have timeout while we were busy -> notify - logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); + logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", + timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry timedOut = true; lastObservedState.set(new StoredState(clusterApplierService.state())); @@ -169,7 +170,8 @@ public void waitForNextChange(Listener listener, Predicate statePr if (!observingContext.compareAndSet(null, context)) { throw new ElasticsearchException("already waiting for a cluster state change"); } - clusterApplierService.addTimeoutListener(timeoutTimeLeftMS == null ? 
null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); + clusterApplierService.addTimeoutListener(timeoutTimeLeftMS == null ? + null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); } } @@ -190,7 +192,8 @@ public void clusterChanged(ClusterChangedEvent event) { lastObservedState.set(new StoredState(state)); context.listener.onNewClusterState(state); } else { - logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version()); + logger.trace("observer: predicate approved change but observing context has changed " + + "- ignoring (new cluster state version [{}])", state.version()); } } else { logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version()); @@ -213,7 +216,8 @@ public void postAdded() { lastObservedState.set(new StoredState(newState)); context.listener.onNewClusterState(newState); } else { - logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState); + logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", + newState); } } else { logger.trace("observer: postAdded - predicate rejected state ({})", newState); @@ -237,7 +241,8 @@ public void onTimeout(TimeValue timeout) { if (context != null) { clusterApplierService.removeTimeoutListener(this); long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS); - logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); + logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", + timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry lastObservedState.set(new StoredState(clusterApplierService.state())); timedOut = true; diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 9dc9c7f6f52d0..7a8afcdae38ac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -28,7 +28,8 @@ /** * A task that can update the cluster state. 
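The ClusterStateUpdateTask hunk that follows only re-wraps its declaration, but the shape being wrapped is worth spelling out: the class is simultaneously task config, batch executor, and listener for itself, and its final two-argument execute collapses batching down to the one-state transition that subclasses override. An illustrative reduction, with a plain State type parameter standing in for ClusterState and the ClusterTasksResult bookkeeping elided:

import java.util.List;

abstract class UpdateTaskSketch<State> {
    /** The single-state transition that concrete tasks implement. */
    public abstract State execute(State currentState) throws Exception;

    /**
     * Batch entry point: applies the transition once; the real method additionally
     * records every queued task as successful in its ClusterTasksResult.
     */
    public final State execute(State currentState, List<? extends UpdateTaskSketch<State>> tasks) throws Exception {
        return execute(currentState);
    }
}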
*/ -public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { +public abstract class ClusterStateUpdateTask + implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { private final Priority priority; @@ -41,7 +42,8 @@ public ClusterStateUpdateTask(Priority priority) { } @Override - public final ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { + public final ClusterTasksResult execute(ClusterState currentState, List tasks) + throws Exception { ClusterState result = execute(currentState); return ClusterTasksResult.builder().successes(tasks).build(result); } diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index f7bb42b8dc368..78eceeb12bcca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -68,7 +68,8 @@ public static KeySerializer getVIntKeySerializer() { /** * Calculates diff between two ImmutableOpenMaps of Diffable objects */ - public static > MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer) { + public static > MapDiff> diff(ImmutableOpenMap before, + ImmutableOpenMap after, KeySerializer keySerializer) { assert after != null && before != null; return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -76,7 +77,8 @@ public static > MapDiff> d /** * Calculates diff between two ImmutableOpenMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenMap before, + ImmutableOpenMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -84,7 +86,8 @@ public static MapDiff> diff(ImmutableOpenMap /** * Calculates diff between two ImmutableOpenIntMaps of Diffable objects */ - public static > MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer) { + public static > MapDiff> diff(ImmutableOpenIntMap before, + ImmutableOpenIntMap after, KeySerializer keySerializer) { assert after != null && before != null; return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -92,7 +95,8 @@ public static > MapDiff /** * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenIntMap before, + ImmutableOpenIntMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -100,7 +104,8 @@ public static MapDiff> diff(ImmutableOpen /** * Calculates diff between two Maps of Diffable objects. 
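The DiffableUtils rewrites below are pure re-wrapping, but the contract behind every diff/readDiff pair stays the same: a diff computed from (before, after) must rebuild exactly after when applied to before, whatever the concrete map type. A plain-Java sketch of that invariant for the non-diffable-value case, with the key/value wire serializers dropped:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class MapDiffSketch<K, V> {
    private final Map<K, V> upserts = new HashMap<>();
    private final Set<K> deletes = new HashSet<>();

    MapDiffSketch(Map<K, V> before, Map<K, V> after) {
        for (K key : before.keySet()) {
            if (after.containsKey(key) == false) {
                deletes.add(key);                               // present before, gone after
            }
        }
        for (Map.Entry<K, V> entry : after.entrySet()) {
            if (entry.getValue().equals(before.get(entry.getKey())) == false) {
                upserts.put(entry.getKey(), entry.getValue());  // added or changed
            }
        }
    }

    Map<K, V> apply(Map<K, V> before) {
        Map<K, V> result = new HashMap<>(before);
        result.keySet().removeAll(deletes);
        result.putAll(upserts);
        return result;                                          // equal to the original after
    }
}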
*/ - public static > MapDiff> diff(Map before, Map after, KeySerializer keySerializer) { + public static > MapDiff> diff(Map before, + Map after, KeySerializer keySerializer) { assert after != null && before != null; return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -108,7 +113,8 @@ public static > MapDiff> diff(Map /** * Calculates diff between two Maps of non-diffable objects */ - public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, + ValueSerializer valueSerializer) { assert after != null && before != null; return new JdkMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -116,42 +122,48 @@ public static MapDiff> diff(Map before, Map a /** * Loads an object that represents difference between two ImmutableOpenMaps */ - public static MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two ImmutableOpenMaps */ - public static MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static MapDiff> readImmutableOpenIntMapDiff(StreamInput in, + KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two Maps of Diffable objects */ - public static MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { return new JdkMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, + KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, + KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object */ - public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws 
IOException { + public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, + Reader reader, Reader> diffReader) throws IOException { return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } @@ -217,7 +229,8 @@ public Map apply(Map map) { */ public static class ImmutableOpenMapDiff extends MapDiff> { - protected ImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + protected ImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { super(in, keySerializer, valueSerializer); } @@ -293,7 +306,8 @@ public ImmutableOpenMap apply(ImmutableOpenMap map) { */ private static class ImmutableOpenIntMapDiff extends MapDiff> { - protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { super(in, keySerializer, valueSerializer); } diff --git a/server/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java index 8e705a2406301..c6632bd524a3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java @@ -19,19 +19,12 @@ package org.elasticsearch.cluster; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; - /** * ClusterInfoService that provides empty maps for disk usage and shard sizes */ -public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService { +public class EmptyClusterInfoService implements ClusterInfoService { public static final EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService(); - private EmptyClusterInfoService() { - super(Settings.EMPTY); - } - @Override public ClusterInfo getClusterInfo() { return ClusterInfo.EMPTY; diff --git a/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java index fb5f2334969d6..c225cb551306d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java +++ b/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java @@ -33,7 +33,8 @@ public IncompatibleClusterStateVersionException(String msg) { } public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { - super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); + super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + + receivedVersion + " and uuid " + receivedUuid); } public IncompatibleClusterStateVersionException(StreamInput in) throws IOException{ diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index dbfc4b3445e07..184cbcdf859d5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -88,7 +88,6 @@ public class InternalClusterInfoService extends AbstractComponent public InternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, NodeClient client, Consumer listener) { - super(settings); this.leastAvailableSpaceUsages = ImmutableOpenMap.of(); this.mostAvailableSpaceUsages = ImmutableOpenMap.of(); this.shardRoutingToDataPath = ImmutableOpenMap.of(); @@ -102,7 +101,8 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); - clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); + clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.addLocalNodeMasterListener(this); @@ -400,7 +400,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, String nodeId = nodeStats.getNode().getId(); String nodeName = nodeStats.getNode().getName(); if (logger.isTraceEnabled()) { - logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", + logger.trace("node: [{}], most available: total disk: {}," + + " available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable()); } @@ -410,7 +411,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, leastAvailablePath.getTotal().getBytes()); } } else { - newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes())); + newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), + leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes())); } if (mostAvailablePath.getTotal().getBytes() < 0) { if (logger.isTraceEnabled()) { @@ -418,7 +420,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, mostAvailablePath.getTotal().getBytes()); } } else { - newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes())); + newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), + mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes())); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java index 9f7fb00e19523..0b17cfecf6c34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java @@ -37,10 +37,11 @@ public interface LocalNodeMasterListener { /** * The name of the executor that the 
implementation of the callbacks of this listener should be executed on. The thread * that is responsible for managing instances of this listener is the same thread handling the cluster state events. If - the work done in the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME} - (indicating that the callbacks will run on the same thread as the cluster state events are fired with). On the other hand, - if the logic in the callbacks is heavier and takes longer to process (or perhaps involves blocking due to IO operations), - prefer to execute them on a separate more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC} + the work done in the callbacks above is inexpensive, this value may be + {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME} (indicating that the callbacks will run on the same thread + as the cluster state events are fired with). On the other hand, if the logic in the callbacks is heavier and takes + longer to process (or perhaps involves blocking due to IO operations), prefer to execute them on a separate more appropriate + executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC} * or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}). * * @return The name of the executor that will run the callbacks of this listener. diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 56311455a0ee8..770c6bca26b2f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -39,7 +38,7 @@ * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members).
*/ -public class MappingUpdatedAction extends AbstractComponent { +public class MappingUpdatedAction { public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), @@ -50,7 +49,6 @@ public class MappingUpdatedAction extends AbstractComponent { @Inject public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); } diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 2559c14848d76..6dd40fba5cb64 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -48,11 +47,11 @@ public class NodeMappingRefreshAction extends AbstractComponent { private final MetaDataMappingService metaDataMappingService; @Inject - public NodeMappingRefreshAction(Settings settings, TransportService transportService, MetaDataMappingService metaDataMappingService) { - super(settings); + public NodeMappingRefreshAction(TransportService transportService, MetaDataMappingService metaDataMappingService) { this.transportService = transportService; this.metaDataMappingService = metaDataMappingService; - transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); + transportService.registerRequestHandler(ACTION_NAME, + NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 0949e47cd0527..3780a8bb9f48e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -46,7 +46,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.Discovery; @@ -89,19 +88,23 @@ public class ShardStateAction extends AbstractComponent { private final ConcurrentMap remoteFailedShardsCache = ConcurrentCollections.newConcurrentMap(); @Inject - public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, + public ShardStateAction(ClusterService 
clusterService, TransportService transportService, AllocationService allocationService, RoutingService routingService, ThreadPool threadPool) { - super(settings); this.transportService = transportService; this.clusterService = clusterService; this.threadPool = threadPool; - transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ThreadPool.Names.SAME, StartedShardEntry::new, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); - transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ThreadPool.Names.SAME, FailedShardEntry::new, new ShardFailedTransportHandler(clusterService, new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); + transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ThreadPool.Names.SAME, StartedShardEntry::new, + new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); + transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ThreadPool.Names.SAME, FailedShardEntry::new, + new ShardFailedTransportHandler(clusterService, + new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); } - private void sendShardAction(final String actionName, final ClusterState currentState, final TransportRequest request, final Listener listener) { - ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); + private void sendShardAction(final String actionName, final ClusterState currentState, + final TransportRequest request, final Listener listener) { + ClusterStateObserver observer = + new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); DiscoveryNode masterNode = currentState.nodes().getMasterNode(); Predicate changePredicate = MasterNodeChangePredicate.build(currentState); if (masterNode == null) { @@ -121,8 +124,11 @@ public void handleException(TransportException exp) { if (isMasterChannelException(exp)) { waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); } else { - logger.warn(new ParameterizedMessage("unexpected failure while sending request [{}] to [{}] for shard entry [{}]", actionName, masterNode, request), exp); - listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp); + logger.warn(new ParameterizedMessage("unexpected failure while sending request [{}]" + + " to [{}] for shard entry [{}]", actionName, masterNode, request), exp); + listener.onFailure(exp instanceof RemoteTransportException ? + (Exception) (exp.getCause() instanceof Exception ? 
exp.getCause() : + new ElasticsearchException(exp.getCause())) : exp); } } }); @@ -152,7 +158,8 @@ private static boolean isMasterChannelException(TransportException exp) { * @param failure the underlying cause of the failure * @param listener callback upon completion of the request */ - public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, boolean markAsStale, final String message, @Nullable final Exception failure, Listener listener) { + public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, boolean markAsStale, final String message, + @Nullable final Exception failure, Listener listener) { assert primaryTerm > 0L : "primary term should be strictly positive"; final FailedShardEntry shardEntry = new FailedShardEntry(shardId, allocationId, primaryTerm, message, failure, markAsStale); final CompositeListener compositeListener = new CompositeListener(listener); @@ -188,21 +195,24 @@ int remoteShardFailedCacheSize() { /** * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. */ - public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) { + public void localShardFailed(final ShardRouting shardRouting, final String message, + @Nullable final Exception failure, Listener listener) { localShardFailed(shardRouting, message, failure, listener, clusterService.state()); } /** * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. */ - public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener, - final ClusterState currentState) { - FailedShardEntry shardEntry = new FailedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, true); + public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, + Listener listener, final ClusterState currentState) { + FailedShardEntry shardEntry = new FailedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), + 0L, message, failure, true); sendShardAction(SHARD_FAILED_ACTION_NAME, currentState, shardEntry, listener); } // visible for testing - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, Listener listener, Predicate changePredicate) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, + TransportRequest request, Listener listener, Predicate changePredicate) { observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { @@ -231,7 +241,8 @@ private static class ShardFailedTransportHandler implements TransportRequestHand private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final Logger logger; - ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) { + ShardFailedTransportHandler(ClusterService clusterService, + ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor; this.logger = logger; @@ -239,7 +250,8 @@ private static class 
ShardFailedTransportHandler implements TransportRequestHand @Override public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) throws Exception { - logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); + logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", + request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", request, @@ -248,12 +260,15 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel, new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e); + logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", + request.shardId, request), e); try { channel.sendResponse(e); } catch (Exception channelException) { channelException.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", + request.shardId, e, request), channelException); } } @@ -263,7 +278,9 @@ public void onNoLongerMaster(String source) { try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { - logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", + request.shardId, request), channelException); } } @@ -272,7 +289,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception channelException) { - logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send response while failing shard [{}]", + request.shardId, request), channelException); } } } @@ -302,7 +321,8 @@ public ClusterTasksResult execute(ClusterState currentState, L IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex()); if (indexMetaData == null) { // tasks that correspond to non-existent indices are marked as successful - logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex()); + logger.debug("{} ignoring shard failed task [{}] (unknown index {})", + task.shardId, task, task.shardId.getIndex()); batchResultBuilder.success(task); } else { // The primary term is 0 if the shard failed itself. It is > 0 if a write was done on a primary but was failed to be @@ -334,7 +354,8 @@ public ClusterTasksResult execute(ClusterState currentState, L // they were failed is because a write made it into the primary but not to this copy (which corresponds to // the check "primaryTerm > 0"). 
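// Illustrative extraction of the stale-marking rule the comments above describe
// (plain Java, not the ES API): a failed entry demotes a shard copy to "stale"
// only when a primary acknowledged a write the copy missed (primaryTerm > 0)
// and the copy is still in the in-sync set, exactly the condition checked next.
static boolean shouldMarkStale(long primaryTerm, String allocationId, java.util.Set<String> inSyncAllocationIds) {
    return primaryTerm > 0 && inSyncAllocationIds.contains(allocationId);
}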
if (task.primaryTerm > 0 && inSyncAllocationIds.contains(task.allocationId)) { - logger.debug("{} marking shard {} as stale (shard failed task: [{}])", task.shardId, task.allocationId, task); + logger.debug("{} marking shard {} as stale (shard failed task: [{}])", + task.shardId, task.allocationId, task); tasksToBeApplied.add(task); staleShardsToBeApplied.add(new StaleShard(task.shardId, task.allocationId)); } else { @@ -406,7 +427,8 @@ public static class FailedShardEntry extends TransportRequest { } } - public FailedShardEntry(ShardId shardId, String allocationId, long primaryTerm, String message, Exception failure, boolean markAsStale) { + public FailedShardEntry(ShardId shardId, String allocationId, long primaryTerm, + String message, Exception failure, boolean markAsStale) { this.shardId = shardId; this.allocationId = allocationId; this.primaryTerm = primaryTerm; @@ -481,7 +503,8 @@ private static class ShardStartedTransportHandler implements TransportRequestHan private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; private final Logger logger; - ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) { + ShardStartedTransportHandler(ClusterService clusterService, + ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor; this.logger = logger; @@ -500,7 +523,8 @@ public void messageReceived(StartedShardEntry request, TransportChannel channel, } } - public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + public static class ShardStartedClusterStateTaskExecutor + implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; private final Logger logger; @@ -528,13 +552,14 @@ public ClusterTasksResult execute(ClusterState currentState, if (matched.initializing() == false) { assert matched.active() : "expected active shard routing for task " + task + " but found " + matched; // same as above, this might have been a stale in-flight request, so we just ignore. 
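The duplicate handling that follows can be sketched with plain strings standing in for resolved shard routings: the allocation service expects a clean list without duplicates, so a repeated task is acknowledged but not re-applied. The names below are illustrative only.

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class StartedTaskDedupSketch {
    public static void main(String[] args) {
        // Two tasks resolving to the same routing entry, e.g. retried shard-started messages.
        List<String> resolvedRoutings = Arrays.asList("shard-0/alloc-a", "shard-1/alloc-b", "shard-0/alloc-a");

        Set<String> seenShardRoutings = new HashSet<>();
        List<String> shardRoutingsToBeApplied = new ArrayList<>();
        for (String routing : resolvedRoutings) {
            if (seenShardRoutings.add(routing)) {
                shardRoutingsToBeApplied.add(routing); // first sighting: schedule the start
            }
            // duplicates: the task still succeeds, but nothing is re-applied
        }
        System.out.println(shardRoutingsToBeApplied); // [shard-0/alloc-a, shard-1/alloc-b]
    }
}
---------------------------------------------------------------------------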
- logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", task.shardId, task, - matched); + logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", + task.shardId, task, matched); builder.success(task); } else { // remove duplicate actions as allocation service expects a clean list without duplicates if (seenShardRoutings.contains(matched)) { - logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", task.shardId, task, matched); + logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", + task.shardId, task, matched); tasksToBeApplied.add(task); } else { logger.debug("{} starting shard {} (shard started task: [{}])", task.shardId, matched, task); diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index fc09741f4d9c2..fafd397722025 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -50,8 +50,8 @@ public class ClusterBlock implements Streamable, ToXContentFragment { ClusterBlock() { } - public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, RestStatus status, - EnumSet levels) { + public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, + RestStatus status, EnumSet levels) { this.id = id; this.description = description; this.retryable = retryable; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java index 497dc49198bfc..dd11175395114 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java @@ -41,7 +41,8 @@ public interface AliasOrIndex { boolean isAlias(); /** - * @return All {@link IndexMetaData} of all concrete indices this alias is referring to or if this is a concrete index its {@link IndexMetaData} + * @return All {@link IndexMetaData} of all concrete indices this alias is referring to + * or if this is a concrete index its {@link IndexMetaData} */ List getIndices(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 766b35307cdd2..c9258806d51db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -22,8 +22,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -43,12 +41,7 @@ * Validator for an alias, to be used before adding an alias to the index metadata * and make sure the alias is valid */ -public class AliasValidator extends AbstractComponent { - - public AliasValidator(Settings settings) { - super(settings); - } - +public class 
AliasValidator { /** * Allows to validate an {@link org.elasticsearch.action.admin.indices.alias.Alias} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index 789b01c0cfa9d..c336549958a1e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -51,13 +51,15 @@ private static AutoExpandReplicas parse(String value) { } final int dash = value.indexOf('-'); if (-1 == dash) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash); + throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash); } final String sMin = value.substring(0, dash); try { min = Integer.parseInt(sMin); } catch (NumberFormatException e) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e); + throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash, e); } String sMax = value.substring(dash + 1); if (sMax.equals(ALL_NODES_VALUE)) { @@ -66,7 +68,8 @@ private static AutoExpandReplicas parse(String value) { try { max = Integer.parseInt(sMax); } catch (NumberFormatException e) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e); + throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash, e); } } return new AutoExpandReplicas(min, max, true); @@ -78,7 +81,8 @@ private static AutoExpandReplicas parse(String value) { private AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabled) { if (minReplicas > maxReplicas) { - throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] minReplicas must be =< maxReplicas but wasn't " + minReplicas + " > " + maxReplicas); + throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] minReplicas must be <= maxReplicas but wasn't " + minReplicas + " > " + maxReplicas); } this.minReplicas = minReplicas; this.maxReplicas = maxReplicas; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java index 2032c2f4ef3ba..9cbf46b6afcbc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java @@ -19,9 +19,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; import java.util.Collections; @@ -33,14 +31,10 @@ * Resolves cluster names from an expression. The expression must be the exact match of a cluster * name or must be a wildcard expression.
*/ -public final class ClusterNameExpressionResolver extends AbstractComponent { +public final class ClusterNameExpressionResolver { private final WildcardExpressionResolver wildcardResolver = new WildcardExpressionResolver(); - public ClusterNameExpressionResolver(Settings settings) { - super(settings); - } - /** * Resolves the provided cluster expression to matching cluster names. This method only * supports exact or wildcard matches. diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index b4a7733c62eed..b1b092e008679 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -80,11 +80,21 @@ public class IndexMetaData implements Diffable, ToXContentFragment { - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); - public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); - public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); - public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); - public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(12, "index read-only / allow delete (api)", false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE)); + public static final ClusterBlock INDEX_READ_ONLY_BLOCK = + new ClusterBlock(5, "index read-only (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock INDEX_READ_BLOCK = + new ClusterBlock(7, "index read (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); + public static final ClusterBlock INDEX_WRITE_BLOCK = + new ClusterBlock(8, "index write (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); + public static final ClusterBlock INDEX_METADATA_BLOCK = + new ClusterBlock(9, "index metadata (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); + public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = + new ClusterBlock(12, "index read-only / allow delete (api)", false, false, + true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE)); public enum State { OPEN((byte) 0), @@ -122,9 +132,9 @@ public static State fromString(String state) { static Setting buildNumberOfShardsSetting() { /* This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that * 99% of the users have less than 1024 shards per index. We also make it a hard check that requires restart of nodes - * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards per cluster. 
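Stepping back to the `index.auto_expand_replicas` parsing a few hunks above: the accepted format is a dash-separated pair whose upper bound may be the literal `all`, and the lower bound must not exceed the upper bound. A minimal sketch of that format, with `all` mapped to `Integer.MAX_VALUE` purely for illustration rather than the actual internal representation:

---------------------------------------------------------------------------
import java.util.Arrays;

public class AutoExpandFormatSketch {

    // Parses "min-max" as used by index.auto_expand_replicas, e.g. "0-5" or "0-all".
    static int[] parse(String value) {
        final int dash = value.indexOf('-');
        if (dash == -1) {
            throw new IllegalArgumentException("failed to parse [" + value + "]: expected min-max");
        }
        final int min = Integer.parseInt(value.substring(0, dash));
        final String sMax = value.substring(dash + 1);
        final int max = "all".equals(sMax) ? Integer.MAX_VALUE : Integer.parseInt(sMax);
        if (min > max) {
            throw new IllegalArgumentException("minReplicas must be <= maxReplicas but was " + min + " > " + max);
        }
        return new int[] {min, max};
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(parse("0-5")));   // [0, 5]
        System.out.println(Arrays.toString(parse("0-all"))); // [0, 2147483647]
    }
}
---------------------------------------------------------------------------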
- * this also prevents creating stuff like a new index with millions of shards by accident which essentially kills the entire cluster - * with OOM on the spot.*/ + * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards + * per cluster. this also prevents creating stuff like a new index with millions of shards by accident which essentially + * kills the entire cluster with OOM on the spot.*/ final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024")); if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); @@ -144,7 +154,8 @@ static Setting buildNumberOfShardsSetting() { Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope); public static final Setting INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING = - Setting.intSetting("index.number_of_routing_shards", INDEX_NUMBER_OF_SHARDS_SETTING, 1, new Setting.Validator() { + Setting.intSetting("index.number_of_routing_shards", INDEX_NUMBER_OF_SHARDS_SETTING, + 1, new Setting.Validator() { @Override public void validate(Integer numRoutingShards, Map, Integer> settings) { Integer numShards = settings.get(INDEX_NUMBER_OF_SHARDS_SETTING); @@ -294,12 +305,15 @@ public Iterator> settings() { private final ActiveShardCount waitForActiveShards; private final ImmutableOpenMap rolloverInfos; - private IndexMetaData(Index index, long version, long mappingVersion, long settingsVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, + private IndexMetaData(Index index, long version, long mappingVersion, long settingsVersion, long[] primaryTerms, State state, + int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customData, ImmutableOpenIntMap> inSyncAllocationIds, - DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, + DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, + DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, - int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) { + int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, + ImmutableOpenMap rolloverInfos) { this.index = index; this.version = version; @@ -1161,9 +1175,11 @@ public IndexMetaData build() { final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); - return new IndexMetaData(new Index(index, uuid), version, mappingVersion, settingsVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); + return new IndexMetaData(new Index(index, uuid), version, mappingVersion, settingsVersion, primaryTerms, state, + numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), tmpAliases.build(), customMetaData.build(), + filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + indexCreatedVersion, 
indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, + rolloverInfos.build()); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { @@ -1258,7 +1274,8 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { String mappingType = currentFieldName; - Map mappingSource = MapBuilder.newMapBuilder().put(mappingType, parser.mapOrdered()).map(); + Map mappingSource = + MapBuilder.newMapBuilder().put(mappingType, parser.mapOrdered()).map(); builder.putMapping(new MappingMetaData(mappingType, mappingSource)); } else { throw new IllegalArgumentException("Unexpected token: " + token); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 9be5acc0561d9..1e7658b1054be 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -26,9 +26,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; @@ -36,6 +34,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; @@ -54,18 +53,14 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -public class IndexNameExpressionResolver extends AbstractComponent { +import static java.util.Collections.unmodifiableList; - private final List expressionResolvers; - private final DateMathExpressionResolver dateMathExpressionResolver; +public class IndexNameExpressionResolver { - public IndexNameExpressionResolver(Settings settings) { - super(settings); - expressionResolvers = Arrays.asList( - dateMathExpressionResolver = new DateMathExpressionResolver(), - new WildcardExpressionResolver() - ); - } + private final DateMathExpressionResolver dateMathExpressionResolver = new DateMathExpressionResolver(); + private final List expressionResolvers = unmodifiableList(Arrays.asList( + dateMathExpressionResolver, + new WildcardExpressionResolver())); /** * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options @@ -152,6 +147,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { if (indexExpressions == null || indexExpressions.length == 0) { indexExpressions = new String[]{MetaData.ALL}; } + Set originalIndexExpression = Sets.newHashSet(indexExpressions); MetaData metaData = context.getState().metaData(); IndicesOptions options = context.getOptions(); final boolean failClosed = options.forbidClosedIndices() && options.ignoreUnavailable() == false; @@ -201,7 +197,9 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { " The write index may be explicitly disabled using is_write_index=false or the alias points to multiple" + " indices without one being designated as a write index"); } - concreteIndices.add(writeIndex.getIndex()); + if (addIndex(writeIndex, context, originalIndexExpression)) { + concreteIndices.add(writeIndex.getIndex()); + } } else { if (aliasOrIndex.getIndices().size() > 1 && !options.allowAliasesToMultipleIndices()) { String[] indexNames = new String[aliasOrIndex.getIndices().size()]; @@ -218,12 +216,14 @@ Index[] concreteIndices(Context context, String... indexExpressions) { if (failClosed) { throw new IndexClosedException(index.getIndex()); } else { - if (options.forbidClosedIndices() == false) { + if (options.forbidClosedIndices() == false && addIndex(index, context, originalIndexExpression)) { concreteIndices.add(index.getIndex()); } } } else if (index.getState() == IndexMetaData.State.OPEN) { - concreteIndices.add(index.getIndex()); + if (addIndex(index, context, originalIndexExpression)) { + concreteIndices.add(index.getIndex()); + } } else { throw new IllegalStateException("index state [" + index.getState() + "] not supported"); } @@ -239,6 +239,15 @@ Index[] concreteIndices(Context context, String... indexExpressions) { return concreteIndices.toArray(new Index[concreteIndices.size()]); } + private static boolean addIndex(IndexMetaData metaData, Context context, Set originalIndices) { + if (context.options.ignoreThrottled()) { + if (originalIndices.contains(metaData.getIndex().getName()) == false) { + return IndexSettings.INDEX_SEARCH_THROTTLED.get(metaData.getSettings()) == false; + } + } + return true; + } + private static IllegalArgumentException aliasesNotSupportedException(String expression) { return new IllegalArgumentException("The provided expression [" + expression + "] matches an " + "alias, specify the corresponding concrete indices instead."); @@ -260,7 +269,8 @@ public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { String indexExpression = request.indices() != null && request.indices().length > 0 ? request.indices()[0] : null; Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); if (indices.length != 1) { - throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); + throw new IllegalArgumentException("unable to return a single index as the index and options" + + " provided got resolved to multiple indices"); } return indices[0]; } @@ -797,7 +807,8 @@ private static Set expand(Context context, IndexMetaData.State excludeSt } private boolean isEmptyOrTrivialWildcard(List expressions) { - return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); + return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || + Regex.isMatchAllPattern(expressions.get(0)))); } private static List resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData) { @@ -871,7 +882,8 @@ String resolveExpression(String expression, final Context context) { inDateFormat = true; inPlaceHolderSb.append(c); } else { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. invalid character in placeholder at position [{}]", new String(text, from, length), i); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]." 
+ + " invalid character in placeholder at position [{}]", new String(text, from, length), i); } break; @@ -894,19 +906,22 @@ String resolveExpression(String expression, final Context context) { timeZone = ZoneOffset.UTC; } else { if (inPlaceHolderString.lastIndexOf(RIGHT_BOUND) != inPlaceHolderString.length() - 1) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing closing `}` for date math format", inPlaceHolderString); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing closing `}`" + + " for date math format", inPlaceHolderString); } if (dateTimeFormatLeftBoundIndex == inPlaceHolderString.length() - 2) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing date format", inPlaceHolderString); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing date format", + inPlaceHolderString); } mathExpression = inPlaceHolderString.substring(0, dateTimeFormatLeftBoundIndex); - String dateFormatterPatternAndTimeZoneId = inPlaceHolderString.substring(dateTimeFormatLeftBoundIndex + 1, inPlaceHolderString.length() - 1); - int formatPatternTimeZoneSeparatorIndex = dateFormatterPatternAndTimeZoneId.indexOf(TIME_ZONE_BOUND); + String patternAndTZid = + inPlaceHolderString.substring(dateTimeFormatLeftBoundIndex + 1, inPlaceHolderString.length() - 1); + int formatPatternTimeZoneSeparatorIndex = patternAndTZid.indexOf(TIME_ZONE_BOUND); if (formatPatternTimeZoneSeparatorIndex != -1) { - dateFormatterPattern = dateFormatterPatternAndTimeZoneId.substring(0, formatPatternTimeZoneSeparatorIndex); - timeZone = ZoneId.of(dateFormatterPatternAndTimeZoneId.substring(formatPatternTimeZoneSeparatorIndex + 1)); + dateFormatterPattern = patternAndTZid.substring(0, formatPatternTimeZoneSeparatorIndex); + timeZone = ZoneId.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); } else { - dateFormatterPattern = dateFormatterPatternAndTimeZoneId; + dateFormatterPattern = patternAndTZid; timeZone = ZoneOffset.UTC; } dateFormatter = DateFormatters.forPattern(dateFormatterPattern); @@ -937,8 +952,10 @@ String resolveExpression(String expression, final Context context) { case RIGHT_BOUND: if (!escapedChar) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. invalid character at position [{}]. " + - "`{` and `}` are reserved characters and should be escaped when used as part of the index name using `\\` (e.g. `\\{text\\}`)", new String(text, from, length), i); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]." + + " invalid character at position [{}]. `{` and `}` are reserved characters and" + + " should be escaped when used as part of the index name using `\\` (e.g. `\\{text\\}`)", + new String(text, from, length), i); } default: beforePlaceHolderSb.append(c); @@ -947,7 +964,8 @@ String resolveExpression(String expression, final Context context) { } if (inPlaceHolder) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. date math placeholder is open ended", new String(text, from, length)); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. 
date math placeholder is open ended", + new String(text, from, length)); } if (beforePlaceHolderSb.length() == 0) { throw new ElasticsearchParseException("nothing captured"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 19c3de722793a..acd28a55604d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -132,14 +132,17 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); - public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, - false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", + false, false, false, RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final Setting SETTING_READ_ONLY_ALLOW_DELETE_SETTING = Setting.boolSetting("cluster.blocks.read_only_allow_delete", false, Property.Dynamic, Property.NodeScope); - public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(13, "cluster read-only / allow delete (api)", - false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = + new ClusterBlock(13, "cluster read-only / allow delete (api)", + false, false, true, RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final MetaData EMPTY_META_DATA = builder().build(); @@ -575,11 +578,13 @@ public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) AliasMetaData aliasMd = alias.getFirstAliasMetaData(); if (aliasMd.indexRouting() != null) { if (aliasMd.indexRouting().indexOf(',') != -1) { - throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + aliasMd.getIndexRouting() + "] that resolved to several routing values, rejecting operation"); + throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + + aliasMd.getIndexRouting() + "] that resolved to several routing values, rejecting operation"); } if (routing != null) { if (!routing.equals(aliasMd.indexRouting())) { - throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); } } // Alias routing overrides the parent routing (if any). 
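The routing resolution above applies three rules: a multi-valued alias routing is rejected, an explicit routing that conflicts with the alias routing is rejected, and otherwise the alias routing overrides whatever the request carried. A minimal sketch with plain strings in place of `AliasMetaData`:

---------------------------------------------------------------------------
public class AliasRoutingRulesSketch {

    static String resolve(String requestRouting, String aliasIndexRouting, String aliasOrIndex) {
        if (aliasIndexRouting == null) {
            return requestRouting; // no alias routing: keep whatever the request carried
        }
        if (aliasIndexRouting.indexOf(',') != -1) {
            throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value ["
                + aliasIndexRouting + "] that resolved to several routing values, rejecting operation");
        }
        if (requestRouting != null && !requestRouting.equals(aliasIndexRouting)) {
            throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it ["
                + aliasIndexRouting + "], and was provided with routing value [" + requestRouting
                + "], rejecting operation");
        }
        return aliasIndexRouting; // alias routing overrides the parent routing
    }

    public static void main(String[] args) {
        System.out.println(resolve(null, "7", "my-alias"));    // 7: alias routing applies
        System.out.println(resolve("7", "7", "my-alias"));     // 7: explicit routing agrees
        System.out.println(resolve("abc", null, "my-index"));  // abc: no alias routing involved
    }
}
---------------------------------------------------------------------------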
@@ -594,7 +599,8 @@ private void rejectSingleIndexOperation(String aliasOrIndex, AliasOrIndex result for (IndexMetaData indexMetaData : result.getIndices()) { indexNames[i++] = indexMetaData.getIndex().getName(); } - throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexNames) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + + Arrays.toString(indexNames) + "], can't execute a single index op"); } public boolean hasIndex(String index) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index c327da8afee22..08e81eb93319b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -100,9 +101,12 @@ * Service responsible for submitting create index requests */ public class MetaDataCreateIndexService extends AbstractComponent { + private static final Logger logger = LogManager.getLogger(MetaDataCreateIndexService.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public static final int MAX_INDEX_NAME_BYTES = 255; + private final Settings settings; private final ClusterService clusterService; private final IndicesService indicesService; private final AllocationService allocationService; @@ -124,14 +128,14 @@ public MetaDataCreateIndexService( final ThreadPool threadPool, final NamedXContentRegistry xContentRegistry, final boolean forbidPrivateIndexSettings) { - super(settings); + this.settings = settings; this.clusterService = clusterService; this.indicesService = indicesService; this.allocationService = allocationService; this.aliasValidator = aliasValidator; this.env = env; this.indexScopedSettings = indexScopedSettings; - this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); this.xContentRegistry = xContentRegistry; this.forbidPrivateIndexSettings = forbidPrivateIndexSettings; } @@ -335,7 +339,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { continue; } - //Allow templatesAliases to be templated by replacing a token with the name of the index that we are applying it to + // Allow templatesAliases to be templated by replacing a token with the + // name of the index that we are applying it to if (aliasMetaData.alias().contains("{index}")) { String templatedAlias = aliasMetaData.alias().replace("{index}", request.index()); aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias); @@ -465,7 +470,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { // the context is only used for validation so it's fine to pass fake values for the shard id and the current // timestamp - final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L, null); + 
final QueryShardContext queryShardContext = + indexService.newQueryShardContext(0, null, () -> 0L, null); for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { @@ -481,7 +487,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now, update the mappings with the actual source Map mappingsMetaData = new HashMap<>(); - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { MappingMetaData mappingMd = new MappingMetaData(mapper); mappingsMetaData.put(mapper.type(), mappingMd); @@ -629,7 +636,8 @@ List getIndexSettingsValidationErrors(final Settings settings, final boo } else if (Strings.isEmpty(customPath) == false) { Path resolvedPath = PathUtils.get(new Path[]{env.sharedDataFile()}, customPath); if (resolvedPath == null) { - validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]"); + validationErrors.add("custom path [" + customPath + + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]"); } } if (forbidPrivateIndexSettings) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index a2212b5c3f01c..39563ca7037d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -48,14 +48,14 @@ * Deletes indices. 
*/ public class MetaDataDeleteIndexService extends AbstractComponent { - + private final Settings settings; private final ClusterService clusterService; private final AllocationService allocationService; @Inject public MetaDataDeleteIndexService(Settings settings, ClusterService clusterService, AllocationService allocationService) { - super(settings); + this.settings = settings; this.clusterService = clusterService; this.allocationService = allocationService; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 28dc7f2425d91..e6d0fc0832445 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -29,9 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -54,7 +52,7 @@ /** * Service responsible for submitting add and remove aliases requests */ -public class MetaDataIndexAliasesService extends AbstractComponent { +public class MetaDataIndexAliasesService { private final ClusterService clusterService; @@ -67,9 +65,8 @@ public class MetaDataIndexAliasesService extends AbstractComponent { private final NamedXContentRegistry xContentRegistry; @Inject - public MetaDataIndexAliasesService(Settings settings, ClusterService clusterService, IndicesService indicesService, + public MetaDataIndexAliasesService(ClusterService clusterService, IndicesService indicesService, AliasValidator aliasValidator, MetaDataDeleteIndexService deleteIndexService, NamedXContentRegistry xContentRegistry) { - super(settings); this.clusterService = clusterService; this.indicesService = indicesService; this.aliasValidator = aliasValidator; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index f7482edd10d0e..38d83b398856e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -40,7 +42,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; @@ -59,8 +60,11 @@ * Service responsible for submitting open/close index requests */ public class MetaDataIndexStateService extends AbstractComponent { + private static final Logger logger = LogManager.getLogger(MetaDataIndexStateService.class); + 
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE); + public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, + false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE); private final ClusterService clusterService; @@ -71,15 +75,14 @@ public class MetaDataIndexStateService extends AbstractComponent { private final ActiveShardsObserver activeShardsObserver; @Inject - public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService, + public MetaDataIndexStateService(ClusterService clusterService, AllocationService allocationService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, IndicesService indicesService, ThreadPool threadPool) { - super(settings); this.indicesService = indicesService; this.clusterService = clusterService; this.allocationService = allocationService; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; - this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); } public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ActionListener listener) { @@ -88,7 +91,8 @@ public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + clusterService.submitStateUpdateTask("close-indices " + indicesAsString, + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); @@ -138,7 +142,8 @@ public ClusterState execute(ClusterState currentState) { }); } - public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { + public void openIndex(final OpenIndexClusterStateUpdateRequest request, + final ActionListener listener) { onlyOpenIndex(request, ActionListener.wrap(response -> { if (response.isAcknowledged()) { String[] indexNames = Arrays.stream(request.indices()).map(Index::getName).toArray(String[]::new); @@ -156,13 +161,15 @@ public void openIndex(final OpenIndexClusterStateUpdateRequest request, final Ac }, listener::onFailure)); } - private void onlyOpenIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { + private void onlyOpenIndex(final OpenIndexClusterStateUpdateRequest request, + final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + clusterService.submitStateUpdateTask("open-indices " + indicesAsString, + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 40d2a69714069..e397c150c1c11 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -71,11 +71,10 @@ public class MetaDataIndexTemplateService extends AbstractComponent { private final NamedXContentRegistry xContentRegistry; @Inject - public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, + public MetaDataIndexTemplateService(ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, IndexScopedSettings indexScopedSettings, NamedXContentRegistry xContentRegistry) { - super(settings); this.clusterService = clusterService; this.aliasValidator = aliasValidator; this.indicesService = indicesService; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 3d87990797699..84fa0626317d5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -53,6 +53,7 @@ */ public class MetaDataIndexUpgradeService extends AbstractComponent { + private final Settings settings; private final NamedXContentRegistry xContentRegistry; private final MapperRegistry mapperRegistry; private final IndexScopedSettings indexScopedSettings; @@ -61,7 +62,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { public MetaDataIndexUpgradeService(Settings settings, NamedXContentRegistry xContentRegistry, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings, Collection> indexMetaDataUpgraders) { - super(settings); + this.settings = settings; this.xContentRegistry = xContentRegistry; this.mapperRegistry = mapperRegistry; this.indexScopedSettings = indexScopedSettings; @@ -186,7 +187,8 @@ public Set> entrySet() { return Collections.emptySet(); } }; - try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { + try (IndexAnalyzers fakeIndexAnalzyers = + new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); @@ -201,7 +203,8 @@ public Set> entrySet() { * Marks index as upgraded so we don't have to test it again */ private IndexMetaData markAsUpgraded(IndexMetaData indexMetaData) { - Settings settings = Settings.builder().put(indexMetaData.getSettings()).put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build(); + Settings settings = Settings.builder().put(indexMetaData.getSettings()) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build(); return IndexMetaData.builder(indexMetaData).settings(settings).build(); } @@ -209,8 +212,10 @@ IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { final Settings settings = indexMetaData.getSettings(); final 
Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings( settings, - e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), - (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); + e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", + indexMetaData.getIndex(), e.getKey(), e.getValue()), + (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", + indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); if (upgrade != settings) { return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 616fd13d1fadc..8a65ae874f091 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -68,8 +67,7 @@ public class MetaDataMappingService extends AbstractComponent { @Inject - public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService) { - super(settings); + public MetaDataMappingService(ClusterService clusterService, IndicesService indicesService) { this.clusterService = clusterService; this.indicesService = indicesService; } @@ -175,7 +173,8 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui try { List updatedTypes = new ArrayList<>(); MapperService mapperService = indexService.mapperService(); - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { final String type = mapper.type(); if (!mapper.mappingSource().equals(builder.mapping(type).source())) { @@ -188,7 +187,8 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui if (updatedTypes.isEmpty() == false) { logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes); dirty = true; - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { builder.putMapping(new MappingMetaData(mapper)); } @@ -215,8 +215,8 @@ public void refreshMapping(final String index, final String indexUUID) { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public ClusterTasksResult execute(ClusterState currentState, - List tasks) throws Exception { + public ClusterTasksResult + 
execute(ClusterState currentState, List tasks) throws Exception { Map indexMapperServices = new HashMap<>(); ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); try { @@ -325,7 +325,8 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index c89e6ddba9546..2284d507afa2c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -37,6 +39,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -60,6 +63,8 @@ * Service responsible for submitting update index settings requests */ public class MetaDataUpdateSettingsService extends AbstractComponent { + private static final Logger logger = LogManager.getLogger(MetaDataUpdateSettingsService.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); private final ClusterService clusterService; @@ -70,9 +75,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent { private final ThreadPool threadPool; @Inject - public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, + public MetaDataUpdateSettingsService(ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndicesService indicesService, ThreadPool threadPool) { - super(settings); this.clusterService = clusterService; this.threadPool = threadPool; this.allocationService = allocationService; @@ -80,8 +84,10 @@ public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterSe this.indicesService = indicesService; } - public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { - final Settings normalizedSettings = Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); + public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, + final ActionListener listener) { + final Settings normalizedSettings = + 
Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); Settings.Builder settingsForClosedIndices = Settings.builder(); Settings.Builder settingsForOpenIndices = Settings.builder(); final Set skippedSettings = new HashSet<>(); @@ -166,11 +172,16 @@ public ClusterState execute(ClusterState currentState) { } ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, + IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, + IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, + IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, + IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, + IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); if (!openIndices.isEmpty()) { for (Index index : openIndices) { @@ -182,7 +193,8 @@ public ClusterState execute(ClusterState currentState) { indexSettings.put(indexMetaData.getSettings()); } Settings finalSettings = indexSettings.build(); - indexScopedSettings.validate(finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); + indexScopedSettings.validate( + finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(finalSettings)); } } @@ -198,7 +210,8 @@ public ClusterState execute(ClusterState currentState) { indexSettings.put(indexMetaData.getSettings()); } Settings finalSettings = indexSettings.build(); - indexScopedSettings.validate(finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); + indexScopedSettings.validate( + finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(finalSettings)); } } @@ -213,7 +226,8 @@ public ClusterState execute(ClusterState currentState) { } } - ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder.build()).blocks(blocks).build(); + ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder) + .routingTable(routingTableBuilder.build()).blocks(blocks).build(); // now, reroute in case things change that require it (like number of replicas) updatedState = allocationService.reroute(updatedState, "settings update"); @@ -251,7 +265,8 @@ private 
int getTotalNewShards(Index index, ClusterState currentState, int update /** * Updates the cluster block only iff the setting exists in the given settings */ - private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, Setting setting, Settings openSettings) { + private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, + Setting setting, Settings openSettings) { if (setting.exists(openSettings)) { final boolean updateBlock = setting.get(openSettings); for (String index : actualIndices) { @@ -265,7 +280,8 @@ private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlock } - public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener listener) { + public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, + final ActionListener listener) { clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask(Priority.URGENT, request, wrapPreservingContext(listener, threadPool.getThreadContext())) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 7bb72be0e1e18..1c618c1ef88fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -157,7 +157,8 @@ public static RepositoriesMetaData fromXContent(XContentParser parser) throws IO } settings = Settings.fromXContent(parser); } else { - throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", name, currentFieldName); + throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", + name, currentFieldName); } } else { throw new ElasticsearchParseException("failed to parse repository [{}]", name); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index 1f042b4c576b7..9026d26a11fd5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; @@ -75,9 +74,8 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster private ImmutableOpenMap lastTemplateMetaData; - public TemplateUpgradeService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool, + public TemplateUpgradeService(Client client, ClusterService clusterService, ThreadPool threadPool, Collection>> indexTemplateMetaDataUpgraders) { - super(settings); this.client = client; this.clusterService = clusterService; this.threadPool = threadPool; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 
e9d805d34c8a1..526dde505efde 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -356,8 +356,8 @@ public Builder initializeAsFromCloseToOpen(IndexMetaData indexMetaData) { */ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) { final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED, - "restore_source[" + recoverySource.snapshot().getRepository() + "/" + - recoverySource.snapshot().getSnapshotId().getName() + "]"); + "restore_source[" + recoverySource.snapshot().getRepository() + "/" + + recoverySource.snapshot().getSnapshotId().getName() + "]"); return initializeAsRestore(indexMetaData, recoverySource, ignoreShards, true, unassignedInfo); } @@ -366,15 +366,16 @@ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecov */ public Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) { final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, - "restore_source[" + recoverySource.snapshot().getRepository() + "/" + - recoverySource.snapshot().getSnapshotId().getName() + "]"); + "restore_source[" + recoverySource.snapshot().getRepository() + "/" + + recoverySource.snapshot().getSnapshotId().getName() + "]"); return initializeAsRestore(indexMetaData, recoverySource, null, false, unassignedInfo); } /** * Initializes an index, to be restored from snapshot */ - private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) { + private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, + boolean asNew, UnassignedInfo unassignedInfo) { assert indexMetaData.getIndex().equals(index); if (!shards.isEmpty()) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); @@ -435,7 +436,8 @@ public Builder addReplica() { int shardNumber = cursor.value; ShardId shardId = new ShardId(index, shardNumber); // version 0, will get updated when reroute will happen - ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); + ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build() ); @@ -522,7 +524,8 @@ public String prettyPrint() { }); for (IndexShardRoutingTable indexShard : ordered) { - sb.append("----shard_id [").append(indexShard.shardId().getIndex().getName()).append("][").append(indexShard.shardId().id()).append("]\n"); + sb.append("----shard_id [").append(indexShard.shardId().getIndex().getName()) + .append("][").append(indexShard.shardId().id()).append("]\n"); for (ShardRouting shard : indexShard) { sb.append("--------").append(shard.shortSummary()).append("\n"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index e3e4da481ceed..c35b7d810ca0e 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -65,7 +65,6 @@ public class IndexShardRoutingTable implements Iterable { final List activeShards; final List assignedShards; final Set allAllocationIds; - static final List NO_SHARDS = Collections.emptyList(); final boolean allShardsStarted; private volatile Map activeShardsByAttributes = emptyMap(); @@ -220,15 +219,6 @@ public List assignedShards() { return this.assignedShards; } - /** - * Returns a {@link List} of assigned shards - * - * @return a {@link List} of shards - */ - public List getAssignedShards() { - return this.assignedShards; - } - public ShardIterator shardsRandomIt() { return new PlainShardIterator(shardId, shuffler.shuffle(shards)); } @@ -594,7 +584,8 @@ private AttributesRoutings getInitializingAttribute(AttributesKey key, Discovery ArrayList from = new ArrayList<>(allInitializingShards); List to = collectAttributeShards(key, nodes, from); shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); - initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); + initializingShardsByAttributes = + MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); } } return shardRoutings; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 87655c0641388..81f7f68593b60 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -54,7 +54,6 @@ public class OperationRouting extends AbstractComponent { private boolean useAdaptiveReplicaSelection; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); this.useAdaptiveReplicaSelection = USE_ADAPTIVE_REPLICA_SELECTION_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, @@ -74,13 +73,16 @@ public ShardIterator indexShards(ClusterState clusterState, String index, String return shards(clusterState, index, id, routing).shardsIt(); } - public ShardIterator getShards(ClusterState clusterState, String index, String id, @Nullable String routing, @Nullable String preference) { - return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference, null, null); + public ShardIterator getShards(ClusterState clusterState, String index, String id, @Nullable String routing, + @Nullable String preference) { + return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().getLocalNodeId(), + clusterState.nodes(), preference, null, null); } public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) { final IndexShardRoutingTable indexShard = clusterState.getRoutingTable().shardRoutingTable(index, shardId); - return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference, null, null); + return preferenceActiveShardIterator(indexShard, 
clusterState.nodes().getLocalNodeId(), clusterState.nodes(), + preference, null, null); } public GroupShardsIterator searchShards(ClusterState clusterState, @@ -111,7 +113,8 @@ public GroupShardsIterator searchShards(ClusterState clusterState private static final Map> EMPTY_ROUTING = Collections.emptyMap(); - private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { + private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, + @Nullable Map> routing) { routing = routing == null ? EMPTY_ROUTING : routing; // just use an empty map final Set set = new HashSet<>(); // we use set here and not list since we might get duplicates diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index aaf34d1e1806d..7e8fdb1edaca2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -582,7 +582,7 @@ assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId moveToUnassigned(failedShard, unassignedInfo); } else { movePrimaryToUnassignedAndDemoteToReplica(failedShard, unassignedInfo); - promoteReplicaToPrimary(activeReplica, indexMetaData, routingChangesObserver); + promoteReplicaToPrimary(activeReplica, routingChangesObserver); } } else { // initializing shard that is not relocation target, just move to unassigned @@ -611,7 +611,7 @@ assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId moveToUnassigned(failedShard, unassignedInfo); } else { movePrimaryToUnassignedAndDemoteToReplica(failedShard, unassignedInfo); - promoteReplicaToPrimary(activeReplica, indexMetaData, routingChangesObserver); + promoteReplicaToPrimary(activeReplica, routingChangesObserver); } } else { assert failedShard.primary() == false; @@ -627,8 +627,7 @@ assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == " was matched but wasn't removed"; } - private void promoteReplicaToPrimary(ShardRouting activeReplica, IndexMetaData indexMetaData, - RoutingChangesObserver routingChangesObserver) { + private void promoteReplicaToPrimary(ShardRouting activeReplica, RoutingChangesObserver routingChangesObserver) { // if the activeReplica was relocating before this call to failShard, its relocation was cancelled earlier when we // failed initializing replica shards (and moved replica relocation source back to started) assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica; @@ -832,11 +831,6 @@ public void sort(Comparator comparator) { */ public int size() { return unassigned.size(); } - /** - * Returns the size of the temporarily marked as ignored unassigned shards - */ - public int ignoredSize() { return ignored.size(); } - /** * Returns the number of non-ignored unassigned primaries */ @@ -957,12 +951,14 @@ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySour } /** - * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore(AllocationStatus, RoutingChangesObserver)} or + * Unsupported operation, just there for the interface. Use + * {@link #removeAndIgnore(AllocationStatus, RoutingChangesObserver)} or * {@link #initialize(String, String, long, RoutingChangesObserver)}. 
*/ @Override public void remove() { - throw new UnsupportedOperationException("remove is not supported in unassigned iterator, use removeAndIgnore or initialize"); + throw new UnsupportedOperationException("remove is not supported in unassigned iterator," + + " use removeAndIgnore or initialize"); } private void innerRemove() { @@ -1106,14 +1102,19 @@ public static boolean assertShardStats(RoutingNodes routingNodes) { assert unassignedPrimaryCount == routingNodes.unassignedShards.getNumPrimaries() : - "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().getNumPrimaries() + "]"; + "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + + routingNodes.unassigned().getNumPrimaries() + "]"; assert unassignedIgnoredPrimaryCount == routingNodes.unassignedShards.getNumIgnoredPrimaries() : - "Unassigned ignored primaries is [" + unassignedIgnoredPrimaryCount + "] but RoutingNodes returned unassigned ignored primaries [" + routingNodes.unassigned().getNumIgnoredPrimaries() + "]"; + "Unassigned ignored primaries is [" + unassignedIgnoredPrimaryCount + + "] but RoutingNodes returned unassigned ignored primaries [" + routingNodes.unassigned().getNumIgnoredPrimaries() + "]"; assert inactivePrimaryCount == routingNodes.inactivePrimaryCount : - "Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + routingNodes.inactivePrimaryCount + "]"; + "Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + + routingNodes.inactivePrimaryCount + "]"; assert inactiveShardCount == routingNodes.inactiveShardCount : - "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]"; - assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]"; + "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + + routingNodes.inactiveShardCount + "]"; + assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]"; return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 0bcefa9fc7248..770e5b2717023 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -90,30 +90,33 @@ protected void performReroute(String reason) { return; } logger.trace("rerouting {}", reason); - clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) { - @Override - public ClusterState execute(ClusterState currentState) { - rerouting.set(false); - return allocationService.reroute(currentState, reason); - } + clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", + new ClusterStateUpdateTask(Priority.HIGH) { + @Override + public ClusterState execute(ClusterState currentState) { + rerouting.set(false); + return allocationService.reroute(currentState, reason); + } - @Override - public void onNoLongerMaster(String source) { - rerouting.set(false); - // no 
biggie - } + @Override + public void onNoLongerMaster(String source) { + rerouting.set(false); + // no biggie + } - @Override - public void onFailure(String source, Exception e) { - rerouting.set(false); - ClusterState state = clusterService.state(); - if (logger.isTraceEnabled()) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); - } else { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); + @Override + public void onFailure(String source, Exception e) { + rerouting.set(false); + ClusterState state = clusterService.state(); + if (logger.isTraceEnabled()) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", + source, state), e); + } else { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", + source, state.version()), e); + } } - } - }); + }); } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index bab150fff12bd..0d5ee132ffa9b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -239,14 +239,16 @@ public GroupShardsIterator allAssignedShardsGrouped(String[] indi * @param includeRelocationTargets if true, an extra shard iterator will be added for relocating shards. The extra * iterator contains a single ShardRouting pointing at the relocating target */ - public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) { + public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, + boolean includeRelocationTargets) { return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ASSIGNED_PREDICATE); } private static Predicate ACTIVE_PREDICATE = ShardRouting::active; private static Predicate ASSIGNED_PREDICATE = ShardRouting::assignedToNode; - private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate predicate) { + private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, + boolean includeRelocationTargets, Predicate predicate) { // use list here since we need to maintain identity across shards ArrayList set = new ArrayList<>(); for (String index : indices) { @@ -260,10 +262,11 @@ private GroupShardsIterator allSatisfyingPredicateShardsGrouped(S if (predicate.test(shardRouting)) { set.add(shardRouting.shardsIt()); if (includeRelocationTargets && shardRouting.relocating()) { - set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.getTargetRelocatingShard()))); + set.add(new PlainShardIterator(shardRouting.shardId(), + Collections.singletonList(shardRouting.getTargetRelocatingShard()))); } } else if (includeEmpty) { // we need this for counting properly, just make it an empty one - set.add(new PlainShardIterator(shardRouting.shardId(), Collections.emptyList())); + set.add(new PlainShardIterator(shardRouting.shardId(), Collections.emptyList())); } } } @@ -279,7 +282,8 @@ public ShardsIterator 
allShardsIncludingRelocationTargets(String[] indices) { return allShardsSatisfyingPredicate(indices, shardRouting -> true, true); } - private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate, boolean includeRelocationTargets) { + private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate, + boolean includeRelocationTargets) { // use list here since we need to maintain identity across shards List shards = new ArrayList<>(); for (String index : indices) { @@ -324,7 +328,7 @@ public GroupShardsIterator activePrimaryShardsGrouped(String[] in if (primary.active()) { set.add(primary.shardsIt()); } else if (includeEmpty) { // we need this for counting properly, just make it an empty one - set.add(new PlainShardIterator(primary.shardId(), Collections.emptyList())); + set.add(new PlainShardIterator(primary.shardId(), Collections.emptyList())); } } } @@ -563,14 +567,6 @@ public Builder add(IndexRoutingTable.Builder indexRoutingTableBuilder) { return this; } - public Builder indicesRouting(Map indicesRouting) { - if (indicesRouting == null) { - throw new IllegalStateException("once build is called the builder cannot be reused"); - } - this.indicesRouting.putAll(indicesRouting); - return this; - } - public Builder remove(String index) { if (indicesRouting == null) { throw new IllegalStateException("once build is called the builder cannot be reused"); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 74341ca271a9c..bfc4ce0618833 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -77,12 +77,17 @@ public final class ShardRouting implements Writeable, ToXContentObject { this.expectedShardSize = expectedShardSize; this.targetRelocatingShard = initializeTargetRelocatingShard(); this.asList = Collections.singletonList(this); - assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; - assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; + assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || + state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; + assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : + expectedShardSize + " state: " + state; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; - assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state; - assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : "replica shards always recover from primary"; - assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node " + this; + assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : + "recovery source only available on unassigned or initializing shard but was " + state; + 
assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : + "replica shards always recover from primary"; + assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : + "unassigned shard must not be assigned to a node " + this; } @Nullable @@ -98,8 +103,10 @@ private ShardRouting initializeTargetRelocatingShard() { /** * Creates a new unassigned shard. */ - public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, UnassignedInfo unassignedInfo) { - return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); + public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, + UnassignedInfo unassignedInfo) { + return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, + recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); } public Index index() { @@ -446,12 +453,14 @@ public ShardRouting moveUnassignedFromPrimary() { **/ public boolean isSameAllocation(ShardRouting other) { boolean b = this.allocationId != null && other.allocationId != null && this.allocationId.getId().equals(other.allocationId.getId()); - assert b == false || this.currentNodeId.equals(other.currentNodeId) : "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]"; + assert b == false || this.currentNodeId.equals(other.currentNodeId) : + "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]"; return b; } /** - * Returns true if this shard is a relocation target for another shard (i.e., was created with {@link #initializeTargetRelocatingShard()} + * Returns true if this shard is a relocation target for another shard + * (i.e., was created with {@link #initializeTargetRelocatingShard()} */ public boolean isRelocationTarget() { return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null; @@ -465,21 +474,25 @@ public boolean isRelocationTargetOf(ShardRouting other) { assert b == false || other.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + this + "], other [" + other + "]"; - assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId()) : - "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId." + + " This [" + this + "], other [" + other + "]"; assert b == false || other.currentNodeId().equals(this.relocatingNodeId) : - "ShardRouting is a relocation target but source current node id isn't equal to target relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but source current node id isn't equal to target relocating node." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.currentNodeId().equals(other.relocatingNodeId) : - "ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but current node id isn't equal to source relocating node." 
+ + " This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation target but both indexRoutings are not of the same shard id. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but both indexRoutings are not of the same shard id." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.primary == other.primary : - "ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation target but primary flag is different." + + " This [" + this + "], target [" + other + "]"; return b; } @@ -494,16 +507,20 @@ public boolean isRelocationSourceOf(ShardRouting other) { assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId()) : - "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.currentNodeId().equals(other.relocatingNodeId) : - "ShardRouting is a relocation source but current node isn't equal to other's relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but current node isn't equal to other's relocating node." + + " This [" + this + "], other [" + other + "]"; assert b == false || other.currentNodeId().equals(this.relocatingNodeId) : - "ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but relocating node isn't equal to other's current node." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation source but both indexRoutings are not of the same shard. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation source but both indexRoutings are not of the same shard." + + " This [" + this + "], target [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation source but primary flag is different. 
This [" + this + "], target [" + other + "]"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 8c3225dc77fd6..0bc94a93cc59a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; import java.util.ArrayList; @@ -69,16 +68,15 @@ public class AllocationService extends AbstractComponent { private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - public AllocationService(Settings settings, AllocationDeciders allocationDeciders, + public AllocationService(AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { - this(settings, allocationDeciders, shardsAllocator, clusterInfoService); + this(allocationDeciders, shardsAllocator, clusterInfoService); setGatewayAllocator(gatewayAllocator); } - public AllocationService(Settings settings, AllocationDeciders allocationDeciders, + public AllocationService(AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { - super(settings); this.allocationDeciders = allocationDeciders; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 0ddf3ef1529b3..f2447a9c4e51b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -54,7 +54,6 @@ public class DiskThresholdMonitor extends AbstractComponent { public DiskThresholdMonitor(Settings settings, Supplier clusterStateSupplier, ClusterSettings clusterSettings, Client client) { - super(settings); this.clusterStateSupplier = clusterStateSupplier; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); this.client = client; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index fdba1c7009bc3..ccd64827b32f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -70,7 +70,6 @@ public class DiskThresholdSettings { private volatile boolean includeRelocations; private volatile boolean enabled; private volatile TimeValue rerouteInterval; - private volatile String floodStageRaw; private volatile Double freeDiskThresholdFloodStage; private volatile ByteSizeValue freeBytesThresholdFloodStage; @@ -80,13 +79,13 @@ public DiskThresholdSettings(Settings settings, ClusterSettings clusterSettings) final String floodStage = 
CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.get(settings); setHighWatermark(highWatermark); setLowWatermark(lowWatermark); - setFloodStageRaw(floodStage); + setFloodStage(floodStage); this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings); this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings); this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING, this::setFloodStageRaw); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING, this::setFloodStage); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); @@ -230,9 +229,8 @@ private void setHighWatermark(String highWatermark) { CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey()); } - private void setFloodStageRaw(String floodStageRaw) { + private void setFloodStage(String floodStageRaw) { // Watermark is expressed in terms of used data, but we need "free" data watermark - this.floodStageRaw = floodStageRaw; this.freeDiskThresholdFloodStage = 100.0 - thresholdPercentageFromWatermark(floodStageRaw); this.freeBytesThresholdFloodStage = thresholdBytesFromWatermark(floodStageRaw, CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey()); @@ -276,10 +274,6 @@ public ByteSizeValue getFreeBytesThresholdFloodStage() { return freeBytesThresholdFloodStage; } - public String getFloodStageRaw() { - return floodStageRaw; - } - public boolean includeRelocations() { return includeRelocations; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index ec0af211ecca5..3ffe1b7756a9b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.IntroSorter; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; @@ -97,7 +96,6 @@ public BalancedShardsAllocator(Settings settings) { @Inject public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { - super(settings); setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, 
this::setWeightFunction); @@ -171,9 +169,10 @@ public float getShardBalance() { * <li><code>shard balance</code> - balance property over shards per cluster</li> * </ul> * <p> - * Each of these properties are expressed as factor such that the properties factor defines the relative importance of the property for the - * weight function. For example if the weight function should calculate the weights only based on a global (shard) balance the index balance - * can be set to {@code 0.0} and will in turn have no effect on the distribution. + * Each of these properties are expressed as factor such that the properties factor defines the relative + * importance of the property for the weight function. For example if the weight function should calculate + * the weights only based on a global (shard) balance the index balance can be set to {@code 0.0} and will + * in turn have no effect on the distribution. * <p> * The weight per index is calculated based on the following formula: * <ul>
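As a rough illustration of the weight function this javadoc describes, here is a minimal standalone sketch (not part of this change): the normalization of the two balance factors mirrors how they express relative importance, and the accessor parameters are stand-ins for the real per-node/per-index shard counts held by the allocator.

-------------------------------------------------
// Minimal sketch of the documented balancing weight. Setting indexBalance
// to 0.0 removes the per-index term, matching the javadoc's example.
public class WeightFunctionSketch {
    private final float theta0; // normalized shard balance factor
    private final float theta1; // normalized index balance factor

    public WeightFunctionSketch(float indexBalance, float shardBalance) {
        float sum = indexBalance + shardBalance;
        if (sum <= 0.0f) {
            throw new IllegalArgumentException("balance factors must sum to a value > 0 but was: " + sum);
        }
        this.theta0 = shardBalance / sum;
        this.theta1 = indexBalance / sum;
    }

    // weight(node, index) = theta0 * (node.numShards()      - avgShardsPerNode)
    //                     + theta1 * (node.numShards(index) - avgShardsPerNode(index))
    public float weight(int nodeNumShards, float avgShardsPerNode,
                        int nodeNumShardsOfIndex, float avgShardsPerNodeOfIndex) {
        float weightShard = nodeNumShards - avgShardsPerNode;
        float weightIndex = nodeNumShardsOfIndex - avgShardsPerNodeOfIndex;
        return theta0 * weightShard + theta1 * weightIndex;
    }
}
-------------------------------------------------

A node whose shard counts sit above the averages gets a positive weight and is a candidate to shed shards; a node below the averages gets a negative weight and is a candidate to receive them.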
      @@ -447,28 +446,6 @@ private MoveDecision decideRebalance(final ShardRouting shard) { } } - public Map weighShard(ShardRouting shard) { - final ModelNode[] modelNodes = sorter.modelNodes; - final float[] weights = sorter.weights; - - buildWeightOrderedIndices(); - Map nodes = new HashMap<>(modelNodes.length); - float currentNodeWeight = 0.0f; - for (int i = 0; i < modelNodes.length; i++) { - if (modelNodes[i].getNodeId().equals(shard.currentNodeId())) { - // If a node was found with the shard, use that weight instead of 0.0 - currentNodeWeight = weights[i]; - break; - } - } - - for (int i = 0; i < modelNodes.length; i++) { - final float delta = currentNodeWeight - weights[i]; - nodes.put(modelNodes[i].getRoutingNode().node(), delta); - } - return nodes; - } - /** * Balances the nodes on the cluster model according to the weight * function. The configured threshold is the minimum delta between the @@ -531,7 +508,8 @@ private void balanceByWeights() { break advance_range; } if (logger.isTraceEnabled()) { - logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", + logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}]" + + " max_node [{}] weight: [{}] delta: [{}]", index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); } break; @@ -651,7 +629,8 @@ public void moveShards() { final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); sourceNode.removeShard(shardRouting); Tuple relocatingShards = routingNodes.relocateShard(shardRouting, targetNode.getNodeId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + allocation.clusterInfo().getShardSize(shardRouting, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); targetNode.addShard(relocatingShards.v2()); if (logger.isTraceEnabled()) { logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); @@ -794,7 +773,8 @@ private void allocateUnassigned() { /* * we use 2 arrays and move replicas to the second array once we allocated an identical * replica in the current iteration to make sure all indices get allocated in the same manner. - * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like: + * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with + * 2 replica and 1 shard would look like: * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. 
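The ordering that the comment above describes (primaries first, then index, then shard id) can be reproduced with a small self-contained comparator; `Entry` here is a toy stand-in for `ShardRouting`, not the real type.

-------------------------------------------------
import java.util.Arrays;
import java.util.Comparator;

// Toy reproduction of the unassigned-shard ordering described above.
public class UnassignedOrderSketch {
    static final class Entry {
        final int shardId; final boolean primary; final String index;
        Entry(int shardId, boolean primary, String index) {
            this.shardId = shardId; this.primary = primary; this.index = index;
        }
        @Override public String toString() {
            return "(" + shardId + "," + (primary ? "P" : "R") + "," + index + ")";
        }
    }

    public static void main(String[] args) {
        Entry[] shards = {
            new Entry(0, false, "IDX2"), new Entry(0, true, "IDX2"),
            new Entry(0, false, "IDX1"), new Entry(0, true, "IDX1"),
            new Entry(0, false, "IDX1"), new Entry(0, false, "IDX2"),
        };
        Arrays.sort(shards, Comparator
            .comparing((Entry e) -> e.primary ? 0 : 1) // primaries sort first
            .thenComparing(e -> e.index)               // then by index name
            .thenComparingInt(e -> e.shardId));        // then by shard id
        // prints: [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
        System.out.println(Arrays.toString(shards));
    }
}
-------------------------------------------------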
@@ -1007,7 +987,8 @@ private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String id && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) { if (maxNode.containsShard(shard)) { // simulate moving shard from maxNode to minNode - final float delta = weight.weightShardAdded(this, minNode, idx) - weight.weightShardRemoved(this, maxNode, idx); + final float delta = weight.weightShardAdded( + this, minNode, idx) - weight.weightShardRemoved(this, maxNode, idx); if (delta < minCost || (candidate != null && Float.compare(delta, minCost) == 0 && candidate.id() > shard.id())) { /* this last line is a tie-breaker to make the shard allocation alg deterministic @@ -1039,8 +1020,8 @@ private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String id } } if (logger.isTraceEnabled()) { - logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", maxNode.getNodeId(), - minNode.getNodeId(), decision == null ? "NO" : decision.type().name()); + logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", + maxNode.getNodeId(), minNode.getNodeId(), decision == null ? "NO" : decision.type().name()); } return false; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java index 4ffd70aee1cd8..0e6ba4f051dd3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java @@ -186,7 +186,8 @@ protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, Rout * @param routingNode the node to initialize it to * @param shardRouting the shard routing that is to be matched in unassigned shards */ - protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) { + protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, + RoutingNode routingNode, ShardRouting shardRouting) { initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, null); } @@ -212,7 +213,8 @@ protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNo unassigned = it.updateUnassigned(unassignedInfo != null ? unassignedInfo : unassigned.unassignedInfo(), recoverySource != null ? 
recoverySource : unassigned.recoverySource(), allocation.changes()); } - it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + it.initialize(routingNode.nodeId(), null, + allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); return; } assert false : "shard to initialize not found in list of unassigned shards"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index a42fd2765b598..4d037570dd266 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -121,8 +121,8 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) } if (shardRouting.recoverySource().getType() != RecoverySource.Type.EMPTY_STORE && acceptDataLoss == false) { - String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm " + - "by setting the accept_data_loss parameter to true"; + String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true"; return explainOrThrowRejectedCommand(explain, allocation, dataLossWarning); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java index 6ec09a9bbbbe9..709681f2b2008 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java @@ -109,10 +109,12 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) } if (primaryShardRouting.unassigned()) { return explainOrThrowRejectedCommand(explain, allocation, - "trying to allocate a replica shard [" + index + "][" + shardId + "], while corresponding primary shard is still unassigned"); + "trying to allocate a replica shard [" + index + "][" + shardId + + "], while corresponding primary shard is still unassigned"); } - List replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); + List replicaShardRoutings = + allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); ShardRouting shardRouting; if (replicaShardRoutings.isEmpty()) { return explainOrThrowRejectedCommand(explain, allocation, @@ -127,7 +129,8 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) if (explain) { return new RerouteExplanation(this, decision); } - throw new IllegalArgumentException("[" + name() + "] allocation of [" + index + "][" + shardId + "] on node " + discoNode + " is not allowed, reason: " + decision); + throw new IllegalArgumentException("[" + name() + "] allocation of [" + index + "][" + shardId + "] on node " + discoNode + + " is 
not allowed, reason: " + decision); } initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 72290eb9ccf1a..67122cb3ff1b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -161,7 +161,8 @@ public static AllocationCommands fromXContent(XContentParser parser) throws IOEx commands.add(parser.namedObject(AllocationCommand.class, commandName, null)); // move to the end object one if (parser.nextToken() != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchParseException("allocation command is malformed, done parsing a command, but didn't get END_OBJECT, got [{}] instead", token); + throw new ElasticsearchParseException("allocation command is malformed, done parsing a command," + + " but didn't get END_OBJECT, got [{}] instead", token); } } else { throw new ElasticsearchParseException("allocation command is malformed, got [{}] instead", token); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index 6b4af8c605aae..9358935542029 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -139,12 +139,14 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) if (explain) { return new RerouteExplanation(this, decision); } - throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since its not allowed, reason: " + decision); + throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + + toDiscoNode + ", since its not allowed, reason: " + decision); } if (decision.type() == Decision.Type.THROTTLE) { // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it... 
} - allocation.routingNodes().relocateShard(shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + allocation.routingNodes().relocateShard(shardRouting, toRoutingNode.nodeId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); } if (!found) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java index 12cac56e11a44..3ae86d60bd98c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -19,13 +19,12 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; /** * {@link AllocationDecider} is an abstract base class that allows to make @@ -33,15 +32,6 @@ * basis. */ public abstract class AllocationDecider extends AbstractComponent { - - /** - * Initializes a new {@link AllocationDecider} - * @param settings {@link Settings} used by this {@link AllocationDecider} - */ - protected AllocationDecider(Settings settings) { - super(settings); - } - /** * Returns a {@link Decision} whether the given shard routing can be * re-balanced to the given allocation. The default is diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index 53e67ba25a429..7f91be340fd79 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; import java.util.Collection; import java.util.Collections; @@ -38,8 +37,7 @@ public class AllocationDeciders extends AllocationDecider { private final Collection allocations; - public AllocationDeciders(Settings settings, Collection allocations) { - super(settings); + public AllocationDeciders(Collection allocations) { this.allocations = Collections.unmodifiableCollection(allocations); } @@ -74,7 +72,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // short track if a NO is returned. 
if (decision == Decision.NO) { if (logger.isTraceEnabled()) { - logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName()); + logger.trace("Can not allocate [{}] on node [{}] due to [{}]", + shardRouting, node.node(), allocationDecider.getClass().getSimpleName()); } // short circuit only if debugging is not enabled if (!allocation.debugDecision()) { @@ -106,7 +105,8 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl // short track if a NO is returned. if (decision == Decision.NO) { if (logger.isTraceEnabled()) { - logger.trace("Shard [{}] can not remain on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName()); + logger.trace("Shard [{}] can not remain on node [{}] due to [{}]", + shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName()); } if (!allocation.debugDecision()) { return decision; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 6105c732d5511..06fc9c327053c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -92,7 +92,6 @@ public class AwarenessAllocationDecider extends AllocationDecider { private volatile Map> forcedAwarenessAttributes; public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); @@ -208,10 +207,6 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout currentNodeCount, requiredCountPerAttribute + leftoverPerAttribute); } - // all is well, we are below or same as average - if (currentNodeCount <= requiredCountPerAttribute) { - continue; - } } return allocation.decision(Decision.YES, NAME, "node meets all awareness attribute requirements"); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index ea945c23c728c..1ea369c75d9bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -91,7 +91,6 @@ public String toString() { private volatile ClusterRebalanceType type; public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); try { type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); } catch (IllegalStateException e) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 15456ec3e1133..a11b3dcf102f8 
100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -48,7 +48,6 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { private volatile int clusterConcurrentRebalance; public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index a2198ad90d9b0..725df82e51599 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -92,10 +92,6 @@ public enum Type implements Writeable { this.id = id; } - public static Type resolve(String s) { - return Type.valueOf(s.toUpperCase(Locale.ROOT)); - } - public static Type readFrom(StreamInput in) throws IOException { int i = in.readVInt(); switch (i) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index a7426d3e551b5..9676eaf4df1c3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -73,7 +73,6 @@ public class DiskThresholdDecider extends AllocationDecider { private final DiskThresholdSettings diskThresholdSettings; public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index d0fbe6761b290..8a72fe8cb49a9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -80,7 +80,6 @@ public class EnableAllocationDecider extends AllocationDecider { private volatile Allocation enableAllocation; public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index 
ed2d5384fa704..053d696f6768c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -97,7 +97,6 @@ public class FilterAllocationDecider extends AllocationDecider { private volatile DiscoveryNodeFilters clusterExcludeFilters; public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings)); setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings)); setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings)); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index 4c580509e9229..708482feae7d4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; /** * An allocation decider that prevents shards from being allocated on any node if the shards allocation has been retried N times without @@ -42,15 +41,6 @@ public class MaxRetryAllocationDecider extends AllocationDecider { public static final String NAME = "max_retry"; - /** - * Initializes a new {@link MaxRetryAllocationDecider} - * - * @param settings {@link Settings} used by this {@link AllocationDecider} - */ - public MaxRetryAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index f2df6d3196dd0..e2817eb87a7d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; /** * An allocation decider that prevents relocation or allocation from nodes @@ -38,10 +37,6 @@ public class NodeVersionAllocationDecider extends AllocationDecider { public static final String NAME = "node_version"; - public NodeVersionAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (shardRouting.primary()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java index c4cd2ecf50dda..3d890067bbf3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; /** * Only allow rebalancing when all shards are active within the shard replication group. @@ -30,10 +29,6 @@ public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider public static final String NAME = "rebalance_only_when_active"; - public RebalanceOnlyWhenActiveAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { if (!allocation.routingNodes().allReplicasActive(shardRouting.shardId(), allocation.metaData())) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java index 4cceb1cc161c3..2bc81b84570b2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; /** * An allocation strategy that only allows for a replica to be allocated when the primary is active. 
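Taken together, the decider hunks above remove the Settings argument from the AllocationDecider super constructor, so a stateless decider no longer needs a constructor at all. A minimal sketch of a custom decider under the new shape; the class name, decider name, and message are illustrative and not part of this diff, and RoutingAllocation#decision is assumed to carry the label into explain output:

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

// Illustrative only: with the Settings-based super constructor gone, a
// stateless decider is nothing but its overridden decision methods.
public class AlwaysYesAllocationDecider extends AllocationDecider {

    public static final String NAME = "always_yes";

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        // The label and explanation surface in allocation-explain output.
        return allocation.decision(Decision.YES, NAME, "this example never vetoes an allocation");
    }
}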
@@ -31,10 +30,6 @@ public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecide private static final String NAME = "replica_after_primary_active"; - public ReplicaAfterPrimaryActiveAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return canAllocate(shardRouting, allocation); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java index 8babcd5484f6c..d58a625c6edc3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java @@ -26,11 +26,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; - /** * An allocation decider that ensures we allocate the shards of a target index for resize operations next to the source primaries */ @@ -38,15 +36,6 @@ public class ResizeAllocationDecider extends AllocationDecider { public static final String NAME = "resize"; - /** - * Initializes a new {@link ResizeAllocationDecider} - * - * @param settings {@link Settings} used by this {@link AllocationDecider} - */ - public ResizeAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { return canAllocate(shardRouting, null, allocation); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java index 3fefd4e0abba4..63971ca46e468 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.Snapshot; /** @@ -35,16 +34,6 @@ public class RestoreInProgressAllocationDecider extends AllocationDecider { public static final String NAME = "restore_in_progress"; - /** - * Creates a new {@link RestoreInProgressAllocationDecider} instance from - * given settings - * - * @param settings {@link Settings} to use - */ - public RestoreInProgressAllocationDecider(Settings settings) { - super(settings); - } - @Override public Decision canAllocate(final ShardRouting shardRouting, final RoutingNode node, final RoutingAllocation allocation) { return canAllocate(shardRouting, allocation); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index cc2d488974baa..2961b3faaf4dc 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -53,7 +53,6 @@ public class SameShardAllocationDecider extends AllocationDecider { private volatile boolean sameHost; public SameShardAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.sameHost = CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, this::setSameHost); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 398ee0a17ad54..1c0a0c0ef0a6b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -73,8 +73,10 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, Property.Dynamic, Property.NodeScope); + private final Settings settings; + public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); + this.settings = settings; this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index eb4cc0c4420ff..7eb1b882d1ffe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.settings.Settings; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -33,16 +32,6 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { public static final String NAME = "snapshot_in_progress"; - /** - * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from - * given settings - * - * @param settings {@link org.elasticsearch.common.settings.Settings} to use - */ - public SnapshotInProgressAllocationDecider(Settings settings) { - super(settings); - } - /** * Returns a {@link Decision} whether the given shard routing can be * re-balanced to the given allocation. 
The default is diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 7821ad11a52cd..0d67cd6071f08 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -81,7 +81,6 @@ public class ThrottlingAllocationDecider extends AllocationDecider { private volatile int concurrentOutgoingRecoveries; public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - super(settings); this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); concurrentIncomingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.get(settings); concurrentOutgoingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.get(settings); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 7272c9ed30200..55728982c6c1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -95,15 +95,18 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements private final AtomicReference state; // last applied state + private final String nodeName; + private NodeConnectionsService nodeConnectionsService; - public ClusterApplierService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { + public ClusterApplierService(String nodeName, Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { super(settings); this.clusterSettings = clusterSettings; this.threadPool = threadPool; this.state = new AtomicReference<>(); this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); this.localNodeMasterListeners = new LocalNodeMasterListeners(threadPool); + this.nodeName = nodeName; } public void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -130,8 +133,8 @@ protected synchronized void doStart() { Objects.requireNonNull(state.get(), "please set initial state before starting"); addListener(localNodeMasterListeners); threadPoolExecutor = EsExecutors.newSinglePrioritizing( - nodeName() + "/" + CLUSTER_UPDATE_THREAD_NAME, - daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME), + nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME, + daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 58cace44754de..e3df60011792a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool; import 
java.util.Collections; @@ -54,15 +55,24 @@ public class ClusterService extends AbstractLifecycleComponent { public static final org.elasticsearch.common.settings.Setting.AffixSetting USER_DEFINED_META_DATA = Setting.prefixKeySetting("cluster.metadata.", (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)); + /** + * The node's settings. + */ + private final Settings settings; + private final ClusterName clusterName; private final OperationRouting operationRouting; private final ClusterSettings clusterSettings; + private final String nodeName; + public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { super(settings); - this.masterService = new MasterService(settings, threadPool); + this.settings = settings; + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + this.masterService = new MasterService(nodeName, settings, threadPool); this.operationRouting = new OperationRouting(settings, clusterSettings); this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -70,7 +80,7 @@ public ClusterService(Settings settings, ClusterSettings clusterSettings, Thread this::setSlowTaskLoggingThreshold); // Add a no-op update consumer so changes are logged this.clusterSettings.addAffixUpdateConsumer(USER_DEFINED_META_DATA, (first, second) -> {}, (first, second) -> {}); - this.clusterApplierService = new ClusterApplierService(settings, clusterSettings, threadPool); + this.clusterApplierService = new ClusterApplierService(nodeName, settings, clusterSettings, threadPool); } private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { @@ -195,10 +205,20 @@ public ClusterSettings getClusterSettings() { return clusterSettings; } + /** + * The node's settings. + */ public Settings getSettings() { return settings; } + /** + * The name of this node. + */ + public final String getNodeName() { + return nodeName; + } + /** * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, * ClusterStateTaskExecutor, ClusterStateTaskListener)}, submitted updates will not be batched. 
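The ClusterService hunk above shows the pattern this change applies throughout: resolve the node name from Settings once, at construction, and hand the plain String to collaborators such as MasterService and ClusterApplierService rather than re-deriving it through the removed AbstractComponent#nodeName(). A hedged sketch of just that resolution step; the wrapper class and the node name value are made up for illustration:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;

public class NodeNameExample {
    public static void main(String[] args) {
        // Resolve the name once from node settings...
        Settings settings = Settings.builder()
                .put(Node.NODE_NAME_SETTING.getKey(), "node-1") // hypothetical value
                .build();
        String nodeName = Node.NODE_NAME_SETTING.get(settings);
        // ...and pass the String along, mirroring
        // new MasterService(nodeName, settings, threadPool) above.
        System.out.println(nodeName);
    }
}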
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index d720e9d603fe9..cbef687fc410a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -70,6 +70,8 @@ public class MasterService extends AbstractLifecycleComponent { public static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; + private final String nodeName; + private BiConsumer clusterStatePublisher; private java.util.function.Supplier clusterStateSupplier; @@ -81,8 +83,9 @@ public class MasterService extends AbstractLifecycleComponent { private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor; private volatile Batcher taskBatcher; - public MasterService(Settings settings, ThreadPool threadPool) { + public MasterService(String nodeName, Settings settings, ThreadPool threadPool) { super(settings); + this.nodeName = nodeName; // TODO: introduce a dedicated setting for master service this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); this.threadPool = threadPool; @@ -105,8 +108,8 @@ protected synchronized void doStart() { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting"); threadPoolExecutor = EsExecutors.newSinglePrioritizing( - nodeName() + "/" + MASTER_UPDATE_THREAD_NAME, - daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME), + nodeName + "/" + MASTER_UPDATE_THREAD_NAME, + daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME), threadPool.getThreadContext(), threadPool.scheduler()); taskBatcher = new Batcher(logger, threadPoolExecutor); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index c49143edb446e..cffdf0f4507c0 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -42,7 +42,6 @@ public class FsBlobStore extends AbstractComponent implements BlobStore { private final boolean readOnly; public FsBlobStore(Settings settings, Path path) throws IOException { - super(settings); this.path = path; this.readOnly = settings.getAsBoolean("readonly", false); if (!this.readOnly) { diff --git a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index a682565adf734..2aa6cde969187 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -32,8 +32,8 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { private final long memoryBytesLimit; - private final BreakerSettings settings; private final double overheadConstant; + private final Durability durability; private final AtomicLong used; private final AtomicLong trippedCount; private final Logger logger; @@ -66,9 +66,9 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, Logger logger, public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker, Logger logger, HierarchyCircuitBreakerService 
parent, String name) { this.name = name; - this.settings = settings; this.memoryBytesLimit = settings.getLimit(); this.overheadConstant = settings.getOverhead(); + this.durability = settings.getDurability(); if (oldBreaker == null) { this.used = new AtomicLong(0); this.trippedCount = new AtomicLong(0); @@ -78,7 +78,7 @@ public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBre } this.logger = logger; if (logger.isTraceEnabled()) { - logger.trace("creating ChildCircuitBreaker with settings {}", this.settings); + logger.trace("creating ChildCircuitBreaker with settings {}", settings); } this.parent = parent; } @@ -95,7 +95,7 @@ public void circuitBreak(String fieldName, long bytesNeeded) { ", which is larger than the limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; logger.debug("{}", message); - throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit); + throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit, durability); } /** @@ -234,4 +234,12 @@ public long getTrippedCount() { public String getName() { return this.name; } + + /** + * @return whether a tripped circuit breaker will reset itself (transient) or requires manual intervention (permanent). + */ + @Override + public Durability getDurability() { + return this.durability; + } } diff --git a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java index 0671091d503b6..e3b61199e29b5 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java @@ -83,6 +83,13 @@ public static Type parseValue(String value) { } } + enum Durability { + // The condition that tripped the circuit breaker fixes itself eventually. + TRANSIENT, + // The condition that tripped the circuit breaker requires manual intervention. + PERMANENT + } + /** * Trip the circuit breaker * @param fieldName name of the field responsible for tripping the breaker @@ -127,4 +134,9 @@ public static Type parseValue(String value) { * @return the name of the breaker */ String getName(); + + /** + * @return whether a tripped circuit breaker will reset itself (transient) or requires manual intervention (permanent). 
+ */ + Durability getDurability(); } diff --git a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java index e01fe1beee224..9272f4df956e3 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/CircuitBreakingException.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.breaker; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,23 +34,28 @@ public class CircuitBreakingException extends ElasticsearchException { private final long bytesWanted; private final long byteLimit; - - public CircuitBreakingException(String message) { - super(message); - this.bytesWanted = 0; - this.byteLimit = 0; - } + private final CircuitBreaker.Durability durability; public CircuitBreakingException(StreamInput in) throws IOException { super(in); byteLimit = in.readLong(); bytesWanted = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + durability = in.readEnum(CircuitBreaker.Durability.class); + } else { + durability = CircuitBreaker.Durability.PERMANENT; + } } - public CircuitBreakingException(String message, long bytesWanted, long byteLimit) { + public CircuitBreakingException(String message, CircuitBreaker.Durability durability) { + this(message, 0, 0, durability); + } + + public CircuitBreakingException(String message, long bytesWanted, long byteLimit, CircuitBreaker.Durability durability) { super(message); this.bytesWanted = bytesWanted; this.byteLimit = byteLimit; + this.durability = durability; } @Override @@ -57,6 +63,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(byteLimit); out.writeLong(bytesWanted); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeEnum(durability); + } } public long getBytesWanted() { @@ -67,14 +76,19 @@ public long getByteLimit() { return this.byteLimit; } + public CircuitBreaker.Durability getDurability() { + return durability; + } + @Override public RestStatus status() { - return RestStatus.SERVICE_UNAVAILABLE; + return RestStatus.TOO_MANY_REQUESTS; } @Override protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException { builder.field("bytes_wanted", bytesWanted); builder.field("bytes_limit", byteLimit); + builder.field("durability", durability); } } diff --git a/server/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java index dbd1fe92ffe9a..bdf210dc42074 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java @@ -84,7 +84,7 @@ public void circuitBreak(String fieldName, long bytesNeeded) throws CircuitBreak ", which is larger than the limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; logger.debug("{}", message); - throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit); + throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit, Durability.PERMANENT); } /** @@ -197,4 +197,9 @@ public long getTrippedCount() { public String getName() { return 
FIELDDATA; } + + @Override + public Durability getDurability() { + return Durability.PERMANENT; + } } diff --git a/server/src/main/java/org/elasticsearch/common/breaker/NoopCircuitBreaker.java b/server/src/main/java/org/elasticsearch/common/breaker/NoopCircuitBreaker.java index 6d1f573712755..84b9e91e4a88d 100644 --- a/server/src/main/java/org/elasticsearch/common/breaker/NoopCircuitBreaker.java +++ b/server/src/main/java/org/elasticsearch/common/breaker/NoopCircuitBreaker.java @@ -71,4 +71,9 @@ public long getTrippedCount() { public String getName() { return this.name; } + + @Override + public Durability getDurability() { + return Durability.PERMANENT; + } } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index 7147045c4147e..aa2f0524cc0b4 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.bytes; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.BytesStream; @@ -38,7 +37,7 @@ /** * A reference to bytes. */ -public abstract class BytesReference implements Accountable, Comparable, ToXContentFragment { +public abstract class BytesReference implements Comparable, ToXContentFragment { private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it @@ -71,6 +70,11 @@ public static BytesReference bytes(XContentBuilder xContentBuilder) { */ public abstract BytesReference slice(int from, int length); + /** + * The amount of memory used by this BytesReference + */ + public abstract long ramBytesUsed(); + /** * A stream input of the bytes. */ diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index 97a8053c1d912..1e0310c024736 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -19,28 +19,18 @@ package org.elasticsearch.common.component; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; +import org.apache.logging.log4j.LogManager; +/** + * @deprecated declare your own logger + */ +@Deprecated public abstract class AbstractComponent { protected final Logger logger; - protected final DeprecationLogger deprecationLogger; - protected final Settings settings; - public AbstractComponent(Settings settings) { + public AbstractComponent() { this.logger = LogManager.getLogger(getClass()); - this.deprecationLogger = new DeprecationLogger(logger); - this.settings = settings; - } - - /** - * Returns the nodes name from the settings or the empty string if not set. 
- */ - public final String nodeName() { - return Node.NODE_NAME_SETTING.get(settings); } } diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java index 8a472954ab492..1b06ade9aeb70 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java @@ -32,7 +32,7 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent imple private final List listeners = new CopyOnWriteArrayList<>(); protected AbstractLifecycleComponent(Settings settings) { - super(settings); + // TODO drop settings from ctor } @Override diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java deleted file mode 100644 index 20f52a742a2f2..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.spi.ExtendedLogger; - -/** - * Factory to get {@link Logger}s - */ -final class ESLoggerFactory { - - private ESLoggerFactory() { - - } - - static Logger getLogger(String prefix, String name) { - return getLogger(prefix, LogManager.getLogger(name)); - } - - static Logger getLogger(String prefix, Class clazz) { - /* - * At one point we didn't use LogManager.getLogger(clazz) because - * of a bug in log4j that has since been fixed: - * https://github.com/apache/logging-log4j2/commit/ae33698a1846a5e10684ec3e52a99223f06047af - * - * For now we continue to use LogManager.getLogger(clazz.getName()) - * because we expect to eventually migrate away from needing this - * method entirely. - */ - return getLogger(prefix, LogManager.getLogger(clazz.getName())); - } - - static Logger getLogger(String prefix, Logger logger) { - /* - * In a followup we'll throw an exception if prefix is null or empty - * redirecting folks to LogManager.getLogger. 
- * - * This and more is tracked in https://github.com/elastic/elasticsearch/issues/32174 - */ - if (prefix == null || prefix.length() == 0) { - return logger; - } - return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java index e5031f4608e8b..1ee99c72e4d6f 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -57,7 +57,8 @@ public static Logger getLogger(Class clazz, ShardId shardId, String... prefix * Class and no extra prefixes. */ public static Logger getLogger(String loggerName, ShardId shardId) { - return ESLoggerFactory.getLogger(formatPrefix(shardId.getIndexName(), Integer.toString(shardId.id())), loggerName); + String prefix = formatPrefix(shardId.getIndexName(), Integer.toString(shardId.id())); + return new PrefixLogger(LogManager.getLogger(loggerName), prefix); } public static Logger getLogger(Class clazz, Index index, String... prefixes) { @@ -65,33 +66,15 @@ public static Logger getLogger(Class clazz, Index index, String... prefixes) } public static Logger getLogger(Class clazz, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); + return new PrefixLogger(LogManager.getLogger(clazz), formatPrefix(prefixes)); } public static Logger getLogger(Logger parentLogger, String s) { - String prefix = null; + Logger inner = LogManager.getLogger(parentLogger.getName() + s); if (parentLogger instanceof PrefixLogger) { - prefix = ((PrefixLogger)parentLogger).prefix(); + return new PrefixLogger(inner, ((PrefixLogger)parentLogger).prefix()); } - return ESLoggerFactory.getLogger(prefix, parentLogger.getName() + s); - } - - /** - * Get or build a logger. - * @deprecated Prefer {@link LogManager#getLogger} - */ - @Deprecated - public static Logger getLogger(String s) { - return LogManager.getLogger(s); - } - - /** - * Get or build a logger. - * @deprecated Prefer {@link LogManager#getLogger} - */ - @Deprecated - public static Logger getLogger(Class clazz) { - return ESLoggerFactory.getLogger(null, clazz); + return inner; } private static String formatPrefix(String... prefixes) { diff --git a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java index f46d360a3fa5b..8a4d43f4df84a 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.logging; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Marker; import org.apache.logging.log4j.MarkerManager; import org.apache.logging.log4j.message.Message; @@ -70,26 +71,27 @@ public String prefix() { * Construct a prefix logger with the specified name and prefix. * * @param logger the extended logger to wrap - * @param name the name of this prefix logger * @param prefix the prefix for this prefix logger */ - PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) { - super(logger, name, null); + PrefixLogger(final Logger logger, final String prefix) { + super((ExtendedLogger) logger, logger.getName(), null); - final String actualPrefix = (prefix == null ? 
"" : prefix); + if (prefix == null || prefix.isEmpty()) { + throw new IllegalArgumentException("if you don't need a prefix then use a regular logger"); + } final Marker actualMarker; // markers is not thread-safe, so we synchronize access synchronized (markers) { - final Marker maybeMarker = markers.get(actualPrefix); + final Marker maybeMarker = markers.get(prefix); if (maybeMarker == null) { - actualMarker = new MarkerManager.Log4jMarker(actualPrefix); + actualMarker = new MarkerManager.Log4jMarker(prefix); /* * We must create a new instance here as otherwise the marker will hold a reference to the key in the weak hash map; as * those references are held strongly, this would give a strong reference back to the key preventing them from ever being * collected. This also guarantees that no other strong reference can be held to the prefix anywhere. */ // noinspection RedundantStringConstructorCall - markers.put(new String(actualPrefix), actualMarker); + markers.put(new String(prefix), actualMarker); } else { actualMarker = maybeMarker; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index 1920db12117d4..1010c917eca82 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -38,10 +38,10 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.Document; +import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -609,7 +609,7 @@ public void setMaxNumTokensParsed(int i) { public Query like(int docNum) throws IOException { if (fieldNames == null) { // gather list of valid fields from lucene - Collection fields = MultiFields.getIndexedFields(ir); + Collection fields = FieldInfos.getIndexedFields(ir); fieldNames = fields.toArray(new String[fields.size()]); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index cbf2a2400d7e1..fe0901c6cdb3f 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -123,8 +123,7 @@ public static ScoreMode fromString(String scoreMode) { final ScoreMode scoreMode; final float maxBoost; private final Float minScore; - - protected final CombineFunction combineFunction; + private final CombineFunction combineFunction; /** * Creates a FunctionScoreQuery without function. 
@@ -192,6 +191,10 @@ public Float getMinScore() { return minScore; } + public CombineFunction getCombineFunction() { + return combineFunction; + } + @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = super.rewrite(reader); diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index ab635c1d1c7c5..9667341cd6ef3 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -49,22 +49,24 @@ */ public abstract class AbstractScopedSettings extends AbstractComponent { public static final String ARCHIVED_SETTINGS_PREFIX = "archived."; - private Settings lastSettingsApplied = Settings.EMPTY; + private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); + private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); + private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$"); + + private final Settings settings; private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; private final Map, SettingUpgrader> settingUpgraders; private final Setting.Property scope; - private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); - private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); - private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$"); + private Settings lastSettingsApplied; protected AbstractScopedSettings( final Settings settings, final Set> settingsSet, final Set> settingUpgraders, final Setting.Property scope) { - super(settings); + this.settings = settings; this.lastSettingsApplied = Settings.EMPTY; this.settingUpgraders = @@ -104,7 +106,7 @@ protected void validateSettingKey(Setting setting) { } protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) { - super(nodeSettings); + this.settings = nodeSettings; this.lastSettingsApplied = scopeSettings; this.scope = other.scope; complexMatchers = other.complexMatchers; diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 66a4aa65c4480..f72f31772aa38 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -293,6 +293,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE, RemoteClusterService.ENABLE_REMOTE_CLUSTERS, RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS, + RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java index 1c67318e28286..fc68d58d5de0e 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java +++ 
b/server/src/main/java/org/elasticsearch/common/settings/SettingsFilter.java @@ -16,10 +16,10 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.common.settings; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.rest.RestRequest; @@ -36,7 +36,7 @@ * A class that allows filtering settings objects by simple regular expression patterns or full settings keys. * It's used for response filtering on the REST layer, for instance to filter out sensitive information like access keys. */ -public final class SettingsFilter extends AbstractComponent { +public final class SettingsFilter { /** * Can be used to specify settings filter that will be used to filter out matching settings in toXContent method */ @@ -45,8 +45,7 @@ public final class SettingsFilter extends AbstractComponent { private final Set patterns; private final String patternString; - public SettingsFilter(Settings settings, Collection patterns) { - super(settings); + public SettingsFilter(Collection patterns) { for (String pattern : patterns) { if (isValidPattern(pattern) == false) { throw new IllegalArgumentException("invalid pattern: " + pattern); diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 086346f470a50..6a78e81d7f3f4 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -146,7 +146,7 @@ public SettingsModule( } // by now we are fully configured, let's check node level settings for unregistered index settings clusterSettings.validate(settings, true); - this.settingsFilter = new SettingsFilter(settings, settingsFilterPattern); + this.settingsFilter = new SettingsFilter(settingsFilterPattern); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index 5e8fae6092d42..1112b6cb301f5 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; public class DateUtils { public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) { @@ -44,6 +45,7 @@ public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(DateFormatters.class)); // pkg private for tests static final Map DEPRECATED_SHORT_TIMEZONES; + public static final Set DEPRECATED_SHORT_TZ_IDS; static { Map tzs = new HashMap<>(); tzs.put("EST", "-05:00"); // eastern time without daylight savings @@ -52,6 +54,7 @@ public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) { tzs.put("ROC", "Asia/Taipei"); tzs.put("Eire", "Europe/London"); DEPRECATED_SHORT_TIMEZONES = Collections.unmodifiableMap(tzs); + DEPRECATED_SHORT_TZ_IDS = tzs.keySet(); } public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) { diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index
be69cf5b95a06..6f2e28e97cbae 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -66,7 +66,6 @@ public void close() { } public PageCacheRecycler(Settings settings) { - super(settings); final Type type = TYPE_SETTING.get(settings); final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes(); final int availableProcessors = EsExecutors.numberOfProcessors(settings); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index abc95810ba9a9..44367053406e3 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -179,6 +179,11 @@ public static ThreadFactory daemonThreadFactory(Settings settings, String namePr return daemonThreadFactory(threadName(settings, namePrefix)); } + public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) { + assert nodeName != null && false == nodeName.isEmpty(); + return daemonThreadFactory(threadName(nodeName, namePrefix)); + } + public static ThreadFactory daemonThreadFactory(Settings settings, String ... names) { return daemonThreadFactory(threadName(settings, names)); } diff --git a/server/src/main/java/org/elasticsearch/discovery/Discovery.java b/server/src/main/java/org/elasticsearch/discovery/Discovery.java index b58f61bac89bb..a2035a93a4f2f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -43,7 +43,8 @@ public interface Discovery extends LifecycleComponent { * The {@link AckListener} allows keeping track of the acks received from nodes, and verifying whether * they updated their own cluster state or not. * - * The method is guaranteed to throw a {@link FailedToCommitClusterStateException} if the change is not committed and should be rejected. + * The method is guaranteed to throw a {@link FailedToCommitClusterStateException} if the change is not + * committed and should be rejected. * Any other exception signals that something went wrong, but the change is committed.
*/ void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener); diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 7256bb16747d8..91001d86b0f89 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -77,7 +77,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); - hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile)); + hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(configFile)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index e9a83678f8a2c..ebc64fa3af1d1 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -37,8 +37,10 @@ public class DiscoverySettings extends AbstractComponent { public static final int NO_MASTER_BLOCK_ID = 2; - public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, + RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); + public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, false, + RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); /** * sets the timeout for a complete publishing cycle, including both sending and committing. 
the master * will continue to process the next cluster state update after this time has elapsed @@ -70,7 +72,6 @@ public class DiscoverySettings extends AbstractComponent { private volatile boolean publishDiff; public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) { - super(settings); clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); clusterSettings.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); clusterSettings.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index 462136a22fe94..1ac1cf13585b3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -47,6 +47,7 @@ */ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery { + private final ClusterName clusterName; protected final TransportService transportService; private final ClusterApplier clusterApplier; private volatile ClusterState clusterState; @@ -54,6 +55,7 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D public SingleNodeDiscovery(final Settings settings, final TransportService transportService, final MasterService masterService, final ClusterApplier clusterApplier) { super(Objects.requireNonNull(settings)); + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = Objects.requireNonNull(transportService); masterService.setClusterStateSupplier(() -> clusterState); this.clusterApplier = clusterApplier; @@ -114,7 +116,7 @@ protected synchronized void doStart() { } protected ClusterState createInitialState(DiscoveryNode localNode) { - ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)); + ClusterState.Builder builder = ClusterState.builder(clusterName); return builder.nodes(DiscoveryNodes.builder().add(localNode) .localNodeId(localNode.getId()) .masterNodeId(localNode.getId()) diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index ebce175e98118..8f2853904fa0f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -98,7 +98,6 @@ public static int compare(MasterCandidate c1, MasterCandidate c2) { } public ElectMasterService(Settings settings) { - super(settings); this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index 5d9b1687e4295..3d389fc814188 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -66,7 +66,6 @@ public abstract class FaultDetection extends AbstractComponent implements Closea protected final int pingRetryCount; public FaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName) { - 
super(settings); this.threadPool = threadPool; this.transportService = transportService; this.clusterName = clusterName; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java index f9b20580ecd9b..c80fceea756e4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import java.io.IOException; @@ -50,8 +49,7 @@ public class FileBasedUnicastHostsProvider extends AbstractComponent implements private final Path unicastHostsFilePath; - public FileBasedUnicastHostsProvider(Settings settings, Path configFile) { - super(settings); + public FileBasedUnicastHostsProvider(Path configFile) { this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index e8bafea66d3a4..f699e547bf40e 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -67,9 +66,8 @@ public interface MembershipListener { private final MembershipListener listener; - public MembershipAction(Settings settings, TransportService transportService, MembershipListener listener, + public MembershipAction(TransportService transportService, MembershipListener listener, Collection> joinValidators) { - super(settings); this.transportService = transportService; this.listener = listener; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 5cceba237e544..ecf52a6975369 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; @@ -65,9 +64,7 @@ public class NodeJoinController extends AbstractComponent { private ElectionContext electionContext = null; - public NodeJoinController(MasterService masterService, AllocationService allocationService, ElectMasterService electMaster, - Settings settings) { - super(settings); + public NodeJoinController(MasterService masterService, AllocationService allocationService, ElectMasterService electMaster) { this.masterService 
= masterService; joinTaskExecutor = new JoinTaskExecutor(allocationService, electMaster, logger); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 5e9f960e893cf..ca014af53d8c9 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; @@ -97,12 +96,10 @@ public interface IncomingClusterStateListener { private final AtomicLong compatibleClusterStateDiffReceivedCount = new AtomicLong(); public PublishClusterStateAction( - Settings settings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, IncomingClusterStateListener incomingClusterStateListener, DiscoverySettings discoverySettings) { - super(settings); this.transportService = transportService; this.namedWriteableRegistry = namedWriteableRegistry; this.incomingClusterStateListener = incomingClusterStateListener; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java index 6d6453c776e68..a11e255f88878 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java @@ -52,8 +52,6 @@ public class SettingsBasedHostsProvider extends AbstractComponent implements Uni private final int limitPortCounts; public SettingsBasedHostsProvider(Settings settings, TransportService transportService) { - super(settings); - if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); // we only limit to 1 address, makes no sense to ping 100 ports diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 74414dc446e6d..8fb9cfce0bf7a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; @@ -117,11 +118,12 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing { private final TimeValue resolveTimeout; + private final String nodeName; + private volatile boolean closed = false; public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, UnicastHostsProvider unicastHostsProvider, PingContextProvider contextProvider) { - super(settings); this.threadPool = threadPool; 
this.transportService = transportService; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -131,6 +133,7 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService final int concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); + nodeName = Node.NODE_NAME_SETTING.get(settings); logger.debug( "using concurrent_connects [{}], resolve_timeout [{}]", concurrentConnects, @@ -141,7 +144,7 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]"); unicastZenPingExecutorService = EsExecutors.newScaling( - nodeName() + "/" + "unicast_connect", + nodeName + "/" + "unicast_connect", 0, concurrentConnects, 60, diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index a68557adb9d03..b787835926e7f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -121,6 +121,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final NodesFaultDetection nodesFD; private final PublishClusterStateAction publishClusterState; private final MembershipAction membership; + private final ClusterName clusterName; private final ThreadPool threadPool; private final TimeValue pingTimeout; @@ -172,7 +173,7 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings); this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings); this.threadPool = threadPool; - ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.committedState = new AtomicReference<>(); this.masterElectionIgnoreNonMasters = MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.get(settings); @@ -210,15 +211,14 @@ public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService t this.publishClusterState = new PublishClusterStateAction( - settings, transportService, namedWriteableRegistry, this, discoverySettings); - this.membership = new MembershipAction(settings, transportService, new MembershipListener(), onJoinValidators); + this.membership = new MembershipAction(transportService, new MembershipListener(), onJoinValidators); this.joinThreadControl = new JoinThreadControl(); - this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings); + this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger); masterService.setClusterStateSupplier(this::clusterState); @@ -252,7 +252,7 @@ protected void doStart() { // set initial state assert committedState.get() == null; assert localNode != null; - ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)); + ClusterState.Builder builder = ClusterState.builder(clusterName); ClusterState initialState = builder .blocks(ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) @@ -302,7 +302,8 @@ protected void doStop() { try 
{ membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster); } catch (Exception e) { - logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); + logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", + nodes.getMasterNode(), possibleMaster), e); } } } @@ -520,16 +521,19 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { final Throwable unwrap = ExceptionsHelper.unwrapCause(e); if (unwrap instanceof NotMasterException) { if (++joinAttempt == this.joinRetryAttempts) { - logger.info("failed to send join request to master [{}], reason [{}], tried [{}] times", masterNode, ExceptionsHelper.detailedMessage(e), joinAttempt); + logger.info("failed to send join request to master [{}], reason [{}], tried [{}] times", masterNode, + ExceptionsHelper.detailedMessage(e), joinAttempt); return false; } else { - logger.trace("master {} failed with [{}]. retrying... (attempts done: [{}])", masterNode, ExceptionsHelper.detailedMessage(e), joinAttempt); + logger.trace("master {} failed with [{}]. retrying... (attempts done: [{}])", masterNode, + ExceptionsHelper.detailedMessage(e), joinAttempt); } } else { if (logger.isTraceEnabled()) { logger.trace(() -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); } else { - logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e)); + logger.info("failed to send join request to master [{}], reason [{}]", masterNode, + ExceptionsHelper.detailedMessage(e)); } return false; } @@ -557,7 +561,8 @@ void setCommittedState(ClusterState clusterState) { } // visible for testing - public static class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + public static class NodeRemovalClusterStateTaskExecutor + implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; private final ElectMasterService electMasterService; @@ -696,7 +701,8 @@ private void handleMinimumMasterNodesChanged(final int minimumMasterNodes) { synchronized (stateMutex) { // check if we have enough master nodes, if not, we need to move into joining the cluster again if (!electMaster.hasEnoughMasterNodes(committedState.get().nodes())) { - rejoin("not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + minimumMasterNodes + "]"); + rejoin("not enough master nodes on change of minimum_master_nodes from [" + prevMinimumMasterNode + "] to [" + + minimumMasterNodes + "]"); } } } @@ -734,10 +740,12 @@ boolean processNextCommittedClusterState(String reason) { } assert newClusterState.nodes().getMasterNode() != null : "received a cluster state without a master"; - assert !newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block"; + assert !newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : + "received a cluster state with a master block"; if (currentState.nodes().isLocalNodeElectedMaster() && newClusterState.nodes().isLocalNodeElectedMaster() == false) { - handleAnotherMaster(currentState, newClusterState.nodes().getMasterNode(), newClusterState.version(), "via a new cluster state"); + handleAnotherMaster(currentState, newClusterState.nodes().getMasterNode(), newClusterState.version(), + "via 
a new cluster state"); return false; } @@ -826,15 +834,18 @@ public static boolean shouldIgnoreOrRejectNewClusterState(Logger logger, Cluster // reject cluster states that are not new from the same master if (currentState.supersedes(newClusterState) || - (newClusterState.nodes().getMasterNodeId().equals(currentState.nodes().getMasterNodeId()) && currentState.version() == newClusterState.version())) { + (newClusterState.nodes().getMasterNodeId().equals(currentState.nodes().getMasterNodeId()) && + currentState.version() == newClusterState.version())) { // if the new state has a smaller version, and it has the same master node, then no need to process it - logger.debug("received a cluster state that is not newer than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); + logger.debug("received a cluster state that is not newer than the current one, ignoring (received {}, current {})", + newClusterState.version(), currentState.version()); return true; } // reject older cluster states if we are following a master if (currentState.nodes().getMasterNodeId() != null && newClusterState.version() < currentState.version()) { - logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); + logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", + newClusterState.version(), currentState.version()); return true; } return false; @@ -850,8 +861,10 @@ public static void validateStateIsFromCurrentMaster(Logger logger, DiscoveryNode return; } if (!currentNodes.getMasterNodeId().equals(newClusterState.nodes().getMasterNodeId())) { - logger.warn("received a cluster state from a different master than the current one, rejecting (received {}, current {})", newClusterState.nodes().getMasterNode(), currentNodes.getMasterNode()); - throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + newClusterState.nodes().getMasterNode() + ", current " + currentNodes.getMasterNode() + ")"); + logger.warn("received a cluster state from a different master than the current one, rejecting (received {}, current {})", + newClusterState.nodes().getMasterNode(), currentNodes.getMasterNode()); + throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + + newClusterState.nodes().getMasterNode() + ", current " + currentNodes.getMasterNode() + ")"); } } @@ -941,13 +954,15 @@ private DiscoveryNode findMaster() { return null; } } else { - assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master"; + assert !activeMasters.contains(localNode) : + "local node should never be elected as master when other nodes indicate an active master"; // lets tie break between discovered nodes return electMaster.tieBreakActiveMasters(activeMasters); } } - static List filterPingResponses(List fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) { + static List filterPingResponses(List fullPingResponses, + boolean masterElectionIgnoreNonMasters, Logger logger) { List pingResponses; if (masterElectionIgnoreNonMasters) { pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList()); @@ -1004,7 +1019,8 @@ private boolean localNodeMaster() { return 
clusterState().nodes().isLocalNodeElectedMaster(); } - private void handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { + private void handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, + String reason) { assert localClusterState.nodes().isLocalNodeElectedMaster() : "handleAnotherMaster called but current node is not a master"; assert Thread.holdsLock(stateMutex); @@ -1012,13 +1028,16 @@ private void handleAnotherMaster(ClusterState localClusterState, final Discovery rejoin("zen-disco-discovered another master with a new cluster_state [" + otherMaster + "][" + reason + "]"); } else { // TODO: do this outside mutex - logger.warn("discovered [{}] which is also master but with an older cluster_state, telling [{}] to rejoin the cluster ([{}])", otherMaster, otherMaster, reason); + logger.warn("discovered [{}] which is also master but with an older cluster_state, telling [{}] to rejoin the cluster ([{}])", + otherMaster, otherMaster, reason); try { // make sure we're connected to this node (connect to node does nothing if we're already connected) // since the network connections are asymmetric, it may be that we received a state but have disconnected from the node // in the past (after a master failure, for example) transportService.connectToNode(otherMaster); - transportService.sendRequest(otherMaster, DISCOVERY_REJOIN_ACTION_NAME, new RejoinClusterRequest(localClusterState.nodes().getLocalNodeId()), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + transportService.sendRequest(otherMaster, DISCOVERY_REJOIN_ACTION_NAME, + new RejoinClusterRequest(localClusterState.nodes().getLocalNodeId()), + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override public void handleException(TransportException exp) { @@ -1140,10 +1159,12 @@ public void onPingReceived(final NodesFaultDetection.PingRequest pingRequest) { } if (pingsWhileMaster.incrementAndGet() < maxPingsFromAnotherMaster) { - logger.trace("got a ping from another master {}. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); + logger.trace("got a ping from another master {}. current ping count: [{}]", pingRequest.masterNode(), + pingsWhileMaster.get()); return; } - logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); + logger.debug("got a ping from another master {}. resolving who should rejoin. 
current ping count: [{}]", + pingRequest.masterNode(), pingsWhileMaster.get()); synchronized (stateMutex) { ClusterState currentState = committedState.get(); if (currentState.nodes().isLocalNodeElectedMaster()) { diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index 275311cf6a8f5..ba29a08987d3c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; import java.util.List; @@ -42,11 +41,6 @@ * the logic to determine to which nodes (if any) those shards are allocated. */ public abstract class BaseGatewayShardAllocator extends AbstractComponent { - - public BaseGatewayShardAllocator(Settings settings) { - super(settings); - } - /** * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist. * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index acfcadb2f51b5..d9eb5013e9c6c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -60,9 +59,8 @@ public class DanglingIndicesState extends AbstractComponent implements ClusterSt private final Map danglingIndices = ConcurrentCollections.newConcurrentMap(); @Inject - public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, + public DanglingIndicesState(NodeEnvironment nodeEnv, MetaStateService metaStateService, LocalAllocateDangledIndices allocateDangledIndices, ClusterService clusterService) { - super(settings); this.nodeEnv = nodeEnv; this.metaStateService = metaStateService; this.allocateDangledIndices = allocateDangledIndices; diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index 77d2c553c2c51..7a31a089903fb 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -50,7 +49,6 @@ public class Gateway extends AbstractComponent { public Gateway(Settings settings, 
ClusterService clusterService, TransportNodesListGatewayMetaState listGatewayMetaState, IndicesService indicesService) { - super(settings); this.indicesService = indicesService; this.clusterService = clusterService; this.listGatewayMetaState = listGatewayMetaState; @@ -153,7 +151,7 @@ ClusterState.Builder upgradeAndArchiveUnknownOrInvalidSettings(MetaData.Builder clusterSettings.upgradeSettings(metaDataBuilder.transientSettings()), e -> logUnknownSetting("transient", e), (e, ex) -> logInvalidSetting("transient", e, ex))); - ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)); + ClusterState.Builder builder = ClusterState.builder(clusterService.getClusterName()); builder.metaData(metaDataBuilder); return builder; } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index c616716b86a97..dce92b1dd5083 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; @@ -48,16 +47,17 @@ public class GatewayAllocator extends AbstractComponent { private final PrimaryShardAllocator primaryShardAllocator; private final ReplicaShardAllocator replicaShardAllocator; - private final ConcurrentMap> asyncFetchStarted = ConcurrentCollections.newConcurrentMap(); - private final ConcurrentMap> asyncFetchStore = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap> + asyncFetchStarted = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap> + asyncFetchStore = ConcurrentCollections.newConcurrentMap(); @Inject - public GatewayAllocator(Settings settings, ClusterService clusterService, RoutingService routingService, + public GatewayAllocator(ClusterService clusterService, RoutingService routingService, TransportNodesListGatewayStartedShards startedAction, TransportNodesListShardStoreMetaData storeAction) { - super(settings); this.routingService = routingService; - this.primaryShardAllocator = new InternalPrimaryShardAllocator(settings, startedAction); - this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction); + this.primaryShardAllocator = new InternalPrimaryShardAllocator(startedAction); + this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction); clusterService.addStateApplier(event -> { boolean cleanCache = false; DiscoveryNode localNode = event.state().nodes().getLocalNode(); @@ -78,8 +78,7 @@ public GatewayAllocator(Settings settings, ClusterService clusterService, Routin } // for tests - protected GatewayAllocator(Settings settings) { - super(settings); + protected GatewayAllocator() { this.routingService = null; this.primaryShardAllocator = null; this.replicaShardAllocator = null; @@ -155,15 +154,16 @@ class InternalPrimaryShardAllocator extends PrimaryShardAllocator { private final TransportNodesListGatewayStartedShards startedAction; - InternalPrimaryShardAllocator(Settings settings, TransportNodesListGatewayStartedShards startedAction) { - super(settings); + 
InternalPrimaryShardAllocator(TransportNodesListGatewayStartedShards startedAction) { this.startedAction = startedAction; } @Override - protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { + protected AsyncShardFetch.FetchResult + fetchData(ShardRouting shard, RoutingAllocation allocation) { AsyncShardFetch fetch = - asyncFetchStarted.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction)); + asyncFetchStarted.computeIfAbsent(shard.shardId(), + shardId -> new InternalAsyncFetch<>(logger, "shard_started", shardId, startedAction)); AsyncShardFetch.FetchResult shardState = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); @@ -178,15 +178,16 @@ class InternalReplicaShardAllocator extends ReplicaShardAllocator { private final TransportNodesListShardStoreMetaData storeAction; - InternalReplicaShardAllocator(Settings settings, TransportNodesListShardStoreMetaData storeAction) { - super(settings); + InternalReplicaShardAllocator(TransportNodesListShardStoreMetaData storeAction) { this.storeAction = storeAction; } @Override - protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { + protected AsyncShardFetch.FetchResult + fetchData(ShardRouting shard, RoutingAllocation allocation) { AsyncShardFetch fetch = - asyncFetchStore.computeIfAbsent(shard.shardId(), shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction)); + asyncFetchStore.computeIfAbsent(shard.shardId(), + shardId -> new InternalAsyncFetch<>(logger, "shard_store", shard.shardId(), storeAction)); AsyncShardFetch.FetchResult shardStores = fetch.fetchData(allocation.nodes(), allocation.getIgnoreNodes(shard.shardId())); if (shardStores.hasData()) { diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 46ff2f960e7cf..9bbb5af5bf028 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -69,7 +69,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader) throws IOException { - super(settings); this.nodeEnv = nodeEnv; this.metaStateService = metaStateService; @@ -166,7 +165,8 @@ public void applyClusterState(ClusterChangedEvent event) { relevantIndices = getRelevantIndices(event.state(), event.previousState(), previouslyWrittenIndices); - final Iterable writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); + final Iterable writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, + previousMetaData, event.state().metaData()); // check and write changes in indices for (IndexMetaWriteInfo indexMetaWrite : writeInfo) { try { @@ -303,11 +303,14 @@ private void ensureNoPre019ShardState(NodeEnvironment nodeEnv) throws IOExceptio * * @param previouslyWrittenIndices A list of indices for which the state was already written before * @param potentiallyUnwrittenIndices The list of indices for which state should potentially be written - * @param previousMetaData The last meta data we know of. 
meta data for all indices in previouslyWrittenIndices list is persisted now + * @param previousMetaData The last meta data we know of. meta data for all indices in previouslyWrittenIndices list is + * persisted now * @param newMetaData The new metadata * @return iterable over all indices states that should be written to disk */ - public static Iterable resolveStatesToBeWritten(Set previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { + public static Iterable resolveStatesToBeWritten(Set previouslyWrittenIndices, + Set potentiallyUnwrittenIndices, + MetaData previousMetaData, MetaData newMetaData) { List indicesToWrite = new ArrayList<>(); for (Index index : potentiallyUnwrittenIndices) { IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index); @@ -316,7 +319,8 @@ public static Iterable resolveStatesToBeWri if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) { writeReason = "freshly created"; } else if (previousIndexMetaData.getVersion() != newIndexMetaData.getVersion()) { - writeReason = "version changed from [" + previousIndexMetaData.getVersion() + "] to [" + newIndexMetaData.getVersion() + "]"; + writeReason = "version changed from [" + previousIndexMetaData.getVersion() + "] to [" + + newIndexMetaData.getVersion() + "]"; } if (writeReason != null) { indicesToWrite.add(new GatewayMetaState.IndexMetaWriteInfo(newIndexMetaData, previousIndexMetaData, writeReason)); @@ -325,7 +329,8 @@ public static Iterable resolveStatesToBeWri return indicesToWrite; } - public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) { + public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, + Set previouslyWrittenIndices) { RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); if (newRoutingNode == null) { throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); @@ -334,7 +339,8 @@ public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, Cl for (ShardRouting routing : newRoutingNode) { indices.add(routing.index()); } - // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously + // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if + // we have it written on disk previously for (IndexMetaData indexMetaData : state.metaData()) { boolean isOrWasClosed = indexMetaData.getState().equals(IndexMetaData.State.CLOSE); // if the index is open we might still have to write the state if it just transitioned from closed to open diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index d77031218179c..e19e8367f1627 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -64,7 +64,8 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope); - public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, 
true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+    public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true,
+        false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
     public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);
@@ -100,22 +101,22 @@ public GatewayService(Settings settings, AllocationService allocationService, Cl
         this.clusterService = clusterService;
         this.threadPool = threadPool;
         // allow to control a delay of when indices will get created
-        this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
-        this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
-        this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);
+        this.expectedNodes = EXPECTED_NODES_SETTING.get(settings);
+        this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings);
+        this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(settings);
-        if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
-            recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
+        if (RECOVER_AFTER_TIME_SETTING.exists(settings)) {
+            recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings);
         } else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
             recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
         } else {
             recoverAfterTime = null;
         }
-        this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
-        this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
+        this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(settings);
+        this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings);
         // default the recover after master nodes to the minimum master nodes in the discovery
-        if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
-            recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
+        if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(settings)) {
+            recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings);
         } else {
             // TODO: change me once the minimum_master_nodes is changed too
             recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
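Reviewer note: the constructor hunk above only drops the redundant `this.` qualifier on `settings`; the recover-after fallback semantics are unchanged. For readers new to this code, a minimal self-contained sketch of that decision logic — the helper name and plain parameters are hypothetical stand-ins for the `Setting` lookups, with `java.time.Duration` standing in for `TimeValue`:

-------------------------------------------------
import java.time.Duration;

final class RecoverAfterTimeSketch {
    // Mirrors DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET above.
    static final Duration DEFAULT_IF_EXPECTED_NODES_SET = Duration.ofMinutes(5);

    // configured == null stands for "gateway.recover_after_time is absent";
    // the expected* counts are negative when their settings are absent.
    static Duration resolveRecoverAfterTime(Duration configured, int expectedNodes,
                                            int expectedDataNodes, int expectedMasterNodes) {
        if (configured != null) {
            return configured;                     // an explicit setting always wins
        }
        if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
            return DEFAULT_IF_EXPECTED_NODES_SET;  // wait up to 5m for the expected nodes
        }
        return null;                               // no delay: recover as soon as permitted
    }
}
-------------------------------------------------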
@@ -185,7 +186,8 @@ public void clusterChanged(final ClusterChangedEvent event) {
         } else if (expectedDataNodes != -1 && (nodes.getDataNodes().size() < expectedDataNodes)) { // does not meet the expected...
             enforceRecoverAfterTime = true;
             reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]";
-        } else if (expectedMasterNodes != -1 && (nodes.getMasterNodes().size() < expectedMasterNodes)) { // does not meet the expected...
+        } else if (expectedMasterNodes != -1 && (nodes.getMasterNodes().size() < expectedMasterNodes)) {
+            // does not meet the expected...
             enforceRecoverAfterTime = true;
             reason = "expecting [" + expectedMasterNodes + "] master nodes, but only have [" + nodes.getMasterNodes().size() + "]";
         }
diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
index 5630ceea72945..efdf29e2eb622 100644
--- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
+++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
@@ -35,7 +35,6 @@
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -64,14 +63,14 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
     private final MetaDataIndexUpgradeService metaDataIndexUpgradeService;
     @Inject
-    public LocalAllocateDangledIndices(Settings settings, TransportService transportService, ClusterService clusterService,
+    public LocalAllocateDangledIndices(TransportService transportService, ClusterService clusterService,
                                        AllocationService allocationService, MetaDataIndexUpgradeService metaDataIndexUpgradeService) {
-        super(settings);
         this.transportService = transportService;
         this.clusterService = clusterService;
         this.allocationService = allocationService;
         this.metaDataIndexUpgradeService = metaDataIndexUpgradeService;
-        transportService.registerRequestHandler(ACTION_NAME, AllocateDangledRequest::new, ThreadPool.Names.SAME, new AllocateDangledRequestHandler());
+        transportService.registerRequestHandler(ACTION_NAME, AllocateDangledRequest::new, ThreadPool.Names.SAME,
+            new AllocateDangledRequestHandler());
     }
     public void allocateDangled(Collection<IndexMetaData> indices, final Listener listener) {
@@ -81,7 +80,8 @@ public void allocateDangled(Collection indices, final Listener li
             listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request"));
             return;
         }
-        AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices.toArray(new IndexMetaData[indices.size()]));
+        AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(),
+            indices.toArray(new IndexMetaData[indices.size()]));
         transportService.sendRequest(masterNode, ACTION_NAME, request, new TransportResponseHandler<AllocateDangledResponse>() {
             @Override
             public AllocateDangledResponse read(StreamInput in) throws IOException {
@@ -159,15 +159,18 @@ public ClusterState execute(ClusterState currentState) {
                             minIndexCompatibilityVersion);
                     } catch (Exception ex) {
                         // upgrade failed - adding index as closed
-                        logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. 
This index cannot be " + + "upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); + upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE) + .version(indexMetaData.getVersion() + 1).build(); } metaData.put(upgradedIndexMetaData, false); blocks.addBlocks(upgradedIndexMetaData); if (upgradedIndexMetaData.getState() == IndexMetaData.State.OPEN) { routingTableBuilder.addAsFromDangling(upgradedIndexMetaData); } - sb.append("[").append(upgradedIndexMetaData.getIndex()).append("/").append(upgradedIndexMetaData.getState()).append("]"); + sb.append("[").append(upgradedIndexMetaData.getIndex()).append("/").append(upgradedIndexMetaData.getState()) + .append("]"); } if (!importNeeded) { return currentState; @@ -175,7 +178,8 @@ public ClusterState execute(ClusterState currentState) { logger.info("auto importing dangled indices {} from [{}]", sb.toString(), request.fromNode); RoutingTable routingTable = routingTableBuilder.build(); - ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks).routingTable(routingTable).build(); + ClusterState updatedState = ClusterState.builder(currentState).metaData(metaData).blocks(blocks) + .routingTable(routingTable).build(); // now, reroute return allocationService.reroute( diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 9377247488e7b..24f5fd63662d9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -42,8 +41,7 @@ public class MetaStateService extends AbstractComponent { private final NodeEnvironment nodeEnv; private final NamedXContentRegistry namedXContentRegistry; - public MetaStateService(Settings settings, NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) { - super(settings); + public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXContentRegistry) { this.nodeEnv = nodeEnv; this.namedXContentRegistry = namedXContentRegistry; } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index e63f349b55c3e..79030336acc02 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.gateway.AsyncShardFetch.FetchResult; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; @@ -63,11 +62,6 @@ * copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}. 
 */
public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
-
-    public PrimaryShardAllocator(Settings settings) {
-        super(settings);
-    }
-
    /**
     * Is the allocator responsible for allocating the given {@link ShardRouting}?
     */
@@ -257,9 +251,13 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool
            } else {
                final String finalAllocationId = allocationId;
                if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {
-                    logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
+                    logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be " +
+                        "opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId),
+                        nodeShardState.storeException());
                } else {
-                    logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
+                    logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be " +
+                        "opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId),
+                        nodeShardState.storeException());
                    allocationId = null;
                }
            }
@@ -267,7 +265,8 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool
            if (allocationId != null) {
                assert nodeShardState.storeException() == null ||
                    nodeShardState.storeException() instanceof ShardLockObtainFailedException :
-                    "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a store throwing " + nodeShardState.storeException();
+                    "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a " +
+                        "store throwing " + nodeShardState.storeException();
                numberOfAllocationsFound++;
                if (matchAnyShard || inSyncAllocationIds.contains(nodeShardState.allocationId())) {
                    nodeShardStates.add(nodeShardState);
@@ -280,7 +279,8 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool
            // prefer shards with matching allocation ids
            Comparator<NodeGatewayStartedShards> matchingAllocationsFirst = Comparator.comparing(
                (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId())).reversed();
-            comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR).thenComparing(PRIMARY_FIRST_COMPARATOR);
+            comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR)
+                .thenComparing(PRIMARY_FIRST_COMPARATOR);
        } else {
            comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR);
        }
@@ -288,7 +288,8 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool
        nodeShardStates.sort(comparator);
        if (logger.isTraceEnabled()) {
-            logger.trace("{} candidates for allocation: {}", shard, nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")));
+            logger.trace("{} candidates for allocation: {}", shard, nodeShardStates.stream().map(s -> s.getNode().getName())
+                .collect(Collectors.joining(", ")));
        }
        return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound);
    }
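Reviewer note: the comparator hunk above is behavior-preserving — only line breaks changed. For readers new to this ranking code, a self-contained sketch of the same idiom, with a simplified `NodeState` standing in for `NodeGatewayStartedShards` (illustrative only):

-------------------------------------------------
import java.util.Comparator;
import java.util.List;
import java.util.Set;

final class CandidateRankingSketch {

    // Simplified stand-in for NodeGatewayStartedShards.
    static final class NodeState {
        final String allocationId;
        final boolean storeException;
        final boolean primary;

        NodeState(String allocationId, boolean storeException, boolean primary) {
            this.allocationId = allocationId;
            this.storeException = storeException;
            this.primary = primary;
        }
    }

    // Copies with in-sync allocation ids rank first, then copies whose store opened
    // cleanly, then former primaries. reversed() flips Boolean's natural
    // false-before-true order into "true first".
    static void rank(List<NodeState> states, Set<String> inSyncIds) {
        Comparator<NodeState> matchingFirst =
            Comparator.comparing((NodeState s) -> inSyncIds.contains(s.allocationId)).reversed();
        Comparator<NodeState> noStoreExceptionFirst =
            Comparator.comparing((NodeState s) -> s.storeException);   // false sorts first
        Comparator<NodeState> primaryFirst =
            Comparator.comparing((NodeState s) -> s.primary).reversed();
        states.sort(matchingFirst.thenComparing(noStoreExceptionFirst).thenComparing(primaryFirst));
    }
}
-------------------------------------------------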
diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
index b91637e072fa7..10bd6115b4c74 100644
--- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
+++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
@@ -39,7 +39,6 @@
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.store.StoreFileMetaData;
@@ -55,11 +54,6 @@
 import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
 public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
-
-    public ReplicaShardAllocator(Settings settings) {
-        super(settings);
-    }
-
     /**
      * Process existing recoveries of replicas and see if we need to cancel them if we find a better
      * match. Today, a better match is one that has full sync id match compared to not having one in
@@ -121,10 +115,13 @@ public void processExistingRecoveries(RoutingAllocation allocation) {
                         logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", currentNode, nodeWithHighestMatch);
                         UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
-                            "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node ["+ nodeWithHighestMatch + "]",
-                            null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, UnassignedInfo.AllocationStatus.NO_ATTEMPT);
+                            "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node ["+
+                                nodeWithHighestMatch + "]",
+                            null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false,
+                            UnassignedInfo.AllocationStatus.NO_ATTEMPT);
                         // don't cancel shard in the loop as it will cause a ConcurrentModificationException
-                        shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, metaData.getIndexSafe(shard.index()), allocation.changes()));
+                        shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo,
+                            metaData.getIndexSafe(shard.index()), allocation.changes()));
                     }
                 }
             }
@@ -298,7 +295,8 @@ private List augmentExplanationsWithStoreInfo(Map
-    private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, AsyncShardFetch.FetchResult data) {
+    private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation,
+                                                                              AsyncShardFetch.FetchResult data) {
         assert shard.currentNodeId() != null;
         DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId());
         if (primaryNode == null) {
diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
index 0a01c7cdabbe3..f4d1bf1b57352 100644
--- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
@@ -37,7 +37,6 @@
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -55,9 +54,9 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction { public static final String ACTION_NAME = "internal:gateway/local/started_shards"; + private final Settings settings; private final NodeEnvironment nodeEnv; private final IndicesService indicesService; private final NamedXContentRegistry namedXContentRegistry; @@ -76,8 +77,9 @@ public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool thre TransportService transportService, ActionFilters actionFilters, NodeEnvironment env, IndicesService indicesService, NamedXContentRegistry namedXContentRegistry) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + super(ACTION_NAME, threadPool, clusterService, transportService, actionFilters, Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED, NodeGatewayStartedShards.class); + this.settings = settings; this.nodeEnv = env; this.indicesService = indicesService; this.namedXContentRegistry = namedXContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 622020d6451db..2327db1d78a20 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -63,7 +63,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { - + protected final Settings settings; public final HttpHandlingSettings handlingSettings; protected final NetworkService networkService; protected final BigArrays bigArrays; @@ -84,6 +84,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo protected AbstractHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { super(settings); + this.settings = settings; this.networkService = networkService; this.bigArrays = bigArrays; this.threadPool = threadPool; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index e4fd949d88e67..c34a5228b7f6b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -188,8 +188,7 @@ public IndexService( this.indexStore = indexStore; indexFieldData.setListener(new FieldDataCacheListener(this)); this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); - this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool, indexFieldData, - bitsetFilterCache.createListener(threadPool)); + this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool)); this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index 
98716e9545df6..277cdcaba26c8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -46,9 +45,8 @@ public final class IndexWarmer extends AbstractComponent { private final List listeners; - IndexWarmer(Settings settings, ThreadPool threadPool, IndexFieldDataService indexFieldDataService, + IndexWarmer(ThreadPool threadPool, IndexFieldDataService indexFieldDataService, Listener... listeners) { - super(settings); ArrayList list = new ArrayList<>(); final Executor executor = threadPool.executor(ThreadPool.Names.WARMER); list.add(new FieldDataWarmer(executor, indexFieldDataService)); diff --git a/server/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java b/server/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java index c955cbef75d99..9b5897513a871 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.cache.request; import org.apache.lucene.util.Accountable; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.metrics.CounterMetric; /** @@ -44,11 +45,11 @@ public void onMiss() { missCount.inc(); } - public void onCached(Accountable key, Accountable value) { + public void onCached(Accountable key, BytesReference value) { totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed()); } - public void onRemoval(Accountable key, Accountable value, boolean evicted) { + public void onRemoval(Accountable key, BytesReference value, boolean evicted) { if (evicted) { evictionsMetric.inc(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 3cbfbfaff8343..c58e13d65deb6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2563,12 +2563,19 @@ final long lastRefreshedCheckpoint() { return lastRefreshedCheckpointListener.refreshedCheckpoint.get(); } + + private final Object refreshIfNeededMutex = new Object(); + /** * Refresh this engine **internally** iff the requesting seq_no is greater than the last refreshed checkpoint. 
 */
    protected final void refreshIfNeeded(String source, long requestingSeqNo) {
        if (lastRefreshedCheckpoint() < requestingSeqNo) {
-            refresh(source, SearcherScope.INTERNAL);
+            synchronized (refreshIfNeededMutex) {
+                if (lastRefreshedCheckpoint() < requestingSeqNo) {
+                    refresh(source, SearcherScope.INTERNAL);
+                }
+            }
        }
    }
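Reviewer note: this hunk is the classic check / lock / re-check idiom — the cheap unsynchronized test keeps the common already-refreshed path lock-free, and repeating the test under the mutex ensures concurrent callers trigger at most one refresh instead of piling up redundant ones. A self-contained sketch of the same shape (names hypothetical; a plain `AtomicLong` stands in for the refreshed-checkpoint listener):

-------------------------------------------------
import java.util.concurrent.atomic.AtomicLong;

final class RefreshIfNeededSketch {
    private final AtomicLong refreshedCheckpoint = new AtomicLong(-1);
    private final Object refreshMutex = new Object();

    // Check / lock / re-check: most callers fail the first test and never take the
    // lock; the second test means only one of several waiting callers refreshes.
    void refreshIfNeeded(long requestingSeqNo) {
        if (refreshedCheckpoint.get() < requestingSeqNo) {
            synchronized (refreshMutex) {
                if (refreshedCheckpoint.get() < requestingSeqNo) {
                    refresh(requestingSeqNo);
                }
            }
        }
    }

    // Stand-in for refresh(source, SearcherScope.INTERNAL), which advances the
    // refreshed checkpoint as a side effect.
    private void refresh(long upToSeqNo) {
        refreshedCheckpoint.accumulateAndGet(upToSeqNo, Math::max);
    }
}
-------------------------------------------------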
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
index a44f8a0f8357b..bc20132f13d65 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
@@ -95,14 +95,15 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
             }
         };
         this.mapperService = mapperService;
-        this.searchBatchSize = searchBatchSize;
+        final long requestingSize = (toSeqNo - fromSeqNo) == Long.MAX_VALUE ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L);
+        this.searchBatchSize = requestingSize < searchBatchSize ? Math.toIntExact(requestingSize) : searchBatchSize;
         this.fromSeqNo = fromSeqNo;
         this.toSeqNo = toSeqNo;
         this.lastSeenSeqNo = fromSeqNo - 1;
         this.requiredFullRange = requiredFullRange;
         this.indexSearcher = new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()));
         this.indexSearcher.setQueryCache(null);
-        this.parallelArray = new ParallelArray(searchBatchSize);
+        this.parallelArray = new ParallelArray(this.searchBatchSize);
         final TopDocs topDocs = searchOperations(null);
         this.totalHits = Math.toIntExact(topDocs.totalHits.value);
         this.scoreDocs = topDocs.scoreDocs;
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
index 167ec9ce26b33..c7e11e85f7da5 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
@@ -54,13 +54,13 @@ final class TranslogLeafReader extends LeafReader {
     private final Translog.Index operation;
     private static final FieldInfo FAKE_SOURCE_FIELD = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false,
         IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false,
         IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false,
         IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private final Version indexVersionCreated;
     TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
index 0635cdd066139..f7c294111dd7f 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
@@ -117,7 +117,8 @@ public static class Fields {
     public static class TypeParser implements Mapper.TypeParser {
         @Override
-        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
+                throws MapperParsingException {
             CompletionFieldMapper.Builder builder = new CompletionFieldMapper.Builder(name);
             NamedAnalyzer indexAnalyzer = null;
             NamedAnalyzer searchAnalyzer = null;
@@ -368,7 +369,8 @@ public Builder(String name) {
          */
         public Builder maxInputLength(int maxInputLength) {
             if (maxInputLength <= 0) {
-                throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]");
+                throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() +
+                    " must be > 0 but was [" + maxInputLength + "]");
             }
             this.maxInputLength = maxInputLength;
             return this;
@@ -400,13 +402,15 @@ public CompletionFieldMapper build(BuilderContext context) {
             completionFieldType.setContextMappings(contextMappings);
             completionFieldType.setPreservePositionIncrements(preservePositionIncrements);
             completionFieldType.setPreserveSep(preserveSeparators);
-            return new CompletionFieldMapper(name, this.fieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, maxInputLength);
+            return new CompletionFieldMapper(name, this.fieldType, context.indexSettings(),
+                multiFieldsBuilder.build(this, context), copyTo, maxInputLength);
         }
     }
     private int maxInputLength;
-    public CompletionFieldMapper(String simpleName, MappedFieldType fieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, int maxInputLength) {
+    public CompletionFieldMapper(String simpleName, MappedFieldType fieldType, Settings indexSettings,
+                                 MultiFields multiFields, CopyTo copyTo, int maxInputLength) {
         super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo);
         this.maxInputLength = maxInputLength;
     }
@@ -506,7 +510,8 @@ private boolean isExternalValueOfClass(ParseContext context, Class clazz) {
      * "STRING" - interpreted as the field value (input)
      * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT }
      */
-    private void parse(ParseContext parseContext, Token token, XContentParser parser, Map<String, CompletionInputMetaData> inputMap) throws IOException {
+    private void parse(ParseContext parseContext, Token token,
+                       XContentParser parser, Map<String, CompletionInputMetaData> inputMap) throws IOException {
         String currentFieldName = null;
         if (token == Token.VALUE_STRING) {
             inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.emptyMap(), 1));
@@ -518,7 +523,8 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser
             if (token == Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
                 if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) {
-                    throw new IllegalArgumentException("unknown field name [" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
+                    throw new IllegalArgumentException("unknown field name [" + currentFieldName +
+                        "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
                 }
             } else if (currentFieldName != null) {
                 if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
@@ -529,7 +535,8 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser
                     if (token == Token.VALUE_STRING) {
                         inputs.add(parser.text());
                     } else {
-                        throw new IllegalArgumentException("input array must have string values, but was [" + token.name() + "]");
+                        throw new IllegalArgumentException("input array must have string values, but was [" +
+                            token.name() + "]");
                     }
                 }
             } else {
@@ -552,8 +559,10 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser
                 } else {
must be a number or string, but was [" + token.name() + "]"); } - if (weightValue.longValue() < 0 || weightValue.longValue() > Integer.MAX_VALUE) { // always parse a long to make sure we don't get overflow - throw new IllegalArgumentException("weight must be in the interval [0..2147483647], but was [" + weightValue.longValue() + "]"); + // always parse a long to make sure we don't get overflow + if (weightValue.longValue() < 0 || weightValue.longValue() > Integer.MAX_VALUE) { + throw new IllegalArgumentException("weight must be in the interval [0..2147483647], but was [" + + weightValue.longValue() + "]"); } weight = weightValue.intValue(); } else if (Fields.CONTENT_FIELD_NAME_CONTEXTS.equals(currentFieldName)) { @@ -587,7 +596,8 @@ private void parse(ParseContext parseContext, Token token, XContentParser parser } } } else { - throw new ParsingException(parser.getTokenLocation(), "failed to parse [" + parser.currentName() + "]: expected text or object, but got " + token.name()); + throw new ParsingException(parser.getTokenLocation(), "failed to parse [" + parser.currentName() + + "]: expected text or object, but got " + token.name()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index fa1abe4293948..cb962609fb436 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -72,8 +72,9 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { this.rootObjectMapper = builder.build(builderContext); final String type = rootObjectMapper.name(); - DocumentMapper existingMapper = mapperService.documentMapper(type); - for (Map.Entry entry : mapperService.mapperRegistry.getMetadataMapperParsers().entrySet()) { + final DocumentMapper existingMapper = mapperService.documentMapper(type); + final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(); + for (Map.Entry entry : metadataMapperParsers.entrySet()) { final String name = entry.getKey(); final MetadataFieldMapper existingMetadataMapper = existingMapper == null ? null diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 8d785cdfea57d..82baef2780943 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -107,7 +107,8 @@ private void validateType(SourceToParse source) { } if (Objects.equals(source.type(), docMapper.type()) == false) { - throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + docMapper.type() + "]"); + throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + + docMapper.type() + "]"); } } @@ -136,7 +137,8 @@ private static boolean isEmptyDoc(Mapping mapping, XContentParser parser) throws // empty doc, we can handle it... 
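The weight handling above deliberately parses the value as a long before narrowing, so inputs past Integer.MAX_VALUE fail the range check instead of silently wrapping. A self-contained sketch of the same validation (hypothetical helper, not the mapper's code):

-------------------------------------------------
// Widen to long first: Integer.parseInt("2147483648") would throw a confusing
// NumberFormatException, and a wrapping parse would corrupt the weight.
public final class WeightParser {
    static int parseWeight(String raw) {
        final long weight = Long.parseLong(raw); // may throw NumberFormatException
        if (weight < 0 || weight > Integer.MAX_VALUE) {
            throw new IllegalArgumentException(
                "weight must be in the interval [0..2147483647], but was [" + weight + "]");
        }
        return (int) weight;
    }

    public static void main(String[] args) {
        System.out.println(parseWeight("42"));   // ok
        try {
            parseWeight("2147483648");           // one past Integer.MAX_VALUE
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
-------------------------------------------------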
return true; } else if (token != XContentParser.Token.FIELD_NAME) { - throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); + throw new MapperParsingException("Malformed content, after first object, either the type field" + + " or the actual properties should exist"); } } return false; @@ -355,7 +357,8 @@ static void parseObjectOrNested(ParseContext context, ObjectMapper mapper) throw String currentFieldName = parser.currentName(); if (token.isValue()) { - throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + + "] as object, but found a concrete value"); } ObjectMapper.Nested nested = mapper.nested(); @@ -379,7 +382,8 @@ static void parseObjectOrNested(ParseContext context, ObjectMapper mapper) throw } } - private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token) throws IOException { + private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, + String currentFieldName, XContentParser.Token token) throws IOException { while (token != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.START_OBJECT) { parseObject(context, mapper, currentFieldName); @@ -388,12 +392,14 @@ private static void innerParseObject(ParseContext context, ObjectMapper mapper, } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); if (MapperService.isMetadataField(context.path().pathAsText(currentFieldName))) { - throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters."); + throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside" + + " a document. 
Use the index API request parameters."); } } else if (token == XContentParser.Token.VALUE_NULL) { parseNullValue(context, mapper, currentFieldName); } else if (token == null) { - throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + + "] as object, but got EOF, has a concrete value been provided to it?"); } else if (token.isValue()) { parseValue(context, mapper, currentFieldName, token); } @@ -558,7 +564,8 @@ private static void parseArray(ParseContext context, ObjectMapper parentMapper, } } - private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { + private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, + String lastFieldName, String arrayFieldName) throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { @@ -571,16 +578,19 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp } else if (token == XContentParser.Token.VALUE_NULL) { parseNullValue(context, mapper, lastFieldName); } else if (token == null) { - throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); + throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); } else { parseValue(context, mapper, lastFieldName, token); } } } - private static void parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + private static void parseValue(final ParseContext context, ObjectMapper parentMapper, + String currentFieldName, XContentParser.Token token) throws IOException { if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); + throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with" + + " no field associated with it, current value [" + context.parser().textOrNull() + "]"); } final String[] paths = splitAndValidatePath(currentFieldName); @@ -609,7 +619,8 @@ private static void parseNullValue(ParseContext context, ObjectMapper parentMapp } } - private static Mapper.Builder createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) { + private static Mapper.Builder createBuilderFromFieldType(final ParseContext context, + MappedFieldType fieldType, String currentFieldName) { Mapper.Builder builder = null; if (fieldType instanceof TextFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "text", XContentFieldType.STRING); @@ -671,7 +682,9 @@ private static Mapper.Builder createBuilderFromFieldType(final ParseContext return builder; } - private static Mapper.Builder createBuilderFromDynamicValue(final ParseContext context, XContentParser.Token 
token, String currentFieldName) throws IOException { + private static Mapper.Builder createBuilderFromDynamicValue(final ParseContext context, + XContentParser.Token token, + String currentFieldName) throws IOException { if (token == XContentParser.Token.VALUE_STRING) { String text = context.parser().text(); @@ -771,10 +784,12 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont } } // TODO how do we identify dynamically that its a binary value? - throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); + throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + + currentFieldName + "]"); } - private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, + String currentFieldName, XContentParser.Token token) throws IOException { ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); if (dynamic == ObjectMapper.Dynamic.STRICT) { throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); @@ -885,8 +900,8 @@ private static Tuple getDynamicParentMapper(ParseContext context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { - throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) - + "]) through `copy_to` or dots in field names"); + throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); } context.addDynamicMapper(mapper); break; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 939736a0a893d..aafe9f6ba03de 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -228,7 +228,8 @@ public static DynamicTemplate parse(String name, Map conf, try { matchType.matches(regex, ""); } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType + "] is invalid. Cannot create dynamic template [" + name + "].", e); + throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType + + "] is invalid. 
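The DynamicTemplate hunks above only re-wrap the `{name}`/`{dynamic_type}` substitution, but the mechanism is worth seeing in isolation. This simplified model skips the recursive map/list walk the real processMap/processList perform (names are hypothetical):

-------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public final class TemplatePlaceholders {
    // Both spellings of the dynamic-type placeholder are honored, as in the hunk.
    static String substitute(String value, String name, String dynamicType) {
        return value.replace("{name}", name)
                    .replace("{dynamic_type}", dynamicType)
                    .replace("{dynamicType}", dynamicType); // legacy camel-case spelling
    }

    public static void main(String[] args) {
        Map<String, String> raw = new HashMap<>();
        raw.put("analyzer", "{name}_analyzer");
        raw.put("type", "{dynamic_type}");
        raw.forEach((k, v) ->
            System.out.println(k + " -> " + substitute(v, "title", "string")));
        // prints analyzer -> title_analyzer and type -> string (order may vary)
    }
}
-------------------------------------------------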
Cannot create dynamic template [" + name + "].", e); } } } @@ -320,14 +321,16 @@ public Map mappingForName(String name, String dynamicType) { private Map processMap(Map map, String name, String dynamicType) { Map processedMap = new HashMap<>(); for (Map.Entry entry : map.entrySet()) { - String key = entry.getKey().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType); + String key = entry.getKey().replace("{name}", name).replace("{dynamic_type}", dynamicType) + .replace("{dynamicType}", dynamicType); Object value = entry.getValue(); if (value instanceof Map) { value = processMap((Map) value, name, dynamicType); } else if (value instanceof List) { value = processList((List) value, name, dynamicType); } else if (value instanceof String) { - value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType); + value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType) + .replace("{dynamicType}", dynamicType); } processedMap.put(key, value); } @@ -342,7 +345,9 @@ private List processList(List list, String name, String dynamicType) { } else if (value instanceof List) { value = processList((List) value, name, dynamicType); } else if (value instanceof String) { - value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType); + value = value.toString().replace("{name}", name) + .replace("{dynamic_type}", dynamicType) + .replace("{dynamicType}", dynamicType); } processedList.add(value); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 2a12bc65d144b..72dbe28d12d09 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -91,7 +91,8 @@ public T index(boolean index) { // can happen when an existing type on the same index has disabled indexing // since we inherit the default field type from the first mapper that is // created on an index - throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index"); + throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types" + + " of the same index"); } fieldType.setIndexOptions(options); } @@ -227,7 +228,8 @@ protected void setupFieldType(BuilderContext context) { protected MultiFields multiFields; protected CopyTo copyTo; - protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName); assert indexSettings != null; this.indexCreatedVersion = Version.indexCreated(indexSettings); @@ -325,7 +327,8 @@ protected void doMerge(Mapper mergeWith) { if (mergeWith instanceof FieldMapper) { mergedType = ((FieldMapper) mergeWith).contentType(); } - throw new IllegalArgumentException("mapper [" + fieldType().name() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); + throw new IllegalArgumentException("mapper [" + fieldType().name() + "] of different type, current_type [" + contentType() + + "], merged_type [" + mergedType + 
"]"); } FieldMapper fieldMergeWith = (FieldMapper) mergeWith; multiFields = multiFields.merge(fieldMergeWith.multiFields); @@ -414,12 +417,13 @@ protected final void doXContentAnalyzers(XContentBuilder builder, boolean includ } } else { boolean hasDefaultIndexAnalyzer = fieldType().indexAnalyzer().name().equals("default"); - boolean hasDifferentSearchAnalyzer = fieldType().searchAnalyzer().name().equals(fieldType().indexAnalyzer().name()) == false; - boolean hasDifferentSearchQuoteAnalyzer = fieldType().searchAnalyzer().name().equals(fieldType().searchQuoteAnalyzer().name()) == false; + final String searchAnalyzerName = fieldType().searchAnalyzer().name(); + boolean hasDifferentSearchAnalyzer = searchAnalyzerName.equals(fieldType().indexAnalyzer().name()) == false; + boolean hasDifferentSearchQuoteAnalyzer = searchAnalyzerName.equals(fieldType().searchQuoteAnalyzer().name()) == false; if (includeDefaults || hasDefaultIndexAnalyzer == false || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) { builder.field("analyzer", fieldType().indexAnalyzer().name()); if (includeDefaults || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) { - builder.field("search_analyzer", fieldType().searchAnalyzer().name()); + builder.field("search_analyzer", searchAnalyzerName); if (includeDefaults || hasDifferentSearchQuoteAnalyzer) { builder.field("search_quote_analyzer", fieldType().searchQuoteAnalyzer().name()); } @@ -521,7 +525,8 @@ private MultiFields(ImmutableOpenMap mappers) { } public void parse(FieldMapper mainField, ParseContext context) throws IOException { - // TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part of the mappings + // TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part + // of the mappings if (mappers.isEmpty()) { return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index fb2dbea95e895..79b2b0c4c67d1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -104,7 +104,8 @@ public FieldNamesFieldMapper build(BuilderContext context) { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 275ff75f473f2..72f36278783d5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -211,7 +211,8 @@ public Mapper.Builder parse(String name, Map node, ParserContext builder.fieldType().setTreeLevels(Integer.parseInt(fieldNode.toString())); iterator.remove(); } else if (Names.TREE_PRESISION.equals(fieldName)) { - 
builder.fieldType().setPrecisionInMeters(DistanceUnit.parse(fieldNode.toString(), DistanceUnit.DEFAULT, DistanceUnit.DEFAULT)); + builder.fieldType().setPrecisionInMeters(DistanceUnit.parse(fieldNode.toString(), + DistanceUnit.DEFAULT, DistanceUnit.DEFAULT)); iterator.remove(); } else if (Names.DISTANCE_ERROR_PCT.equals(fieldName)) { builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString())); @@ -229,7 +230,8 @@ public Mapper.Builder parse(String name, Map node, ParserContext builder.coerce(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + Names.COERCE)); iterator.remove(); } else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) { - builder.ignoreZValue(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName())); + builder.ignoreZValue(XContentMapValues.nodeBooleanValue(fieldNode, + name + "." + GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName())); iterator.remove(); } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) { pointsOnly = XContentMapValues.nodeBooleanValue(fieldNode, name + "." + Names.STRATEGY_POINTS_ONLY); @@ -314,11 +316,14 @@ public void freeze() { // must be by the time freeze is called. SpatialPrefixTree prefixTree; if ("geohash".equals(tree)) { - prefixTree = new GeohashPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.GEOHASH_LEVELS, true)); + prefixTree = new GeohashPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, + getLevels(treeLevels, precisionInMeters, Defaults.GEOHASH_LEVELS, true)); } else if ("legacyquadtree".equals(tree)) { - prefixTree = new QuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false)); + prefixTree = new QuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, + getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false)); } else if ("quadtree".equals(tree)) { - prefixTree = new PackedQuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false)); + prefixTree = new PackedQuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, + getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false)); } else { throw new IllegalArgumentException("Unknown prefix tree type [" + tree + "]"); } @@ -503,8 +508,9 @@ public void parse(ParseContext context) throws IOException { } return; } else if (shape instanceof Point == false) { - throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " + - ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); + throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " + + ((shape instanceof JtsGeometry) ? 
((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + + " was found"); } } indexShape(context, shape); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index a4a65fa2a8d45..e0a2cd7ee428b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -84,7 +84,8 @@ public static class Defaults { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { throw new MapperParsingException(NAME + " is not configurable"); } @@ -157,7 +158,8 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { @Override public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { - final IndexFieldData fieldData = fieldDataBuilder.build(indexSettings, fieldType, cache, breakerService, mapperService); + final IndexFieldData fieldData = fieldDataBuilder.build(indexSettings, fieldType, cache, + breakerService, mapperService); return new IndexFieldData() { @Override @@ -182,7 +184,8 @@ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { @Override public SortField sortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { - XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, + sortMode, nested); return new SortField(getFieldName(), source, reverse); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 7e8ac563cacc1..276a8e7583c0e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -79,7 +79,8 @@ public IndexFieldMapper build(BuilderContext context) { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { throw new MapperParsingException(NAME + " is not configurable"); } @@ -131,7 +132,8 @@ public Query termQuery(Object value, @Nullable QueryShardContext context) { if (isSameIndex(value, context.getFullyQualifiedIndex().getName())) { return Queries.newMatchAllQuery(); } else { - return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName() + " vs. " + value); + return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName() + + " vs. 
" + value); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 45bb5ed395dc0..eaafeefa7e0dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -138,9 +138,11 @@ public int hashCode() { /** Checks this type is the same type as other. Adds a conflict if they are different. */ private void checkTypeName(MappedFieldType other) { if (typeName().equals(other.typeName()) == false) { - throw new IllegalArgumentException("mapper [" + name + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]"); + throw new IllegalArgumentException("mapper [" + name + "] cannot be changed from type [" + typeName() + + "] to [" + other.typeName() + "]"); } else if (getClass() != other.getClass()) { - throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName()); + throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + + other.getClass().getSimpleName()); } } @@ -338,31 +340,38 @@ public Query rangeQuery( } public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]"); + throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { - throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]"); + throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } public Query wildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { - throw new QueryShardException(context, "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]"); + throw new QueryShardException(context, "Can only use wildcard queries on keyword and text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } - public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { - throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]"); + public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context) { + throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } public abstract Query existsQuery(QueryShardContext context); public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { - throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is 
of type [" + typeName() + "]"); + throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { - throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]"); + throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + + "] which is of type [" + typeName() + "]"); } /** @@ -400,7 +409,7 @@ protected final void failIfNoDocValues() { } protected final void failIfNotIndexed() { - if (indexOptions() == IndexOptions.NONE && pointDimensionCount() == 0) { + if (indexOptions() == IndexOptions.NONE && pointDataDimensionCount() == 0) { // we throw an IAE rather than an ISE so that it translates to a 4xx code rather than 5xx code on the http layer throw new IllegalArgumentException("Cannot search on field [" + name() + "] since it is not indexed."); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 7b9205881df62..828b5b956f5de 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -195,7 +195,8 @@ public static Map parseMapping(NamedXContentRegistry xContentReg * Update mapping by only merging the metadata that is different between received and stored entries */ public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { - assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex(); + assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + + " but was " + newIndexMetaData.getIndex(); // go over and add the relevant mappings (or update them) Set existingMappers = new HashSet<>(); if (mapper != null) { @@ -227,15 +228,16 @@ public boolean updateMapping(final IndexMetaData currentIndexMetaData, final Ind } else if (logger.isTraceEnabled()) { logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string()); } else { - logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op, mappingType); + logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", + index(), op, mappingType); } // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the // merge version of it, which it does when refreshing the mappings), and warn log it. 
if (documentMapper(mappingType).mappingSource().equals(incomingMappingSource) == false) { - logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(), mappingType, - incomingMappingSource, documentMapper(mappingType).mappingSource()); + logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", + index(), mappingType, incomingMappingSource, documentMapper(mappingType).mappingSource()); requireRefresh = true; } @@ -287,7 +289,8 @@ public void merge(Map> mappings, MergeReason reason) Map mappingSourcesCompressed = new LinkedHashMap<>(mappings.size()); for (Map.Entry> entry : mappings.entrySet()) { try { - mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(entry.getValue())))); + mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(Strings.toString( + XContentFactory.jsonBuilder().map(entry.getValue())))); } catch (Exception e) { throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); } @@ -304,7 +307,8 @@ public DocumentMapper merge(String type, CompressedXContent mappingSource, Merge return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type); } - private synchronized Map internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) { + private synchronized Map internalMerge(IndexMetaData indexMetaData, + MergeReason reason, boolean onlyUpdateIfNeeded) { Map map = new LinkedHashMap<>(); for (ObjectCursor cursor : indexMetaData.getMappings().values()) { MappingMetaData mappingMetaData = cursor.value; @@ -379,10 +383,12 @@ static void validateTypeName(String type) { throw new InvalidTypeNameException("mapping type name is empty"); } if (type.length() > 255) { - throw new InvalidTypeNameException("mapping type name [" + type + "] is too long; limit is length 255 but was [" + type.length() + "]"); + throw new InvalidTypeNameException("mapping type name [" + type + "] is too long; limit is length 255 but was [" + + type.length() + "]"); } if (type.charAt(0) == '_' && SINGLE_MAPPING_NAME.equals(type) == false) { - throw new InvalidTypeNameException("mapping type name [" + type + "] can't start with '_' unless it is called [" + SINGLE_MAPPING_NAME + "]"); + throw new InvalidTypeNameException("mapping type name [" + type + "] can't start with '_' unless it is called [" + + SINGLE_MAPPING_NAME + "]"); } if (type.contains("#")) { throw new InvalidTypeNameException("mapping type name [" + type + "] should not include '#' in it"); @@ -395,8 +401,9 @@ static void validateTypeName(String type) { } } - private synchronized Map internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource, - DocumentMapper mapper, MergeReason reason) { + private synchronized Map internalMerge(@Nullable DocumentMapper defaultMapper, + @Nullable String defaultMappingSource, DocumentMapper mapper, + MergeReason reason) { boolean hasNested = this.hasNested; Map fullPathObjectMappers = this.fullPathObjectMappers; FieldTypeLookup fieldTypes = this.fieldTypes; @@ -418,7 +425,8 @@ private synchronized Map internalMerge(@Nullable Documen { if (mapper != null && this.mapper != null && Objects.equals(this.mapper.type(), mapper.type()) == false) { throw new IllegalArgumentException( - "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + Arrays.asList(this.mapper.type(), 
mapper.type())); + "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + + Arrays.asList(this.mapper.type(), mapper.type())); } } @@ -475,7 +483,8 @@ private synchronized Map internalMerge(@Nullable Documen // deserializing cluster state that was sent by the master node, // this check will be skipped. // Also, don't take metadata mappers into account for the field limit check - checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length + fieldAliasMappers.size() ); + checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length + + fieldAliasMappers.size() ); } results.put(newMapper.type(), newMapper); @@ -562,14 +571,16 @@ private void checkNestedFieldsLimit(Map fullPathObjectMapp } } if (actualNestedFields > allowedNestedFields) { - throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName() + "] has been exceeded"); + throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName() + + "] has been exceeded"); } } private void checkTotalFieldsLimit(long totalMappers) { long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING); if (allowedTotalFields < totalMappers) { - throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName() + "] has been exceeded"); + throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName() + + "] has been exceeded"); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 662f33572d927..2cc55fccafacd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -48,7 +48,8 @@ public final class Mapping implements ToXContentFragment { final Map, MetadataFieldMapper> metadataMappersMap; final Map meta; - public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map meta) { + public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, + MetadataFieldMapper[] metadataMappers, Map meta) { this.indexCreated = indexCreated; this.metadataMappers = metadataMappers; Map, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index c5c9099224138..f038b1f735aea 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -33,7 +33,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { public interface TypeParser extends Mapper.TypeParser { @Override - MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException; + MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException; /** * Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed. 
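validateTypeName above enforces several independent rules; a standalone rendition makes them easier to scan. This sketch throws plain IllegalArgumentException rather than InvalidTypeNameException and assumes the single mapping name is "_doc":

-------------------------------------------------
public final class TypeNameValidator {
    static final String SINGLE_MAPPING_NAME = "_doc";

    static void validateTypeName(String type) {
        if (type.isEmpty()) {
            throw new IllegalArgumentException("mapping type name is empty");
        }
        if (type.length() > 255) {
            throw new IllegalArgumentException("mapping type name [" + type
                + "] is too long; limit is length 255 but was [" + type.length() + "]");
        }
        if (type.charAt(0) == '_' && !SINGLE_MAPPING_NAME.equals(type)) {
            throw new IllegalArgumentException("mapping type name [" + type
                + "] can't start with '_' unless it is called [" + SINGLE_MAPPING_NAME + "]");
        }
        if (type.contains("#")) {
            throw new IllegalArgumentException("mapping type name [" + type + "] should not include '#' in it");
        }
    }
}
-------------------------------------------------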
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 3e7bd121d0b4b..fe60e1b62d9d6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -173,7 +173,8 @@ public Mapper.Builder parse(String name, Map node, ParserContext return builder; } - protected static boolean parseObjectOrDocumentTypeProperties(String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) { + protected static boolean parseObjectOrDocumentTypeProperties(String fieldName, Object fieldNode, ParserContext parserContext, + ObjectMapper.Builder builder) { if (fieldName.equals("dynamic")) { String value = fieldNode.toString(); if (value.equalsIgnoreCase("strict")) { @@ -215,7 +216,8 @@ protected static void parseNested(String name, Map node, ObjectM } else if (type.equals(NESTED_CONTENT_TYPE)) { nested = true; } else { - throw new MapperParsingException("Trying to parse an object but has a different type [" + type + "] for [" + name + "]"); + throw new MapperParsingException("Trying to parse an object but has a different type [" + type + + "] for [" + name + "]"); } } fieldNode = node.get("include_in_parent"); @@ -433,7 +435,8 @@ public boolean parentObjectMapperAreNested(MapperService mapperService) { @Override public ObjectMapper merge(Mapper mergeWith) { if (!(mergeWith instanceof ObjectMapper)) { - throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); + throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper merged = clone(); @@ -522,7 +525,8 @@ public void toXContent(XContentBuilder builder, Params params, ToXContent custom if (nested.isIncludeInRoot()) { builder.field("include_in_root", true); } - } else if (mappers.isEmpty() && custom == null) { // only write the object content type if there are no properties, otherwise, it is automatically detected + } else if (mappers.isEmpty() && custom == null) { + // only write the object content type if there are no properties, otherwise, it is automatically detected builder.field("type", CONTENT_TYPE); } if (dynamic != null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 009caf2b8e814..ed5135785cde1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -55,7 +55,8 @@ public static class Defaults { public static class Builder extends ObjectMapper.Builder { protected Explicit dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false); - protected Explicit dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); + protected Explicit dynamicDateTimeFormatters = + new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); protected Explicit dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false); protected Explicit numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index 6a171b767c8b3..7c51ad1cd9555 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -82,7 +82,8 @@ public RoutingFieldMapper build(BuilderContext context) { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 3c7c0dd290a24..02425858d2466 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -107,7 +107,8 @@ public SourceFieldMapper build(BuilderContext context) { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 7851bb1655ad0..d0419a0e44b24 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -268,7 +268,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - return new TokenStreamComponents(components.getTokenizer(), new FixedShingleFilter(components.getTokenStream(), 2)); + return new TokenStreamComponents(components.getSource(), new FixedShingleFilter(components.getTokenStream(), 2)); } } @@ -293,7 +293,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); - return new TokenStreamComponents(components.getTokenizer(), filter); + return new TokenStreamComponents(components.getSource(), filter); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 6f7c1b206178c..537cdbafe9522 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -79,7 +79,8 @@ public static class Defaults { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder 
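The TextFieldMapper hunks above adapt to Lucene 8, where TokenStreamComponents is built from a Consumer&lt;Reader&gt; source instead of exposing getTokenizer(). A minimal wrapper in the same style, assuming Lucene 8.x and the analyzers-common FixedShingleFilter:

-------------------------------------------------
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.shingle.FixedShingleFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public final class ShingleWrapper extends AnalyzerWrapper {
    private final Analyzer delegate = new StandardAnalyzer();

    public ShingleWrapper() {
        super(Analyzer.PER_FIELD_REUSE_STRATEGY);
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
        return delegate;
    }

    @Override
    protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
        // getSource() hands back the Consumer<Reader> that feeds the chain,
        // replacing the pre-8.0 getTokenizer() accessor.
        TokenStream shingles = new FixedShingleFilter(components.getTokenStream(), 2);
        return new TokenStreamComponents(components.getSource(), shingles);
    }
}
-------------------------------------------------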
parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { throw new MapperParsingException(NAME + " is not configurable"); } @@ -161,7 +162,8 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { deprecationLogger.deprecatedAndMaybeLog("range_single_type", - "Running [range] query on [_type] field for an index with a single type. As types are deprecated, this functionality will be removed in future releases."); + "Running [range] query on [_type] field for an index with a single type." + + " As types are deprecated, this functionality will be removed in future releases."); Query result = new MatchAllDocsQuery(); String type = context.getMapperService().documentMapper().type(); if (type != null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index 6afac0fcf8181..9742e084f7eb0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -58,7 +58,8 @@ public static class Defaults { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { throw new MapperParsingException(NAME + " is not configurable"); } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java index b14fab84130b1..218b20ecad39c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -309,18 +309,14 @@ protected Query doToQuery(QueryShardContext context) throws IOException { query = new MatchAllDocsQuery(); } + CombineFunction boostMode = this.boostMode == null ? DEFAULT_BOOST_MODE : this.boostMode; // handle cases where only one score function and no filter was provided. In this case we create a FunctionScoreQuery. if (filterFunctions.length == 0) { return new FunctionScoreQuery(query, minScore, maxBoost); } else if (filterFunctions.length == 1 && filterFunctions[0] instanceof FunctionScoreQuery.FilterScoreFunction == false) { - CombineFunction combineFunction = this.boostMode; - if (combineFunction == null) { - combineFunction = filterFunctions[0].getDefaultScoreCombiner(); - } - return new FunctionScoreQuery(query, filterFunctions[0], combineFunction, minScore, maxBoost); + return new FunctionScoreQuery(query, filterFunctions[0], boostMode, minScore, maxBoost); } // in all other cases we create a FunctionScoreQuery with filters - CombineFunction boostMode = this.boostMode == null ? 
DEFAULT_BOOST_MODE : this.boostMode; return new FunctionScoreQuery(query, scoreMode, filterFunctions, boostMode, minScore, maxBoost); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java index c711fb429366e..a56f8670c23b1 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SeqNoStats.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; public class SeqNoStats implements ToXContentFragment, Writeable { @@ -83,6 +84,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final SeqNoStats that = (SeqNoStats) o; + return maxSeqNo == that.maxSeqNo && + localCheckpoint == that.localCheckpoint && + globalCheckpoint == that.globalCheckpoint; + } + + @Override + public int hashCode() { + return Objects.hash(maxSeqNo, localCheckpoint, globalCheckpoint); + } + @Override public String toString() { return "SeqNoStats{" + diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 3530fe5ae5db8..683a5a79c36af 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -62,13 +62,12 @@ public class PrimaryReplicaSyncer extends AbstractComponent { private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; @Inject - public PrimaryReplicaSyncer(Settings settings, TransportService transportService, TransportResyncReplicationAction syncAction) { - this(settings, transportService.getTaskManager(), syncAction); + public PrimaryReplicaSyncer(TransportService transportService, TransportResyncReplicationAction syncAction) { + this(transportService.getTaskManager(), syncAction); } // for tests - public PrimaryReplicaSyncer(Settings settings, TaskManager taskManager, SyncAction syncAction) { - super(settings); + public PrimaryReplicaSyncer(TaskManager taskManager, SyncAction syncAction) { this.taskManager = taskManager; this.syncAction = syncAction; } diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index bc77626b94277..447ec9003a4ac 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -22,8 +22,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.MultiFields; +import 
org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.memory.MemoryIndex; @@ -98,7 +99,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Engine.Searcher searcher = indexShard.acquireSearcher("term_vector")) { - Fields topLevelFields = MultiFields.getFields(get.searcher() != null ? get.searcher().reader() : searcher.reader()); + Fields topLevelFields = fields(get.searcher() != null ? get.searcher().reader() : searcher.reader()); DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); /* from an artificial document */ if (request.doc() != null) { @@ -152,6 +153,25 @@ else if (docIdAndVersion != null) { return termVectorsResponse; } + public static Fields fields(IndexReader reader) { + return new Fields() { + @Override + public Iterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) throws IOException { + return MultiTerms.getTerms(reader, field); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + }; + } + private static void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) { Set fieldNames = new HashSet<>(); for (String pattern : request.selectedFields()) { @@ -270,7 +290,7 @@ private static Fields generateTermVectors(IndexShard indexShard, Map indexServices) { - super(settings); this.indexShards = indexServices; ByteSizeValue indexingBuffer = INDEX_BUFFER_SIZE_SETTING.get(settings); @@ -103,8 +102,8 @@ public class IndexingMemoryController extends AbstractComponent implements Index // null means we used the default (10%) if (indexingBufferSetting == null || indexingBufferSetting.endsWith("%")) { // We only apply the min/max when % value was used for the index buffer: - ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(this.settings); - ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(this.settings); + ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(settings); + ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(settings); if (indexingBuffer.getBytes() < minIndexingBuffer.getBytes()) { indexingBuffer = minIndexingBuffer; } @@ -114,9 +113,9 @@ public class IndexingMemoryController extends AbstractComponent implements Index } this.indexingBuffer = indexingBuffer; - this.inactiveTime = SHARD_INACTIVE_TIME_SETTING.get(this.settings); + this.inactiveTime = SHARD_INACTIVE_TIME_SETTING.get(settings); // we need to have this relatively small to free up heap quickly enough - this.interval = SHARD_MEMORY_INTERVAL_TIME_SETTING.get(this.settings); + this.interval = SHARD_MEMORY_INTERVAL_TIME_SETTING.get(settings); this.statusChecker = new ShardsIndicesStatusChecker(); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 2695c1728491b..129b839bac75a 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -71,7 +71,6 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, private final Map stats2 = new IdentityHashMap<>(); public IndicesQueryCache(Settings 
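The TermVectorsService change above replaces the removed MultiFields.getFields with a lazy Fields adapter over MultiTerms.getTerms, so term lookups stay per-field. A usage sketch with a hypothetical helper:

-------------------------------------------------
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;

public final class TermsLookup {
    // Fetches the merged Terms for one field across all leaves, on demand.
    static long sumDocFreq(IndexReader reader, String field) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, field); // null if the field has no terms
        return terms == null ? 0 : terms.getSumDocFreq();
    }
}
-------------------------------------------------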
settings) { - super(settings); final ByteSizeValue size = INDICES_CACHE_QUERY_SIZE_SETTING.get(settings); final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings); logger.debug("using [node] query cache with size [{}] max filter count [{}]", diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 626d6e8df17f5..7c227b3366fee 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -88,7 +88,6 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo private final Cache cache; IndicesRequestCache(Settings settings) { - super(settings); this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 206b9e7165ab0..07f50dc30fa38 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -173,6 +173,10 @@ public class IndicesService extends AbstractLifecycleComponent } } + /** + * The node's settings. + */ + private final Settings settings; private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final NamedXContentRegistry xContentRegistry; @@ -215,6 +219,7 @@ public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvi Collection>> engineFactoryProviders, Map> indexStoreFactories) { super(settings); + this.settings = settings; this.threadPool = threadPool; this.pluginsService = pluginsService; this.nodeEnv = nodeEnv; @@ -483,7 +488,7 @@ private synchronized IndexService createIndexService(final String reason, IndicesFieldDataCache indicesFieldDataCache, List builtInListeners, IndexingOperationListener... 
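The IndexingMemoryController constructor further above sizes the shared indexing buffer from a settings value: percentages are resolved against the heap and clamped by the min/max settings, while absolute byte values are used verbatim. A worked model of that sizing (the helper is illustrative; the 10% default and 48mb floor mirror the settings in the hunk):

-------------------------------------------------
public final class IndexBufferSizing {
    static long indexBufferBytes(String setting, long heapBytes, long minBytes, long maxBytes) {
        // null means the default (10%); min/max only apply when a percentage was used
        if (setting == null || setting.endsWith("%")) {
            double pct = setting == null ? 10.0 : Double.parseDouble(setting.substring(0, setting.length() - 1));
            long buffer = (long) (heapBytes * pct / 100.0);
            return Math.min(Math.max(buffer, minBytes), maxBytes);
        }
        return Long.parseLong(setting); // absolute byte value: no clamping
    }

    public static void main(String[] args) {
        long heap = 1024L * 1024 * 1024;      // 1gb heap
        long min = 48L * 1024 * 1024;         // 48mb floor
        long max = Long.MAX_VALUE;            // effectively unbounded ceiling
        System.out.println(indexBufferBytes(null, heap, min, max)); // ~107mb: the default 10%
        System.out.println(indexBufferBytes("4%", heap, min, max)); // 48mb: clamped up to the floor
    }
}
-------------------------------------------------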
indexingOperationListeners) throws IOException { - final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings); + final IndexSettings idxSettings = new IndexSettings(indexMetaData, settings, indexScopedSettings); // we ignore private settings since they are not registered settings indexScopedSettings.validate(indexMetaData.getSettings(), true, true, true); logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]", diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 4a55b86291e63..8c2eb9b67b8df 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -90,7 +90,6 @@ public class HunspellService extends AbstractComponent { public HunspellService(final Settings settings, final Environment env, final Map knownDictionaries) throws IOException { - super(settings); this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries); this.hunspellDir = resolveHunspellDirectory(env); this.defaultIgnoreCase = HUNSPELL_IGNORE_CASE.get(settings); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/BreakerSettings.java b/server/src/main/java/org/elasticsearch/indices/breaker/BreakerSettings.java index cf950e19a4f19..8fd3aa35e9c85 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/BreakerSettings.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/BreakerSettings.java @@ -25,22 +25,24 @@ /** * Settings for a {@link CircuitBreaker} */ -public class BreakerSettings { +public final class BreakerSettings { private final String name; private final long limitBytes; private final double overhead; private final CircuitBreaker.Type type; + private final CircuitBreaker.Durability durability; public BreakerSettings(String name, long limitBytes, double overhead) { - this(name, limitBytes, overhead, CircuitBreaker.Type.MEMORY); + this(name, limitBytes, overhead, CircuitBreaker.Type.MEMORY, CircuitBreaker.Durability.PERMANENT); } - public BreakerSettings(String name, long limitBytes, double overhead, CircuitBreaker.Type type) { + public BreakerSettings(String name, long limitBytes, double overhead, CircuitBreaker.Type type, CircuitBreaker.Durability durability) { this.name = name; this.limitBytes = limitBytes; this.overhead = overhead; this.type = type; + this.durability = durability; } public String getName() { @@ -59,10 +61,15 @@ public CircuitBreaker.Type getType() { return this.type; } + public CircuitBreaker.Durability getDurability() { + return durability; + } + @Override public String toString() { return "[" + this.name + ",type=" + this.type.toString() + + ",durability=" + this.durability.toString() + ",limit=" + this.limitBytes + "/" + new ByteSizeValue(this.limitBytes) + ",overhead=" + this.overhead + "]"; } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index eb2dc587bfb6d..b87de10657b72 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -106,30 +106,34 @@ public HierarchyCircuitBreakerService(Settings settings, ClusterSettings cluster this.fielddataSettings = new 
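// With the Durability dimension added to BreakerSettings above, a breaker is
// now declared with five arguments. Sketch with illustrative values (name,
// limit, and overhead are hypothetical, not shipped defaults; imports elided):
//
//   BreakerSettings example = new BreakerSettings(
//       "my_breaker",                                    // breaker name
//       new ByteSizeValue(100, ByteSizeUnit.MB).getBytes(),
//       1.03,                                            // overhead multiplier
//       CircuitBreaker.Type.MEMORY,
//       CircuitBreaker.Durability.TRANSIENT);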
BreakerSettings(CircuitBreaker.FIELDDATA, FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), - FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) + FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings), + CircuitBreaker.Durability.PERMANENT ); this.inFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS, IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), - IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) + IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING.get(settings), + CircuitBreaker.Durability.TRANSIENT ); this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST, REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), - REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) + REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings), + CircuitBreaker.Durability.TRANSIENT ); this.accountingSettings = new BreakerSettings(CircuitBreaker.ACCOUNTING, ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), - ACCOUNTING_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) + ACCOUNTING_CIRCUIT_BREAKER_TYPE_SETTING.get(settings), + CircuitBreaker.Durability.PERMANENT ); this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), 1.0, - CircuitBreaker.Type.PARENT); + CircuitBreaker.Type.PARENT, null); if (logger.isTraceEnabled()) { logger.trace("parent circuit breaker with settings {}", this.parentSettings); @@ -151,17 +155,17 @@ public HierarchyCircuitBreakerService(Settings settings, ClusterSettings cluster private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) { BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.getBytes(), newRequestOverhead, - HierarchyCircuitBreakerService.this.requestSettings.getType()); + this.requestSettings.getType(), this.requestSettings.getDurability()); registerBreaker(newRequestSettings); - HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; + this.requestSettings = newRequestSettings; logger.info("Updated breaker settings request: {}", newRequestSettings); } private void setInFlightRequestsBreakerLimit(ByteSizeValue newInFlightRequestsMax, Double newInFlightRequestsOverhead) { BreakerSettings newInFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS, newInFlightRequestsMax.getBytes(), - newInFlightRequestsOverhead, HierarchyCircuitBreakerService.this.inFlightRequestsSettings.getType()); + newInFlightRequestsOverhead, this.inFlightRequestsSettings.getType(), this.inFlightRequestsSettings.getDurability()); registerBreaker(newInFlightRequestsSettings); - HierarchyCircuitBreakerService.this.inFlightRequestsSettings = newInFlightRequestsSettings; + this.inFlightRequestsSettings = newInFlightRequestsSettings; logger.info("Updated breaker settings for in-flight requests: {}", newInFlightRequestsSettings); } @@ -169,7 +173,7 @@ private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newF long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.getBytes(); newFielddataOverhead = newFielddataOverhead == null ? 
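// Durability assignment above, in summary: fielddata and accounting memory
// outlives the triggering request, so those breakers are PERMANENT; request
// and in-flight-request memory is released once a request completes, so those
// are TRANSIENT. The parent breaker passes null and instead derives a
// durability from its children at trip time (see checkParentLimit below).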
HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, - HierarchyCircuitBreakerService.this.fielddataSettings.getType()); + this.fielddataSettings.getType(), this.fielddataSettings.getDurability()); registerBreaker(newFielddataSettings); HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; logger.info("Updated breaker settings field data: {}", newFielddataSettings); @@ -177,20 +181,20 @@ private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newF private void setAccountingBreakerLimit(ByteSizeValue newAccountingMax, Double newAccountingOverhead) { BreakerSettings newAccountingSettings = new BreakerSettings(CircuitBreaker.ACCOUNTING, newAccountingMax.getBytes(), - newAccountingOverhead, HierarchyCircuitBreakerService.this.inFlightRequestsSettings.getType()); + newAccountingOverhead, HierarchyCircuitBreakerService.this.accountingSettings.getType(), this.accountingSettings.getDurability()); registerBreaker(newAccountingSettings); HierarchyCircuitBreakerService.this.accountingSettings = newAccountingSettings; logger.info("Updated breaker settings for accounting requests: {}", newAccountingSettings); } private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { - BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT); + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT, null); validateSettings(new BreakerSettings[]{newParentSettings}); return true; } private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { - BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT); + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.getBytes(), 1.0, CircuitBreaker.Type.PARENT, null); this.parentSettings = newParentSettings; } @@ -225,7 +229,7 @@ public AllCircuitBreakerStats stats() { } // Manually add the parent breaker settings since they aren't part of the breaker map allStats.add(new CircuitBreakerStats(CircuitBreaker.PARENT, parentSettings.getLimit(), - parentUsed(0L).totalUsage, 1.0, parentTripCount.get())); + memoryUsed(0L).totalUsage, 1.0, parentTripCount.get())); return new AllCircuitBreakerStats(allStats.toArray(new CircuitBreakerStats[allStats.size()])); } @@ -235,26 +239,38 @@ public CircuitBreakerStats stats(String name) { return new CircuitBreakerStats(breaker.getName(), breaker.getLimit(), breaker.getUsed(), breaker.getOverhead(), breaker.getTrippedCount()); } - private static class ParentMemoryUsage { + private static class MemoryUsage { final long baseUsage; final long totalUsage; + final long transientChildUsage; + final long permanentChildUsage; - ParentMemoryUsage(final long baseUsage, final long totalUsage) { + MemoryUsage(final long baseUsage, final long totalUsage, final long transientChildUsage, final long permanentChildUsage) { this.baseUsage = baseUsage; this.totalUsage = totalUsage; + this.transientChildUsage = transientChildUsage; + this.permanentChildUsage = permanentChildUsage; } } - private ParentMemoryUsage parentUsed(long newBytesReserved) { + private MemoryUsage memoryUsed(long newBytesReserved) { + long transientUsage = 0; + long 
permanentUsage = 0; + + for (CircuitBreaker breaker : this.breakers.values()) { + long breakerUsed = (long)(breaker.getUsed() * breaker.getOverhead()); + if (breaker.getDurability() == CircuitBreaker.Durability.TRANSIENT) { + transientUsage += breakerUsed; + } else if (breaker.getDurability() == CircuitBreaker.Durability.PERMANENT) { + permanentUsage += breakerUsed; + } + } if (this.trackRealMemoryUsage) { final long current = currentMemoryUsage(); - return new ParentMemoryUsage(current, current + newBytesReserved); + return new MemoryUsage(current, current + newBytesReserved, transientUsage, permanentUsage); } else { - long parentEstimated = 0; - for (CircuitBreaker breaker : this.breakers.values()) { - parentEstimated += breaker.getUsed() * breaker.getOverhead(); - } - return new ParentMemoryUsage(parentEstimated, parentEstimated); + long parentEstimated = transientUsage + permanentUsage; + return new MemoryUsage(parentEstimated, parentEstimated, transientUsage, permanentUsage); } } @@ -276,16 +292,16 @@ long currentMemoryUsage() { * Checks whether the parent breaker has been tripped */ public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException { - final ParentMemoryUsage parentUsed = parentUsed(newBytesReserved); + final MemoryUsage memoryUsed = memoryUsed(newBytesReserved); long parentLimit = this.parentSettings.getLimit(); - if (parentUsed.totalUsage > parentLimit) { + if (memoryUsed.totalUsage > parentLimit) { this.parentTripCount.incrementAndGet(); final StringBuilder message = new StringBuilder("[parent] Data too large, data for [" + label + "]" + - " would be [" + parentUsed.totalUsage + "/" + new ByteSizeValue(parentUsed.totalUsage) + "]" + + " would be [" + memoryUsed.totalUsage + "/" + new ByteSizeValue(memoryUsed.totalUsage) + "]" + ", which is larger than the limit of [" + parentLimit + "/" + new ByteSizeValue(parentLimit) + "]"); if (this.trackRealMemoryUsage) { - final long realUsage = parentUsed.baseUsage; + final long realUsage = memoryUsed.baseUsage; message.append(", real usage: ["); message.append(realUsage); message.append("/"); @@ -306,7 +322,11 @@ public void checkParentLimit(long newBytesReserved, String label) throws Circuit .collect(Collectors.toList()))); message.append("]"); } - throw new CircuitBreakingException(message.toString(), parentUsed.totalUsage, parentLimit); + // derive durability of a tripped parent breaker depending on whether the majority of memory tracked by + // child circuit breakers is categorized as transient or permanent. + CircuitBreaker.Durability durability = memoryUsed.transientChildUsage >= memoryUsed.permanentChildUsage ? 
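// Worked example of the majority rule completed just below (numbers
// hypothetical): transientChildUsage = 600mb from the request and in-flight
// breakers, permanentChildUsage = 400mb from fielddata and accounting;
// 600 >= 400, so the parent trip is reported as Durability.TRANSIENT and a
// client may reasonably retry once in-flight load drains. Ties go to
// TRANSIENT because the comparison is >=.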
+ CircuitBreaker.Durability.TRANSIENT : CircuitBreaker.Durability.PERMANENT; + throw new CircuitBreakingException(message.toString(), memoryUsed.totalUsage, parentLimit, durability); } } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 561a8e8c74290..cd32c415ea54b 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -107,6 +107,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() { }; + private final Settings settings; // a list of shards that failed during recovery // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update final ConcurrentMap failedShardsCache = ConcurrentCollections.newConcurrentMap(); @@ -156,6 +157,7 @@ public IndicesClusterStateService(Settings settings, PrimaryReplicaSyncer primaryReplicaSyncer, Consumer globalCheckpointSyncer) { super(settings); + this.settings = settings; this.buildInIndexListener = Arrays.asList( peerRecoverySourceService, @@ -172,7 +174,7 @@ public IndicesClusterStateService(Settings settings, this.repositoriesService = repositoriesService; this.primaryReplicaSyncer = primaryReplicaSyncer; this.globalCheckpointSyncer = globalCheckpointSyncer; - this.sendRefreshMapping = this.settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); + this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 4a784af6bb3f3..a5945187de154 100644 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -58,7 +58,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL private final Cache cache; public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener indicesFieldDataCacheListener) { - super(settings); this.indicesFieldDataCacheListener = indicesFieldDataCacheListener; final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).getBytes(); CacheBuilder cacheBuilder = CacheBuilder.builder() diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index aeb88021f26e1..28b5eeeba6b1b 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; @@ -84,8 +83,7 @@ public class SyncedFlushService extends AbstractComponent implements 
IndexEventL private final IndexNameExpressionResolver indexNameExpressionResolver; @Inject - public SyncedFlushService(Settings settings, IndicesService indicesService, ClusterService clusterService, TransportService transportService, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings); + public SyncedFlushService(IndicesService indicesService, ClusterService clusterService, TransportService transportService, IndexNameExpressionResolver indexNameExpressionResolver) { this.indicesService = indicesService; this.clusterService = clusterService; this.transportService = transportService; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index ec05f0e30b0c2..bd237ae453361 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -61,9 +61,8 @@ public static class Actions { final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries(); @Inject - public PeerRecoverySourceService(Settings settings, TransportService transportService, IndicesService indicesService, + public PeerRecoverySourceService(TransportService transportService, IndicesService indicesService, RecoverySettings recoverySettings) { - super(settings); this.transportService = transportService; this.indicesService = indicesService; this.recoverySettings = recoverySettings; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 39709eb3ac2ff..e461628ac4902 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -101,9 +101,8 @@ public static class Actions { private final RecoveriesCollection onGoingRecoveries; - public PeerRecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings - recoverySettings, ClusterService clusterService) { - super(settings); + public PeerRecoveryTargetService(ThreadPool threadPool, TransportService transportService, + RecoverySettings recoverySettings, ClusterService clusterService) { this.threadPool = threadPool; this.transportService = transportService; this.recoverySettings = recoverySettings; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index e238277b698d0..b90bed90d052b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -85,8 +85,6 @@ public class RecoverySettings extends AbstractComponent { private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { - super(settings); - this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the master time to remove a faulty node diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java 
b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index f62618ec91eac..5db490fbb7f27 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -78,6 +78,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED); + private final Settings settings; private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; @@ -91,7 +92,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe @Inject public IndicesStore(Settings settings, IndicesService indicesService, ClusterService clusterService, TransportService transportService, ThreadPool threadPool) { - super(settings); + this.settings = settings; this.indicesService = indicesService; this.clusterService = clusterService; this.transportService = transportService; diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 62f5dba9825fb..68c033f188d0e 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -66,6 +66,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction enginePlugins = pluginsService.filterPlugins(EnginePlugin.class); @@ -418,7 +418,7 @@ protected Node( threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(), client, metaStateService, engineFactoryProviders, indexStoreFactories); - final AliasValidator aliasValidator = new AliasValidator(settings); + final AliasValidator aliasValidator = new AliasValidator(); final MetaDataCreateIndexService metaDataCreateIndexService = new MetaDataCreateIndexService( settings, @@ -462,7 +462,7 @@ protected Node( indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings(), indexMetaDataUpgraders); final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, nodeEnvironment, metaStateService, metaDataIndexUpgradeService, metaDataUpgrader); - new TemplateUpgradeService(settings, client, clusterService, threadPool, indexTemplateMetaDataUpgraders); + new TemplateUpgradeService(client, clusterService, threadPool, indexTemplateMetaDataUpgraders); final Transport transport = networkModule.getTransportSupplier().get(); Set taskHeaders = Stream.concat( pluginsService.filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.getTaskHeaders().stream()), @@ -470,8 +470,8 @@ protected Node( ).collect(Collectors.toSet()); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders); - final ResponseCollectorService responseCollectorService = new ResponseCollectorService(this.settings, clusterService); - final SearchTransportService searchTransportService = new SearchTransportService(settings, transportService, + final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); + 
final SearchTransportService searchTransportService = new SearchTransportService(transportService, SearchExecutionStatsCollector.makeWrapper(responseCollectorService)); final HttpServerTransport httpServerTransport = newHttpTransport(networkModule); @@ -494,10 +494,10 @@ protected Node( .flatMap(List::stream) .collect(toList()); - final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(settings, tasksExecutors); + final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(tasksExecutors); final PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService(settings, registry, clusterService); - final PersistentTasksService persistentTasksService = new PersistentTasksService(settings, clusterService, threadPool, client); + final PersistentTasksService persistentTasksService = new PersistentTasksService(clusterService, threadPool, client); modules.add(b -> { b.bind(Node.class).toInstance(this); @@ -524,12 +524,11 @@ protected Node( b.bind(MetaDataCreateIndexService.class).toInstance(metaDataCreateIndexService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); - b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, - searchService::createReduceContext)); + b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::createReduceContext)); b.bind(Transport.class).toInstance(transport); b.bind(TransportService.class).toInstance(transportService); b.bind(NetworkService.class).toInstance(networkService); - b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService())); + b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptModule.getScriptService())); b.bind(MetaDataIndexUpgradeService.class).toInstance(metaDataIndexUpgradeService); b.bind(ClusterInfoService.class).toInstance(clusterInfoService); b.bind(GatewayMetaState.class).toInstance(gatewayMetaState); @@ -537,9 +536,9 @@ protected Node( { RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings); - b.bind(PeerRecoverySourceService.class).toInstance(new PeerRecoverySourceService(settings, transportService, + b.bind(PeerRecoverySourceService.class).toInstance(new PeerRecoverySourceService(transportService, indicesService, recoverySettings)); - b.bind(PeerRecoveryTargetService.class).toInstance(new PeerRecoveryTargetService(settings, threadPool, + b.bind(PeerRecoveryTargetService.class).toInstance(new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService)); } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 207886c5cf263..fe9c3d59d6646 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -46,7 +46,7 @@ import java.io.IOException; public class NodeService extends AbstractComponent implements Closeable { - + private final Settings settings; private final ThreadPool threadPool; private final MonitorService monitorService; private final TransportService transportService; @@ -68,7 +68,7 @@ public class NodeService extends AbstractComponent implements Closeable { @Nullable 
HttpServerTransport httpServerTransport, IngestService ingestService, ClusterService clusterService, SettingsFilter settingsFilter, ResponseCollectorService responseCollectorService, SearchTransportService searchTransportService) { - super(settings); + this.settings = settings; this.threadPool = threadPool; this.monitorService = monitorService; this.transportService = transportService; diff --git a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java index 4ab1bc4cee80b..8885728927b34 100644 --- a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java +++ b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import java.io.IOException; @@ -49,8 +48,7 @@ public final class ResponseCollectorService extends AbstractComponent implements private final ConcurrentMap nodeIdToStats = ConcurrentCollections.newConcurrentMap(); - public ResponseCollectorService(Settings settings, ClusterService clusterService) { - super(settings); + public ResponseCollectorService(ClusterService clusterService) { clusterService.addListener(this); } diff --git a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index 1e0f83958bbac..86a657f8336cf 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -18,9 +18,6 @@ */ package org.elasticsearch.persistent; -import java.io.IOException; -import java.util.Objects; - import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -37,11 +34,13 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; +import java.util.Objects; + import static org.elasticsearch.action.ValidateActions.addValidationError; /** @@ -137,11 +136,11 @@ public static class TransportAction extends TransportMasterNodeAction> taskExecutors; - public PersistentTasksExecutorRegistry(Settings settings, Collection> taskExecutors) { - super(settings); + public PersistentTasksExecutorRegistry(Collection> taskExecutors) { Map> map = new HashMap<>(); for (PersistentTasksExecutor executor : taskExecutors) { map.put(executor.getTaskName(), executor); diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index 91cdb400aa0d4..a90415b530b43 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -27,7 +27,6 @@ import 
org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.tasks.Task; @@ -57,11 +56,9 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu private final TaskManager taskManager; private final NodePersistentTasksExecutor nodePersistentTasksExecutor; - public PersistentTasksNodeService(Settings settings, - PersistentTasksService persistentTasksService, + public PersistentTasksNodeService(PersistentTasksService persistentTasksService, PersistentTasksExecutorRegistry persistentTasksExecutorRegistry, TaskManager taskManager, NodePersistentTasksExecutor nodePersistentTasksExecutor) { - super(settings); this.persistentTasksService = persistentTasksService; this.persistentTasksExecutorRegistry = persistentTasksExecutorRegistry; this.taskManager = taskManager; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 665a803a2d9cf..96775b74ea934 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.NodeClosedException; @@ -55,8 +54,7 @@ public class PersistentTasksService extends AbstractComponent { private final ClusterService clusterService; private final ThreadPool threadPool; - public PersistentTasksService(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) { - super(settings); + public PersistentTasksService(ClusterService clusterService, ThreadPool threadPool, Client client) { this.client = client; this.clusterService = clusterService; this.threadPool = threadPool; diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 38890e6a12c12..5ea748de14e7c 100644 --- a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -34,10 +34,9 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import java.io.IOException; import java.util.Objects; @@ -122,11 +121,11 @@ public static class TransportAction extends TransportMasterNodeAction { @@ -152,11 +151,11 @@ public static class TransportAction extends TransportMasterNodeAction> plugins; private final PluginsAndModules info; + public static 
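// Usage sketch for the plugin.mandatory list setting whose declaration is
// completed just below (plugin names hypothetical):
//
//   Settings nodeSettings = Settings.builder()
//       .putList("plugin.mandatory", "analysis-icu", "repository-s3")
//       .build();
//   List<String> mandatory = PluginsService.MANDATORY_SETTING.get(nodeSettings);
//   // node startup fails if a listed mandatory plugin is not installed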
final Setting> MANDATORY_SETTING = Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope); @@ -98,8 +100,7 @@ public List getPluginSettingsFilter() { * @param classpathPlugins Plugins that exist in the classpath which should be loaded */ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, Path pluginsDirectory, Collection> classpathPlugins) { - super(settings); - + this.settings = settings; this.configPath = configPath; List> pluginsLoaded = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index aef4381cd8b2d..e97f7acf168f7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -69,7 +69,6 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService, Map typesRegistry, ThreadPool threadPool) { - super(settings); this.typesRegistry = typesRegistry; this.clusterService = clusterService; this.threadPool = threadPool; @@ -78,7 +77,7 @@ public RepositoriesService(Settings settings, ClusterService clusterService, Tra if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { clusterService.addStateApplier(this); } - this.verifyAction = new VerifyNodeRepositoryAction(settings, transportService, clusterService, this); + this.verifyAction = new VerifyNodeRepositoryAction(transportService, clusterService, this); } /** diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index fbaf369912e8a..59b79aedf959c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService.VerifyResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -56,8 +55,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { private final RepositoriesService repositoriesService; - public VerifyNodeRepositoryAction(Settings settings, TransportService transportService, ClusterService clusterService, RepositoriesService repositoriesService) { - super(settings); + public VerifyNodeRepositoryAction(TransportService transportService, ClusterService clusterService, RepositoriesService repositoriesService) { this.transportService = transportService; this.clusterService = clusterService; this.repositoriesService = repositoriesService; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index df80dd473f190..6dca81796522a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -204,6 
+204,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String DATA_BLOB_PREFIX = "__"; + private final Settings settings; + private final RateLimiter snapshotRateLimiter; private final RateLimiter restoreRateLimiter; @@ -234,10 +236,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * Constructs new BlobStoreRepository * * @param metadata The metadata for this repository including name and settings - * @param globalSettings Settings for the node this repository object is created on + * @param settings Settings for the node this repository object is created on */ - protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettings, NamedXContentRegistry namedXContentRegistry) { - super(globalSettings); + protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) { + super(settings); + this.settings = settings; this.metadata = metadata; this.namedXContentRegistry = namedXContentRegistry; snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 643ff2bc93d3c..7abddafac4ed7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -99,10 +99,10 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); } else { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings); + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); } this.compress = COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings); + ? 
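// Two settings layers are read above: per-repository settings first, with the
// node-level repositories.* settings as fallback. A hypothetical repository
// metadata that overrides the 40mb snapshot throttle default set in the
// BlobStoreRepository constructor (constructor shape assumed, path illustrative):
//
//   Settings repoSettings = Settings.builder()
//       .put("location", "/mnt/backups")
//       .put("max_snapshot_bytes_per_sec", "20mb")   // overrides 40mb default
//       .build();
//   RepositoryMetaData metadata = new RepositoryMetaData("my_backup", "fs", repoSettings);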
COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); this.basePath = BlobPath.cleanPath(); } @@ -110,7 +110,7 @@ public FsRepository(RepositoryMetaData metadata, Environment environment, protected BlobStore createBlobStore() throws Exception { final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); final Path locationFile = environment.resolveRepoFile(location); - return new FsBlobStore(settings, locationFile); + return new FsBlobStore(environment.settings(), locationFile); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 6b9432483f304..5bb806c02d6df 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -59,7 +59,7 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH private final LongAdder usageCount = new LongAdder(); protected BaseRestHandler(Settings settings) { - super(settings); + // TODO drop settings from ctor } public final long getUsageCount() { diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 82fcf7178d1dd..85a3b0bdb4531 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.path.PathTrie; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -75,9 +74,8 @@ public class RestController extends AbstractComponent implements HttpServerTrans private final Set headersToCopy; private UsageService usageService; - public RestController(Settings settings, Set headersToCopy, UnaryOperator handlerWrapper, + public RestController(Set headersToCopy, UnaryOperator handlerWrapper, NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService) { - super(settings); this.headersToCopy = headersToCopy; this.usageService = usageService; if (handlerWrapper == null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 746bb643bf62d..e1e4d92116349 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -43,12 +43,14 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { + private final Settings settings; private final ClusterSettings clusterSettings; private final SettingsFilter settingsFilter; public RestClusterGetSettingsAction(Settings settings, RestController controller, ClusterSettings clusterSettings, SettingsFilter settingsFilter) { super(settings); + this.settings = settings; this.clusterSettings = clusterSettings; controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this); this.settingsFilter = settingsFilter; diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index a6b0c6c05a1a9..aca6235420cca 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -20,6 +20,8 @@ package org.elasticsearch.rest.action.admin.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -27,6 +29,7 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -55,6 +58,8 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; public class RestGetMappingAction extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(RestGetMappingAction.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public RestGetMappingAction(final Settings settings, final RestController controller) { super(settings); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index b865bd3a42029..60c481e59878f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -19,12 +19,15 @@ package org.elasticsearch.rest.action.admin.indices; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; @@ -34,6 +37,8 @@ import java.io.IOException; public abstract class RestResizeHandler extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(RestResizeHandler.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); RestResizeHandler(final Settings settings) { super(settings); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 2d8e7f5ed6b92..52a06db582966 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -68,7 +68,7 @@ public static final class Builder { * is no existing {@link ScriptMetaData}. 
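// The logging pattern added to the REST handlers above, for reference; the
// class name and message in this sketch are hypothetical:
//
//   private static final Logger logger = LogManager.getLogger(RestExampleAction.class);
//   private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
//   ...
//   deprecationLogger.deprecated("[example] the [foo] parameter is deprecated");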
*/ public Builder(ScriptMetaData previous) { - this.scripts = previous == null ? new HashMap<>() :new HashMap<>(previous.scripts); + this.scripts = previous == null ? new HashMap<>() : new HashMap<>(previous.scripts); } /** @@ -352,6 +352,13 @@ public EnumSet context() { return MetaData.ALL_CONTEXTS; } + /** + * Returns the map of stored scripts. + */ + Map getStoredScripts() { + return scripts; + } + /** * Retrieves a stored script based on a user-specified id. */ diff --git a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index 6a54af8721e3b..7c4011a63c412 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; @@ -48,6 +49,7 @@ import java.io.Closeable; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -97,8 +99,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust public static final Setting SCRIPT_CACHE_EXPIRE_SETTING = Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), Property.NodeScope); public static final Setting SCRIPT_MAX_SIZE_IN_BYTES = - Setting.intSetting("script.max_size_in_bytes", 65535, Property.NodeScope); - // public Setting(String key, Function defaultValue, Function parser, Property... 
properties) { + Setting.intSetting("script.max_size_in_bytes", 65535, 0, Property.Dynamic, Property.NodeScope); public static final Setting> SCRIPT_MAX_COMPILATIONS_RATE = new Setting<>("script.max_compilations_rate", "75/5m", MAX_COMPILATION_RATE_FUNCTION, Property.Dynamic, Property.NodeScope); @@ -109,6 +110,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust public static final Setting> CONTEXTS_ALLOWED_SETTING = Setting.listSetting("script.allowed_contexts", Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + private final Settings settings; private final Set typesAllowed; private final Set contextsAllowed; @@ -121,15 +123,15 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust private ClusterState clusterState; + private int maxSizeInBytes; + private Tuple rate; private long lastInlineCompileTime; private double scriptsPerTimeWindow; private double compilesAllowedPerNano; public ScriptService(Settings settings, Map engines, Map> contexts) { - super(settings); - - Objects.requireNonNull(settings); + this.settings = Objects.requireNonNull(settings); this.engines = Objects.requireNonNull(engines); this.contexts = Objects.requireNonNull(contexts); @@ -221,10 +223,12 @@ public ScriptService(Settings settings, Map engines, Map source : getScriptsFromClusterState().entrySet()) { + if (source.getValue().getSource().getBytes(StandardCharsets.UTF_8).length > newMaxSizeInBytes) { + throw new IllegalArgumentException("script.max_size_in_bytes cannot be set to [" + newMaxSizeInBytes + "], " + + "stored script [" + source.getKey() + "] exceeds the new value with a size of " + + "[" + source.getValue().getSource().getBytes(StandardCharsets.UTF_8).length + "]"); + } + } + + maxSizeInBytes = newMaxSizeInBytes; + } + /** * This configures the maximum script compilations per five minute window. 
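// Hypothetical dynamic-update flow for the now-Dynamic script.max_size_in_bytes,
// exercising setMaxSizeInBytes above: with a stored script [calc] of 120 bytes,
// a cluster-settings update lowering the limit to 100 is rejected with
// IllegalArgumentException("script.max_size_in_bytes cannot be set to [100],
// stored script [calc] exceeds the new value with a size of [120]"), and the
// previous limit is kept. Raising the limit always passes this validation,
// since every stored script that fit the old limit also fits a larger one.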
* @@ -295,6 +315,13 @@ public FactoryType compile(Script script, ScriptContext maxSizeInBytes) { + throw new IllegalArgumentException("exceeded max allowed inline script size in bytes [" + maxSizeInBytes + "] " + + "with size [" + idOrCode.getBytes(StandardCharsets.UTF_8).length + "] for script [" + idOrCode + "]"); + } + } + if (logger.isTraceEnabled()) { logger.trace("compiling lang: [{}] type: [{}] script: {}", lang, type, idOrCode); } @@ -369,7 +396,8 @@ void checkCompilationLimit() { // Otherwise reject the request throw new CircuitBreakingException("[script] Too many dynamic script compilations within, max: [" + rate.v1() + "/" + rate.v2() +"]; please use indexed, or scripts with parameters instead; " + - "this limit can be changed by the [" + SCRIPT_MAX_COMPILATIONS_RATE.getKey() + "] setting"); + "this limit can be changed by the [" + SCRIPT_MAX_COMPILATIONS_RATE.getKey() + "] setting", + CircuitBreaker.Durability.TRANSIENT); } } @@ -390,6 +418,20 @@ public boolean isAnyContextEnabled() { return contextsAllowed == null || contextsAllowed.isEmpty() == false; } + Map getScriptsFromClusterState() { + if (clusterState == null) { + return Collections.emptyMap(); + } + + ScriptMetaData scriptMetadata = clusterState.metaData().custom(ScriptMetaData.TYPE); + + if (scriptMetadata == null) { + return Collections.emptyMap(); + } + + return scriptMetadata.getStoredScripts(); + } + StoredScriptSource getScriptFromClusterState(String id) { ScriptMetaData scriptMetadata = clusterState.metaData().custom(ScriptMetaData.TYPE); @@ -408,10 +450,8 @@ StoredScriptSource getScriptFromClusterState(String id) { public void putStoredScript(ClusterService clusterService, PutStoredScriptRequest request, ActionListener listener) { - int max = SCRIPT_MAX_SIZE_IN_BYTES.get(settings); - - if (request.content().length() > max) { - throw new IllegalArgumentException("exceeded max allowed stored script size in bytes [" + max + "] with size [" + + if (request.content().length() > maxSizeInBytes) { + throw new IllegalArgumentException("exceeded max allowed stored script size in bytes [" + maxSizeInBytes + "] with size [" + request.content().length() + "] for script [" + request.id() + "]"); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 47943a92b00a1..539f2de529f23 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -183,6 +183,8 @@ import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalMedianAbsoluteDeviation; +import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -364,6 +366,9 @@ private void registerAggregations(List plugins) { PercentileRanksAggregationBuilder::parse) .addResultReader(InternalTDigestPercentileRanks.NAME, InternalTDigestPercentileRanks::new) .addResultReader(InternalHDRPercentileRanks.NAME, InternalHDRPercentileRanks::new)); + registerAggregation(new 
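// Note on the inline-size check added to compile() above: the limit applies to
// UTF-8 bytes, not character count, so scripts containing multi-byte characters
// hit it sooner. Worked example:
//
//   "αβγδε".getBytes(StandardCharsets.UTF_8).length == 10   // 5 chars, 2 bytes each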
AggregationSpec(MedianAbsoluteDeviationAggregationBuilder.NAME, + MedianAbsoluteDeviationAggregationBuilder::new, MedianAbsoluteDeviationAggregationBuilder::parse) + .addResultReader(InternalMedianAbsoluteDeviation::new)); registerAggregation(new AggregationSpec(CardinalityAggregationBuilder.NAME, CardinalityAggregationBuilder::new, CardinalityAggregationBuilder::parse).addResultReader(InternalCardinality::new)); registerAggregation(new AggregationSpec(GlobalAggregationBuilder.NAME, GlobalAggregationBuilder::new, @@ -715,7 +720,7 @@ private void registerFetchSubPhases(List plugins) { registerFetchSubPhase(new FetchSourceSubPhase()); registerFetchSubPhase(new VersionFetchSubPhase()); registerFetchSubPhase(new MatchedQueriesFetchSubPhase()); - registerFetchSubPhase(new HighlightPhase(settings, highlighters)); + registerFetchSubPhase(new HighlightPhase(highlighters)); registerFetchSubPhase(new ScoreFetchSubPhase()); FetchPhaseConstructionContext context = new FetchPhaseConstructionContext(highlighters); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index f1a43077a6215..51750c3953ad6 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -182,13 +183,14 @@ public SearchService(ClusterService clusterService, IndicesService indicesServic ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase, ResponseCollectorService responseCollectorService) { super(clusterService.getSettings()); + Settings settings = clusterService.getSettings(); this.threadPool = threadPool; this.clusterService = clusterService; this.indicesService = indicesService; this.scriptService = scriptService; this.responseCollectorService = responseCollectorService; this.bigArrays = bigArrays; - this.queryPhase = new QueryPhase(settings); + this.queryPhase = new QueryPhase(); this.fetchPhase = fetchPhase; this.multiBucketConsumerService = new MultiBucketConsumerService(clusterService, settings); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index 7363ec8306d97..6d8c8a94f3e6f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -83,6 +83,8 @@ import org.elasticsearch.search.aggregations.metrics.ValueCount; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviation; import java.util.Map; @@ -316,6 +318,13 @@ public static PercentileRanksAggregationBuilder percentileRanks(String name, dou return new PercentileRanksAggregationBuilder(name, values); } + /** + * Create a new {@link 
MedianAbsoluteDeviation} aggregation with the given name + */ + public static MedianAbsoluteDeviationAggregationBuilder medianAbsoluteDeviation(String name) { + return new MedianAbsoluteDeviationAggregationBuilder(name); + } + /** * Create a new {@link Cardinality} aggregation with the given name. */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java new file mode 100644 index 0000000000000..01ab3a323e12d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class InternalMedianAbsoluteDeviation extends InternalNumericMetricsAggregation.SingleValue implements MedianAbsoluteDeviation { + + static double computeMedianAbsoluteDeviation(TDigestState valuesSketch) { + + if (valuesSketch.size() == 0) { + return Double.NaN; + } else { + final double approximateMedian = valuesSketch.quantile(0.5); + final TDigestState approximatedDeviationsSketch = new TDigestState(valuesSketch.compression()); + valuesSketch.centroids().forEach(centroid -> { + final double deviation = Math.abs(approximateMedian - centroid.mean()); + approximatedDeviationsSketch.add(deviation, centroid.count()); + }); + + return approximatedDeviationsSketch.quantile(0.5); + } + } + + private final TDigestState valuesSketch; + private final double medianAbsoluteDeviation; + + InternalMedianAbsoluteDeviation(String name, + List pipelineAggregators, + Map metaData, + DocValueFormat format, + TDigestState valuesSketch) { + + super(name, pipelineAggregators, metaData); + this.format = Objects.requireNonNull(format); + this.valuesSketch = Objects.requireNonNull(valuesSketch); + + this.medianAbsoluteDeviation = computeMedianAbsoluteDeviation(this.valuesSketch); + } + + public InternalMedianAbsoluteDeviation(StreamInput in) throws IOException { + super(in); + format = in.readNamedWriteable(DocValueFormat.class); + valuesSketch = TDigestState.read(in); + medianAbsoluteDeviation = in.readDouble(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws 
IOException { + out.writeNamedWriteable(format); + TDigestState.write(valuesSketch, out); + out.writeDouble(medianAbsoluteDeviation); + } + + @Override + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { + final TDigestState valueMerged = new TDigestState(valuesSketch.compression()); + for (InternalAggregation aggregation : aggregations) { + final InternalMedianAbsoluteDeviation madAggregation = (InternalMedianAbsoluteDeviation) aggregation; + valueMerged.add(madAggregation.valuesSketch); + } + + return new InternalMedianAbsoluteDeviation(name, pipelineAggregators(), metaData, format, valueMerged); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + final boolean anyResults = valuesSketch.size() > 0; + final Double mad = anyResults + ? getMedianAbsoluteDeviation() + : null; + + builder.field(CommonFields.VALUE.getPreferredName(), mad); + if (format != DocValueFormat.RAW && anyResults) { + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(mad).toString()); + } + + return builder; + } + + @Override + protected int doHashCode() { + return Objects.hash(valuesSketch); + } + + @Override + protected boolean doEquals(Object obj) { + InternalMedianAbsoluteDeviation other = (InternalMedianAbsoluteDeviation) obj; + return Objects.equals(valuesSketch, other.valuesSketch); + } + + @Override + public String getWriteableName() { + return MedianAbsoluteDeviationAggregationBuilder.NAME; + } + + public TDigestState getValuesSketch() { + return valuesSketch; + } + + @Override + public double value() { + return getMedianAbsoluteDeviation(); + } + + @Override + public double getMedianAbsoluteDeviation() { + return medianAbsoluteDeviation; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviation.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviation.java new file mode 100644 index 0000000000000..63658ee9e9971 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviation.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
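The two-pass trick in computeMedianAbsoluteDeviation above is the heart of this change: rather than buffering every value, it reads the approximate median off the value sketch, then feeds each centroid's absolute deviation from that median (weighted by the centroid's count) into a second sketch and reads that sketch's median. The standalone sketch below mirrors the same logic using the com.tdunning:t-digest library that TDigestState wraps; the class name and demo data are illustrative only, not part of this change.

-------------------------------------------------
import com.tdunning.math.stats.Centroid;
import com.tdunning.math.stats.TDigest;

public class MadApproximationDemo {

    // Same shape as InternalMedianAbsoluteDeviation#computeMedianAbsoluteDeviation:
    // median of the values first, then the median of per-centroid absolute deviations.
    static double approximateMad(TDigest values) {
        if (values.size() == 0) {
            return Double.NaN; // empty sketch, matching the aggregation's empty result
        }
        final double approximateMedian = values.quantile(0.5);
        final TDigest deviations = TDigest.createAvlTreeDigest(values.compression());
        for (Centroid centroid : values.centroids()) {
            final double deviation = Math.abs(approximateMedian - centroid.mean());
            deviations.add(deviation, centroid.count());
        }
        return deviations.quantile(0.5);
    }

    public static void main(String[] args) {
        TDigest values = TDigest.createAvlTreeDigest(100);
        for (double v : new double[] {1, 2, 3, 4, 5, 100}) {
            values.add(v);
        }
        // the exact MAD of these six values is 1.5; the sketch lands on or near it
        System.out.println(approximateMad(values));
    }
}
-------------------------------------------------

Note that the second pass compounds the error of the first: deviations are measured from an approximate median and collapsed onto centroid means, so the result is approximate on both axes; the compression knob on the builder controls that trade-off.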
+ */ + +package org.elasticsearch.search.aggregations.metrics; + +/** + * An aggregation that approximates the median absolute deviation of a numeric field + * + * @see https://en.wikipedia.org/wiki/Median_absolute_deviation + */ +public interface MedianAbsoluteDeviation extends NumericMetricsAggregation.SingleValue { + + /** + * Returns the median absolute deviation statistic computed for this aggregation + */ + double getMedianAbsoluteDeviation(); +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java new file mode 100644 index 0000000000000..b5b119cc38f6a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder.LeafOnly; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class MedianAbsoluteDeviationAggregationBuilder extends LeafOnly { + + public static final String NAME = "median_absolute_deviation"; + + private static final ParseField COMPRESSION_FIELD = new ParseField("compression"); + + private static final ObjectParser PARSER; + + static { + PARSER = new ObjectParser<>(NAME); + ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false); + PARSER.declareDouble(MedianAbsoluteDeviationAggregationBuilder::compression, COMPRESSION_FIELD); + } + + public static MedianAbsoluteDeviationAggregationBuilder 
parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new MedianAbsoluteDeviationAggregationBuilder(aggregationName), null); + } + + private double compression = 1000d; + + public MedianAbsoluteDeviationAggregationBuilder(String name) { + super(name, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + } + + public MedianAbsoluteDeviationAggregationBuilder(StreamInput in) throws IOException { + super(in, ValuesSourceType.NUMERIC, ValueType.NUMERIC); + compression = in.readDouble(); + } + + protected MedianAbsoluteDeviationAggregationBuilder(MedianAbsoluteDeviationAggregationBuilder clone, + AggregatorFactories.Builder factoriesBuilder, + Map metaData) { + super(clone, factoriesBuilder, metaData); + this.compression = clone.compression; + } + + /** + * Returns the compression factor of the t-digest sketches used + */ + public double compression() { + return compression; + } + + /** + * Set the compression factor of the t-digest sketches used + */ + public MedianAbsoluteDeviationAggregationBuilder compression(double compression) { + if (compression <= 0d) { + throw new IllegalArgumentException( + "[" + COMPRESSION_FIELD.getPreferredName() + "] must be greater than 0. Found [" + compression + "] in [" + name + "]"); + } + this.compression = compression; + return this; + } + + @Override + protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBuilder, Map metaData) { + return new MedianAbsoluteDeviationAggregationBuilder(this, factoriesBuilder, metaData); + } + + @Override + protected void innerWriteTo(StreamOutput out) throws IOException { + out.writeDouble(compression); + } + + @Override + protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, + ValuesSourceConfig config, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder) + throws IOException { + + return new MedianAbsoluteDeviationAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData, compression); + } + + @Override + protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(COMPRESSION_FIELD.getPreferredName(), compression); + return builder; + } + + @Override + protected int innerHashCode() { + return Objects.hash(compression); + } + + @Override + protected boolean innerEquals(Object obj) { + MedianAbsoluteDeviationAggregationBuilder other = (MedianAbsoluteDeviationAggregationBuilder) obj; + return Objects.equals(compression, other.compression); + } + + @Override + public String getType() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java new file mode 100644 index 0000000000000..6dd275455de90 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.search.aggregations.metrics.InternalMedianAbsoluteDeviation.computeMedianAbsoluteDeviation; + +public class MedianAbsoluteDeviationAggregator extends NumericMetricsAggregator.SingleValue { + + private final ValuesSource.Numeric valuesSource; + private final DocValueFormat format; + + private final double compression; + + private ObjectArray valueSketches; + + MedianAbsoluteDeviationAggregator(String name, + SearchContext context, + Aggregator parent, + List pipelineAggregators, + Map metaData, + @Nullable ValuesSource.Numeric valuesSource, + DocValueFormat format, + double compression) throws IOException { + + super(name, context, parent, pipelineAggregators, metaData); + + this.valuesSource = valuesSource; + this.format = Objects.requireNonNull(format); + this.compression = compression; + this.valueSketches = context.bigArrays().newObjectArray(1); + } + + private boolean hasDataForBucket(long bucketOrd) { + return bucketOrd < valueSketches.size() && valueSketches.get(bucketOrd) != null; + } + + @Override + public double metric(long owningBucketOrd) { + if (hasDataForBucket(owningBucketOrd)) { + return computeMedianAbsoluteDeviation(valueSketches.get(owningBucketOrd)); + } else { + return Double.NaN; + } + } + + @Override + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } else { + return ScoreMode.COMPLETE_NO_SCORES; + } + } + + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + + final BigArrays bigArrays = context.bigArrays(); + final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); + + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long bucket) throws IOException { + + valueSketches = bigArrays.grow(valueSketches, bucket + 1); + + TDigestState valueSketch = valueSketches.get(bucket); + if (valueSketch == null) { + 
valueSketch = new TDigestState(compression); + valueSketches.set(bucket, valueSketch); + } + + if (values.advanceExact(doc)) { + final int valueCount = values.docValueCount(); + for (int i = 0; i < valueCount; i++) { + final double value = values.nextValue(); + valueSketch.add(value); + } + } + } + }; + } + + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + if (hasDataForBucket(bucket)) { + final TDigestState valueSketch = valueSketches.get(bucket); + return new InternalMedianAbsoluteDeviation(name, pipelineAggregators(), metaData(), format, valueSketch); + } else { + return buildEmptyAggregation(); + } + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return new InternalMedianAbsoluteDeviation(name, pipelineAggregators(), metaData(), format, new TDigestState(compression)); + } + + @Override + public void doClose() { + Releasables.close(valueSketches); + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java new file mode 100644 index 0000000000000..3e621e396de06 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
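The collector above follows the standard metric-aggregator bookkeeping: the per-bucket store is grown on demand via BigArrays#grow, and a TDigestState is allocated lazily the first time a bucket sees a value, so buckets that never collect anything cost nothing. A minimal stand-in for that pattern, with a plain list in place of ObjectArray and a list of doubles in place of the sketch (names hypothetical):

-------------------------------------------------
import java.util.ArrayList;
import java.util.List;

public class PerBucketStoreDemo {

    // stands in for ObjectArray<TDigestState>, grown on demand like BigArrays#grow
    private final List<List<Double>> sketchPerBucket = new ArrayList<>();

    void collect(long bucket, double value) {
        // make the bucket ordinal addressable before touching it
        while (sketchPerBucket.size() <= bucket) {
            sketchPerBucket.add(null);
        }
        List<Double> sketch = sketchPerBucket.get((int) bucket);
        if (sketch == null) {
            // lazy allocation, like `new TDigestState(compression)` in the collector
            sketch = new ArrayList<>();
            sketchPerBucket.set((int) bucket, sketch);
        }
        sketch.add(value);
    }

    // mirrors hasDataForBucket: out of range or never allocated means no data
    boolean hasDataForBucket(long bucket) {
        return bucket < sketchPerBucket.size() && sketchPerBucket.get((int) bucket) != null;
    }
}
-------------------------------------------------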
+ */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class MedianAbsoluteDeviationAggregatorFactory extends + ValuesSourceAggregatorFactory { + + private final double compression; + + MedianAbsoluteDeviationAggregatorFactory(String name, + ValuesSourceConfig config, + SearchContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData, + double compression) throws IOException { + + super(name, config, context, parent, subFactoriesBuilder, metaData); + this.compression = compression; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + + return new MedianAbsoluteDeviationAggregator( + name, + context, + parent, + pipelineAggregators, + metaData, + null, + config.format(), + compression + ); + } + + @Override + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + + return new MedianAbsoluteDeviationAggregator( + name, + context, + parent, + pipelineAggregators, + metaData, + valuesSource, + config.format(), + compression + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMedianAbsoluteDeviation.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMedianAbsoluteDeviation.java new file mode 100644 index 0000000000000..42a0fd69987ec --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedMedianAbsoluteDeviation.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
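With the builder, the aggregator, and the factory now wired together (and the aggregation registered in SearchModule earlier in this diff), the new metric is requested like any other values-source aggregation. A hedged usage sketch follows; the index name, field name, and compression value are hypothetical:

-------------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class MadRequestDemo {

    static SearchRequest madOfRatings() {
        // field() comes from the values-source base class; compression() is the
        // builder's one extra knob and must be greater than 0
        MedianAbsoluteDeviationAggregationBuilder mad =
            AggregationBuilders.medianAbsoluteDeviation("rating_variability")
                .field("rating")
                .compression(100);
        return new SearchRequest("reviews")
            .source(new SearchSourceBuilder().size(0).aggregation(mad));
    }
}
-------------------------------------------------

On the response side, getAggregations().get("rating_variability") yields a MedianAbsoluteDeviation whose getMedianAbsoluteDeviation() returns the approximated statistic, or NaN when the bucket collected no values.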
+ */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ParsedMedianAbsoluteDeviation extends ParsedSingleValueNumericMetricsAggregation implements MedianAbsoluteDeviation { + + private static final ObjectParser PARSER = new ObjectParser<>( + ParsedMedianAbsoluteDeviation.class.getSimpleName(), + true, + ParsedMedianAbsoluteDeviation::new + ); + + static { + declareSingleValueFields(PARSER, Double.NaN); + } + + public static ParsedMedianAbsoluteDeviation fromXContent(XContentParser parser, String name) { + ParsedMedianAbsoluteDeviation parsedMedianAbsoluteDeviation = PARSER.apply(parser, null); + parsedMedianAbsoluteDeviation.setName(name); + return parsedMedianAbsoluteDeviation; + } + + @Override + protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + final boolean hasValue = Double.isFinite(getMedianAbsoluteDeviation()); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? getMedianAbsoluteDeviation() : null); + if (hasValue && valueAsString != null) { + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), valueAsString); + } + return builder; + } + + @Override + public double getMedianAbsoluteDeviation() { + return value(); + } + + @Override + public String getType() { + return MedianAbsoluteDeviationAggregationBuilder.NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java index e3c0bec6900e8..5298e7eca0586 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceContext.java @@ -91,8 +91,8 @@ public String[] excludes() { public static FetchSourceContext parseFromRestRequest(RestRequest request) { Boolean fetchSource = null; - String[] source_excludes = null; - String[] source_includes = null; + String[] sourceExcludes = null; + String[] sourceIncludes = null; String source = request.param("_source"); if (source != null) { @@ -101,23 +101,22 @@ public static FetchSourceContext parseFromRestRequest(RestRequest request) { } else if (Booleans.isFalse(source)) { fetchSource = false; } else { - source_includes = Strings.splitStringByCommaToArray(source); + sourceIncludes = Strings.splitStringByCommaToArray(source); } } + String sIncludes = request.param("_source_includes"); - sIncludes = request.param("_source_include", sIncludes); if (sIncludes != null) { - source_includes = Strings.splitStringByCommaToArray(sIncludes); + sourceIncludes = Strings.splitStringByCommaToArray(sIncludes); } String sExcludes = request.param("_source_excludes"); - sExcludes = request.param("_source_exclude", sExcludes); if (sExcludes != null) { - source_excludes = Strings.splitStringByCommaToArray(sExcludes); + sourceExcludes = Strings.splitStringByCommaToArray(sExcludes); } - if (fetchSource != null || source_includes != null || source_excludes != null) { - return new FetchSourceContext(fetchSource == null ? true : fetchSource, source_includes, source_excludes); + if (fetchSource != null || sourceIncludes != null || sourceExcludes != null) { + return new FetchSourceContext(fetchSource == null ? 
true : fetchSource, sourceIncludes, sourceExcludes); } return null; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java index 11e46061d6786..b4cbd03116758 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java @@ -20,9 +20,7 @@ package org.elasticsearch.search.fetch.subphase.highlight; import org.apache.lucene.search.Query; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.SourceFieldMapper; @@ -35,11 +33,10 @@ import java.util.HashMap; import java.util.Map; -public class HighlightPhase extends AbstractComponent implements FetchSubPhase { +public class HighlightPhase implements FetchSubPhase { private final Map highlighters; - public HighlightPhase(Settings settings, Map highlighters) { - super(settings); + public HighlightPhase(Map highlighters) { this.highlighters = highlighters; } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 56d409ef3133c..3523966b7eda4 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -39,7 +39,6 @@ import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor; import org.elasticsearch.search.DocValueFormat; @@ -79,10 +78,10 @@ public class QueryPhase implements SearchPhase { private final SuggestPhase suggestPhase; private RescorePhase rescorePhase; - public QueryPhase(Settings settings) { + public QueryPhase() { this.aggregationPhase = new AggregationPhase(); - this.suggestPhase = new SuggestPhase(settings); - this.rescorePhase = new RescorePhase(settings); + this.suggestPhase = new SuggestPhase(); + this.rescorePhase = new RescorePhase(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 7f5a1be285d8e..f4f3317a81b07 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -22,9 +22,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.internal.SearchContext; @@ -33,12 +31,7 @@ /** * Rescore phase of a search request, used to run potentially expensive scoring models against the top matching documents. 
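RescorePhase here, and SuggestPhase, QueryPhase, HighlightPhase, UsageService, RestoreService, and TaskResultsService elsewhere in this diff, all receive the same mechanical cleanup: they stop passing Settings up to AbstractComponent, which no longer needs it, so the parameter disappears from their constructors and every call site. In miniature, with hypothetical stand-in types:

-------------------------------------------------
public class SettingsCleanupDemo {

    interface SearchPhaseLike { void execute(); }

    // before: Settings threaded in solely to satisfy the AbstractComponent super call
    static final class OldPhase implements SearchPhaseLike {
        OldPhase(Object settings) { /* super(settings) used to live here */ }
        @Override public void execute() { }
    }

    // after: no base class, no unused dependency, one fewer constructor argument
    static final class NewPhase implements SearchPhaseLike {
        @Override public void execute() { }
    }
}
-------------------------------------------------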
*/ -public class RescorePhase extends AbstractComponent implements SearchPhase { - - public RescorePhase(Settings settings) { - super(settings); - } - +public class RescorePhase implements SearchPhase { @Override public void preProcess(SearchContext context) { } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java b/server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java index 874448b924c06..89b1f08958134 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java @@ -20,8 +20,6 @@ import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.suggest.Suggest.Suggestion; @@ -37,12 +35,7 @@ /** * Suggest phase of a search request, used to collect suggestions */ -public class SuggestPhase extends AbstractComponent implements SearchPhase { - - public SuggestPhase(Settings settings) { - super(settings); - } - +public class SuggestPhase implements SearchPhase { @Override public void preProcess(SearchContext context) { } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index bc2f6a8d42f65..9d9721ab046d9 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.codecs.TermStats; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -72,7 +72,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator { public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates) throws IOException { this(spellchecker, field, suggestMode, reader, nonErrorLikelihood, - numCandidates, null, null, MultiFields.getTerms(reader, field)); + numCandidates, null, null, MultiTerms.getTerms(reader, field)); } public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 3ff6774ff5c8b..10112ad2f43dd 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -21,7 +21,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.spell.DirectSpellChecker; @@ -78,14 +78,14 @@ public 
Suggestion> innerExecute(String name, P for (int i = 0; i < numGenerators; i++) { PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i); DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker(); - Terms terms = MultiFields.getTerms(indexReader, generator.field()); + Terms terms = MultiTerms.getTerms(indexReader, generator.field()); if (terms != null) { gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(), indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms)); } } final String suggestField = suggestion.getField(); - final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField); + final Terms suggestTerms = MultiTerms.getTerms(indexReader, suggestField); if (gens.size() > 0 && suggestTerms != null) { final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit()); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index 1bdf1c90d7d09..b13f33f76394b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; @@ -45,7 +45,7 @@ public abstract class WordScorer { private final boolean useTotalTermFreq; public WordScorer(IndexReader reader, String field, double realWordLikelyHood, BytesRef separator) throws IOException { - this(reader, MultiFields.getTerms(reader, field), field, realWordLikelyHood, separator); + this(reader, MultiTerms.getTerms(reader, field), field, realWordLikelyHood, separator); } public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 791b59a1d5bb0..7e1873c3f0a6e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -151,10 +151,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp private final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor; @Inject - public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, + public RestoreService(ClusterService clusterService, RepositoriesService repositoriesService, AllocationService allocationService, MetaDataCreateIndexService createIndexService, MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { - super(settings); this.clusterService = clusterService; this.repositoriesService = repositoriesService; this.allocationService = allocationService; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 88612dbcc5022..b428ffcfc6d1f 100644 --- 
a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -128,7 +128,7 @@ public SnapshotShardsService(Settings settings, ClusterService clusterService, S } // The constructor of UpdateSnapshotStatusAction will register itself to the TransportService. - this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(settings, UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver); } @@ -615,9 +615,9 @@ static class UpdateIndexShardSnapshotStatusResponse extends ActionResponse { } class UpdateSnapshotStatusAction extends TransportMasterNodeAction { - UpdateSnapshotStatusAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, + UpdateSnapshotStatusAction(String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new); + super(actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 73af26cfc708b..bb93eac6af8ff 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -79,7 +79,6 @@ public class TaskManager extends AbstractComponent implements ClusterStateApplie private final ByteSizeValue maxHeaderSize; public TaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { - super(settings); this.threadPool = threadPool; this.taskHeaders = new ArrayList<>(taskHeaders); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 1a837f7d6d923..86c546370b704 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -69,8 +69,7 @@ public class TaskResultsService extends AbstractComponent { private final ClusterService clusterService; @Inject - public TaskResultsService(Settings settings, Client client, ClusterService clusterService) { - super(settings); + public TaskResultsService(Client client, ClusterService clusterService) { this.client = client; this.clusterService = clusterService; } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index ecf311bc4b91d..91d5d9fa3717e 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -162,8 +162,6 @@ public Collection builders() { Setting.timeSetting("thread_pool.estimated_time_interval", TimeValue.timeValueMillis(200), Setting.Property.NodeScope); public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBuilders) { - super(settings); - assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index 5f2635fac88d9..609ce106fabe9 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.transport; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -67,16 +67,15 @@ public class ConnectionManager implements Closeable { private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) { - this(settings, transport, threadPool, ConnectionProfile.buildDefaultConnectionProfile(settings)); + this(settings, transport, threadPool, TcpTransport.PING_SCHEDULE.get(settings)); } - public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) { + public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, TimeValue pingSchedule) { this.transport = transport; this.threadPool = threadPool; - this.pingSchedule = TcpTransport.PING_SCHEDULE.get(settings); - this.defaultProfile = defaultProfile; + this.pingSchedule = pingSchedule; + this.defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(settings); this.lifecycle.moveToStarted(); - if (pingSchedule.millis() > 0) { threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, new ScheduledPing()); } @@ -203,7 +202,7 @@ public void close() { threadPool.generic().execute(() -> { closeLock.writeLock().lock(); try { - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // we are holding a write lock so nobody adds to the connectedNodes / openConnections map - it's safe to first close // all instances and then clear them maps Iterator> iterator = connectedNodes.entrySet().iterator(); while (iterator.hasNext()) { @@ -252,6 +251,10 @@ private void ensureOpen() { } } + TimeValue getPingSchedule() { + return pingSchedule; + } + private class ScheduledPing extends AbstractLifecycleRunnable { private ScheduledPing() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index bbd02f8d8f01a..fdb3745d130dc 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -167,7 +167,7 @@ public String getKey(final String key) { Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); - + protected final Settings settings; protected final ClusterNameExpressionResolver clusterNameResolver; /** @@ -175,8 +175,8 @@ public String getKey(final String key) { * @param settings the nodes level settings */ protected RemoteClusterAware(Settings settings) { - super(settings); - this.clusterNameResolver = new ClusterNameExpressionResolver(settings); + this.settings = settings; + this.clusterNameResolver = new 
ClusterNameExpressionResolver(); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 48f086ad972bf..9f53a42646b59 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.transport; -import java.net.InetSocketAddress; -import java.util.function.Supplier; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -48,6 +46,7 @@ import java.io.Closeable; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -64,6 +63,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; /** @@ -93,6 +93,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; + private final TimeValue initialConnectionTimeout; private SetOnce remoteClusterName = new SetOnce<>(); /** @@ -104,18 +105,11 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param connectionManager the connection manager to use for this remote connection * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to + * @param proxyAddress the proxy address */ RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, - Predicate nodePredicate) { - this(settings, clusterAlias, seedNodes, transportService, connectionManager, maxNumRemoteConnections, nodePredicate, null); - } - - RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, - TransportService transportService, ConnectionManager connectionManager, int maxNumRemoteConnections, Predicate - nodePredicate, - String proxyAddress) { - super(settings); + Predicate nodePredicate, String proxyAddress) { this.transportService = transportService; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -140,6 +134,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. 
connectionManager.addListener(transportService); this.proxyAddress = proxyAddress; + initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); } private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, DiscoveryNode node) { @@ -150,7 +145,7 @@ private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, Discovery InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion()); - } + } } /** @@ -679,7 +674,6 @@ void addConnectedNode(DiscoveryNode node) { public RemoteConnectionInfo getConnectionInfo() { List seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect (Collectors.toList()); - TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), initialConnectionTimeout, skipUnavailable); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index dc3bd3a353604..08f08207eae39 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -60,6 +60,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.timeSetting; /** * Basic service for accessing remote clusters via gateway nodes @@ -166,6 +167,12 @@ public String getKey(final String key) { Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); + public static final Setting.AffixSetting REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting( + "cluster.remote.", + "transport.ping_schedule", + key -> timeSetting(key, TcpTransport.PING_SCHEDULE, Setting.Property.NodeScope), + REMOTE_CLUSTERS_SEEDS); + private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) && (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode()); @@ -211,10 +218,13 @@ private synchronized void updateRemoteClusters(Map listener) { - RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterAlias); - if (remoteClusterConnection == null) { - throw new IllegalArgumentException("no such remote cluster: " + clusterAlias); - } - remoteClusterConnection.ensureConnected(listener); + void ensureConnected(String clusterAlias, ActionListener listener) { + getRemoteClusterConnection(clusterAlias).ensureConnected(listener); } public Transport.Connection getConnection(String cluster) { + return getRemoteClusterConnection(cluster).getConnection(); + } + + RemoteClusterConnection getRemoteClusterConnection(String cluster) { RemoteClusterConnection connection = remoteClusters.get(cluster); if (connection == null) { throw new IllegalArgumentException("no such remote cluster: " + cluster); } - return connection.getConnection(); + return connection; } @Override @@ -386,7 +392,6 @@ synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavail } } - @Override protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { 
updateRemoteCluster(clusterAlias, addresses, proxyAddress, ActionListener.wrap((x) -> {}, (x) -> {})); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index ad41e8c2902a3..46067930df110 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -65,6 +65,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -117,8 +118,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { - public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker"; - public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; + public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "transport_worker"; public static final Setting> HOST = listSetting("transport.host", emptyList(), Function.identity(), Setting.Property.NodeScope); @@ -178,6 +178,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements public static final Setting DEFAULT_FEATURES_SETTING = Setting.groupSetting(FEATURE_PREFIX + ".", Setting.Property.NodeScope); private final String[] features; + protected final Settings settings; private final CircuitBreakerService circuitBreakerService; protected final ThreadPool threadPool; private final BigArrays bigArrays; @@ -209,11 +210,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final ResponseHandlers responseHandlers = new ResponseHandlers(); private final TransportLogger transportLogger; private final BytesReference pingMessage; + private final String nodeName; public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { super(settings); + this.settings = settings; this.profileSettings = getProfileSettings(settings); this.threadPool = threadPool; this.bigArrays = bigArrays; @@ -223,6 +226,7 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo this.networkService = networkService; this.transportName = transportName; this.transportLogger = new TransportLogger(); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings); if (defaultFeatures == null) { @@ -947,7 +951,7 @@ public void sendErrorResponse( stream.setVersion(nodeVersion); stream.setFeatures(features); RemoteTransportException tx = new RemoteTransportException( - nodeName(), new TransportAddress(channel.getLocalAddress()), action, error); + nodeName, new TransportAddress(channel.getLocalAddress()), action, error); threadPool.getThreadContext().writeTo(stream); stream.writeException(tx); byte status = 0; diff --git a/server/src/main/java/org/elasticsearch/transport/Transports.java b/server/src/main/java/org/elasticsearch/transport/Transports.java index b4579374d9ec4..c531d33224a60 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transports.java +++ b/server/src/main/java/org/elasticsearch/transport/Transports.java @@ -29,8 +29,6 @@ public enum 
Transports { /** threads whose name is prefixed by this string will be considered network threads, even though they aren't */ public static final String TEST_MOCK_TRANSPORT_THREAD_PREFIX = "__mock_network_thread"; - public static final String NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX = "es_nio_transport_worker"; - /** * Utility method to detect whether a thread is a network thread. Typically * used in assertions to make sure that we do not call blocking code from @@ -41,10 +39,8 @@ public static boolean isTransportThread(Thread t) { for (String s : Arrays.asList( HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, HttpServerTransport.HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX, - TcpTransport.TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, - TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX, - TEST_MOCK_TRANSPORT_THREAD_PREFIX, - NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX)) { + TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX, + TEST_MOCK_TRANSPORT_THREAD_PREFIX)) { if (threadName.contains(s)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/usage/UsageService.java b/server/src/main/java/org/elasticsearch/usage/UsageService.java index 9f742cca9d94a..9e1c2e0373452 100644 --- a/server/src/main/java/org/elasticsearch/usage/UsageService.java +++ b/server/src/main/java/org/elasticsearch/usage/UsageService.java @@ -40,9 +40,6 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodeUsage; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import java.util.ArrayList; @@ -53,14 +50,12 @@ /** * A service to monitor usage of Elasticsearch features. 
*/ -public class UsageService extends AbstractComponent { +public class UsageService { private final List handlers; private final long sinceTime; - @Inject - public UsageService(Settings settings) { - super(settings); + public UsageService() { this.handlers = new ArrayList<>(); this.sinceTime = System.currentTimeMillis(); } diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index c014845ce093d..4faf754b13869 100644 --- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -735,11 +734,11 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { break; case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed) - failure = new CircuitBreakingException("B", 5_000, 2_000); + failure = new ParsingException(3, 2, "B", null); ((ElasticsearchException) failure).addHeader("header_name", "0", "1"); - expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]"); + expected = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=B]"); expected.addHeader("header_name", "0", "1"); - suppressed = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]"); + suppressed = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=B]"); suppressed.addHeader("header_name", "0", "1"); expected.addSuppressed(suppressed); break; @@ -916,9 +915,9 @@ public static Tuple randomExceptions() { expected = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]"); break; - case 1: - actual = new CircuitBreakingException("Data too large", 123, 456); - expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=Data too large]"); + case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed) + actual = new ParsingException(3, 2, "Unknown identifier", null); + expected = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=Unknown identifier]"); break; case 2: actual = new SearchParseException(new TestSearchContext(null), "Parse failure", new XContentLocation(12, 98)); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 62778af8d577c..1646ec7e6c7bf 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.collect.Tuple; import 
diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
index c014845ce093d..4faf754b13869 100644
--- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -31,7 +31,6 @@
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.breaker.CircuitBreakingException;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
@@ -735,11 +734,11 @@ public void testFailureToAndFromXContentWithDetails() throws IOException {
                     break;
                 case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed)
-                    failure = new CircuitBreakingException("B", 5_000, 2_000);
+                    failure = new ParsingException(3, 2, "B", null);
                     ((ElasticsearchException) failure).addHeader("header_name", "0", "1");
-                    expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
+                    expected = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=B]");
                     expected.addHeader("header_name", "0", "1");
-                    suppressed = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=B]");
+                    suppressed = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=B]");
                     suppressed.addHeader("header_name", "0", "1");
                     expected.addSuppressed(suppressed);
                     break;
@@ -916,9 +915,9 @@ public static Tuple randomExceptions() {
                 expected = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, " +
                     "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]");
                 break;
-            case 1:
-                actual = new CircuitBreakingException("Data too large", 123, 456);
-                expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=Data too large]");
+            case 1: // Simple elasticsearch exception with headers (other metadata of type number are not parsed)
+                actual = new ParsingException(3, 2, "Unknown identifier", null);
+                expected = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, reason=Unknown identifier]");
                 break;
             case 2:
                 actual = new SearchParseException(new TestSearchContext(null), "Parse failure", new XContentLocation(12, 98));
diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index 62778af8d577c..1646ec7e6c7bf 100644
--- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -40,6 +40,7 @@
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreakingException;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.PathUtils;
@@ -349,10 +350,12 @@ public void testSearchContextMissingException() throws IOException {
     }

     public void testCircuitBreakingException() throws IOException {
-        CircuitBreakingException ex = serialize(new CircuitBreakingException("I hate to say I told you so...", 0, 100));
-        assertEquals("I hate to say I told you so...", ex.getMessage());
+        CircuitBreakingException ex = serialize(new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT),
+            Version.V_7_0_0_alpha1);
+        assertEquals("Too large", ex.getMessage());
         assertEquals(100, ex.getByteLimit());
         assertEquals(0, ex.getBytesWanted());
+        assertEquals(CircuitBreaker.Durability.TRANSIENT, ex.getDurability());
     }

     public void testTooManyBucketsException() throws IOException {
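A minimal sketch of the constructor shape the updated serialization test exercises; everything here is taken from the hunk above, with the durability accessor as the new piece:

----
// New four-argument form: message, bytesWanted, byteLimit, durability.
CircuitBreakingException ex =
    new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT);
assert ex.getDurability() == CircuitBreaker.Durability.TRANSIENT;
----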
diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java
index 1fa4197e74900..1bde97d0b8ebd 100644
--- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java
@@ -80,8 +80,8 @@ public ActionRequestValidationException validate() {
         }
     }
     class FakeTransportAction extends TransportAction {
-        protected FakeTransportAction(Settings settings, String actionName, ActionFilters actionFilters, TaskManager taskManager) {
-            super(settings, actionName, actionFilters, taskManager);
+        protected FakeTransportAction(String actionName, ActionFilters actionFilters, TaskManager taskManager) {
+            super(actionName, actionFilters, taskManager);
         }

         @Override
@@ -111,8 +111,8 @@ public ActionResponse newResponse() {
     public void testSetupRestHandlerContainsKnownBuiltin() {
         SettingsModule settings = new SettingsModule(Settings.EMPTY);
-        UsageService usageService = new UsageService(settings.getSettings());
-        ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(Settings.EMPTY),
+        UsageService usageService = new UsageService();
+        ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(),
             settings.getIndexScopedSettings(), settings.getClusterSettings(), settings.getSettingsFilter(), null, emptyList(),
             null, null, usageService);
         actionModule.initRestHandlers(null);
@@ -134,8 +134,8 @@ public List getRestHandlers(Settings settings, RestController restC
         SettingsModule settings = new SettingsModule(Settings.EMPTY);
         ThreadPool threadPool = new TestThreadPool(getTestName());
         try {
-            UsageService usageService = new UsageService(settings.getSettings());
-            ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(Settings.EMPTY),
+            UsageService usageService = new UsageService();
+            ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(),
                 settings.getIndexScopedSettings(), settings.getClusterSettings(), settings.getSettingsFilter(), threadPool,
                 singletonList(dupsMainAction), null, null, usageService);
             Exception e = expectThrows(IllegalArgumentException.class, () -> actionModule.initRestHandlers(null));
@@ -166,8 +166,8 @@ public List getRestHandlers(Settings settings, RestController restC
         SettingsModule settings = new SettingsModule(Settings.EMPTY);
         ThreadPool threadPool = new TestThreadPool(getTestName());
         try {
-            UsageService usageService = new UsageService(settings.getSettings());
-            ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(Settings.EMPTY),
+            UsageService usageService = new UsageService();
+            ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(),
                 settings.getIndexScopedSettings(), settings.getClusterSettings(), settings.getSettingsFilter(), threadPool,
                 singletonList(registersFakeHandler), null, null, usageService);
             actionModule.initRestHandlers(null);
diff --git a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
index 40795bff730e0..036c0b97cca5c 100644
--- a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
+++ b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java
@@ -393,7 +393,7 @@ public void testFlush() {
         internalCluster().coordOnlyNodeClient().admin().indices().flush(flushRequest).actionGet();

         clearInterceptedActions();
-        String[] indices = new IndexNameExpressionResolver(Settings.EMPTY)
+        String[] indices = new IndexNameExpressionResolver()
             .concreteIndexNames(client().admin().cluster().prepareState().get().getState(), flushRequest);
         assertIndicesSubset(Arrays.asList(indices), indexShardActions);
     }
@@ -418,7 +418,7 @@ public void testRefresh() {
         internalCluster().coordOnlyNodeClient().admin().indices().refresh(refreshRequest).actionGet();

         clearInterceptedActions();
-        String[] indices = new IndexNameExpressionResolver(Settings.EMPTY)
+        String[] indices = new IndexNameExpressionResolver()
             .concreteIndexNames(client().admin().cluster().prepareState().get().getState(), refreshRequest);
         assertIndicesSubset(Arrays.asList(indices), indexShardActions);
     }
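A condensed sketch of the no-arg resolver construction that replaces the Settings-based form throughout these tests; `clusterState` and `request` are hypothetical stand-ins for values the surrounding tests supply:

----
IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); // no Settings argument anymore
String[] indices = resolver.concreteIndexNames(clusterState, request);    // resolves expressions to concrete index names
----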
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java
index 067dd0daea005..a75510cfb64ef 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java
@@ -29,7 +29,6 @@
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -46,13 +45,13 @@
  */
 public class ClusterAllocationExplainActionTests extends ESTestCase {

-    private static final AllocationDeciders NOOP_DECIDERS = new AllocationDeciders(Settings.EMPTY, Collections.emptyList());
+    private static final AllocationDeciders NOOP_DECIDERS = new AllocationDeciders(Collections.emptyList());

     public void testInitializingOrRelocatingShardExplanation() throws Exception {
         ShardRoutingState shardRoutingState = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.RELOCATING);
         ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), shardRoutingState);
         ShardRouting shard = clusterState.getRoutingTable().index("idx").shard(0).primaryShard();
-        RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()),
+        RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()),
             clusterState.getRoutingNodes(), clusterState, null, System.nanoTime());
         ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(),
             new TestGatewayAllocator(), new ShardsAllocator() {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
index e1ba83374829f..c52fc58b90079 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
@@ -152,11 +152,10 @@ class CancellableTestNodesAction extends AbstractTestNodesAction, NodeRequest extends BaseNodeRequest> extends TransportNodesAction {
-    AbstractTestNodesAction(Settings settings, String actionName, ThreadPool threadPool,
+    AbstractTestNodesAction(String actionName, ThreadPool threadPool,
                             ClusterService clusterService, TransportService transportService, Supplier request,
                             Supplier nodeRequest) {
-        super(settings, actionName, threadPool, clusterService, transportService,
+        super(actionName, threadPool, clusterService, transportService,
             new ActionFilters(new HashSet<>()), request, nodeRequest, ThreadPool.Names.GENERIC, NodeResponse.class);
     }
@@ -192,8 +192,8 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool
         clusterService = createClusterService(threadPool, discoveryNode.get());
         clusterService.addStateApplier(transportService.getTaskManager());
         ActionFilters actionFilters = new ActionFilters(emptySet());
-        transportListTasksAction = new TransportListTasksAction(settings, clusterService, transportService, actionFilters);
-        transportCancelTasksAction = new TransportCancelTasksAction(settings, clusterService, transportService, actionFilters);
+        transportListTasksAction = new TransportListTasksAction(clusterService, transportService, actionFilters);
+        transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters);
         transportService.acceptIncomingRequests();
     }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java
index a04c8d93c3a8c..cac1ff61b2073 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java
@@ -43,7 +43,6 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.plugins.ActionPlugin;
@@ -265,10 +264,8 @@ public boolean shouldCancelChildrenOnCancellation() {
     public static class TransportTestTaskAction extends TransportNodesAction {

         @Inject
-        public TransportTestTaskAction(Settings settings, ThreadPool threadPool,
-                                       ClusterService clusterService, TransportService transportService) {
-            super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService,
-                new ActionFilters(new HashSet<>()),
+        public TransportTestTaskAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+            super(TestTaskAction.NAME, threadPool, clusterService, transportService, new ActionFilters(new HashSet<>()),
                 NodesRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeResponse.class);
         }
@@ -424,8 +421,8 @@ public static class TransportUnblockTestTasksAction extends TransportTasksAction
                                                      UnblockTestTasksResponse, UnblockTestTaskResponse> {

         @Inject
-        public TransportUnblockTestTasksAction(Settings settings, ClusterService clusterService, TransportService transportService) {
-            super(settings, UnblockTestTasksAction.NAME, clusterService, transportService, new ActionFilters(new HashSet<>()),
+        public TransportUnblockTestTasksAction(ClusterService clusterService, TransportService transportService) {
+            super(UnblockTestTasksAction.NAME, clusterService, transportService, new ActionFilters(new HashSet<>()),
                 UnblockTestTasksRequest::new, UnblockTestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
         }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
index 4b017bbf57d68..f1768f6bf3e3c 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
@@ -152,9 +152,8 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId,
      */
     abstract class TestNodesAction extends AbstractTestNodesAction {

-        TestNodesAction(Settings settings, String actionName, ThreadPool threadPool,
-                        ClusterService clusterService, TransportService transportService) {
-            super(settings, actionName, threadPool, clusterService, transportService, NodesRequest::new, NodeRequest::new);
+        TestNodesAction(String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
+            super(actionName, threadPool, clusterService, transportService, NodesRequest::new, NodeRequest::new);
         }

         @Override
@@ -234,9 +233,9 @@ public void writeTo(StreamOutput out) throws IOException {
      */
     abstract static class TestTasksAction extends TransportTasksAction {

-        protected TestTasksAction(Settings settings, String actionName,
+        protected TestTasksAction(String actionName,
                                   ClusterService clusterService, TransportService transportService) {
-            super(settings, actionName, clusterService, transportService, new ActionFilters(new HashSet<>()),
+            super(actionName, clusterService, transportService, new ActionFilters(new HashSet<>()),
                 TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
         }

@@ -276,7 +275,7 @@ private Task startBlockingTestNodesAction(CountDownLatch checkLatch, NodesReques
         TestNodesAction[] actions = new TestNodesAction[nodesCount];
         for (int i = 0; i < testNodes.length; i++) {
             final int node = i;
"internal:testAction", threadPool, testNodes[i].clusterService, + actions[i] = new TestNodesAction("internal:testAction", threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { @@ -581,7 +580,7 @@ public void testTaskLevelActionFailures() throws ExecutionException, Interrupted for (int i = 0; i < testNodes.length; i++) { final int node = i; // Simulate task action that fails on one of the tasks on one of the nodes - tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "internal:testTasksAction", testNodes[i].clusterService, + tasksActions[i] = new TestTasksAction("internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) { @Override protected void taskOperation(TestTasksRequest request, Task task, ActionListener listener) { @@ -660,7 +659,7 @@ public void testTaskNodeFiltering() throws ExecutionException, InterruptedExcept final int node = i; // Simulate a task action that works on all nodes except nodes listed in filterNodes. // We are testing that it works. - tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "internal:testTasksAction", + tasksActions[i] = new TestTasksAction("internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) { @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index 6e34a7510070e..853a991cf937f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -80,8 +80,8 @@ public void testSerializeRequest() throws IOException { } public void testClusterStateUpdateTask() { - AllocationService allocationService = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + AllocationService allocationService = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); ClusterState clusterState = createInitialClusterState(allocationService); ClusterRerouteRequest req = new ClusterRerouteRequest(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java index 9bf4d9d32f622..e8767d9edc4de 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -107,9 +107,9 @@ public void testDoNotIncludeDefaults() { class TestTransportGetIndexAction extends TransportGetIndexAction { TestTransportGetIndexAction() { - super(Settings.EMPTY, GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService, + super(GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService, GetIndexActionTests.this.threadPool, settingsFilter, new ActionFilters(emptySet()), - new GetIndexActionTests.Resolver(Settings.EMPTY), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); + new GetIndexActionTests.Resolver(), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); } 
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
index 9bf4d9d32f622..e8767d9edc4de 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java
@@ -107,9 +107,9 @@ public void testDoNotIncludeDefaults() {
     class TestTransportGetIndexAction extends TransportGetIndexAction {

         TestTransportGetIndexAction() {
-            super(Settings.EMPTY, GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService,
+            super(GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService,
                 GetIndexActionTests.this.threadPool, settingsFilter, new ActionFilters(emptySet()),
-                new GetIndexActionTests.Resolver(Settings.EMPTY), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+                new GetIndexActionTests.Resolver(), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
         }

         @Override
@@ -121,10 +121,6 @@ protected void doMasterOperation(GetIndexRequest request, String[] concreteIndic
     }

     static class Resolver extends IndexNameExpressionResolver {
-        Resolver(Settings settings) {
-            super(settings);
-        }
-
         @Override
         public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
             return request.indices();
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
index d59700f2b7a45..cbc9499cda327 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
@@ -245,7 +245,7 @@ public void testValidation() {

     public void testGenerateRolloverIndexName() {
         String invalidIndexName = randomAlphaOfLength(10) + "A";
-        IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
+        IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
         expectThrows(IllegalArgumentException.class, () ->
             TransportRolloverAction.generateRolloverIndexName(invalidIndexName, indexNameExpressionResolver));
         int num = randomIntBetween(0, 100);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
index 85b85cf9e1469..65e9e3dce4797 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java
@@ -58,9 +58,9 @@ public class GetSettingsActionTests extends ESTestCase {
     class TestTransportGetSettingsAction extends TransportGetSettingsAction {

         TestTransportGetSettingsAction() {
-            super(Settings.EMPTY, GetSettingsActionTests.this.transportService, GetSettingsActionTests.this.clusterService,
+            super(GetSettingsActionTests.this.transportService, GetSettingsActionTests.this.clusterService,
                 GetSettingsActionTests.this.threadPool, settingsFilter, new ActionFilters(Collections.emptySet()),
-                new Resolver(Settings.EMPTY), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+                new Resolver(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
         }
         @Override
         protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) {
@@ -129,10 +129,6 @@ public void testIncludeDefaultsWithFiltering() {
     }

     static class Resolver extends IndexNameExpressionResolver {
-        Resolver(Settings settings) {
-            super(settings);
-        }
-
         @Override
         public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
             return request.indices();
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
index ce60b14b3efc7..8f2db1281485d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java
@@ -107,8 +107,8 @@ public void testErrorCondition() {
         ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); - AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + AllocationService service = new AllocationService(new AllocationDeciders( + Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -126,8 +126,8 @@ public void testPassNumRoutingShards() { ClusterState clusterState = ClusterState.builder(createClusterState("source", 1, 0, Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); - AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + AllocationService service = new AllocationService(new AllocationDeciders( + Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -156,8 +156,8 @@ public void testPassNumRoutingShardsAndFail() { ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0, numShards * 4, Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); - AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + AllocationService service = new AllocationService(new AllocationDeciders( + Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -191,8 +191,8 @@ public void testShrinkIndexSettings() { .put("index.blocks.write", true) .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); - AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + AllocationService service = new AllocationService(new AllocationDeciders( + Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index 892721f8a5c68..6302766be9017 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -183,8 +183,8 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr 
             null, xContentRegistry, true);
-        MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService,
-            new AliasValidator(Settings.EMPTY), null,
+        MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(null, createIndexService,
+            new AliasValidator(), null,
             new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), xContentRegistry);

         final List throwables = new ArrayList<>();
@@ -217,7 +217,7 @@ private List putTemplateDetail(PutRequest request) throws Exception {
             xContentRegistry(), true);

         MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(
-            Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService,
+            clusterService, createIndexService, new AliasValidator(), indicesService,
             new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS), xContentRegistry());

         final List throwables = new ArrayList<>();
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
index 1fd912e72a426..3e61557869dae 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
@@ -20,22 +20,40 @@
 package org.elasticsearch.action.bulk;

+import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.ingest.IngestTestPlugin;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESIntegTestCase;

+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;

+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;

 public class BulkIntegrationIT extends ESIntegTestCase {
+    @Override
+    protected Collection> nodePlugins() {
+        return Arrays.asList(IngestTestPlugin.class);
+    }
+
     public void testBulkIndexCreatesMapping() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json");
         BulkRequestBuilder bulkBuilder = client().prepareBulk();
@@ -81,4 +99,52 @@ public void testBulkWithWriteIndexAndRouting() {
         assertFalse(bulkResponse.hasFailures());
         assertFalse(client().prepareGet("index3", "type", "id").setRouting("1").get().isExists());
     }
+
+    public void testBulkWithGlobalDefaults() throws Exception {
+        // all requests in the json are missing index and type parameters: "_index" : "test", "_type" : "type1",
copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json"); + { + BulkRequestBuilder bulkBuilder = client().prepareBulk(); + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder::get); + + assertThat(ex.validationErrors(), containsInAnyOrder( + "index is missing", + "index is missing", + "index is missing", + "type is missing", + "type is missing", + "type is missing")); + } + + { + createSamplePipeline("pipeline"); + BulkRequestBuilder bulkBuilder = client().prepareBulk("test","type1") + .routing("routing") + .pipeline("pipeline"); + + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); + BulkResponse bulkItemResponses = bulkBuilder.get(); + assertFalse(bulkItemResponses.hasFailures()); + } + } + + private void createSamplePipeline(String pipelineId) throws IOException, ExecutionException, InterruptedException { + XContentBuilder pipeline = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject(); + + AcknowledgedResponse acknowledgedResponse = client().admin() + .cluster() + .putPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(pipeline), XContentType.JSON)) + .get(); + + assertTrue(acknowledgedResponse.isAcknowledged()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index 3fbfa381ad352..6a7d9bc02ec3e 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -72,19 +72,9 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { threadPool.getThreadContext().putHeader(headerKey, headerValue); threadPool.getThreadContext().putTransient(transientKey, transientValue); - bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) { - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - } - }, 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, threadPool, () -> {}); + bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(), + 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, + threadPool, () -> {}, BulkRequest::new); } assertNull(threadPool.getThreadContext().getHeader(headerKey)); assertNull(threadPool.getThreadContext().getTransient(transientKey)); @@ -100,28 +90,32 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) bulkProcessor.close(); } + public void testAwaitOnCloseCallsOnClose() throws Exception { final AtomicBoolean called = new AtomicBoolean(false); - BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> { - }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { + BiConsumer> consumer = (request, listener) -> {}; + BulkProcessor bulkProcessor = new BulkProcessor(consumer, 
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
index 3fbfa381ad352..6a7d9bc02ec3e 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
@@ -72,19 +72,9 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException
         try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
             threadPool.getThreadContext().putHeader(headerKey, headerValue);
             threadPool.getThreadContext().putTransient(transientKey, transientValue);
-            bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() {
-                @Override
-                public void beforeBulk(long executionId, BulkRequest request) {
-                }
-
-                @Override
-                public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
-                }
-
-                @Override
-                public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
-                }
-            }, 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, threadPool, () -> {});
+            bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(),
+                1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval,
+                threadPool, () -> {}, BulkRequest::new);
         }
         assertNull(threadPool.getThreadContext().getHeader(headerKey));
         assertNull(threadPool.getThreadContext().getTransient(transientKey));
@@ -100,28 +90,32 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)
         bulkProcessor.close();
     }

+
     public void testAwaitOnCloseCallsOnClose() throws Exception {
         final AtomicBoolean called = new AtomicBoolean(false);
-        BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> {
-        }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() {
+        BiConsumer> consumer = (request, listener) -> {};
+        BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(),
+            0, 10, new ByteSizeValue(1000), null,
+            (delay, executor, command) -> null, () -> called.set(true), BulkRequest::new);
+
+        assertFalse(called.get());
+        bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS);
+        assertTrue(called.get());
+    }
+
+    private BulkProcessor.Listener emptyListener() {
+        return new BulkProcessor.Listener() {
             @Override
             public void beforeBulk(long executionId, BulkRequest request) {
-
             }

             @Override
             public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
-
             }

             @Override
             public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
-
             }
-        }, 0, 10, new ByteSizeValue(1000), null, (delay, executor, command) -> null, () -> called.set(true));
-
-        assertFalse(called.get());
-        bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS);
-        assertTrue(called.get());
+        };
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
index 1b2c238098e50..65935bea96e59 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -165,7 +165,8 @@ public void testSimpleBulk9() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
             () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON));
-        assertThat(exc.getMessage(), containsString("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"));
+        assertThat(exc.getMessage(),
+            containsString("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"));
     }

     public void testSimpleBulk10() throws Exception {
@@ -207,7 +208,8 @@ public void testBulkEmptyObject() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
             () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON));
-        assertThat(exc.getMessage(), containsString("Malformed action/metadata line [" + emptyLine + "], expected FIELD_NAME but found [END_OBJECT]"));
+        assertThat(exc.getMessage(), containsString("Malformed action/metadata line ["
+            + emptyLine + "], expected FIELD_NAME but found [END_OBJECT]"));
     }

     // issue 7361
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
index f9cdfa092ae3c..decee8ceab714 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java
@@ -230,7 +230,8 @@ private BulkItemResponse successfulResponse() {
     }

     private BulkItemResponse failedResponse() {
-        return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full")));
+        return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1",
+            new EsRejectedExecutionException("pool full")));
     }
 }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 1a0e314d88680..10014c6fb3f56 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.IndexNotFoundException;
@@ -103,7 +102,7 @@ private void indicesThatCannotBeCreatedTestCase(Set expected,
         ClusterState state = mock(ClusterState.class);
         when(state.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA);
         when(clusterService.state()).thenReturn(state);
-        TransportBulkAction action = new TransportBulkAction(Settings.EMPTY, null, mock(TransportService.class), clusterService,
+        TransportBulkAction action = new TransportBulkAction(null, mock(TransportService.class), clusterService,
             null, null, null, mock(ActionFilters.class), null, null) {
             @Override
             void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos, ActionListener listener,
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index 7fdb12ff1356a..c93b0345ece02 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -122,11 +122,11 @@ class TestTransportBulkAction extends TransportBulkAction {
         boolean indexCreated = true; // If set to false, will be set to true by call to createIndex

         TestTransportBulkAction() {
-            super(SETTINGS, null, transportService, clusterService, ingestService,
+            super(null, transportService, clusterService, ingestService,
                 null, null, new ActionFilters(Collections.emptySet()), null,
                 new AutoCreateIndex(
                     SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
-                    new IndexNameExpressionResolver(SETTINGS)
+                    new IndexNameExpressionResolver()
                 )
             );
         }
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
index a1abd4d61f7fd..495484e5e506a 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
@@ -57,9 +57,9 @@ class TestTransportBulkAction extends TransportBulkAction {
         boolean indexCreated = false; // set when the "real" index is created

         TestTransportBulkAction() {
-            super(Settings.EMPTY, TransportBulkActionTests.this.threadPool, transportService, clusterService, null, null,
-                null, new ActionFilters(Collections.emptySet()), new Resolver(Settings.EMPTY),
-                new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(Settings.EMPTY)));
+            super(TransportBulkActionTests.this.threadPool, transportService, clusterService, null, null,
+                null, new ActionFilters(Collections.emptySet()), new Resolver(),
+                new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver()));
         }

         @Override
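A condensed sketch of the AutoCreateIndex construction shared by the bulk-action tests above, assuming the same argument order they use:

----
AutoCreateIndex autoCreateIndex = new AutoCreateIndex(
    Settings.EMPTY, clusterService.getClusterSettings(), new IndexNameExpressionResolver());
----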
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
index f6559f226417e..6bfc220b7a3db 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
@@ -97,7 +97,7 @@ private TransportBulkAction createAction(boolean controlled, AtomicLong expected
             boundAddress -> clusterService.localNode(), null, Collections.emptySet());
         transportService.start();
         transportService.acceptIncomingRequests();
-        IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY);
+        IndexNameExpressionResolver resolver = new Resolver();
         ActionFilters actionFilters = new ActionFilters(new HashSet<>());

         NodeClient client = new NodeClient(Settings.EMPTY, threadPool) {
@@ -111,7 +111,6 @@ void doExecute(Action action, Request request, ActionListener listener) {
@@ -74,7 +70,7 @@ public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRe
         AtomicReference> result = new AtomicReference<>();
         CountDownLatch latch = new CountDownLatch(1);
         GroupShardsIterator shardsIter = SearchAsyncActionTests.getShardsIter("idx",
-            new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()),
+            new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS),
             2, randomBoolean(), primaryNode, replicaNode);
         final SearchRequest searchRequest = new SearchRequest();
         searchRequest.allowPartialSearchResults(true);
@@ -119,9 +115,7 @@ public void testFilterWithFailure() throws InterruptedException {
         lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
         lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
         final boolean shard1 = randomBoolean();
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
-
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,
                                      ActionListener listener) {
@@ -143,7 +137,7 @@ public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRe
         AtomicReference> result = new AtomicReference<>();
         CountDownLatch latch = new CountDownLatch(1);
         GroupShardsIterator shardsIter = SearchAsyncActionTests.getShardsIter("idx",
-            new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()),
+            new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS),
             2, randomBoolean(), primaryNode, replicaNode);
         final SearchRequest searchRequest = new SearchRequest();
@@ -187,7 +181,7 @@ public void testLotsOfShards() throws InterruptedException {
         final SearchTransportService searchTransportService =
-            new SearchTransportService(Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+            new SearchTransportService(null, null) {
                 @Override
                 public void sendCanMatch(
                     Transport.Connection connection,
@@ -199,7 +193,7 @@ public void sendCanMatch(
             };

         final CountDownLatch latch = new CountDownLatch(1);
-        final OriginalIndices originalIndices = new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed());
+        final OriginalIndices originalIndices = new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS);
         final GroupShardsIterator shardsIter = SearchAsyncActionTests.getShardsIter("idx",
             originalIndices, 4096, randomBoolean(), primaryNode, replicaNode);
         final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
diff --git a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java
index bd1d6a85b09f5..55c39f735ce31 100644
--- a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchPhaseResult;
@@ -70,7 +69,7 @@ public void onFailure(Exception e) {
             }
         };
         List nodesInvoked = new CopyOnWriteArrayList<>();
-        SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendClearAllScrollContexts(Transport.Connection connection, ActionListener listener) {
                 nodesInvoked.add(connection.getNode());
@@ -135,7 +134,7 @@ public void onFailure(Exception e) {
             }
         };
         List nodesInvoked = new CopyOnWriteArrayList<>();
-        SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendFreeContext(Transport.Connection connection, long contextId,
@@ -213,7 +212,7 @@ public void onFailure(Exception e) {
             }
         };
         List nodesInvoked = new CopyOnWriteArrayList<>();
-        SearchTransportService searchTransportService = new SearchTransportService(Settings.EMPTY, null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendFreeContext(Transport.Connection connection, long contextId,
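The stubbing pattern these search tests now share, sketched once: the two-argument constructor (the Settings argument is gone) plus an override that records instead of going over the wire; `nodesInvoked` is the collection from the test above:

----
SearchTransportService searchTransportService = new SearchTransportService(null, null) {
    @Override
    public void sendClearAllScrollContexts(Transport.Connection connection, ActionListener listener) {
        nodesInvoked.add(connection.getNode()); // record the target node instead of sending a request
    }
};
----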
Settings.builder().put("cluster.remote.connect", false).build(), null, null) { - + SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { @@ -121,11 +118,9 @@ public void testDfsWith1ShardFailed() throws IOException { results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); - SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, + SearchPhaseController controller = new SearchPhaseController( (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); - SearchTransportService searchTransportService = new SearchTransportService( - Settings.builder().put("cluster.remote.connect", false).build(), null, null) { - + SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { @@ -180,11 +175,9 @@ public void testFailPhaseOnException() throws IOException { results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); - SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, + SearchPhaseController controller = new SearchPhaseController( (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); - SearchTransportService searchTransportService = new SearchTransportService( - Settings.builder().put("cluster.remote.connect", false).build(), null, null) { - + SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 7d19ee58f9fb8..9fe4f92ef2b1b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.InnerHitBuilder; @@ -69,9 +68,7 @@ public void testCollapseSingleHit() throws IOException { .setInnerHits(IntStream.range(0, numInnerHits).mapToObj(hitNum -> new InnerHitBuilder().setName("innerHit" + hitNum)) .collect(Collectors.toList())))); mockSearchPhaseContext.getRequest().source().query(originalQuery); - mockSearchPhaseContext.searchTransport = new SearchTransportService( - Settings.builder().put("cluster.remote.connect", false).build(), null, null) { - + mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { assertTrue(executedMultiSearch.compareAndSet(false, true)); @@ -144,9 +141,7 @@ public void testFailOneItemFailsEntirePhase() throws IOException { String collapseValue 
         String collapseValue = randomBoolean() ? null : "boom";
         mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder()
             .collapse(new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz"))));
-        mockSearchPhaseContext.searchTransport = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
-
+        mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) {
             @Override
             void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) {
                 assertTrue(executedMultiSearch.compareAndSet(false, true));
@@ -186,9 +181,7 @@ public void run() throws IOException {

     public void testSkipPhase() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
-        mockSearchPhaseContext.searchTransport = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
-
+        mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) {
             @Override
             void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) {
                 fail("no collapsing here");
@@ -217,9 +210,7 @@ public void run() throws IOException {

     public void testSkipExpandCollapseNoHits() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
-        mockSearchPhaseContext.searchTransport = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
-
+        mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) {
             @Override
             void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) {
                 fail("expand should not try to send empty multi search request");
@@ -249,9 +240,7 @@ public void testExpandRequestOptions() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
         boolean version = randomBoolean();
-        mockSearchPhaseContext.searchTransport = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
-
+        mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) {
             @Override
             void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) {
                 final QueryBuilder postFilter = QueryBuilders.existsQuery("foo");
diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
index 55ca24826fc37..3db14ec19b84a 100644
--- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.search.DocValueFormat;
@@ -47,7 +46,7 @@ public class FetchSearchPhaseTests extends ESTestCase {

     public void testShortcutQueryAndFetchOptimization() throws IOException {
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
         InitialSearchPhase.ArraySearchPhaseResults results =
@@ -90,7 +89,7 @@ public void run() throws IOException {

     public void testFetchTwoDocument() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         InitialSearchPhase.ArraySearchPhaseResults results =
             controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
@@ -110,8 +109,7 @@ public void testFetchTwoDocument() throws IOException {
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -147,7 +145,7 @@ public void run() throws IOException {

     public void testFailFetchOneDoc() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         InitialSearchPhase.ArraySearchPhaseResults results =
             controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
@@ -167,8 +165,7 @@ public void testFailFetchOneDoc() throws IOException {
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -208,7 +205,7 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException
         int resultSetSize = randomIntBetween(0, 100);
         // we use at least 2 hits otherwise this is subject to single shard optimization and we trip an assert...
         int numHits = randomIntBetween(2, 100); // also numshards --> 1 hit per shard
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits);
         InitialSearchPhase.ArraySearchPhaseResults results =
@@ -222,8 +219,7 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException
             queryResult.setShardIndex(i);
             results.consumeResult(queryResult);
         }
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -266,7 +262,7 @@ public void run() throws IOException {

     public void testExceptionFailsPhase() throws IOException {
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         InitialSearchPhase.ArraySearchPhaseResults results =
             controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
@@ -286,8 +282,7 @@ public void testExceptionFailsPhase() throws IOException {
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
         AtomicInteger numFetches = new AtomicInteger(0);
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
@@ -322,7 +317,7 @@ public void run() throws IOException {

     public void testCleanupIrrelevantContexts() throws IOException { // contexts that are not fetched should be cleaned up
         MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
-        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY,
+        SearchPhaseController controller = new SearchPhaseController(
             (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
         InitialSearchPhase.ArraySearchPhaseResults results =
             controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2);
@@ -342,8 +337,7 @@ public void testCleanupIrrelevantContexts() throws IOException { // contexts tha
         queryResult.setShardIndex(1);
         results.consumeResult(queryResult);
-        SearchTransportService searchTransportService = new SearchTransportService(
-            Settings.builder().put("cluster.remote.connect", false).build(), null, null) {
+        SearchTransportService searchTransportService = new SearchTransportService(null, null) {
             @Override
             public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                          SearchActionListener listener) {
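The controller construction these tests converge on, condensed into one sketch; Settings is dropped and only the reduce-context factory remains:

----
SearchPhaseController controller = new SearchPhaseController(
    (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b));
----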
index f327086cd00e5..ece695575a107 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -184,11 +184,6 @@ void executeSearch(final Queue requests, final AtomicArray shardsIter = getShardsIter("idx", - new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()), + new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS), 10, randomBoolean(), primaryNode, replicaNode); int numSkipped = 0; for (SearchShardIterator iter : shardsIter) { @@ -93,7 +91,7 @@ public void onFailure(Exception e) { } } - SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null, null); + SearchTransportService transportService = new SearchTransportService(null, null); Map lookup = new HashMap<>(); Map seenShard = new ConcurrentHashMap<>(); lookup.put(primaryNode.getId(), new MockConnection(primaryNode)); @@ -183,9 +181,9 @@ public void onFailure(Exception e) { AtomicInteger contextIdGenerator = new AtomicInteger(0); GroupShardsIterator shardsIter = getShardsIter("idx", - new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()), + new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS), 10, randomBoolean(), primaryNode, replicaNode); - SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null, null); + SearchTransportService transportService = new SearchTransportService(null, null); Map lookup = new HashMap<>(); Map seenShard = new ConcurrentHashMap<>(); lookup.put(primaryNode.getId(), new MockConnection(primaryNode)); @@ -283,10 +281,10 @@ public void onFailure(Exception e) { Map> nodeToContextMap = newConcurrentMap(); AtomicInteger contextIdGenerator = new AtomicInteger(0); GroupShardsIterator shardsIter = getShardsIter("idx", - new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()), + new OriginalIndices(new String[]{"idx"}, SearchRequest.DEFAULT_INDICES_OPTIONS), randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode); AtomicInteger numFreedContext = new AtomicInteger(); - SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null, null) { + SearchTransportService transportService = new SearchTransportService(null, null) { @Override public void sendFreeContext(Transport.Connection connection, long contextId, OriginalIndices originalIndices) { numFreedContext.incrementAndGet(); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index b109e82beefee..f4cb7d224d2aa 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -70,7 +69,7 @@ public class SearchPhaseControllerTests extends ESTestCase { @Before public void setup() { - searchPhaseController = new SearchPhaseController(Settings.EMPTY, + 
searchPhaseController = new SearchPhaseController( (b) -> new InternalAggregation.ReduceContext(BigArrays.NON_RECYCLING_INSTANCE, null, b)); } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index e529af97c800d..3e4747a4db757 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -80,7 +80,7 @@ public void testMergeShardsIterators() { GroupShardsIterator localShardsIterator = new GroupShardsIterator<>(localShardIterators); OriginalIndices localIndices = new OriginalIndices(new String[]{"local_alias", "local_index_2"}, - IndicesOptions.strictExpandOpenAndForbidClosed()); + SearchRequest.DEFAULT_INDICES_OPTIONS); OriginalIndices remoteIndices = new OriginalIndices(new String[]{"remote_alias", "remote_index_2"}, IndicesOptions.strictExpandOpen()); @@ -185,9 +185,9 @@ public void testProcessRemoteShards() { Map remoteIndicesByCluster = new HashMap<>(); remoteIndicesByCluster.put("test_cluster_1", - new OriginalIndices(new String[]{"fo*", "ba*"}, IndicesOptions.strictExpandOpenAndForbidClosed())); + new OriginalIndices(new String[]{"fo*", "ba*"}, SearchRequest.DEFAULT_INDICES_OPTIONS)); remoteIndicesByCluster.put("test_cluster_2", - new OriginalIndices(new String[]{"x*"}, IndicesOptions.strictExpandOpenAndForbidClosed())); + new OriginalIndices(new String[]{"x*"}, SearchRequest.DEFAULT_INDICES_OPTIONS)); Map remoteAliases = new HashMap<>(); TransportSearchAction.processRemoteShards(searchShardsResponseMap, remoteIndicesByCluster, iteratorList, remoteAliases); @@ -274,6 +274,6 @@ private static OriginalIndices randomOriginalIndices() { localIndices[i] = randomAlphaOfLengthBetween(3, 10); } return new OriginalIndices(localIndices, IndicesOptions.fromOptions(randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java index 4918939c90b86..39c5920f7d9d6 100644 --- a/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java @@ -177,7 +177,7 @@ public void testUpdate() { ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, new IndexNameExpressionResolver(settings)); + AutoCreateIndex autoCreateIndex = new AutoCreateIndex(settings, clusterSettings, new IndexNameExpressionResolver()); assertThat(autoCreateIndex.getAutoCreate().isAutoCreateIndex(), equalTo(value)); Settings newSettings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), !value).build(); @@ -202,7 +202,7 @@ private static ClusterState buildClusterState(String... 
indices) { private AutoCreateIndex newAutoCreateIndex(Settings settings) { return new AutoCreateIndex(settings, new ClusterSettings(settings, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new IndexNameExpressionResolver(settings)); + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new IndexNameExpressionResolver()); } private void expectNotMatch(ClusterState clusterState, AutoCreateIndex autoCreateIndex, String index) { diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index f564066b4e3df..af1deb8fa18e3 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -53,7 +53,8 @@ public void testSerialization() throws Exception { for (int i = 0; i < iterations; i++) { Version version = randomVersionBetween(random(), Version.V_7_0_0_alpha1, null); IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean()); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(version); @@ -78,9 +79,9 @@ public void testSerialization() throws Exception { public void testSerializationPre70() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { - Version version = randomVersionBetween(random(), null, Version.V_6_4_0); + Version version = randomVersionBetween(random(), null, Version.V_6_6_0); IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(version); @@ -99,6 +100,12 @@ public void testSerializationPre70() throws Exception { assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); + if (output.getVersion().onOrAfter(Version.V_6_6_0)) { + assertEquals(indicesOptions2.ignoreThrottled(), indicesOptions.ignoreThrottled()); + } else { + assertFalse(indicesOptions2.ignoreThrottled()); // make sure we never write this option to pre 6.6 + } + } } @@ -110,9 +117,10 @@ public void testFromOptions() { boolean allowAliasesToMultipleIndices = randomBoolean(); boolean forbidClosedIndices = randomBoolean(); boolean ignoreAliases = randomBoolean(); + boolean ignoreThrottled = randomBoolean(); IndicesOptions indicesOptions = IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices,expandToOpenIndices, - expandToClosedIndices, allowAliasesToMultipleIndices, forbidClosedIndices, ignoreAliases); + expandToClosedIndices, allowAliasesToMultipleIndices, forbidClosedIndices, ignoreAliases, ignoreThrottled); assertThat(indicesOptions.ignoreUnavailable(), equalTo(ignoreUnavailable)); assertThat(indicesOptions.allowNoIndices(), equalTo(allowNoIndices)); @@ -122,6 +130,7 @@ public void testFromOptions() { assertThat(indicesOptions.allowAliasesToMultipleIndices(), equalTo(allowAliasesToMultipleIndices)); assertThat(indicesOptions.forbidClosedIndices(),
equalTo(forbidClosedIndices)); assertEquals(ignoreAliases, indicesOptions.ignoreAliases()); + assertEquals(ignoreThrottled, indicesOptions.ignoreThrottled()); } public void testFromOptionsWithDefaultOptions() { @@ -131,7 +140,7 @@ public void testFromOptionsWithDefaultOptions() { boolean expandToClosedIndices = randomBoolean(); IndicesOptions defaultOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); IndicesOptions indicesOptions = IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices,expandToOpenIndices, expandToClosedIndices, defaultOptions); @@ -164,14 +173,16 @@ public void testFromParameters() { } boolean ignoreUnavailable = randomBoolean(); String ignoreUnavailableString = Boolean.toString(ignoreUnavailable); + boolean ignoreThrottled = randomBoolean(); + String ignoreThrottledString = Boolean.toString(ignoreThrottled); boolean allowNoIndices = randomBoolean(); String allowNoIndicesString = Boolean.toString(allowNoIndices); IndicesOptions defaultOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); IndicesOptions updatedOptions = IndicesOptions.fromParameters(expandWildcardsString, ignoreUnavailableString, - allowNoIndicesString, defaultOptions); + allowNoIndicesString, ignoreThrottled, defaultOptions); assertEquals(expandWildcardsOpen, updatedOptions.expandWildcardsOpen()); assertEquals(expandWildcardsClosed, updatedOptions.expandWildcardsClosed()); @@ -185,19 +196,19 @@ public void testFromParameters() { public void testSimpleByteBWC() { Map old = new HashMap<>(); // These correspond to each individual option (bit) in the old byte-based IndicesOptions - old.put((byte) 0, IndicesOptions.fromOptions(false, false, false, false, true, false, false)); - old.put((byte) 1, IndicesOptions.fromOptions(true, false, false, false, true, false, false)); - old.put((byte) 2, IndicesOptions.fromOptions(false, true, false, false, true, false, false)); - old.put((byte) 4, IndicesOptions.fromOptions(false, false, true, false, true, false, false)); - old.put((byte) 8, IndicesOptions.fromOptions(false, false, false, true, true, false, false)); - old.put((byte) 16, IndicesOptions.fromOptions(false, false, false, false, false, false, false)); - old.put((byte) 32, IndicesOptions.fromOptions(false, false, false, false, true, true, false)); - old.put((byte) 64, IndicesOptions.fromOptions(false, false, false, false, true, false,
true, false)); // Test a few multi-selected options - old.put((byte) 13, IndicesOptions.fromOptions(true, false, true, true, true, false, false)); - old.put((byte) 19, IndicesOptions.fromOptions(true, true, false, false, false, false, false)); - old.put((byte) 24, IndicesOptions.fromOptions(false, false, false, true, false, false, false)); - old.put((byte) 123, IndicesOptions.fromOptions(true, true, false, true, false, true, true)); + old.put((byte) 13, IndicesOptions.fromOptions(true, false, true, true, true, false, false, false)); + old.put((byte) 19, IndicesOptions.fromOptions(true, true, false, false, false, false, false, false)); + old.put((byte) 24, IndicesOptions.fromOptions(false, false, false, true, false, false, false, false)); + old.put((byte) 123, IndicesOptions.fromOptions(true, true, false, true, false, true, true, false)); for (Map.Entry entry : old.entrySet()) { IndicesOptions indicesOptions2 = IndicesOptions.fromByte(entry.getKey()); @@ -209,11 +220,13 @@ public void testSimpleByteBWC() { public void testEqualityAndHashCode() { IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(indicesOptions, opts -> { return IndicesOptions.fromOptions(opts.ignoreUnavailable(), opts.allowNoIndices(), opts.expandWildcardsOpen(), - opts.expandWildcardsClosed(), opts.allowAliasesToMultipleIndices(), opts.forbidClosedIndices(), opts.ignoreAliases()); + opts.expandWildcardsClosed(), opts.allowAliasesToMultipleIndices(), opts.forbidClosedIndices(), opts.ignoreAliases(), + opts.ignoreThrottled()); }, opts -> { boolean mutated = false; boolean ignoreUnavailable = opts.ignoreUnavailable(); @@ -223,6 +236,7 @@ public void testEqualityAndHashCode() { boolean allowAliasesToMulti = opts.allowAliasesToMultipleIndices(); boolean forbidClosed = opts.forbidClosedIndices(); boolean ignoreAliases = opts.ignoreAliases(); + boolean ignoreThrottled = opts.ignoreThrottled(); while (mutated == false) { if (randomBoolean()) { ignoreUnavailable = !ignoreUnavailable; @@ -252,9 +266,13 @@ public void testEqualityAndHashCode() { ignoreAliases = !ignoreAliases; mutated = true; } + if (randomBoolean()) { + ignoreThrottled = !ignoreThrottled; + mutated = true; + } } return IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandOpen, expandClosed, - allowAliasesToMulti, forbidClosed, ignoreAliases); + allowAliasesToMulti, forbidClosed, ignoreAliases, ignoreThrottled); }); } @@ -264,6 +282,7 @@ public void testFromMap() { null : randomSubsetOf(Arrays.asList("open", "closed")); Boolean ignoreUnavailable = randomBoolean() ? null : randomBoolean(); Boolean allowNoIndices = randomBoolean() ? null : randomBoolean(); + Boolean ignoreThrottled = randomBoolean() ? null : randomBoolean(); Map settings = new HashMap<>(); @@ -279,6 +298,10 @@ public void testFromMap() { settings.put("allow_no_indices", allowNoIndices); } + if (ignoreThrottled != null) { + settings.put("ignore_throttled", ignoreThrottled); + } + IndicesOptions fromMap = IndicesOptions.fromMap(settings, defaults); boolean open = wildcardStates != null ? wildcardStates.contains("open") : defaults.expandWildcardsOpen(); @@ -288,6 +311,7 @@ public void testFromMap() { assertEquals(fromMap.ignoreUnavailable(), ignoreUnavailable == null ? 
defaults.ignoreUnavailable() : ignoreUnavailable); assertEquals(fromMap.allowNoIndices(), allowNoIndices == null ? defaults.allowNoIndices() : allowNoIndices); + assertEquals(fromMap.ignoreThrottled(), ignoreThrottled == null ? defaults.ignoreThrottled() : ignoreThrottled); } public void testToXContent() throws IOException { @@ -320,5 +344,6 @@ public void testToXContent() throws IOException { } assertEquals(map.get("ignore_unavailable"), options.contains(Option.IGNORE_UNAVAILABLE)); assertEquals(map.get("allow_no_indices"), options.contains(Option.ALLOW_NO_INDICES)); + assertEquals(map.get("ignore_throttled"), options.contains(Option.IGNORE_THROTTLED)); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 9df73c8c95543..f222bcc015c62 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -80,7 +80,7 @@ public void testActionFiltersRequest() throws ExecutionException, InterruptedExc String actionName = randomAlphaOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); TransportAction transportAction = - new TransportAction(Settings.EMPTY, actionName, actionFilters, + new TransportAction(actionName, actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) { @Override protected void doExecute(Task task, TestRequest request, ActionListener listener) { @@ -157,8 +157,8 @@ public void exe String actionName = randomAlphaOfLength(randomInt(30)); ActionFilters actionFilters = new ActionFilters(filters); - TransportAction transportAction = new TransportAction(Settings.EMPTY, - actionName, actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) { + TransportAction transportAction = new TransportAction(actionName, + actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) { @Override protected void doExecute(Task task, TestRequest request, ActionListener listener) { listener.onResponse(new TestResponse()); diff --git a/server/src/test/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/test/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index 6a33b8ae1df65..c3ac7fc812aa7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -54,7 +54,8 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { fail("can't index, not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); - assertThat(e.getMessage(), startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms], request:")); + assertThat(e.getMessage(), + startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2).
Timeout: [100ms], request:")); // but really, all is well } @@ -83,12 +84,14 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { fail("can't index, not enough active shard copies"); } catch (UnavailableShardsException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); - assertThat(e.getMessage(), startsWith("[test][0] Not enough active copies to meet shard count of [" + ActiveShardCount.ALL + "] (have 2, needed 3). Timeout: [100ms], request:")); + assertThat(e.getMessage(), startsWith("[test][0] Not enough active copies to meet shard count of [" + + ActiveShardCount.ALL + "] (have 2, needed 3). Timeout: [100ms], request:")); // but really, all is well } allowNodes("test", 3); - clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet(); + clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3) + .setWaitForGreenStatus().execute().actionGet(); logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index d12c3ca26eb08..20383d8cf88bc 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -114,11 +114,15 @@ public Response(int totalShards, int successfulShards, int failedShards, List { + class TestTransportBroadcastByNodeAction + extends TransportBroadcastByNodeAction { private final Map shards = new HashMap<>(); - TestTransportBroadcastByNodeAction(Settings settings, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, String executor) { - super(settings, "indices:admin/test", TransportBroadcastByNodeActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor); + TestTransportBroadcastByNodeAction(TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + String executor) { + super("indices:admin/test", TransportBroadcastByNodeActionTests.this.clusterService, transportService, + actionFilters, indexNameExpressionResolver, request, executor); } @Override @@ -127,7 +131,9 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List emptyResults, List shardFailures, ClusterState clusterState) { + protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, + List emptyResults, List shardFailures, + ClusterState clusterState) { return new Response(totalShards, successfulShards, failedShards, shardFailures); } @@ -171,10 +177,6 @@ public Map getResults() { } class MyResolver extends IndexNameExpressionResolver { - MyResolver() { - super(Settings.EMPTY); - } - @Override public String[] concreteIndexNames(ClusterState state, 
IndicesRequest request) { return request.indices(); @@ -197,7 +199,6 @@ public void setUp() throws Exception { transportService.acceptIncomingRequests(); setClusterState(clusterService, TEST_INDEX); action = new TestTransportBroadcastByNodeAction( - Settings.EMPTY, transportService, new ActionFilters(new HashSet<>()), new MyResolver(), @@ -226,7 +227,8 @@ void setClusterState(ClusterService clusterService, String index) { totalIndexShards += numberOfShards; for (int j = 0; j < numberOfShards; j++) { final ShardId shardId = new ShardId(index, "_na_", ++shardIndex); - ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.getId(), true, ShardRoutingState.STARTED); + ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.getId(), true, + ShardRoutingState.STARTED); IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(shardId); indexShard.addShard(shard); indexRoutingTable.addIndexShard(indexShard.build()); @@ -263,7 +265,8 @@ public void testGlobalBlock() { PlainActionFuture listener = new PlainActionFuture<>(); ClusterBlocks.Builder block = ClusterBlocks.builder() - .addGlobalBlock(new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + .addGlobalBlock(new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, + ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); try { action.new AsyncAction(null, request, listener).start(); @@ -278,7 +281,8 @@ public void testRequestBlock() { PlainActionFuture listener = new PlainActionFuture<>(); ClusterBlocks.Builder block = ClusterBlocks.builder() - .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, + ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); try { action.new AsyncAction(null, request, listener).start(); @@ -450,7 +454,8 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept } } totalSuccessfulShards += shardResults.size(); - TransportBroadcastByNodeAction.NodeResponse nodeResponse = action.new NodeResponse(entry.getKey(), shards.size(), shardResults, exceptions); + TransportBroadcastByNodeAction.NodeResponse nodeResponse = action.new NodeResponse(entry.getKey(), shards.size(), + shardResults, exceptions); transport.handleResponse(requestId, nodeResponse); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index f592b3d803af5..d7917ceb2322d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -130,9 +129,10 @@ public 
ActionRequestValidationException validate() { class Response extends ActionResponse {} class Action extends TransportMasterNodeAction { - Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { - super(settings, actionName, transportService, clusterService, threadPool, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new); + Action(String actionName, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool) { + super(actionName, transportService, clusterService, threadPool, + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new); } @Override @@ -174,9 +174,10 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { + new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception { if (masterOperationFailure) { listener.onFailure(exception); } else { @@ -211,7 +212,7 @@ public void testLocalOperationWithBlocks() throws ExecutionException, Interrupte .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { + new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); @@ -253,7 +254,7 @@ public void testCheckBlockThrowsException() throws InterruptedException { .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { + new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); @@ -281,7 +282,7 @@ public void testForceLocalOperation() throws ExecutionException, InterruptedExce setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(localNode, remoteNode, null), allNodes)); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { + new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected boolean localExecute(Request request) { return true; @@ -296,7 +297,7 @@ public void testMasterNotAvailable() throws ExecutionException, InterruptedExcep Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertTrue(listener.isDone()); 
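These master-node tests drive every request through a PlainActionFuture, which serves both as the ActionListener handed to execute() and as a Future the test can poll. A minimal sketch of that listener pattern, completing the future directly instead of through a real transport action:

import org.elasticsearch.action.support.PlainActionFuture;

// Sketch of the listener pattern used by the tests above: PlainActionFuture
// implements ActionListener, so an action can complete it asynchronously
// while the test polls isDone() or blocks on actionGet().
public class ListenerSketch {
    public static void main(String[] args) {
        PlainActionFuture<String> listener = PlainActionFuture.newFuture();
        // a real test would call: new Action(...).execute(request, listener);
        listener.onResponse("ok");                  // simulate the action responding
        System.out.println(listener.isDone());      // true: what assertTrue(listener.isDone()) checks
        System.out.println(listener.actionGet());   // "ok"
    }
}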
assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); } @@ -305,7 +306,7 @@ public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertFalse(listener.isDone()); setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); @@ -317,7 +318,7 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; @@ -340,7 +341,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted .version(randomIntBetween(0, 10))); // use a random base version so it can go down when simulating a restart. PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); @@ -413,7 +414,7 @@ public void testMasterFailoverAfterStepDown() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { + new Action( "internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index be7ec477ff48c..2667f0ee93e64 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; @@ -214,7 +213,6 @@ public void tearDown() 
throws Exception { public TestTransportNodesAction getTestTransportNodesAction() { return new TestTransportNodesAction( - Settings.EMPTY, THREAD_POOL, clusterService, transportService, @@ -227,7 +225,6 @@ public TestTransportNodesAction getTestTransportNodesAction() { public DataNodesOnlyTransportNodesAction getDataNodesOnlyTransportNodesAction(TransportService transportService) { return new DataNodesOnlyTransportNodesAction( - Settings.EMPTY, THREAD_POOL, clusterService, transportService, @@ -246,10 +243,10 @@ private static DiscoveryNode newNode(int nodeId, Map attributes, private static class TestTransportNodesAction extends TransportNodesAction { - TestTransportNodesAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService + TestTransportNodesAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, Supplier request, Supplier nodeRequest, String nodeExecutor) { - super(settings, "indices:admin/test", threadPool, clusterService, transportService, actionFilters, + super("indices:admin/test", threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor, TestNodeResponse.class); } @@ -279,10 +276,10 @@ protected TestNodeResponse nodeOperation(TestNodeRequest request) { private static class DataNodesOnlyTransportNodesAction extends TestTransportNodesAction { - DataNodesOnlyTransportNodesAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService + DataNodesOnlyTransportNodesAction(ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, Supplier request, Supplier nodeRequest, String nodeExecutor) { - super(settings, threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor); + super(threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 3ef599d8c9bf3..5529701db1513 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -100,8 +100,8 @@ threadPool, BigArrays.NON_RECYCLING_INSTANCE, circuitBreakerService, new NamedWr TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); - broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, clusterService, transportService, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), null); + broadcastReplicationAction = new TestBroadcastReplicationAction(clusterService, transportService, + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), null); } @Override @@ -171,7 +171,8 @@ public void testResultCombine() throws InterruptedException, ExecutionException, if (shardsSucceeded == 1 && randomBoolean()) { //sometimes add failure (no failure means shard unavailable) failures = new ReplicationResponse.ShardInfo.Failure[1]; - failures[0] = new ReplicationResponse.ShardInfo.Failure(shardRequests.v1(), null, new Exception("pretend shard failed"), RestStatus.GATEWAY_TIMEOUT, 
false); + failures[0] = new ReplicationResponse.ShardInfo.Failure(shardRequests.v1(), null, + new Exception("pretend shard failed"), RestStatus.GATEWAY_TIMEOUT, false); failed++; } replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(2, shardsSucceeded, failures)); @@ -204,14 +205,16 @@ public void testShardsList() throws InterruptedException, ExecutionException { assertThat(shards.get(0), equalTo(shardId)); } - private class TestBroadcastReplicationAction extends TransportBroadcastReplicationAction { - protected final Set>> capturedShardRequests = ConcurrentCollections.newConcurrentSet(); + private class TestBroadcastReplicationAction extends TransportBroadcastReplicationAction { + protected final Set>> capturedShardRequests = + ConcurrentCollections.newConcurrentSet(); - TestBroadcastReplicationAction(Settings settings, ClusterService clusterService, TransportService transportService, + TestBroadcastReplicationAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportReplicationAction replicatedBroadcastShardAction) { - super("internal:test-broadcast-replication-action", DummyBroadcastRequest::new, settings, clusterService, transportService, - actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction); + TransportReplicationAction action) { + super("internal:test-broadcast-replication-action", DummyBroadcastRequest::new, clusterService, transportService, + actionFilters, indexNameExpressionResolver, action); } @Override @@ -231,7 +234,8 @@ protected BroadcastResponse newResponse(int successfulShards, int failedShards, } @Override - protected void shardExecute(Task task, DummyBroadcastRequest request, ShardId shardId, ActionListener shardActionListener) { + protected void shardExecute(Task task, DummyBroadcastRequest request, ShardId shardId, + ActionListener shardActionListener) { capturedShardRequests.add(new Tuple<>(shardId, shardActionListener)); } } @@ -241,7 +245,8 @@ public FlushResponse assertImmediateResponse(String index, TransportFlushAction FlushResponse flushResponse = ActionTestUtils.executeBlocking(flushAction, new FlushRequest(index)); Date endDate = new Date(); long maxTime = 500; - assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere", endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime)); + assertThat("this should not take longer than " + maxTime + " ms. 
The request hangs somewhere", + endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime)); return flushResponse; } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index 9ec91f4e45091..b31ed0a6b9821 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -60,13 +61,14 @@ public void testShardInfoToXContent() throws IOException { new ShardInfo.Failure(new ShardId("index", "_uuid", 3), "_node_id", new IllegalArgumentException("Wrong"), RestStatus.BAD_REQUEST, false), new ShardInfo.Failure(new ShardId("index", "_uuid", 1), - "_node_id", new CircuitBreakingException("Wrong", 12, 21), RestStatus.NOT_ACCEPTABLE, true)); + "_node_id", new CircuitBreakingException("Wrong", 12, 21, CircuitBreaker.Durability.PERMANENT), + RestStatus.NOT_ACCEPTABLE, true)); String output = Strings.toString(shardInfo); assertEquals("{\"total\":6,\"successful\":4,\"failed\":2,\"failures\":[{\"_index\":\"index\",\"_shard\":3," + "\"_node\":\"_node_id\",\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"Wrong\"}," + "\"status\":\"BAD_REQUEST\",\"primary\":false},{\"_index\":\"index\",\"_shard\":1,\"_node\":\"_node_id\"," + - "\"reason\":{\"type\":\"circuit_breaking_exception\",\"reason\":\"Wrong\",\"bytes_wanted\":12,\"bytes_limit\":21}," + - "\"status\":\"NOT_ACCEPTABLE\",\"primary\":true}]}", output); + "\"reason\":{\"type\":\"circuit_breaking_exception\",\"reason\":\"Wrong\",\"bytes_wanted\":12,\"bytes_limit\":21" + + ",\"durability\":\"PERMANENT\"},\"status\":\"NOT_ACCEPTABLE\",\"primary\":true}]}", output); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index ff868c3250aef..0469fac4d7d55 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -167,7 +167,7 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); - shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); + shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); action = new TestAction(Settings.EMPTY, "internal:testAction", transportService, clusterService, shardStateAction, threadPool); } @@ -1114,7 +1114,7 @@ private class TestAction extends TransportReplicationAction()), new IndexNameExpressionResolver(Settings.EMPTY), + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new, 
Request::new, ThreadPool.Names.SAME); } @@ -1123,7 +1123,7 @@ private class TestAction extends TransportReplicationAction()), new IndexNameExpressionResolver(Settings.EMPTY), + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), Request::new, Request::new, ThreadPool.Names.SAME); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 1f1e9eb2a1e96..6582634e228ee 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -260,7 +260,7 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); - ShardStateAction shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); + ShardStateAction shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); TestAction action = new TestAction(Settings.EMPTY, "internal:testAction", transportService, clusterService, shardStateAction, threadPool); final String index = "test"; @@ -360,7 +360,7 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF super(Settings.EMPTY, "internal:test", new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()), null, null, null, null, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; @@ -370,7 +370,7 @@ protected TestAction(Settings settings, String actionName, TransportService tran ClusterService clusterService, ShardStateAction shardStateAction, ThreadPool threadPool) { super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool, shardStateAction, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); this.withDocumentFailureOnPrimary = false; this.withDocumentFailureOnReplica = false; diff --git a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 0505143592497..c088c635f590d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.service.ClusterService; -import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -88,8 +87,11 @@ public Response() { class TestTransportInstanceSingleOperationAction extends TransportInstanceSingleOperationAction { private final Map shards = new HashMap<>(); - TestTransportInstanceSingleOperationAction(Settings settings, String actionName, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { - super(settings, actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request); + TestTransportInstanceSingleOperationAction(String actionName, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier request) { + super(actionName, THREAD_POOL, TransportInstanceSingleOperationActionTests.this.clusterService, transportService, + actionFilters, indexNameExpressionResolver, request); } public Map getResults() { @@ -122,10 +124,6 @@ protected ShardIterator shards(ClusterState clusterState, Request request) { } class MyResolver extends IndexNameExpressionResolver { - MyResolver() { - super(Settings.EMPTY); - } - @Override public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { return request.indices(); @@ -148,7 +146,6 @@ public void setUp() throws Exception { transportService.start(); transportService.acceptIncomingRequests(); action = new TestTransportInstanceSingleOperationAction( - Settings.EMPTY, "indices:admin/test", transportService, new ActionFilters(new HashSet<>()), @@ -212,7 +209,8 @@ public void testFailureWithoutRetry() throws Exception { long requestId = transport.capturedRequests()[0].requestId; transport.clear(); // this should not trigger retry or anything and the listener should report exception immediately - transport.handleRemoteError(requestId, new TransportException("a generic transport exception", new Exception("generic test exception"))); + transport.handleRemoteError(requestId, new TransportException("a generic transport exception", + new Exception("generic test exception"))); try { // result should return immediately @@ -295,7 +293,6 @@ public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { public void testUnresolvableRequestDoesNotHang() throws InterruptedException, ExecutionException, TimeoutException { action = new TestTransportInstanceSingleOperationAction( - Settings.EMPTY, "indices:admin/test_unresolvable", transportService, new ActionFilters(new HashSet<>()), diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index ff7697745da76..ce29fce89219f 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.update; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -135,9 +134,7 @@ public void setUp() throws Exception { final MockScriptEngine engine = new MockScriptEngine("mock", scripts, Collections.emptyMap()); Map engines = 
Collections.singletonMap(engine.getType(), engine); ScriptService scriptService = new ScriptService(baseSettings, engines, ScriptModule.CORE_CONTEXTS); - final Settings settings = settings(Version.CURRENT).build(); - - updateHelper = new UpdateHelper(settings, scriptService); + updateHelper = new UpdateHelper(scriptService); } @SuppressWarnings("unchecked") diff --git a/server/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java index 31f6963536c50..e7f32aa524972 100644 --- a/server/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java @@ -106,18 +106,22 @@ public void testActions() { client.prepareGet("idx", "type", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool())); client.prepareSearch().execute(new AssertingActionListener<>(SearchAction.NAME, client.threadPool())); client.prepareDelete("idx", "type", "id").execute(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool())); - client.admin().cluster().prepareDeleteStoredScript("id").execute(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool())); - client.prepareIndex("idx", "type", "id").setSource("source", XContentType.JSON).execute(new AssertingActionListener<>(IndexAction.NAME, client.threadPool())); + client.admin().cluster().prepareDeleteStoredScript("id") + .execute(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool())); + client.prepareIndex("idx", "type", "id").setSource("source", XContentType.JSON) + .execute(new AssertingActionListener<>(IndexAction.NAME, client.threadPool())); // choosing arbitrary cluster admin actions to test client.admin().cluster().prepareClusterStats().execute(new AssertingActionListener<>(ClusterStatsAction.NAME, client.threadPool())); - client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute(new AssertingActionListener<>(CreateSnapshotAction.NAME, client.threadPool())); + client.admin().cluster().prepareCreateSnapshot("repo", "bck") + .execute(new AssertingActionListener<>(CreateSnapshotAction.NAME, client.threadPool())); client.admin().cluster().prepareReroute().execute(new AssertingActionListener<>(ClusterRerouteAction.NAME, client.threadPool())); // choosing arbitrary indices admin actions to test client.admin().indices().prepareCreate("idx").execute(new AssertingActionListener<>(CreateIndexAction.NAME, client.threadPool())); client.admin().indices().prepareStats().execute(new AssertingActionListener<>(IndicesStatsAction.NAME, client.threadPool())); - client.admin().indices().prepareClearCache("idx1", "idx2").execute(new AssertingActionListener<>(ClearIndicesCacheAction.NAME, client.threadPool())); + client.admin().indices().prepareClearCache("idx1", "idx2") + .execute(new AssertingActionListener<>(ClearIndicesCacheAction.NAME, client.threadPool())); client.admin().indices().prepareFlush().execute(new AssertingActionListener<>(FlushAction.NAME, client.threadPool())); } diff --git a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index a689de9a5d324..63c09f1e41dfd 100644 --- a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -19,10 +19,9 @@ package 
org.elasticsearch.client.node; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.AbstractClientHeadersTestCase; @@ -37,7 +36,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { - private static final ActionFilters EMPTY_FILTERS = new ActionFilters(Collections.<ActionFilter>emptySet()); + private static final ActionFilters EMPTY_FILTERS = new ActionFilters(Collections.emptySet()); @Override protected Client buildClient(Settings headersSettings, Action[] testedActions) { @@ -60,7 +59,7 @@ private Actions(Settings settings, ThreadPool threadPool, Action[] actions) { private static class InternalTransportAction extends TransportAction { private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) { - super(settings, actionName, EMPTY_FILTERS, new TaskManager(settings, threadPool, Collections.emptySet())); + super(actionName, EMPTY_FILTERS, new TaskManager(settings, threadPool, Collections.emptySet())); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java index d6cf029c00d82..8693308e8650b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -36,7 +36,8 @@ public void testSimpleLocalHealth() { for (String node : internalCluster().getNodeNames()) { // a very high time out, which should never fire due to the local flag - ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s"); + ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true) + .setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s"); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(health.isTimedOut(), equalTo(false)); } @@ -44,7 +45,8 @@ public void testSimpleLocalHealth() { public void testHealth() { logger.info("--> running cluster health on an index that does not exists"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1") + .setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); @@ -59,13 +61,15 @@ public void testHealth() { createIndex("test1"); logger.info("--> running cluster health on an index that does exists"); - healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test1") + .setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().get("test1").getStatus(),
equalTo(ClusterHealthStatus.GREEN)); logger.info("--> running cluster health on an index that does exists and an index that doesn't exists"); - healthResponse = client().admin().cluster().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test1", "test2") + .setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 1a0e964ef7740..ae1caa787d4be 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -74,8 +74,8 @@ public static class TestPlugin extends Plugin implements ActionPlugin { private final BlockingActionFilter blockingActionFilter; - public TestPlugin(Settings settings) { - blockingActionFilter = new BlockingActionFilter(settings); + public TestPlugin() { + blockingActionFilter = new BlockingActionFilter(); } @Override @@ -87,10 +87,6 @@ public List getActionFilters() { public static class BlockingActionFilter extends org.elasticsearch.action.support.ActionFilter.Simple { private Set blockedActions = emptySet(); - public BlockingActionFilter(Settings settings) { - super(settings); - } - @Override protected boolean apply(String action, ActionRequest request, ActionListener listener) { if (blockedActions.contains(action)) { @@ -132,7 +128,8 @@ public void testClusterInfoServiceCollectsInformation() throws Exception { ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node - final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster + .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); infoService.onMaster(); ClusterInfo info = infoService.refresh(); @@ -178,7 +175,8 @@ public void testClusterInfoServiceInformationClearOnError() { prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); - InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster + .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); // get one healthy sample ClusterInfo info = infoService.refresh(); assertNotNull("failed to collect info", info); @@ -186,10 +184,12 @@ public void testClusterInfoServiceInformationClearOnError() { assertThat("some shard sizes are populated", info.shardSizes.size(), greaterThan(0)); - MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, internalTestCluster.getMasterName()); + 
MockTransportService mockTransportService = (MockTransportService) internalCluster() + .getInstance(TransportService.class, internalTestCluster.getMasterName()); final AtomicBoolean timeout = new AtomicBoolean(false); - final Set blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); + final Set blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", + IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); // drop all outgoing stats requests to force a timeout. for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) { mockTransportService.addSendBehavior(internalTestCluster.getInstance(TransportService.class, node.getName()), diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index af3807226a943..9734bf37b5bc0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -65,8 +65,7 @@ public class ClusterModuleTests extends ModuleTestCase { private ClusterService clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); static class FakeAllocationDecider extends AllocationDecider { - protected FakeAllocationDecider(Settings settings) { - super(settings); + protected FakeAllocationDecider() { } } @@ -129,7 +128,7 @@ public void testRegisterAllocationDecider() { Collections.singletonList(new ClusterPlugin() { @Override public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { - return Collections.singletonList(new FakeAllocationDecider(settings)); + return Collections.singletonList(new FakeAllocationDecider()); } }), clusterInfoService); assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index a5d865a274140..b5359634fcb36 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -162,7 +162,8 @@ public void testClusterStateDiffSerialization() throws Exception { // Check cluster blocks assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterState.blocks().global())); assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterState.blocks().indices())); - assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterState.blocks().disableStatePersistence())); + assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), + equalTo(clusterState.blocks().disableStatePersistence())); // Check metadata assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); @@ -179,9 +180,11 @@ public void testClusterStateDiffSerialization() throws Exception { // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order // however, serialized size should remain the same - assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length)); +
assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, + equalTo(ClusterState.Builder.toBytes(clusterState).length)); } catch (AssertionError error) { - logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString()); + logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", + clusterState.toString(), clusterStateFromDiffs.toString()); throw error; } } @@ -195,7 +198,8 @@ public void testClusterStateDiffSerialization() throws Exception { */ private ClusterState.Builder randomNodes(ClusterState clusterState) { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().getNodes().size() - 1), clusterState.nodes().getNodes().keys().toArray(String.class)); + List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().getNodes().size() - 1), + clusterState.nodes().getNodes().keys().toArray(String.class)); for (String nodeId : nodeIds) { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); @@ -220,18 +224,21 @@ private ClusterState.Builder randomRoutingTable(ClusterState clusterState) { RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable()); int numberOfIndices = clusterState.routingTable().indicesRouting().size(); if (numberOfIndices > 0) { - List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keys().toArray(String.class)); + List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), + clusterState.routingTable().indicesRouting().keys().toArray(String.class)); for (String index : randomIndices) { if (randomBoolean()) { builder.remove(index); } else { - builder.add(randomChangeToIndexRoutingTable(clusterState.routingTable().indicesRouting().get(index), clusterState.nodes().getNodes().keys().toArray(String.class))); + builder.add(randomChangeToIndexRoutingTable(clusterState.routingTable().indicesRouting().get(index), + clusterState.nodes().getNodes().keys().toArray(String.class))); } } } int additionalIndexCount = randomIntBetween(1, 20); for (int i = 0; i < additionalIndexCount; i++) { - builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().getNodes().keys().toArray(String.class))); + builder.add(randomIndexRoutingTable("index-" + randomInt(), + clusterState.nodes().getNodes().keys().toArray(String.class))); } return ClusterState.builder(clusterState).routingTable(builder.build()); } @@ -299,7 +306,8 @@ private ClusterState.Builder randomBlocks(ClusterState clusterState) { ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks()); int globalBlocksCount = clusterState.blocks().global().size(); if (globalBlocksCount > 0) { - List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); + List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), + clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); for (ClusterBlock block : blocks) { builder.removeGlobalBlock(block); } @@ -366,7 +374,8 @@ private ClusterState randomClusterStateParts(ClusterState clusterState, Stri ImmutableOpenMap parts = randomPart.parts(clusterState); int partCount = parts.size(); if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(clusterState).keys().toArray(String.class)); + List randomParts = randomSubsetOf(randomInt(partCount - 1), + 
randomPart.parts(clusterState).keys().toArray(String.class)); for (String part : randomParts) { if (randomBoolean()) { randomPart.remove(builder, part); @@ -477,7 +486,8 @@ private MetaData randomParts(MetaData metaData, String prefix, RandomPart ImmutableOpenMap parts = randomPart.parts(metaData); int partCount = parts.size(); if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class)); + List randomParts = randomSubsetOf(randomInt(partCount - 1), + randomPart.parts(metaData).keys().toArray(String.class)); for (String part : randomParts) { if (randomBoolean()) { randomPart.remove(builder, part); @@ -545,7 +555,8 @@ public IndexMetaData randomChange(IndexMetaData part) { } break; case 2: - builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); + builder.settings(Settings.builder().put(part.getSettings()) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); break; default: throw new IllegalArgumentException("Shouldn't be here"); diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 4ef8f7cbdb770..c4fcb9bdb53e2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -98,13 +98,15 @@ public void testRandomDiskUsage() { public void testFillShardLevelInfo() { final Index index = new Index("test", "0xdeadbeef"); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_0 = ShardRoutingHelper.initialize(test_0, "node1"); test_0 = ShardRoutingHelper.moveToStarted(test_0); Path test0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0"); CommonStats commonStats0 = new CommonStats(); commonStats0.store = new StoreStats(100); - ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_1 = ShardRoutingHelper.initialize(test_1, "node2"); test_1 = ShardRoutingHelper.moveToStarted(test_1); Path test1Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("1"); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 31ffb026e3a7f..2a606328ce466 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -96,7 +96,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { logger.info("--> start second node, cluster should be formed"); internalCluster().startNode(settings); - ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = 
client().admin().cluster().prepareHealth() + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -115,13 +116,15 @@ public void testSimpleMinimumMasterNodes() throws Exception { client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet(); } // make sure that all shards recovered before trying to flush - assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), equalTo(numShards.totalNumShards)); + assertThat(client().admin().cluster().prepareHealth("test") + .setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), equalTo(numShards.totalNumShards)); // flush for simpler debugging flushAndRefresh(); logger.info("--> verify we got the data back"); for (int i = 0; i < 10; i++) { - assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L)); + assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + .execute().actionGet().getHits().getTotalHits(), equalTo(100L)); } internalCluster().stopCurrentMasterNode(); @@ -138,7 +141,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { logger.info("--> starting the previous master node again..."); internalCluster().startNode(settings); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); + clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -167,7 +171,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { internalCluster().startNode(settings); ensureGreen(); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet(); + clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -211,7 +216,8 @@ public void testMultipleNodesShutdownNonMasterNodes() throws Exception { internalCluster().startNodes(2, settings); ensureGreen(); - ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth() + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -225,7 +231,8 @@ public void testMultipleNodesShutdownNonMasterNodes() throws Exception { } ensureGreen(); // make sure that all shards recovered before trying to
flush - assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth("test") + .setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false)); // flush for simpler debugging client().admin().indices().prepareFlush().execute().actionGet(); @@ -303,7 +310,8 @@ private void assertNoMasterBlockOnAllNodes() throws InterruptedException { for (Client client : internalCluster().getClients()) { boolean clientHasNoMasterBlock = hasNoMasterBlock.test(client); if (logger.isDebugEnabled()) { - logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, clientHasNoMasterBlock); + logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", + client, clientHasNoMasterBlock); } success &= clientHasNoMasterBlock; } @@ -334,20 +342,25 @@ public void testCanNotBringClusterDown() throws ExecutionException, InterruptedE int updateCount = randomIntBetween(1, nodeCount); - logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + logger.info("--> updating [{}] to [{}]", + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); + .setPersistentSettings(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); logger.info("--> verifying no node left and master is up"); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); updateCount = nodeCount + randomIntBetween(1, 2000); - logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + logger.info("--> trying to update [{}] to [{}]", + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); try { client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); + .setPersistentSettings(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]"); + assertEquals(ex.getMessage(), + "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" + updateCount + "]"); } logger.info("--> verifying no node left and master is up"); @@ -411,7 +424,8 @@ public void onFailure(String source, Exception e) { for (String node : internalCluster().getNodeNames()) { Settings nodeSetting = internalCluster().clusterService(node).state().metaData().settings(); - assertThat(node + " processed the cluster state despite of a min master node violation", nodeSetting.get("_SHOULD_NOT_BE_THERE_"), nullValue()); + assertThat(node + " processed the cluster state despite a min master node violation", + nodeSetting.get("_SHOULD_NOT_BE_THERE_"), nullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java
b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index b8a5e26d5c9ab..231a79f8902cf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -109,29 +109,35 @@ public void testNoMasterActions() throws Exception { checkUpdateAction(false, timeout, client().prepareUpdate("test", "type1", "1") .setScript(new Script( - ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", + Collections.emptyMap())).setTimeout(timeout)); checkUpdateAction(true, timeout, client().prepareUpdate("no_index", "type1", "1") .setScript(new Script( - ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", + Collections.emptyMap())).setTimeout(timeout)); - checkWriteAction( - client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); + checkWriteAction(client().prepareIndex("test", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); - checkWriteAction( - client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); + checkWriteAction(client().prepareIndex("no_index", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject())); - bulkRequestBuilder.add(client().prepareIndex("test", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("test", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("test", "type1", "2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject())); - bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); @@ -219,7 +225,8 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { } try { - client().prepareIndex("test1", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).get(); + client().prepareIndex("test1", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).get(); fail("Expected ClusterBlockException"); } catch (ClusterBlockException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 3e27b784e0a10..14919b7e9f0bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -198,8 +198,9 @@ private void testFilteringByIndexWorks(String[] indices, String[] expected) { } public void testLargeClusterStatePublishing() throws Exception { - int estimatedBytesSize = scaledRandomIntBetween(ByteSizeValue.parseBytesSizeValue("10k", "estimatedBytesSize").bytesAsInt(), - ByteSizeValue.parseBytesSizeValue("256k", "estimatedBytesSize").bytesAsInt()); + int estimatedBytesSize = scaledRandomIntBetween( + ByteSizeValue.parseBytesSizeValue("10k", "estimatedBytesSize").bytesAsInt(), + ByteSizeValue.parseBytesSizeValue("256k", "estimatedBytesSize").bytesAsInt()); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties"); int counter = 0; int numberOfFields = 0; @@ -224,9 +225,11 @@ public void testLargeClusterStatePublishing() throws Exception { .addMapping("type", mapping) .setTimeout("60s").get()); ensureGreen(); // wait for green state, so it's both green, and there are no more pending events - MappingMetaData masterMappingMetaData = client().admin().indices().prepareGetMappings("test").setTypes("type").get().getMappings().get("test").get("type"); + MappingMetaData masterMappingMetaData = client().admin().indices() + .prepareGetMappings("test").setTypes("type").get().getMappings().get("test").get("type"); for (Client client : clients()) { - MappingMetaData mappingMetadata = client.admin().indices().prepareGetMappings("test").setTypes("type").setLocal(true).get().getMappings().get("test").get("type"); + MappingMetaData mappingMetadata = client.admin().indices() + .prepareGetMappings("test").setTypes("type").setLocal(true).get().getMappings().get("test").get("type"); assertThat(mappingMetadata.source().string(), equalTo(masterMappingMetaData.source().string())); assertThat(mappingMetadata, equalTo(masterMappingMetaData)); } @@ -279,7 +282,8 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { // ignore_unavailable set to false throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); try { - client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get(); + client().admin().cluster().prepareState().clear().setMetaData(true) + .setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [fzzbzz]")); @@ -368,7 +372,8 @@ public Collection createComponents( if (state.nodes().isLocalNodeElectedMaster()) { if (state.custom("test") == null) { if (installed.compareAndSet(false, true)) { - clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { + clusterService.submitStateUpdateTask("install-metadata-custom", + new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index c2f6c3b64faae..d5a986b8affff 100644 ---
a/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -49,7 +49,8 @@ public void testDataNodes() throws Exception { } internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build()); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2") + .setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); // still no shard should be allocated try { @@ -62,7 +63,8 @@ public void testDataNodes() throws Exception { // now, start a data node, and see that it gets shards allocated internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).build()); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3") + .setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1") .source(source("1", "test"), XContentType.JSON)).actionGet(); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index cb557fa13628a..74d7e5c4ff7e8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -49,70 +49,94 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false) + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_MASTER_SETTING.getKey(), false) .put("discovery.initial_state_timeout", "1s")); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node"); - final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final String masterNodeName = internalCluster() + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> stop master node"); internalCluster().stopCurrentMasterNode(); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node"); - final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); + final String nextMasterEligibleNodeName = internalCluster() + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testElectOnlyBetweenMasterNodes() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node (1)"); - final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final 
String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> closing master node (1)"); internalCluster().stopCurrentMasterNode(); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testAliasFilterValidation() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(),
false)); - assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", " + + assertAcked(prepareCreate("test").addMapping( + "type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", " + "\"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ \"type\" : \"keyword\" }}}}}}", XContentType.JSON)); - client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get(); + client().admin().indices().prepareAliases().addAlias("test", "a_test", + QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java index a21f61ce8afdb..11cdb8afc4c41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java @@ -41,17 +41,21 @@ public void testUpdateSettingsValidation() throws Exception { createIndex("test"); NumShards test = getNumShards("test"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test") + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards)); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)).execute().actionGet(); - healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)).execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test") + .setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries)); try { - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "")).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.refresh_interval", "")).execute().actionGet(); fail(); } catch (IllegalArgumentException ex) { logger.info("Error message: [{}]", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index 03340e211b41d..450acebc983e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -111,7 +111,8 @@ public void testClusterUpdateSettingsAcknowledgement() { ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = 
client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get(); assertAcked(clusterUpdateSettingsResponse); - assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), + equalTo(excludedNodeId)); for (Client client : clients()) { ClusterState clusterState = getLocalClusterState(client); @@ -120,9 +121,11 @@ public void testClusterUpdateSettingsAcknowledgement() { for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { for (ShardRouting shardRouting : indexShardRoutingTable) { assert clusterState.nodes() != null; - if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) { - //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged - //reroute happens as part of the update settings and we made sure no throttling comes into the picture via settings + if (shardRouting.unassigned() == false && clusterState.nodes() + .get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) { + // if the shard is still there it must be relocating and all nodes need to know, + // since the request was acknowledged: reroute happens as part of the update settings + // and we made sure no throttling comes into the picture via settings assertThat(shardRouting.relocating(), equalTo(true)); } } @@ -154,7 +157,8 @@ public void testClusterUpdateSettingsNoAcknowledgement() { ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTimeout("0s") .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get(); assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(false)); - assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), + equalTo(excludedNodeId)); } private static ClusterState getLocalClusterState(Client client) { diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index df97854cc35b0..c4a5e6c39d976 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -128,7 +128,8 @@ public void testClusterRerouteNoAcknowledgement() throws InterruptedException { MoveAllocationCommand moveAllocationCommand = getAllocationCommand(); - ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get(); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute() + .setTimeout("0s").add(moveAllocationCommand).get(); assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false)); } @@ -146,8 +147,9 @@ public void testClusterRerouteAcknowledgementDryRun() throws InterruptedExceptio assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand)); - //testing only on master with the latest cluster state as we didn't make any change thus we cannot guarantee that - //all
nodes hold the same cluster state version. We only know there was no need to change anything, thus no need for ack on this update. + // testing only on master with the latest cluster state as we didn't make any change thus + // we cannot guarantee that all nodes hold the same cluster state version. We only know there + // was no need to change anything, thus no need for ack on this update. ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); boolean found = false; for (ShardRouting shardRouting : clusterStateResponse.getState().getRoutingNodes().node(moveAllocationCommand.fromNode())) { @@ -176,7 +178,8 @@ public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedExcept MoveAllocationCommand moveAllocationCommand = getAllocationCommand(); - ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get(); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s") + .setDryRun(true).add(moveAllocationCommand).get(); //acknowledged anyway as no changes were made assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true)); } @@ -219,7 +222,8 @@ public void testIndicesAliasesAcknowledgement() { assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias")); for (Client client : clients()) { - AliasMetaData aliasMetaData = ((AliasOrIndex.Alias) getLocalClusterState(client).metaData().getAliasAndIndexLookup().get("alias")).getFirstAliasMetaData(); + AliasMetaData aliasMetaData = ((AliasOrIndex.Alias) getLocalClusterState(client) + .metaData().getAliasAndIndexLookup().get("alias")).getFirstAliasMetaData(); assertThat(aliasMetaData.alias(), equalTo("alias")); } } @@ -228,7 +232,8 @@ public void testIndicesAliasesAcknowledgement() { public void testIndicesAliasesNoAcknowledgement() { createIndex("test"); - AcknowledgedResponse indicesAliasesResponse = client().admin().indices().prepareAliases().addAlias("test", "alias").setTimeout("0s").get(); + AcknowledgedResponse indicesAliasesResponse = client().admin().indices().prepareAliases() + .addAlias("test", "alias").setTimeout("0s").get(); assertThat(indicesAliasesResponse.isAcknowledged(), equalTo(false)); } @@ -281,7 +286,8 @@ public void testPutMappingNoAcknowledgement() { createIndex("test"); ensureGreen(); - AcknowledgedResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword").setTimeout("0s").get(); + AcknowledgedResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test") + .setSource("field", "type=keyword").setTimeout("0s").get(); assertThat(putMappingResponse.isAcknowledged(), equalTo(false)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 01d0c518c1be7..60a5d4a3e3f1f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -88,7 +88,8 @@ public void setUp() throws Exception { routingTable = RoutingTable.builder() .addAsNew(metaData.index(INDEX)) .build(); - clusterState = 
ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); executor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); } @@ -120,12 +121,13 @@ public void testTriviallySuccessfulTasksBatchedWithFailingTasks() throws Excepti ClusterState currentState = createClusterStateWithStartedShards(reason); List failingTasks = createExistingShards(currentState, reason); List nonExistentTasks = createNonExistentShards(currentState, reason); - ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) { - @Override - ClusterState applyFailedShards(ClusterState currentState, List failedShards, List staleShards) { - throw new RuntimeException("simulated applyFailedShards failure"); - } - }; + ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = + new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) { + @Override + ClusterState applyFailedShards(ClusterState currentState, List failedShards, List staleShards) { + throw new RuntimeException("simulated applyFailedShards failure"); + } + }; List tasks = new ArrayList<>(); tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); @@ -200,7 +202,8 @@ private ClusterState createClusterStateWithStartedShards(String reason) { private List createExistingShards(ClusterState currentState, String reason) { List shards = new ArrayList<>(); - GroupShardsIterator shardGroups = currentState.routingTable().allAssignedShardsGrouped(new String[] { INDEX }, true); + GroupShardsIterator shardGroups = currentState.routingTable() + .allAssignedShardsGrouped(new String[] { INDEX }, true); for (ShardIterator shardIt : shardGroups) { for (ShardRouting shard : shardIt) { shards.add(shard); @@ -233,11 +236,13 @@ private List createNonExistentShards(ClusterS List existingShards = createExistingShards(currentState, reason); List shardsWithMismatchedAllocationIds = new ArrayList<>(); for (ShardStateAction.FailedShardEntry existingShard : existingShards) { - shardsWithMismatchedAllocationIds.add(new ShardStateAction.FailedShardEntry(existingShard.shardId, UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure, randomBoolean())); + shardsWithMismatchedAllocationIds.add(new ShardStateAction.FailedShardEntry(existingShard.shardId, + UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure, randomBoolean())); } List tasks = new ArrayList<>(); - nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.FailedShardEntry(shard.shardId(), shard.allocationId().getId(), 0L, + nonExistentShards.forEach(shard -> tasks.add( + new ShardStateAction.FailedShardEntry(shard.shardId(), shard.allocationId().getId(), 0L, reason, new CorruptIndexException("simulated", nonExistentIndexUUID), randomBoolean()))); tasks.addAll(shardsWithMismatchedAllocationIds); return tasks; @@ -303,7 +308,8 @@ private static void assertTaskResults( } } - private static List toTasks(ClusterState currentState, List shards, String indexUUID, String message) { + private static List toTasks(ClusterState currentState, List shards, + String indexUUID, String message) { return shards .stream() .map(shard -> new ShardStateAction.FailedShardEntry( diff --git 
a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 64fa51d159a54..67f5871506cc5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -89,8 +88,9 @@ public class ShardStateActionTests extends ESTestCase { private ClusterService clusterService; private static class TestShardStateAction extends ShardStateAction { - TestShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, AllocationService allocationService, RoutingService routingService) { - super(settings, clusterService, transportService, allocationService, routingService, THREAD_POOL); + TestShardStateAction(ClusterService clusterService, TransportService transportService, + AllocationService allocationService, RoutingService routingService) { + super(clusterService, transportService, allocationService, routingService, THREAD_POOL); } private Runnable onBeforeWaitForNewMasterAndRetry; @@ -106,7 +106,8 @@ public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterA } @Override - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, Listener listener, Predicate changePredicate) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, + Listener listener, Predicate changePredicate) { onBeforeWaitForNewMasterAndRetry.run(); super.waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); onAfterWaitForNewMasterAndRetry.run(); @@ -128,7 +129,7 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); - shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null); + shardStateAction = new TestShardStateAction(clusterService, transportService, null, null); shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> { }); shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> { @@ -359,20 +360,21 @@ public void testNoLongerPrimaryShardException() throws InterruptedException { long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id()); assertThat(primaryTerm, greaterThanOrEqualTo(1L)); - shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), primaryTerm + 1, randomBoolean(), "test", - getSimulatedFailure(), new ShardStateAction.Listener() { - @Override - public void onSuccess() { - failure.set(null); - latch.countDown(); - } + shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), + primaryTerm + 1, randomBoolean(), "test", getSimulatedFailure(), + new ShardStateAction.Listener() { + @Override + public void onSuccess() { + failure.set(null); + latch.countDown(); + } - @Override - 
public void onFailure(Exception e) { - failure.set(e); - latch.countDown(); - } - }); + @Override + public void onFailure(Exception e) { + failure.set(e); + latch.countDown(); + } + }); ShardStateAction.NoLongerPrimaryShardException catastrophicError = new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "dummy failure"); @@ -445,7 +447,8 @@ public void testRemoteShardFailedConcurrently() throws Exception { for (int i = 0; i < iterationsPerThread; i++) { ShardRouting failedShard = randomFrom(failedShards); shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), - randomLongBetween(1, Long.MAX_VALUE), randomBoolean(), "test", getSimulatedFailure(), new ShardStateAction.Listener() { + randomLongBetween(1, Long.MAX_VALUE), randomBoolean(), "test", getSimulatedFailure(), + new ShardStateAction.Listener() { @Override public void onSuccess() { notifiedResponses.incrementAndGet(); @@ -523,7 +526,8 @@ public void testShardEntryBWCSerialize() throws Exception { assertThat(failedShardEntry.failure, nullValue()); assertThat(failedShardEntry.markAsStale, equalTo(true)); } - try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, reason, null, false), bwcVersion).streamInput()) { + try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, + reason, null, false), bwcVersion).streamInput()) { in.setVersion(bwcVersion); final StartedShardEntry startedShardEntry = new StartedShardEntry(in); assertThat(startedShardEntry.shardId, equalTo(shardId)); diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index fbb0fa732f601..fe63d1c39e4ae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -77,7 +77,8 @@ public void testSimpleAwareness() throws Exception { assertThat(awaitBusy( () -> { logger.info("--> waiting for no relocation"); - ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); if (clusterHealth.isTimedOut()) { return false; } @@ -128,7 +129,8 @@ public void testAwarenessZones() throws Exception { .put("index.number_of_replicas", 1)).execute().actionGet(); logger.info("--> waiting for shards to be allocated"); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -163,7 +165,8 @@ public void testAwarenessZonesIncrementalNodes() throws Exception { client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put("index.number_of_shards", 5) .put("index.number_of_replicas", 1)).execute().actionGet(); - ClusterHealthResponse 
-        ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet();
+        ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+            .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
         ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
         ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
@@ -180,10 +183,12 @@ public void testAwarenessZonesIncrementalNodes() throws Exception {
 
         logger.info("--> starting another node in zone 'b'");
         String B_1 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build());
-        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
+        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
+            .setWaitForNodes("3").execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
         client().admin().cluster().prepareReroute().get();
-        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
+        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
+            .setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
 
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -202,10 +207,12 @@ public void testAwarenessZonesIncrementalNodes() throws Exception {
         assertThat(counts.get(B_1), equalTo(2));
 
         String noZoneNode = internalCluster().startNode();
-        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
+        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
+            .setWaitForNodes("4").execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
         client().admin().cluster().prepareReroute().get();
-        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
+        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
+            .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
 
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -224,9 +231,11 @@ public void testAwarenessZonesIncrementalNodes() throws Exception {
         assertThat(counts.get(B_0), equalTo(3));
         assertThat(counts.get(B_1), equalTo(2));
         assertThat(counts.containsKey(noZoneNode), equalTo(false));
-        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
+        client().admin().cluster().prepareUpdateSettings()
+            .setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
 
-        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
+        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
+            .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
         assertThat(health.isTimedOut(), equalTo(false));
 
         clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index e7bcce2817c0b..71c9f5a15ba4d 100644
--- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -112,7 +112,8 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .setDryRun(true)
             .execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.INITIALIZING));
 
         logger.info("--> get the state, verify nothing changed because of the dry run");
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
@@ -124,15 +125,18 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
             .execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.INITIALIZING));
 
-        ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+        ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+            .setWaitForYellowStatus().execute().actionGet();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
 
         logger.info("--> get the state, verify shard 1 primary allocated");
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.STARTED));
 
         logger.info("--> move shard 1 primary from node1 to node2");
         state = client().admin().cluster().prepareReroute()
@@ -140,17 +144,21 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception {
             .add(new MoveAllocationCommand("test", 0, node_1, node_2))
             .execute().actionGet().getState();
 
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.RELOCATING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.INITIALIZING));
 
-        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
+        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus()
+            .setWaitForNoRelocatingShards(true).execute().actionGet();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
 
         logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.STARTED));
     }
 
     public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {
@@ -223,17 +231,21 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
             .execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.INITIALIZING));
 
-        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+            .setWaitForYellowStatus().execute().actionGet();
         assertThat(healthResponse.isTimedOut(), equalTo(false));
 
         logger.info("--> get the state, verify shard 1 primary allocated");
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.STARTED));
 
-        client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
+        client().prepareIndex("test", "type", "1").setSource("field", "value")
+            .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
         final Index index = resolveIndex("test");
 
         logger.info("--> closing all nodes");
@@ -251,14 +263,16 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc
         // wait a bit for the cluster to realize that the shard is not there...
         // TODO can we get around this? the cluster is RED, so what do we wait for?
         client().admin().cluster().prepareReroute().get();
-        assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
+        assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(),
+            equalTo(ClusterHealthStatus.RED));
         logger.info("--> explicitly allocate primary");
         state = client().admin().cluster().prepareReroute()
             .setExplain(randomBoolean())
             .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
             .execute().actionGet().getState();
         assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
-        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
+        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
+            equalTo(ShardRoutingState.INITIALIZING));
 
         logger.info("--> get the state, verify shard 1 primary allocated");
         final String nodeToCheck = node_1;
@@ -393,7 +407,8 @@ public void testClusterRerouteWithBlocks() throws Exception {
         List<String> nodesIds = internalCluster().startNodes(2);
 
         logger.info("--> create an index with 1 shard and 0 replicas");
-        assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1)
+            .put("index.number_of_replicas", 0)));
         ensureGreen("test-blocks");
 
         logger.info("--> check that the index has 1 shard");
@@ -417,10 +432,11 @@ public void testClusterRerouteWithBlocks() throws Exception {
             SETTING_READ_ONLY_ALLOW_DELETE)) {
             try {
                 enableIndexBlock("test-blocks", blockSetting);
-                assertAcked(client().admin().cluster().prepareReroute()
-                    .add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));
+                assertAcked(client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test-blocks", 0,
+                    nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));
 
-                ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet();
+                ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus()
+                    .setWaitForNoRelocatingShards(true).execute().actionGet();
                 assertThat(healthResponse.isTimedOut(), equalTo(false));
             } finally {
                 disableIndexBlock("test-blocks", blockSetting);
diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
index c3d1a6040a8f5..90ba6cbf89066 100644
--- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
@@ -65,7 +65,8 @@ public void testDecommissionNodeNoReplicas() throws Exception {
             client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
         }
         client().admin().indices().prepareRefresh().execute().actionGet();
-        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L));
+        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet()
+            .getHits().getTotalHits(), equalTo(100L));
 
         logger.info("--> decommission the second node");
         client().admin().cluster().prepareUpdateSettings()
@@ -84,7 +85,8 @@ public void testDecommissionNodeNoReplicas() throws Exception {
         }
         client().admin().indices().prepareRefresh().execute().actionGet();
 
-        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L));
+        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery())
+            .execute().actionGet().getHits().getTotalHits(), equalTo(100L));
     }
 
     public void testDisablingAllocationFiltering() throws Exception {
@@ -106,7 +108,8 @@ public void testDisablingAllocationFiltering() throws Exception {
             client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
         }
         client().admin().indices().prepareRefresh().execute().actionGet();
-        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L));
+        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery())
+            .execute().actionGet().getHits().getTotalHits(), equalTo(100L));
         ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
         IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
         int numShardsOnNode1 = 0;
@@ -120,9 +123,10 @@ public void testDisablingAllocationFiltering() throws Exception {
 
         if (numShardsOnNode1 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
             client().admin().cluster().prepareUpdateSettings()
-                .setTransientSettings(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet();
-            // make sure we can recover all the nodes at once otherwise we might run into a state where one of the shards has not yet started relocating
-            // but we already fired up the request to wait for 0 relocating shards.
+                .setTransientSettings(Settings.builder()
+                    .put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet();
+            // make sure we can recover all the nodes at once otherwise we might run into a state where
+            // one of the shards has not yet started relocating but we already fired up the request to wait for 0 relocating shards.
         }
         logger.info("--> remove index from the first node");
         client().admin().indices().prepareUpdateSettings("test")
diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
index 28aad61367d7f..f9c0691576f2b 100644
--- a/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java
@@ -54,7 +54,8 @@ public void testSaneAllocation() {
                 assertThat(node.size(), equalTo(2));
             }
         }
-        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet();
+        client().admin().indices().prepareUpdateSettings("test")
+            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet();
         ensureGreen();
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
 
@@ -69,7 +70,8 @@ public void testSaneAllocation() {
         assertAcked(prepareCreate("test2", 3));
         ensureGreen();
 
-        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet();
+        client().admin().indices().prepareUpdateSettings("test")
+            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet();
         ensureGreen();
         state = client().admin().cluster().prepareState().execute().actionGet().getState();
diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java
index 851ab63297a21..e18b9ca398fc7 100644
--- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java
@@ -134,19 +134,19 @@ protected ClusterIndexHealth mutateInstance(ClusterIndexHealth instance) throws
                 instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards());
         case "numberOfReplicas":
             return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(),
-                instance.getNumberOfReplicas() + between(1, 10), instance.getActiveShards(), instance.getRelocatingShards(),
-                instance.getInitializingShards(), instance.getUnassignedShards(),
+                instance.getNumberOfReplicas() + between(1, 10), instance.getActiveShards(),
+                instance.getRelocatingShards(), instance.getInitializingShards(), instance.getUnassignedShards(),
                 instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards());
         case "activeShards":
             return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(),
-                instance.getNumberOfReplicas(), instance.getActiveShards() + between(1, 10), instance.getRelocatingShards(),
-                instance.getInitializingShards(), instance.getUnassignedShards(),
+                instance.getNumberOfReplicas(), instance.getActiveShards() + between(1, 10),
+                instance.getRelocatingShards(), instance.getInitializingShards(), instance.getUnassignedShards(),
                 instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards());
         case "relocatingShards":
             return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(),
-                instance.getNumberOfReplicas(), instance.getActiveShards(), instance.getRelocatingShards() + between(1, 10),
-                instance.getInitializingShards(), instance.getUnassignedShards(),
-                instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards());
+                instance.getNumberOfReplicas(), instance.getActiveShards(),
+                instance.getRelocatingShards() + between(1, 10), instance.getInitializingShards(),
+                instance.getUnassignedShards(), instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards());
         case "initializingShards":
             return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(),
                 instance.getNumberOfReplicas(), instance.getActiveShards(), instance.getRelocatingShards(),
diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
index 32a8c06bb7020..a51cd5ea6660a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
@@ -77,7 +77,7 @@
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 public class ClusterStateHealthTests extends ESTestCase {
-    private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
+    private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
 
     private static ThreadPool threadPool;
 
@@ -140,7 +140,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte
         logger.info("--> waiting for listener to be called and cluster state being blocked");
         listenerCalled.await();
 
-        TransportClusterHealthAction action = new TransportClusterHealthAction(Settings.EMPTY, transportService,
+        TransportClusterHealthAction action = new TransportClusterHealthAction(transportService,
             clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator());
         PlainActionFuture<ClusterHealthResponse> listener = new PlainActionFuture<>();
         action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener);
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java
index 05bd9eeab8c3d..f14401980765a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasValidatorTests.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.indices.InvalidAliasNameException;
 import org.elasticsearch.test.ESTestCase;
 
@@ -27,7 +26,7 @@
 
 public class AliasValidatorTests extends ESTestCase {
     public void testValidatesAliasNames() {
-        AliasValidator validator = new AliasValidator(Settings.EMPTY);
+        AliasValidator validator = new AliasValidator();
         Exception e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone(".", null));
         assertEquals("Invalid alias name [.]: must not be '.' or '..'", e.getMessage());
         e = expectThrows(InvalidAliasNameException.class, () -> validator.validateAliasStandalone("..", null));
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java
index f24dbfbd002ca..b96d5eacb15d1 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java
@@ -52,7 +52,8 @@ public class AutoExpandReplicasTests extends ESTestCase {
 
     public void testParseSettings() {
-        AutoExpandReplicas autoExpandReplicas = AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "0-5").build());
+        AutoExpandReplicas autoExpandReplicas = AutoExpandReplicas.SETTING
+            .get(Settings.builder().put("index.auto_expand_replicas", "0-5").build());
         assertEquals(0, autoExpandReplicas.getMinReplicas());
         assertEquals(5, autoExpandReplicas.getMaxReplicas(8));
         assertEquals(2, autoExpandReplicas.getMaxReplicas(3));
@@ -133,7 +134,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE
             dataNodes.add(createNode(DiscoveryNode.Role.DATA));
         }
         allNodes.addAll(dataNodes);
-        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()]));
+        ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0]));
 
         CreateIndexRequest request = new CreateIndexRequest("index",
             Settings.builder()
@@ -173,7 +174,8 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE
                 .map(DiscoveryNode::getId).collect(Collectors.toSet());
 
             List<DiscoveryNode> nodesToAdd = conflictingNodes.stream()
-                .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion()))
+                .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(),
+                    n.getAttributes(), n.getRoles(), n.getVersion()))
                 .collect(Collectors.toList());
 
             if (randomBoolean()) {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java
index d6c8707c1d76e..53266ecd4b6dc 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster.metadata;
 
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.Arrays;
@@ -29,7 +28,7 @@
 
 public class ClusterNameExpressionResolverTests extends ESTestCase {
 
-    private ClusterNameExpressionResolver clusterNameResolver = new ClusterNameExpressionResolver(Settings.EMPTY);
+    private ClusterNameExpressionResolver clusterNameResolver = new ClusterNameExpressionResolver();
     private static final Set<String> remoteClusters = new HashSet<>();
 
     static {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
index b06609296487b..1827554ee50ea 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java
@@ -63,9 +63,12 @@ public void testExpression() throws Exception {
         List<String> indexExpressions = Arrays.asList("<.marvel-{now}>", "<.watch_history-{now}>", "<logstash-{now}>");
         List<String> result = expressionResolver.resolve(context, indexExpressions);
         assertThat(result.size(), equalTo(3));
-        assertThat(result.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
-        assertThat(result.get(1), equalTo(".watch_history-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
-        assertThat(result.get(2), equalTo("logstash-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(0),
+            equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(1),
+            equalTo(".watch_history-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(2),
+            equalTo("logstash-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
     }
 
     public void testEmpty() throws Exception {
@@ -91,19 +94,22 @@ public void testExpression_MultiParts() throws Exception {
 
     public void testExpression_CustomFormat() throws Exception {
         List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd}}>"));
         assertThat(results.size(), equalTo(1));
-        assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(results.get(0),
+            equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
     }
 
     public void testExpression_EscapeStatic() throws Exception {
         List<String> result = expressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>"));
         assertThat(result.size(), equalTo(1));
-        assertThat(result.get(0), equalTo(".mar{v}el-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(0),
+            equalTo(".mar{v}el-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
     }
 
     public void testExpression_EscapeDateFormat() throws Exception {
         List<String> result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'YYYY}}>"));
         assertThat(result.size(), equalTo(1));
-        assertThat(result.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("'{year}'YYYY").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(0),
+            equalTo(".marvel-" + DateTimeFormat.forPattern("'{year}'YYYY").print(new DateTime(context.getStartTime(), UTC))));
     }
 
     public void testExpression_MixedArray() throws Exception {
@@ -112,9 +118,11 @@ public void testExpression_MixedArray() throws Exception {
         ));
         assertThat(result.size(), equalTo(4));
         assertThat(result.get(0), equalTo("name1"));
-        assertThat(result.get(1), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
+        assertThat(result.get(1),
+            equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC))));
         assertThat(result.get(2), equalTo("name2"));
-        assertThat(result.get(3), equalTo(".logstash-" + DateTimeFormat.forPattern("YYYY.MM").print(new DateTime(context.getStartTime(), UTC).withDayOfMonth(1))));
+        assertThat(result.get(3), equalTo(".logstash-" +
+            DateTimeFormat.forPattern("YYYY.MM").print(new DateTime(context.getStartTime(), UTC).withDayOfMonth(1))));
     }
 
     public void testExpression_CustomTimeZoneInIndexName() throws Exception {
@@ -132,7 +140,8 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception {
         DateTime now;
         if (hoursOffset >= 0) {
             // rounding to next day 00:00
-            now = DateTime.now(UTC).plusHours(hoursOffset).plusMinutes(minutesOffset).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
+            now = DateTime.now(UTC).plusHours(hoursOffset).plusMinutes(minutesOffset)
+                .withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
         } else {
             // rounding to today 00:00
             now = DateTime.now(UTC).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
index 9585381029f81..518a60ffe38f6 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
@@ -83,7 +83,7 @@ public class IndexCreationTaskTests extends ESTestCase {
 
     private final IndicesService indicesService = mock(IndicesService.class);
-    private final AliasValidator aliasValidator = new AliasValidator(Settings.EMPTY);
+    private final AliasValidator aliasValidator = new AliasValidator();
     private final NamedXContentRegistry xContentRegistry = mock(NamedXContentRegistry.class);
     private final CreateIndexClusterStateUpdateRequest request = mock(CreateIndexClusterStateUpdateRequest.class);
     private final Logger logger = mock(Logger.class);
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
index 0832df7c896d9..ab5756fccfc30 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java
@@ -35,12 +35,15 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.indices.IndexClosedException;
 import org.elasticsearch.indices.InvalidIndexNameException;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Comparator;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.function.Function;
@@ -57,7 +60,7 @@
 import static org.hamcrest.Matchers.notNullValue;
 
 public class IndexNameExpressionResolverTests extends ESTestCase {
-    private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY);
+    private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
 
     public void testIndexOptionsStrict() {
         MetaData.Builder mdBuilder = MetaData.builder()
@@ -436,42 +439,48 @@ public void testIndexOptionsSingleIndexNoExpandWildcards() {
 
         //error on both unavailable and no indices + every alias needs to expand to a single index
         {
-            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+            IndexNameExpressionResolver.Context context =
+                new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
             IndexNotFoundException infe = expectThrows(IndexNotFoundException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(context, "baz*"));
             assertThat(infe.getIndex().getName(), equalTo("baz*"));
         }
 
         {
-            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+            IndexNameExpressionResolver.Context context =
+                new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
             IndexNotFoundException infe = expectThrows(IndexNotFoundException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"));
             assertThat(infe.getIndex().getName(), equalTo("baz*"));
         }
 
         {
-            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+            IndexNameExpressionResolver.Context context =
+                new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
             IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"));
             assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));
         }
 
         {
-            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+            IndexNameExpressionResolver.Context context =
+                new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
             IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(context, "foo", "foofoobar"));
             assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));
         }
 
         {
-            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+            IndexNameExpressionResolver.Context context =
+                new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
             IndexClosedException ince = expectThrows(IndexClosedException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(context, "foofoo-closed", "foofoobar"));
             assertThat(ince.getMessage(), equalTo("closed"));
             assertEquals(ince.getIndex().getName(), "foofoo-closed");
         }
 
-        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
+        IndexNameExpressionResolver.Context context =
+            new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
         String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "barbaz");
         assertEquals(2, results.length);
         assertThat(results, arrayContainingInAnyOrder("foo", "foofoo"));
@@ -501,7 +510,8 @@ public void testIndexOptionsEmptyCluster() {
         }
 
-        final IndexNameExpressionResolver.Context context2 = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
+        final IndexNameExpressionResolver.Context context2 =
+            new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
         results = indexNameExpressionResolver.concreteIndexNames(context2, Strings.EMPTY_ARRAY);
         assertThat(results, emptyArray());
         results = indexNameExpressionResolver.concreteIndexNames(context2, "foo");
@@ -511,14 +521,20 @@ public void testIndexOptionsEmptyCluster() {
         results = indexNameExpressionResolver.concreteIndexNames(context2, "foo*", "bar");
         assertThat(results, emptyArray());
 
-        final IndexNameExpressionResolver.Context context3 = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false));
+        final IndexNameExpressionResolver.Context context3 =
+            new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false));
         IndexNotFoundException infe = expectThrows(IndexNotFoundException.class,
             () -> indexNameExpressionResolver.concreteIndexNames(context3, Strings.EMPTY_ARRAY));
         assertThat(infe.getResourceId().toString(), equalTo("[_all]"));
     }
 
     private static IndexMetaData.Builder indexBuilder(String index) {
-        return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
+        return IndexMetaData.builder(index).settings(settings());
+    }
+
+    private static Settings.Builder settings() {
+        return settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0);
     }
 
     public void testConcreteIndicesIgnoreIndicesOneMissingIndex() {
@@ -540,7 +556,8 @@ public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() {
         ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
         IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
 
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX")));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")),
+            equalTo(newHashSet("testXXX")));
     }
 
     public void testConcreteIndicesIgnoreIndicesAllMissing() {
@@ -561,7 +578,8 @@ public void testConcreteIndicesIgnoreIndicesEmptyRequest() {
             .put(indexBuilder("kuku"));
         ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
         IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX")));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})),
+            equalTo(newHashSet("kuku", "testXXX")));
     }
 
     public void testConcreteIndicesWildcardExpansion() {
@@ -573,14 +591,19 @@ public void testConcreteIndicesWildcardExpansion() {
             .put(indexBuilder("testYYX").state(State.OPEN));
         ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
 
-        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false));
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(new HashSet<String>()));
+        IndexNameExpressionResolver.Context context =
+            new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+            equalTo(new HashSet<String>()));
         context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false));
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY")));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+            equalTo(newHashSet("testXXX", "testXXY")));
         context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true));
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXYY")));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+            equalTo(newHashSet("testXYY")));
         context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true));
-        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
+        assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")),
+            equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
     }
 
     public void testConcreteIndicesWildcardWithNegation() {
@@ -649,7 +672,7 @@ public void testConcreteIndicesWildcardAndAliases() {
 
         // when ignoreAliases option is set, concreteIndexNames resolves the provided expressions
         // only against the defined indices
-        IndicesOptions ignoreAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true);
+        IndicesOptions ignoreAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true, false);
 
         String[] indexNamesIndexWildcard = indexNameExpressionResolver.concreteIndexNames(state, ignoreAliasesOptions, "foo*");
 
@@ -673,7 +696,7 @@ public void testConcreteIndicesWildcardAndAliases() {
 
         // when ignoreAliases option is not set, concreteIndexNames resolves the provided
        // expressions against the defined indices and aliases
-        IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, false);
+        IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, false, false);
 
         List<String> indexNames = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(state, indicesAndAliasesOptions, "foo*"));
         assertEquals(2, indexNames.size());
@@ -891,17 +914,20 @@ public void testIsPatternMatchingAllIndicesNonMatchingTrailingWildcardAndExclusi
 
     public void testIndexOptionsFailClosedIndicesAndAliases() {
         MetaData.Builder mdBuilder = MetaData.builder()
-            .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed")))
+            .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE)
+                .putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed")))
             .put(indexBuilder("foo2-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar2-closed")))
             .put(indexBuilder("foo3").putAlias(AliasMetaData.builder("foobar2-closed")));
         ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
 
-        IndexNameExpressionResolver.Context contextICE = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed());
+        IndexNameExpressionResolver.Context contextICE =
+            new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed());
         expectThrows(IndexClosedException.class, () -> indexNameExpressionResolver.concreteIndexNames(contextICE, "foo1-closed"));
         expectThrows(IndexClosedException.class, () -> indexNameExpressionResolver.concreteIndexNames(contextICE, "foobar1-closed"));
 
         IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true,
-            contextICE.getOptions().allowNoIndices(), contextICE.getOptions().expandWildcardsOpen(), contextICE.getOptions().expandWildcardsClosed(), contextICE.getOptions()));
+            contextICE.getOptions().allowNoIndices(), contextICE.getOptions().expandWildcardsOpen(),
+            contextICE.getOptions().expandWildcardsClosed(), contextICE.getOptions()));
         String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed");
         assertThat(results, emptyArray());
 
@@ -926,7 +952,9 @@ public void testIndexOptionsFailClosedIndicesAndAliases() {
             // expected
         }
 
-        context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions()));
+        context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true,
+            context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(),
+            context.getOptions().expandWildcardsClosed(), context.getOptions()));
         results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed");
         assertThat(results, arrayWithSize(1));
         assertThat(results, arrayContaining("foo3"));
@@ -1171,13 +1199,13 @@ public void testDeleteIndexIgnoresAliases() {
         }
         {
             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("test-alias");
-            deleteIndexRequest.indicesOptions(IndicesOptions.fromOptions(true, true, true, true, false, false, true));
+            deleteIndexRequest.indicesOptions(IndicesOptions.fromOptions(true, true, true, true, false, false, true, false));
             String[] indices = indexNameExpressionResolver.concreteIndexNames(state, deleteIndexRequest);
             assertEquals(0, indices.length);
         }
         {
             DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("test-a*");
-            deleteIndexRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, true, false, false, true));
+            deleteIndexRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, true, false, false, true, false));
             IndexNotFoundException infe = expectThrows(IndexNotFoundException.class,
                 () -> indexNameExpressionResolver.concreteIndexNames(state, deleteIndexRequest));
             assertEquals(infe.getIndex().getName(), "test-a*");
@@ -1291,4 +1319,69 @@ public void testInvalidIndex() {
             () -> indexNameExpressionResolver.concreteIndexNames(context, "_foo"));
         assertEquals("Invalid index name [_foo], must not start with '_'.", iine.getMessage());
     }
+
+    public void testIgnoreThrottled() {
+        MetaData.Builder mdBuilder = MetaData.builder()
+            .put(indexBuilder("test-index").state(State.OPEN)
+                .settings(settings().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), true))
+                .putAlias(AliasMetaData.builder("test-alias")))
+            .put(indexBuilder("index").state(State.OPEN)
+                .putAlias(AliasMetaData.builder("test-alias2")))
+            .put(indexBuilder("index-closed").state(State.CLOSE)
+                .settings(settings().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), true))
+                .putAlias(AliasMetaData.builder("test-alias-closed")));
+        ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED, "*");
+            assertEquals(1, indices.length);
+            assertEquals("index", indices[0].getName());
+        }
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED, "test-alias");
+            assertEquals(1, indices.length);
+            assertEquals("test-index", indices[0].getName());
+        }
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED, "test-alias");
+            assertEquals(0, indices.length);
+        }
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED, "test-*");
+            assertEquals(1, indices.length);
+            assertEquals("index", indices[0].getName());
+        }
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED, "ind*", "test-index");
+            assertEquals(2, indices.length);
+            Arrays.sort(indices, Comparator.comparing(Index::getName));
+            assertEquals("index", indices[0].getName());
+            assertEquals("test-index", indices[1].getName());
+        }
+
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                new IndicesOptions(EnumSet.of(IndicesOptions.Option.ALLOW_NO_INDICES,
+                    IndicesOptions.Option.IGNORE_THROTTLED),
+                    EnumSet.of(IndicesOptions.WildcardStates.OPEN)), "ind*", "test-index");
+            assertEquals(2, indices.length);
+            Arrays.sort(indices, Comparator.comparing(Index::getName));
+            assertEquals("index", indices[0].getName());
+            assertEquals("test-index", indices[1].getName());
+        }
+        {
+            Index[] indices = indexNameExpressionResolver.concreteIndices(state,
+                new IndicesOptions(EnumSet.of(IndicesOptions.Option.ALLOW_NO_INDICES),
+                    EnumSet.of(IndicesOptions.WildcardStates.OPEN, IndicesOptions.WildcardStates.CLOSED)), "ind*", "test-index");
+            assertEquals(3, indices.length);
+            Arrays.sort(indices, Comparator.comparing(Index::getName));
+            assertEquals("index", indices[0].getName());
+            assertEquals("index-closed", indices[1].getName());
+            assertEquals("test-index", indices[2].getName());
+        }
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index 5ccacee395a31..aa0f6834b98ec 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -171,8 +171,8 @@ public void testValidateShrinkIndex() {
         ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0,
             Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
             .build();
-        AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
-            Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
+        AllocationService service = new AllocationService(new AllocationDeciders(
+            Collections.singleton(new MaxRetryAllocationDecider())),
             new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
 
         RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
@@ -241,8 +241,8 @@ public void testValidateSplitIndex() {
         ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0,
             Settings.builder().put("index.blocks.write", true).put("index.number_of_routing_shards", targetShards).build()))
             .nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
-        AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
-            Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
+        AllocationService service = new AllocationService(new AllocationDeciders(
+            Collections.singleton(new MaxRetryAllocationDecider())),
             new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
 
         RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
@@ -375,9 +375,7 @@ private void runPrepareResizeIndexSettingsTest(
                 .build();
 
         final AllocationService service = new AllocationService(
-                Settings.builder().build(),
-                new AllocationDeciders(Settings.EMPTY,
-                        Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
+                new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())),
                 new TestGatewayAllocator(),
                 new BalancedShardsAllocator(Settings.EMPTY),
                 EmptyClusterInfoService.INSTANCE);
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
index e1fbc47c4a022..9b2d58ac28758 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java
@@ -43,9 +43,9 @@
 import static org.mockito.Mockito.when;
 
 public class MetaDataIndexAliasesServiceTests extends ESTestCase {
-    private final AliasValidator aliasValidator = new AliasValidator(Settings.EMPTY);
+    private final AliasValidator aliasValidator = new AliasValidator();
     private final MetaDataDeleteIndexService deleteIndexService = mock(MetaDataDeleteIndexService.class);
-    private final MetaDataIndexAliasesService service = new MetaDataIndexAliasesService(Settings.EMPTY, null, null, aliasValidator,
+    private final MetaDataIndexAliasesService service = new MetaDataIndexAliasesService(null, null, aliasValidator,
         deleteIndexService, xContentRegistry());
 
     public MetaDataIndexAliasesServiceTests() {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
index c2950256884c5..f5ac710510718 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -154,7 +154,8 @@ public void testIndexAndAliasWithSameName() {
             MetaData.builder().put(builder).build();
             fail("exception should have been thrown");
         } catch (IllegalStateException e) {
-            assertThat(e.getMessage(), equalTo("index and alias names need to be unique, but the following duplicates were found [index (alias of [index])]"));
+            assertThat(e.getMessage(),
+                equalTo("index and alias names need to be unique, but the following duplicates were found [index (alias of [index])]"));
         }
     }
 
@@ -249,7 +250,8 @@ public void testResolveIndexRouting() {
             metaData.resolveIndexRouting("0", "alias1");
             fail("should fail");
         } catch (IllegalArgumentException ex) {
is("Alias [alias1] has index routing associated with it [1], " + + "and was provided with routing value [0], rejecting operation")); } // alias with invalid index routing. @@ -257,14 +259,16 @@ public void testResolveIndexRouting() { metaData.resolveIndexRouting(null, "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); + assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that" + + " resolved to several routing values, rejecting operation")); } try { metaData.resolveIndexRouting("1", "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); + assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that" + + " resolved to several routing values, rejecting operation")); } IndexMetaData.Builder builder2 = IndexMetaData.builder("index2") diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java index f78f84958061d..4bff7f8dc6198 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -105,7 +104,7 @@ public void testCalculateChangesAddChangeAndDelete() { IndexTemplateMetaData.builder("changed_test_template").patterns(randomIndexPatterns()).build() ); - final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, threadPool, + final TemplateUpgradeService service = new TemplateUpgradeService(null, clusterService, threadPool, Arrays.asList( templates -> { if (shouldAdd) { @@ -205,7 +204,7 @@ public void testUpdateTemplates() { additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}")); } - final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + final TemplateUpgradeService service = new TemplateUpgradeService(mockClient, clusterService, threadPool, Collections.emptyList()); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.upgradeTemplates(additions, deletions)); @@ -297,7 +296,7 @@ public void testClusterStateUpdate() throws InterruptedException { return null; }).when(mockIndicesAdminClient).deleteTemplate(any(DeleteIndexTemplateRequest.class), any(ActionListener.class)); - new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + new TemplateUpgradeService(mockClient, clusterService, threadPool, Arrays.asList( templates -> { assertNull(templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template") diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java index 3ac55ec663ca0..ae24915e32d52 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java @@ -302,7 +302,8 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(parsedMetaData.templates().get("foo").aliases().size(), equalTo(3)); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar1").alias(), equalTo("alias-bar1")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").alias(), equalTo("alias-bar2")); - assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}")); + assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").filter().string(), + equalTo("{\"term\":{\"user\":\"kimchy\"}}")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").alias(), equalTo("alias-bar3")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").indexRouting(), equalTo("routing-bar")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").searchRouting(), equalTo("routing-bar")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index cb2913e5820e1..734712eb5f279 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -50,11 +50,15 @@ public void testConvertWildcardsJustIndicesTests() { assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testXXX"))), equalTo(newHashSet("testXXX"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "ku*"))), equalTo(newHashSet("testXXX", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, 
Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testY*"))), equalTo(newHashSet("testXXX", "testYYY"))); @@ -71,11 +75,14 @@ public void testConvertWildcardsTests() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("-kuku"))), equalTo(newHashSet("-kuku"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "testYYY"))) + , equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "testX*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testConvertWildcardsOpenClosedIndicesTests() { @@ -89,8 +96,10 @@ public void testConvertWildcardsOpenClosedIndicesTests() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(true, true, true, true)); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); @@ -111,10 +120,12 @@ public void testMultipleWildcards() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + 
assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*Y"))), equalTo(newHashSet("testXXY", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("kuku*Y*"))), equalTo(newHashSet("kukuYYY"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*"))), equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*"))), + equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*Y*X"))).size(), equalTo(0)); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*X"))).size(), equalTo(0)); } @@ -128,7 +139,8 @@ public void testAll() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("_all"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testResolveAliases() { @@ -141,14 +153,18 @@ public void testResolveAliases() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); // when ignoreAliases option is not set, WildcardExpressionResolver resolves the provided // expressions against the defined indices and aliases - IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, false); - IndexNameExpressionResolver.Context indicesAndAliasesContext = new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); + IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, + false, false); + IndexNameExpressionResolver.Context indicesAndAliasesContext = + new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); // ignoreAliases option is set, WildcardExpressionResolver throws error when - IndicesOptions skipAliasesIndicesOptions = IndicesOptions.fromOptions(true, true, true, false, true, false, true); - IndexNameExpressionResolver.Context skipAliasesLenientContext = new IndexNameExpressionResolver.Context(state, skipAliasesIndicesOptions); + IndicesOptions skipAliasesIndicesOptions = IndicesOptions.fromOptions(true, true, true, false, true, false, true, false); + IndexNameExpressionResolver.Context skipAliasesLenientContext = + new IndexNameExpressionResolver.Context(state, skipAliasesIndicesOptions); // ignoreAliases option is set, WildcardExpressionResolver resolves the provided expressions only against the defined indices - IndicesOptions errorOnAliasIndicesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true); - IndexNameExpressionResolver.Context skipAliasesStrictContext = new IndexNameExpressionResolver.Context(state, errorOnAliasIndicesOptions); + IndicesOptions errorOnAliasIndicesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true, 
false); + IndexNameExpressionResolver.Context skipAliasesStrictContext = + new IndexNameExpressionResolver.Context(state, errorOnAliasIndicesOptions); { List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo_a*")); @@ -201,12 +217,13 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { // when ignoreAliases option is not set, WildcardExpressionResolver resolves the provided // expressions against the defined indices and aliases - IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, false); - IndexNameExpressionResolver.Context indicesAndAliasesContext = new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); + IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, false, false); + IndexNameExpressionResolver.Context indicesAndAliasesContext = + new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); // ignoreAliases option is set, WildcardExpressionResolver resolves the provided expressions // only against the defined indices - IndicesOptions onlyIndicesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true); + IndicesOptions onlyIndicesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true, false); IndexNameExpressionResolver.Context onlyIndicesContext = new IndexNameExpressionResolver.Context(state, onlyIndicesOptions); { @@ -242,6 +259,7 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { } private static IndexMetaData.Builder indexBuilder(String index) { - return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + return IndexMetaData.builder(index).settings(settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index 86dbeabd1d73e..379a72f6b9ffe 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -37,7 +37,8 @@ public class AllocationIdTests extends ESTestCase { public void testShardToStarted() { logger.info("-- create unassigned shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); assertThat(shard.allocationId(), nullValue()); logger.info("-- initialize the shard"); @@ -57,7 +58,8 @@ public void testShardToStarted() { public void testSuccessfulRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = 
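The `WildcardExpressionResolverTests` hunks above all make the same mechanical adjustment: `IndicesOptions.fromOptions(...)` grew an eighth positional boolean, and every existing call site passes `false` for it. A sketch of the widened call, with flag names taken from contemporaneous Elasticsearch sources for orientation (the name and meaning of the new trailing flag are assumptions, not something this diff states):

```java
import org.elasticsearch.action.support.IndicesOptions;

class IndicesOptionsSketch {
    // Positional flags, in order: ignoreUnavailable, allowNoIndices,
    // expandToOpenIndices, expandToClosedIndices, allowAliasesToMultipleIndices,
    // forbidClosedIndices, ignoreAliases, plus the new trailing flag
    // (false at every call site touched by this diff).
    static IndicesOptions skipAliasesLenient() {
        return IndicesOptions.fromOptions(true, true, true, false, true, false, true, false);
    }
}
```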
shard.moveToStarted(); @@ -80,7 +82,8 @@ public void testSuccessfulRelocation() { public void testCancelRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); @@ -100,7 +103,8 @@ public void testCancelRelocation() { public void testMoveToUnassigned() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index e82dbf4d0e94c..c175624125e50 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -68,7 +68,8 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState() + .getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); internalCluster().startNode(); // this will use the same data location as the stopped node ensureGreen("test"); @@ -90,7 +91,8 @@ public void testDelayedAllocationTimesOut() throws Exception { ensureGreen("test"); internalCluster().startNode(); // do a second round with longer delay to make sure it happens - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); ensureGreen("test"); } @@ -109,9 +111,11 @@ public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get() + 
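The `AllocationIdTests` changes above only rewrap long lines, but every test walks the same shard lifecycle, which is easier to follow in one piece. A self-contained sketch of that progression, assembled solely from calls that appear in the hunks:

```java
import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.index.shard.ShardId;

class ShardLifecycleSketch {
    static ShardRouting startPrimary() {
        // unassigned: no node, no allocation id yet
        ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test", "_na_", 0), true,
            ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
        // initializing: assigned to a node, allocation id created
        shard = shard.initialize("node1", null, -1);
        // started: allocation id retained, unassigned info cleared
        return shard.moveToStarted();
    }
}
```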
.getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); ensureGreen("test"); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); } @@ -130,9 +134,11 @@ public void testDelayedAllocationChangeWithSettingTo0() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState() + .getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0))).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0))).get()); ensureGreen("test"); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java index 7e74a35cf5584..c05d0a551fe26 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java @@ -537,7 +537,7 @@ public void testAdaptiveReplicaSelection() throws Exception { Set selectedNodes = new HashSet<>(numShards); TestThreadPool threadPool = new TestThreadPool("testThatOnlyNodesSupportNodeIds"); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - ResponseCollectorService collector = new ResponseCollectorService(Settings.EMPTY, clusterService); + ResponseCollectorService collector = new ResponseCollectorService(clusterService); Map outstandingRequests = new HashMap<>(); GroupShardsIterator groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 9b2db5b34b1da..f1876eab2ae51 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -87,7 +87,8 @@ protected Settings nodeSettings(int nodeOrdinal) { } private void createStaleReplicaScenario(String master) throws Exception { - client().prepareIndex("test", 
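The `DelayedAllocationIT` hunks above repeatedly rewrap one settings-update call, which is the crux of those tests: shrinking `index.unassigned.node_left.delayed_timeout` so a delayed shard is reassigned promptly. Extracted as a small helper, under the assumption that a test-framework `Client` is available (the helper itself is illustrative):

```java
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

class DelayedAllocationSketch {
    // Lowering the node-left delay triggers prompt reallocation of shards whose
    // node has left the cluster; the tests above use 100ms and 0ms.
    static void setNodeLeftDelay(Client client, String index, TimeValue delay) {
        client.admin().indices().prepareUpdateSettings(index)
            .setSettings(Settings.builder()
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delay))
            .get();
    }
}
```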
"type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client().prepareIndex("test", "type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); refresh(); ClusterState state = client().admin().cluster().prepareState().all().get().getState(); List shards = state.routingTable().allShards("test"); @@ -113,7 +114,8 @@ private void createStaleReplicaScenario(String master) throws Exception { ensureStableCluster(2, master); logger.info("--> index a document into previous replica shard (that is now primary)"); - client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); logger.info("--> shut down node that has new acknowledged document"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); @@ -128,9 +130,11 @@ private void createStaleReplicaScenario(String master) throws Exception { logger.info("--> check that old primary shard does not get promoted to primary again"); // kick reroute and wait for all shard states to be fetched client(master).admin().cluster().prepareReroute().get(); - assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0))); + assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), + equalTo(0))); // kick reroute a second time and check that all shards are unassigned - assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), + equalTo(2)); } public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { @@ -138,7 +142,8 @@ public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .setSettings(Settings.builder().put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1)).get()); ensureGreen(); createStaleReplicaScenario(master); @@ -163,16 +168,19 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNodeWithShardCopy)); ensureStableCluster(1); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT)); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test") + .getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT)); logger.info("--> force allocation of stale copy to node that does not have shard copy"); - client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)).get(); + client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, + dataNodeWithNoShardCopy, 
true)).get(); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().toString(), client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test") + .getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); } public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { @@ -180,7 +188,8 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .setSettings(Settings.builder().put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1)).get()); ensureGreen(); Set historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) .map(shard -> shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); @@ -189,7 +198,8 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; - ImmutableOpenIntMap> storeStatuses = client().admin().indices().prepareShardStores(idxName).get().getStoreStatuses().get(idxName); + ImmutableOpenIntMap> storeStatuses = client().admin().indices() + .prepareShardStores(idxName).get().getStoreStatuses().get(idxName); ClusterRerouteRequestBuilder rerouteBuilder = client().admin().cluster().prepareReroute(); for (IntObjectCursor> shardStoreStatuses : storeStatuses) { int shardId = shardStoreStatuses.key; @@ -234,9 +244,11 @@ public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() t .put("index.routing.allocation.exclude._name", node) .put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get(); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty()); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable() + .shardRoutingTable("test", 0).assignedShards(), empty()); - client().admin().cluster().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get(); + client().admin().cluster().prepareReroute().add( + new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get(); ensureGreen("test"); } @@ -244,12 +256,14 @@ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); + 
.put("index.number_of_shards", 1).put("index.number_of_replicas", 1) + .put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test") + .inSyncAllocationIds(0).size()); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -257,8 +271,10 @@ public boolean clearData(String nodeName) { } }); logger.info("--> wait until shard is failed and becomes unassigned again"); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertEquals(2, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(Settings.EMPTY); @@ -269,15 +285,19 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); + .put("index.number_of_shards", 1).put("index.number_of_replicas", + 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertEquals(2, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> indexing..."); - client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject() + .field("field", "value1").endObject()).get(); + assertEquals(1, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -285,12 +305,15 @@ public boolean clearData(String nodeName) { } }); logger.info("--> wait until shard is failed and becomes unassigned again"); - assertBusy(() -> 
- assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertEquals(1, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(Settings.EMPTY); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); } public void testNotWaitForQuorumCopies() throws Exception { @@ -300,7 +323,8 @@ public void testNotWaitForQuorumCopies() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get()); ensureGreen("test"); - client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client().prepareIndex("test", "type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); logger.info("--> removing 2 nodes from cluster"); internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 0f55270354301..a6c2fab5c91e4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -76,10 +76,13 @@ public void setUp() throws Exception { .build(); RoutingTable testRoutingTable = new RoutingTable.Builder() - .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1)).build()) - .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2)).build()) + .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1)
+ .getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1)).build()) + .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2) + .getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2)).build()) .build(); - this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(testRoutingTable).build(); + this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(testRoutingTable).build(); } /** @@ -99,7 +102,8 @@ private void initPrimaries() { private void startInitializingShards(String index) { logger.info("start primary shards for index {}", index); - this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); + this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, + this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); } private IndexMetaData.Builder createIndexMetaData(String indexName) { @@ -142,20 +146,23 @@ public void testShardsWithState() { assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards)); initPrimaries(); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - 2 * this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - 2 * this.numberOfShards)); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(2 * this.numberOfShards)); startInitializingShards(TEST_INDEX_1); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(this.numberOfShards)); int initializingExpected = this.numberOfShards + this.numberOfShards * this.numberOfReplicas; assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected)); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - initializingExpected - this.numberOfShards)); startInitializingShards(TEST_INDEX_2); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(2 * this.numberOfShards)); initializingExpected = 2 * this.numberOfShards * this.numberOfReplicas; assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected)); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards)); // now start all replicas too startInitializingShards(TEST_INDEX_1); @@ -168,21 +175,29 @@ public void testActivePrimaryShardsGrouped() { assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0)); assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); -
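The expected-count arithmetic in `testShardsWithState` above is easier to verify with concrete numbers; a worked example, assuming 3 shards and 2 replicas per index across the two test indices (the values are illustrative, the formulas come from the hunk):

```java
// numberOfShards = 3, numberOfReplicas = 2, two indices
// totalNumberOfShards = 2 * 3 * (1 primary + 2 replicas) = 18
//
// after initPrimaries(): primaries of both indices initialize
//   INITIALIZING = 2 * 3 = 6, UNASSIGNED = 18 - 6 = 12
//
// after startInitializingShards(TEST_INDEX_1): index 1 primaries start, its
// replicas begin initializing, index 2 primaries are still initializing
//   STARTED = 3
//   initializingExpected = 3 + 3 * 2 = 9
//   UNASSIGNED = 18 - 9 - 3 = 6
//
// after startInitializingShards(TEST_INDEX_2): both indices' primaries started
//   STARTED = 6
//   initializingExpected = 2 * 3 * 2 = 12
//   UNASSIGNED = 18 - 12 - 6 = 0
```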
assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), + is(this.numberOfShards)); initPrimaries(); assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), + is(this.numberOfShards)); startInitializingShards(TEST_INDEX_1); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), + is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); startInitializingShards(TEST_INDEX_2); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards)); try { clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); @@ -197,21 +212,29 @@ public void testAllActiveShardsGrouped() { assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], false).size(), is(0)); assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); initPrimaries(); assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), 
is(this.shardsPerIndex)); startInitializingShards(TEST_INDEX_1); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); startInitializingShards(TEST_INDEX_2); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); try { clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); @@ -222,14 +245,19 @@ public void testAllActiveShardsGrouped() { public void testAllAssignedShardsGrouped() { assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); initPrimaries(); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, 
true).size(), is(this.totalNumberOfShards)); try { clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false); @@ -336,7 +364,8 @@ public void testDistinctNodes() { ShardRouting routing1 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); ShardRouting routing2 = TestShardRouting.newShardRouting(shardId, "node2", randomBoolean(), ShardRoutingState.STARTED); ShardRouting routing3 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); - ShardRouting routing4 = TestShardRouting.newShardRouting(shardId, "node3", "node2", randomBoolean(), ShardRoutingState.RELOCATING); + ShardRouting routing4 = TestShardRouting.newShardRouting( + shardId, "node3", "node2", randomBoolean(), ShardRoutingState.RELOCATING); assertTrue(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2))); assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing3))); assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2, routing3))); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index f87f918d99ecc..1216f143686c4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -35,8 +35,10 @@ public class ShardRoutingTests extends ESTestCase { public void testIsSameAllocation() { ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); ShardRouting unassignedShard1 = TestShardRouting.newShardRouting("test", 1, null, false, ShardRoutingState.UNASSIGNED); - ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "1", randomBoolean(), ShardRoutingState.INITIALIZING); - ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard0 = + TestShardRouting.newShardRouting("test", 0, "1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard1 = + TestShardRouting.newShardRouting("test", 1, "1", randomBoolean(), ShardRoutingState.INITIALIZING); ShardRouting startedShard0 = initializingShard0.moveToStarted(); ShardRouting startedShard1 = initializingShard1.moveToStarted(); @@ -63,9 +65,12 @@ private ShardRouting randomShardRouting(String index, int shard) { } public void testIsSourceTargetRelocation() { - ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); - ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); - ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting unassignedShard0 = + TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); + ShardRouting initializingShard0 = + TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard1 = + TestShardRouting.newShardRouting("test", 1, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); assertFalse(initializingShard0.isRelocationTarget()); ShardRouting startedShard0 = 
initializingShard0.moveToStarted(); assertFalse(startedShard0.isRelocationTarget()); @@ -126,7 +131,8 @@ public void testEqualsIgnoringVersion() { break; case 1: // change shard id - otherRouting = new ShardRouting(new ShardId(otherRouting.index(), otherRouting.id() + 1), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), + otherRouting = new ShardRouting(new ShardId(otherRouting.index(), otherRouting.id() + 1), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); break; @@ -135,9 +141,9 @@ public void testEqualsIgnoringVersion() { if (otherRouting.assignedToNode() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), - otherRouting.allocationId(), otherRouting.getExpectedShardSize()); + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId() + "_1", + otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), + otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); } break; case 3: @@ -145,9 +151,10 @@ public void testEqualsIgnoringVersion() { if (otherRouting.relocating() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId() + "_1", - otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), - otherRouting.allocationId(), otherRouting.getExpectedShardSize()); + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), + otherRouting.relocatingNodeId() + "_1", otherRouting.primary(), otherRouting.state(), + otherRouting.recoverySource(), otherRouting.unassignedInfo(), otherRouting.allocationId(), + otherRouting.getExpectedShardSize()); } break; case 4: @@ -155,16 +162,18 @@ public void testEqualsIgnoringVersion() { if (otherRouting.active() || otherRouting.primary() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), - new RecoverySource.SnapshotRecoverySource(new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), + otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), + new RecoverySource.SnapshotRecoverySource(new Snapshot("test", + new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); } break; case 5: // change primary flag - otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo()); + otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary() 
== false, + otherRouting.state(), otherRouting.unassignedInfo()); break; case 6: // change state @@ -174,12 +183,14 @@ public void testEqualsIgnoringVersion() { } while (newState == otherRouting.state()); UnassignedInfo unassignedInfo = otherRouting.unassignedInfo(); - if (unassignedInfo == null && (newState == ShardRoutingState.UNASSIGNED || newState == ShardRoutingState.INITIALIZING)) { + if (unassignedInfo == null && (newState == ShardRoutingState.UNASSIGNED || + newState == ShardRoutingState.INITIALIZING)) { unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test"); } otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), - newState == ShardRoutingState.UNASSIGNED ? null : (otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId()), + newState == ShardRoutingState.UNASSIGNED ? null : + (otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId()), newState == ShardRoutingState.RELOCATING ? "2" : null, otherRouting.primary(), newState, unassignedInfo); break; @@ -187,15 +198,16 @@ public void testEqualsIgnoringVersion() { if (randomBoolean()) { // change unassigned info - otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), + otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo() == null ? new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") : - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, otherRouting.unassignedInfo().getMessage() + "_1")); + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, otherRouting.unassignedInfo().getMessage() + "_1")); } if (unchanged == false) { logger.debug("comparing\nthis {} to\nother {}", routing, otherRouting); - assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, routing.equalsIgnoringMetaData(otherRouting)); + assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, + routing.equalsIgnoringMetaData(otherRouting)); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index d8f7f6552f908..a2e8f9d7f3fb1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -80,8 +80,8 @@ public void testReasonOrdinalOrder() { public void testSerialization() throws Exception { UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), UnassignedInfo.Reason.values()); UnassignedInfo meta = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ? - new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null, null, randomIntBetween(1, 100), System.nanoTime(), - System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT): + new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null, null, + randomIntBetween(1, 100), System.nanoTime(), System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT): new UnassignedInfo(reason, randomBoolean() ? 
randomAlphaOfLength(4) : null); BytesStreamOutput out = new BytesStreamOutput(); meta.writeTo(out); @@ -97,7 +97,8 @@ public void testSerialization() throws Exception { public void testIndexCreated() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -109,7 +110,8 @@ public void testIndexCreated() { public void testClusterRecovered() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -121,7 +123,8 @@ public void testClusterRecovered() { public void testIndexReopened() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -133,11 +136,14 @@ public void testIndexReopened() { public void testNewIndexRestored() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) - .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), new IntHashSet()).build()).build(); + .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new SnapshotRecoverySource( + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), + new IntHashSet()).build()).build(); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED)); } @@ -145,11 +151,14 @@ public void testNewIndexRestored() { public void testExistingIndexRestored() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 
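`testSerialization` above writes an `UnassignedInfo` to a stream and reads it back. A round-trip sketch of that check; note the stream-reading constructor is inferred from the surrounding test code rather than shown in this diff, so treat it as an assumption:

```java
import java.io.IOException;

import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

class UnassignedInfoRoundTrip {
    static UnassignedInfo roundTrip(UnassignedInfo info) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        info.writeTo(out);                                    // serialize
        return new UnassignedInfo(out.bytes().streamInput()); // deserialize (assumed ctor)
    }
}
```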
3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) - .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build(); + .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), + new SnapshotRecoverySource(new Snapshot("rep1", + new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build(); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED)); } @@ -157,7 +166,8 @@ public void testExistingIndexRestored() { public void testDanglingIndexImported() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -185,17 +195,20 @@ public void testReplicaAdded() { builder.addIndexShard(indexShardRoutingTable); } builder.addReplica(); - clusterState = ClusterState.builder(clusterState).routingTable(RoutingTable.builder(clusterState.routingTable()).add(builder).build()).build(); + clusterState = ClusterState.builder(clusterState) + .routingTable(RoutingTable.builder(clusterState.routingTable()).add(builder).build()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue()); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.REPLICA_ADDED)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), + equalTo(UnassignedInfo.Reason.REPLICA_ADDED)); } /** * The unassigned meta is kept when a shard goes to INITIALIZING, but cleared when it moves to STARTED. 
*/ public void testStateTransitionMetaHandling() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, + true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); assertThat(shard.unassignedInfo(), notNullValue()); shard = shard.initialize("test_node", null, -1); assertThat(shard.state(), equalTo(ShardRoutingState.INITIALIZING)); @@ -216,7 +229,8 @@ public void testNodeLeave() { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -230,8 +244,10 @@ public void testNodeLeave() { assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue()); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT)); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), + equalTo(UnassignedInfo.Reason.NODE_LEFT)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), + greaterThan(0L)); } /** @@ -245,7 +261,8 @@ public void testFailedShard() { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); // starting primaries clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -254,15 +271,20 @@ public void testFailedShard() { assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // fail shard ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0); - clusterState = allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedShard(shardToFail, "test fail", null, randomBoolean()))); + clusterState = allocation.applyFailedShards(clusterState, + Collections.singletonList(new FailedShard(shardToFail, "test fail", null, 
randomBoolean()))); // verify the reason and details assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue()); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail")); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail")); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), + equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(), + equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail")); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(), + equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail")); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), + greaterThan(0L)); } /** @@ -273,7 +295,8 @@ public void testRemainingDelayCalculation() throws Exception { UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "test", null, 0, baseTime, System.currentTimeMillis(), randomBoolean(), AllocationStatus.NO_ATTEMPT); final long totalDelayNanos = TimeValue.timeValueMillis(10).nanos(); - final Settings indexSettings = Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build(); + final Settings indexSettings = Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build(); long delay = unassignedInfo.getRemainingDelay(baseTime, indexSettings); assertThat(delay, equalTo(totalDelayNanos)); long delta1 = randomIntBetween(1, (int) (totalDelayNanos - 1)); @@ -295,7 +318,8 @@ public void testNumberOfDelayedUnassigned() throws Exception { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -317,13 +341,16 @@ public void testFindNextDelayedAllocation() { final long expectMinDelaySettingsNanos = Math.min(delayTest1.nanos(), delayTest2.nanos()); MetaData metaData = 
MetaData.builder() - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)).numberOfShards(1).numberOfReplicas(1)) - .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put( + UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put( + UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)).numberOfShards(1).numberOfReplicas(1)) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -344,7 +371,8 @@ public void testFindNextDelayedAllocation() { clusterState = allocation.reroute(clusterState, "time moved"); } - assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState), equalTo(expectMinDelaySettingsNanos - delta)); + assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState), + equalTo(expectMinDelaySettingsNanos - delta)); } public void testAllocationStatusSerialization() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index dd9846a7b7526..b3e0146bd7f1d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -52,7 +52,8 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testAddNodesAndIndices() { Settings.Builder settings = Settings.builder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); AllocationService service = createAllocationService(settings.build()); ClusterState clusterState = initCluster(service, 1, 3, 3, 1); @@ -95,7 +96,8 @@ public void testAddNodesAndIndices() { public void testMinimalRelocations() { Settings.Builder settings = Settings.builder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 2); AllocationService service = createAllocationService(settings.build()); @@ -156,7 +158,8 @@ public void testMinimalRelocations() { public void testMinimalRelocationsNoLimit() { Settings.Builder settings = Settings.builder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100); AllocationService service = createAllocationService(settings.build()); @@ -261,8 +264,8 @@ private ClusterState initCluster(AllocationService service, int numberOfNodes, i RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); for (int i = 0; i < numberOfIndices; i++) { - IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas( - numberOfReplicas); + IndexMetaData.Builder index = IndexMetaData.builder("test" + i) + .settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas); metaDataBuilder = metaDataBuilder.put(index); } @@ -279,7 +282,8 @@ private ClusterState initCluster(AllocationService service, int numberOfNodes, i for (int i = 0; i < numberOfNodes; i++) { nodes.add(newNode("node" + i)); } - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build(); clusterState = service.reroute(clusterState, "reroute"); logger.info("restart all the primary shards, replicas will start initializing"); @@ -300,7 +304,8 @@ private ClusterState addIndex(ClusterState clusterState, AllocationService servi MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.getMetaData()); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); - IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas( + IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards).numberOfReplicas( numberOfReplicas); IndexMetaData imd = index.build(); metaDataBuilder = metaDataBuilder.put(imd, true); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 189dc4542b4b7..1ea0a7f8501d3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -73,7 +73,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase { private final 
Logger logger = LogManager.getLogger(AllocationCommandsTests.class); public void testMoveShardCommand() { - AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService allocation = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("creating an index with 1 shard, no replica"); MetaData metaData = MetaData.builder() @@ -82,10 +83,12 @@ public void testMoveShardCommand() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start primary shard"); @@ -100,7 +103,8 @@ public void testMoveShardCommand() { toNodeId = "node1"; } ClusterState newState = allocation.reroute(clusterState, - new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false).getClusterState(); + new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), + false, false).getClusterState(); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING)); @@ -131,14 +135,16 @@ public void testAllocateCommand() { logger.info("--> building initial routing table"); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) - .putInSyncAllocationIds(0, Collections.singleton("asdf")).putInSyncAllocationIds(1, Collections.singleton("qwertz"))) + .putInSyncAllocationIds(0, Collections.singleton("asdf")) + .putInSyncAllocationIds(1, Collections.singleton("qwertz"))) .build(); // shard routing is added as "from recovery" instead of "new index creation" so that we can test below that allocating an empty // primary with accept_data_loss flag set to false fails RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(index)) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); final ShardId shardId = new ShardId(metaData.index(index).getIndex(), 0); logger.info("--> adding 3 nodes on same rack and do rerouting"); @@ -185,23 +191,28 @@ public void testAllocateCommand() { logger.info("--> allocating empty primary with acceptDataLoss flag set to false"); try { - 
allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", false)), false, false); + allocation.reroute(clusterState, new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", false)), false, false); fail("expected IllegalArgumentException when allocating empty primary with acceptDataLoss flag set to false"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true")); + assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId + + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true")); } logger.info("--> allocating stale primary with acceptDataLoss flag set to false"); try { - allocation.reroute(clusterState, new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, shardId.id(), "node1", false)), false, false); + allocation.reroute(clusterState, new AllocationCommands( + new AllocateStalePrimaryAllocationCommand(index, shardId.id(), "node1", false)), false, false); fail("expected IllegalArgumentException when allocating stale primary with acceptDataLoss flag set to false"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true")); + assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId + + " can result in data loss. Please confirm by setting the accept_data_loss parameter to true")); } logger.info("--> allocating empty primary with acceptDataLoss flag set to true"); ClusterState newState = allocation.reroute(clusterState, - new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false).getClusterState(); + new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), + false, false).getClusterState(); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1)); @@ -216,7 +227,8 @@ public void testAllocateCommand() { logger.info("--> allocate the replica shard on the primary shard node, should fail"); try { - allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node1")), false, false); + allocation.reroute(clusterState, new AllocationCommands( + new AllocateReplicaAllocationCommand("test", 0, "node1")), false, false); fail("expected IllegalArgumentException when allocating replica shard on the primary shard node"); } catch (IllegalArgumentException e) { } @@ -260,7 +272,8 @@ public void testCancelCommand() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding 3 nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -483,14 +496,16 @@ public void 
testSerialization() throws Exception { public void testXContent() throws Exception { String commands = "{\n" + - " \"commands\" : [\n" + - " {\"allocate_empty_primary\" : {\"index\" : \"test\", \"shard\" : 1, \"node\" : \"node1\", \"accept_data_loss\" : true}}\n" + - " ,{\"allocate_stale_primary\" : {\"index\" : \"test\", \"shard\" : 2, \"node\" : \"node1\", \"accept_data_loss\" : true}}\n" + - " ,{\"allocate_replica\" : {\"index\" : \"test\", \"shard\" : 2, \"node\" : \"node1\"}}\n" + - " ,{\"move\" : {\"index\" : \"test\", \"shard\" : 3, \"from_node\" : \"node2\", \"to_node\" : \"node3\"}} \n" + - " ,{\"cancel\" : {\"index\" : \"test\", \"shard\" : 4, \"node\" : \"node5\", \"allow_primary\" : true}} \n" + - " ]\n" + - "}\n"; + " \"commands\" : [\n" + + " {\"allocate_empty_primary\" : {\"index\" : \"test\", \"shard\" : 1," + + " \"node\" : \"node1\", \"accept_data_loss\" : true}}\n" + + " ,{\"allocate_stale_primary\" : {\"index\" : \"test\", \"shard\" : 2," + + " \"node\" : \"node1\", \"accept_data_loss\" : true}}\n" + + " ,{\"allocate_replica\" : {\"index\" : \"test\", \"shard\" : 2, \"node\" : \"node1\"}}\n" + + " ,{\"move\" : {\"index\" : \"test\", \"shard\" : 3, \"from_node\" : \"node2\", \"to_node\" : \"node3\"}} \n" + + " ,{\"cancel\" : {\"index\" : \"test\", \"shard\" : 4, \"node\" : \"node5\", \"allow_primary\" : true}} \n" + + " ]\n" + + "}\n"; XContentParser parser = createParser(JsonXContent.jsonXContent, commands); // move two tokens, parser expected to be "on" `commands` field parser.nextToken(); @@ -529,7 +544,8 @@ protected NamedXContentRegistry xContentRegistry() { } public void testMoveShardToNonDataNode() { - AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService allocation = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("creating an index with 1 shard, no replica"); MetaData metaData = MetaData.builder() @@ -538,7 +554,8 @@ public void testMoveShardToNonDataNode() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding two nodes"); @@ -557,15 +574,17 @@ public void testMoveShardToNonDataNode() { Index index = clusterState.getMetaData().index("test").getIndex(); MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node1", "node2"); - RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), + RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime()); logger.info("--> executing move allocation command to non-data node"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> command.execute(routingAllocation, false)); - assertEquals("[move_allocation] can't move [test][0] from " + node1 + " to " + node2 + ": source [" + node2.getName() + "] is not a data node.", 
e.getMessage()); + assertEquals("[move_allocation] can't move [test][0] from " + node1 + " to " + + node2 + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); } public void testMoveShardFromNonDataNode() { - AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService allocation = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("creating an index with 1 shard, no replica"); MetaData metaData = MetaData.builder() @@ -574,7 +593,8 @@ public void testMoveShardFromNonDataNode() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding two nodes"); @@ -592,10 +612,11 @@ public void testMoveShardFromNonDataNode() { Index index = clusterState.getMetaData().index("test").getIndex(); MoveAllocationCommand command = new MoveAllocationCommand(index.getName(), 0, "node2", "node1"); - RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), + RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime()); logger.info("--> executing move allocation command from non-data node"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> command.execute(routingAllocation, false)); - assertEquals("[move_allocation] can't move [test][0] from " + node2 + " to " + node1 + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); + assertEquals("[move_allocation] can't move [test][0] from " + node2 + " to " + node1 + + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index 684985801c4d1..fdb9fdb46a85c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -41,7 +41,8 @@ public void testPrioritizedIndicesAllocatedFirst() { put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 1) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 10) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 1).build()); + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 1) + .build()); final String highPriorityName; final String lowPriorityName; final int priorityFirst; @@ -58,16 +59,20 @@ public void 
testPrioritizedIndicesAllocatedFirst() { priorityFirst = 1; } MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("first").settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, priorityFirst)).numberOfShards(2).numberOfReplicas(1)) - .put(IndexMetaData.builder("second").settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_PRIORITY, prioritySecond)).numberOfShards(2).numberOfReplicas(1)) + .put(IndexMetaData.builder("first").settings(settings(Version.CURRENT) + .put(IndexMetaData.SETTING_PRIORITY, priorityFirst)).numberOfShards(2).numberOfReplicas(1)) + .put(IndexMetaData.builder("second").settings(settings(Version.CURRENT) + .put(IndexMetaData.SETTING_PRIORITY, prioritySecond)).numberOfShards(2).numberOfReplicas(1)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder() .addAsNew(metaData.index("first")) .addAsNew(metaData.index("second")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")) + .add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); clusterState = allocation.reroute(clusterState, "reroute"); @@ -81,7 +86,8 @@ public void testPrioritizedIndicesAllocatedFirst() { assertEquals(lowPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - assertEquals(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).toString(),2, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size()); + assertEquals(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).toString(),2, + clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size()); assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0).getIndexName()); assertEquals(highPriorityName, clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(1).getIndexName()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 38a72adeb1b3a..b0ce9ad320a9e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -68,7 +68,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded1() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + 
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -94,7 +95,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded1() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node3")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -128,7 +130,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded2() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -155,7 +158,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded2() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node4")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -196,7 +200,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded3() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -229,7 +234,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded3() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(5)); - 
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node3")); logger.info("--> complete initializing"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -279,7 +285,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded4() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -306,7 +313,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded4() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node3")); logger.info("--> complete initializing"); for (int i = 0; i < 2; i++) { @@ -362,7 +370,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -388,7 +397,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo("node3")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -405,7 +415,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded5() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); 
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node4")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -433,7 +444,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -461,7 +473,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node5")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -478,7 +491,8 @@ public void testMoveShardOnceNewNodeWithAttributeAdded6() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(3)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), + equalTo("node6")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -507,7 +521,8 @@ public void testFullAwareness1() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -532,7 +547,8 @@ public void testFullAwareness1() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1)); 
assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo("node3")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -569,7 +585,8 @@ public void testFullAwareness2() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -595,7 +612,8 @@ public void testFullAwareness2() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo("node4")); logger.info("--> complete relocation"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -639,7 +657,8 @@ public void testFullAwareness3() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -662,7 +681,8 @@ public void testFullAwareness3() { assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo("node3")); logger.info("--> complete initializing"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -711,7 +731,8 @@ public void testUnbalancedZones() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes in different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -739,7 +760,8 @@ public void testUnbalancedZones() { clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("A-1")); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo("A-1")); logger.info("--> starting initializing shards on the new node"); clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); @@ -767,7 +789,8 @@ public void testUnassignedShardsWithUnbalancedZones() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding 5 nodes in different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 006c6dff6eb0b..0ef64e15ce7a2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -67,7 +67,8 @@ public void testIndexBalance() { final float balanceTreshold = 1.0f; Settings.Builder settings = Settings.builder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); @@ -75,13 +76,16 @@ public void testIndexBalance() { AllocationService strategy = createAllocationService(settings.build(), new NoopGatewayAllocator()); ClusterState clusterState = initCluster(strategy); - assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, 
            numberOfReplicas, numberOfShards, balanceTreshold);
+        assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices,
+            numberOfReplicas, numberOfShards, balanceTreshold);
         clusterState = addNode(clusterState, strategy);
-        assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+        assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), numberOfNodes + 1,
+            numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
         clusterState = removeNodes(clusterState, strategy);
-        assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+        assertIndexBalance(clusterState.getRoutingTable(), clusterState.getRoutingNodes(),
+            (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
     }

     public void testReplicaBalance() {
@@ -91,7 +95,8 @@ public void testReplicaBalance() {
         final float balanceTreshold = 1.0f;

         Settings.Builder settings = Settings.builder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+            ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
         settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance);
         settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance);
         settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold);
@@ -99,13 +104,16 @@ public void testReplicaBalance() {
         AllocationService strategy = createAllocationService(settings.build(), new NoopGatewayAllocator());

         ClusterState clusterState = initCluster(strategy);
-        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices,
+            numberOfReplicas, numberOfShards, balanceTreshold);
         clusterState = addNode(clusterState, strategy);
-        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1,
+            numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
         clusterState = removeNodes(clusterState, strategy);
-        assertReplicaBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+        assertReplicaBalance(logger, clusterState.getRoutingNodes(),
+            (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
     }
@@ -114,7 +122,8 @@ private ClusterState initCluster(AllocationService strategy) {
         RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
         for (int i = 0; i < numberOfIndices; i++) {
-            IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
+            IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT))
+                .numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
             metaDataBuilder = metaDataBuilder.put(index);
         }
@@ -132,7 +141,8 @@ private ClusterState initCluster(AllocationService strategy) {
         for (int i = 0; i < numberOfNodes; i++) {
             nodes.add(newNode("node" + i));
         }
-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(initialRoutingTable).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         logger.info("restart all the primary shards, replicas will start initializing");
@@ -191,20 +201,21 @@ private ClusterState removeNodes(ClusterState clusterState, AllocationService st
     }

-    private void assertReplicaBalance(Logger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+    private void assertReplicaBalance(Logger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas,
+                                      int numberOfShards, float treshold) {
         final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1);
         final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
         final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold)));
         final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold)));

         for (RoutingNode node : nodes) {
-//            logger.info(node.nodeId() + ": " + node.shardsWithState(INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
             assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
             assertThat(node.shardsWithState(STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
         }
     }

-    private void assertIndexBalance(RoutingTable routingTable, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+    private void assertIndexBalance(RoutingTable routingTable, RoutingNodes nodes, int numberOfNodes, int numberOfIndices,
+                                    int numberOfReplicas, int numberOfShards, float treshold) {

         final int numShards = numberOfShards * (numberOfReplicas + 1);
         final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
@@ -213,7 +224,6 @@ private void assertIndexBalance(RoutingTable routingTable, RoutingNodes nodes, i
         for (ObjectCursor index : routingTable.indicesRouting().keys()) {
             for (RoutingNode node : nodes) {
-//              logger.info(node.nodeId() +":"+index+ ": " + node.shardsWithState(index, INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
                assertThat(node.shardsWithState(index.value, STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
                assertThat(node.shardsWithState(index.value, STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
            }
@@ -235,7 +245,8 @@ public void testPersistedSettings() {
         settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2);
         settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3);
         settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0);
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+            ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
         service.applySettings(settings.build());
         assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
         assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
@@ -253,7 +264,7 @@ public void testPersistedSettings() {

     public void testNoRebalanceOnPrimaryOverload() {
         Settings.Builder settings = Settings.builder();
-        AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
+        AllocationService strategy = new AllocationService(randomAllocationDeciders(settings.build(),
             new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random()),
             new TestGatewayAllocator(), new ShardsAllocator() {
             /*
@@ -330,7 +341,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
         }, EmptyClusterInfoService.INSTANCE);
         MetaData.Builder metaDataBuilder = MetaData.builder();
         RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
-        IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
+        IndexMetaData.Builder indexMeta = IndexMetaData.builder("test")
+            .settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
         metaDataBuilder = metaDataBuilder.put(indexMeta);
         MetaData metaData = metaDataBuilder.build();
         for (ObjectCursor cursor : metaData.indices().values()) {
@@ -343,7 +355,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
             nodes.add(node);
         }
-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
         routingTable = strategy.reroute(clusterState, "reroute").routingTable();
         clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
         RoutingNodes routingNodes = clusterState.getRoutingNodes();
@@ -390,11 +403,6 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
     }

     private class NoopGatewayAllocator extends GatewayAllocator {
-
-        NoopGatewayAllocator() {
-            super(Settings.EMPTY);
-        }
-
         @Override
         public void applyStartedShards(RoutingAllocation allocation, List startedShards) {
             // noop
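
The hunks above drop the `Settings` constructor argument from `AllocationService`, `AllocationDeciders`, and `GatewayAllocator`. A minimal sketch of the resulting test wiring, assuming the imports and helpers (`TestGatewayAllocator`, `EmptyClusterInfoService`) already used in these test classes:

---------------------------------------------------------------------------
// Sketch only: after this change, neither the composite deciders nor the
// service itself receive a Settings instance at construction time.
AllocationService service = new AllocationService(
    new AllocationDeciders(Collections.emptyList()), // deciders only, no Settings
    new TestGatewayAllocator(),
    new BalancedShardsAllocator(settings),           // the allocator still takes Settings
    EmptyClusterInfoService.INSTANCE);
---------------------------------------------------------------------------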
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
index 405f459e99a39..e4a7fa47025d6 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java
@@ -61,7 +61,7 @@ public void testRebalanceNonStartedShardNotAllowed() {
             randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING));
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState)).getMoveDecision();
+            new AllocationDeciders(Collections.emptyList()), clusterState)).getMoveDecision();
         assertSame(MoveDecision.NOT_TAKEN, rebalanceDecision);
     }
@@ -70,7 +70,7 @@ public void testRebalanceNotAllowedDuringPendingAsyncFetch() {
         ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED);
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         RoutingAllocation routingAllocation = newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState);
+            new AllocationDeciders(Collections.emptyList()), clusterState);
         routingAllocation.setHasPendingAsyncFetch();
         MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
         assertNotNull(rebalanceDecision.getClusterRebalanceDecision());
@@ -85,7 +85,7 @@ public void testRebalanceNotAllowedDuringPendingAsyncFetch() {

     public void testRebalancingNotAllowedDueToCanRebalance() {
         final Decision canRebalanceDecision = randomFrom(Decision.NO, Decision.THROTTLE);
-        AllocationDecider noRebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider noRebalanceDecider = new AllocationDecider() {
             @Override
             public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
                 return allocation.decision(canRebalanceDecision, "TEST", "foobar");
@@ -95,7 +95,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
         ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED);
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         RoutingAllocation routingAllocation = newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, Collections.singleton(noRebalanceDecider)), clusterState);
+            new AllocationDeciders(Collections.singleton(noRebalanceDecider)), clusterState);
         MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
         assertEquals(canRebalanceDecision.type(), rebalanceDecision.getClusterRebalanceDecision().type());
         assertEquals(AllocationDecision.fromDecisionType(canRebalanceDecision.type()), rebalanceDecision.getAllocationDecision());
@@ -112,7 +112,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
     }

     public void testRebalancePossible() {
-        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider canAllocateDecider = new AllocationDecider() {
             @Override
             public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                 return Decision.YES;
@@ -127,7 +127,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
     }

     public void testRebalancingNotAllowedDueToCanAllocate() {
-        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider canAllocateDecider = new AllocationDecider() {
             @Override
             public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                 return Decision.NO;
@@ -150,7 +150,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
     }

     public void testDontBalanceShardWhenThresholdNotMet() {
-        AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider canAllocateDecider = new AllocationDecider() {
             @Override
             public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                 return Decision.YES;
@@ -199,7 +199,7 @@ public void testSingleShardBalanceProducesSameResultsAsBalanceStep() {
         }
         clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
-        AllocationDecider allocationDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider allocationDecider = new AllocationDecider() {
             @Override
             public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                 if (excludeNodes.contains(node.nodeId())) {
@@ -208,7 +208,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
                 return Decision.YES;
             }
         };
-        AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider rebalanceDecider = new AllocationDecider() {
             @Override
             public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
                 return Decision.YES;
@@ -216,7 +216,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
         };
         List allocationDeciders = Arrays.asList(rebalanceDecider, allocationDecider);
         RoutingAllocation routingAllocation = newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+            new AllocationDeciders(allocationDeciders), clusterState);
         // allocate and get the node that is now relocating
         BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
         allocator.allocate(routingAllocation);
@@ -229,7 +229,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
             }
         }

-        routingAllocation = newRoutingAllocation(new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+        routingAllocation = newRoutingAllocation(new AllocationDeciders(allocationDeciders), clusterState);
         routingAllocation.debugDecision(true);
         ShardRouting shard = clusterState.getRoutingNodes().activePrimary(shardToRebalance.shardId());
         MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();
@@ -307,7 +307,7 @@ private MoveDecision executeRebalanceFor(final ShardRouting shardRouting, final
         if (Float.compare(-1.0f, threshold) != 0) {
             settings = Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), threshold).build();
         }
-        AllocationDecider allocationDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider allocationDecider = new AllocationDecider() {
             @Override
             public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                 if (noDecisionNodes.contains(node.nodeId())) {
@@ -316,7 +316,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
                 return Decision.YES;
             }
         };
-        AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider rebalanceDecider = new AllocationDecider() {
             @Override
             public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
                 return Decision.YES;
@@ -324,7 +324,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
         };
         BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings);
         RoutingAllocation routingAllocation = newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, Arrays.asList(allocationDecider, rebalanceDecider)), clusterState);
+            new AllocationDeciders(Arrays.asList(allocationDecider, rebalanceDecider)), clusterState);
         return allocator.decideShardAllocation(shardRouting, routingAllocation).getMoveDecision();
     }
@@ -340,7 +340,7 @@ private ClusterState addNodesToClusterState(ClusterState clusterState, int numNo
     private Tuple setupStateAndRebalance(AllocationDecider allocationDecider, Settings balancerSettings,
                                          boolean rebalanceExpected) {
-        AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) {
+        AllocationDecider rebalanceDecider = new AllocationDecider() {
             @Override
             public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
                 return Decision.YES;
@@ -356,7 +356,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca
         clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
         ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard();
         RoutingAllocation routingAllocation = newRoutingAllocation(
-            new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState);
+            new AllocationDeciders(allocationDeciders), clusterState);
         MoveDecision rebalanceDecision = allocator.decideShardAllocation(shard, routingAllocation).getMoveDecision();

         if (rebalanceExpected == false) {
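
`AllocationDecider` loses its `Settings` constructor in the same way, which is why every anonymous decider above shrinks from `new AllocationDecider(Settings.EMPTY)` to a no-arg `new AllocationDecider()`. A condensed sketch of the recurring pattern:

---------------------------------------------------------------------------
// Sketch: a single-purpose test decider plus the no-Settings composite.
AllocationDecider rebalanceDecider = new AllocationDecider() {
    @Override
    public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
        return Decision.YES; // always permit rebalancing in this test
    }
};
RoutingAllocation routingAllocation = newRoutingAllocation(
    new AllocationDeciders(Collections.singleton(rebalanceDecider)), clusterState);
---------------------------------------------------------------------------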
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
index cb47426102dbb..bca086b8fc92b 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -49,7 +49,8 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
     private final Logger logger = LogManager.getLogger(ClusterRebalanceRoutingTests.class);

     public void testAlways() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder()
+            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                 ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -62,10 +63,12 @@ public void testAlways() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
@@ -126,7 +129,8 @@ public void testAlways() {

     public void testClusterPrimariesActive1() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider
+            .CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -139,10 +143,12 @@ public void testClusterPrimariesActive1() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");
         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
             assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
@@ -219,7 +225,8 @@ public void testClusterPrimariesActive1() {
     }

     public void testClusterPrimariesActive2() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider
+            .CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -232,10 +239,12 @@ public void testClusterPrimariesActive2() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+            .add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
@@ -295,7 +304,8 @@ public void testClusterPrimariesActive2() {
     }

     public void testClusterAllActive1() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder()
+            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                 ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -308,10 +318,12 @@ public void testClusterAllActive1() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+            .add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
@@ -402,11 +414,13 @@ public void testClusterAllActive1() {

         routingNodes = clusterState.getRoutingNodes();
         assertThat(routingNodes.node("node3").size(), equalTo(1));
-        assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), anyOf(equalTo("test1"), equalTo("test2")));
+        assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), anyOf(equalTo("test1"),
+            equalTo("test2")));
     }

     public void testClusterAllActive2() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder()
+            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -419,10 +433,12 @@ public void testClusterAllActive2() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
@@ -482,7 +498,8 @@ public void testClusterAllActive2() {
     }

     public void testClusterAllActive3() {
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider
+            .CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

         MetaData metaData = MetaData.builder()
@@ -495,10 +512,12 @@ public void testClusterAllActive3() {
             .addAsNew(metaData.index("test2"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = strategy.reroute(clusterState, "reroute");

         for (int i = 0; i < clusterState.routingTable().index("test1").shards().size(); i++) {
@@ -605,7 +624,8 @@ public void allocateUnassigned(RoutingAllocation allocation) {
             .addAsNew(metaData.index("test1"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
         clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
@@ -650,7 +670,8 @@ public void allocateUnassigned(RoutingAllocation allocation) {
             assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
         }

-        logger.debug("now start initializing shards and expect exactly one rebalance from node1 to node 2 since index [test] is all on node1");
+        logger.debug("now start initializing shards and expect exactly one rebalance"
+            + " from node1 to node2 since index [test] is all on node1");

         routingNodes = clusterState.getRoutingNodes();
         clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING));
@@ -676,7 +697,8 @@ public void allocateUnassigned(RoutingAllocation allocation) {

     public void testRebalanceWhileShardFetching() {
         final AtomicBoolean hasFetches = new AtomicBoolean(true);
-        AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+        AllocationService strategy = createAllocationService(Settings.builder()
+            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new TestGatewayAllocator() {
             @Override
             public void allocateUnassigned(RoutingAllocation allocation) {
@@ -689,16 +711,20 @@ public void allocateUnassigned(RoutingAllocation allocation) {

         MetaData metaData = MetaData.builder()
             .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
-            .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_id", "node1,node2")).numberOfShards(2).numberOfReplicas(0))
+            .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+                .put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_id", "node1,node2"))
+                .numberOfShards(2).numberOfReplicas(0))
             .build();

-        // we use a second index here (test1) that never gets assigned otherwise allocateUnassigned is never called if we don't have unassigned shards.
+        // we use a second index here (test1) that never gets assigned; otherwise allocateUnassigned
+        // is never called if we don't have unassigned shards.
         RoutingTable initialRoutingTable = RoutingTable.builder()
             .addAsNew(metaData.index("test"))
             .addAsNew(metaData.index("test1"))
             .build();

-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
+            .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build();

         logger.info("start two nodes");
         clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
.add(newNode("node9")).add(newNode("node10"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 01cb709568972..1ee136b77b0b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -55,7 +55,8 @@ public void testSimpleDeadNodeOnStartedPrimaryShard() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -102,7 +103,8 @@ public void testDeadNodeWhileRelocatingOnToNode() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -140,8 +142,8 @@ public void testDeadNodeWhileRelocatingOnToNode() { logger.info("--> moving primary shard to node3"); AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands( - new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), - false, false); + new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test") + .shard(0).primaryShard().currentNodeId(), "node3")), false, false); assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); clusterState = commandsResult.getClusterState(); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING)); @@ -171,7 +173,8 @@ public void testDeadNodeWhileRelocatingOnFromNode() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -209,8 +212,8 @@ public void testDeadNodeWhileRelocatingOnFromNode() { logger.info("--> moving primary 
shard to node3"); AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands( - new MoveAllocationCommand("test",0 , clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), - false, false); + new MoveAllocationCommand("test",0 , clusterState.routingTable().index("test") + .shard(0).primaryShard().currentNodeId(), "node3")), false, false); assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); clusterState = commandsResult.getClusterState(); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java index bee2275743bac..8c643c4b1c27e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java @@ -159,8 +159,7 @@ private ClusterState runAllocationTest(final Settings settings, } private static AllocationService newAllocationService(Settings settings, Set deciders) { - return new AllocationService(settings, - new AllocationDeciders(settings, deciders), + return new AllocationService(new AllocationDeciders(deciders), new TestGatewayAllocator(), new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index 8c710d01a30ac..fa28b4b1e3482 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -41,7 +41,8 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest private final Logger logger = LogManager.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class); public void testElectReplicaAsPrimaryDuringRelocation() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -53,10 +54,12 @@ public void testElectReplicaAsPrimaryDuringRelocation() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")) + 
.add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the primary shards"); @@ -75,7 +78,8 @@ public void testElectReplicaAsPrimaryDuringRelocation() { assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2)); logger.info("Start another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node3"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("find the replica shard that gets relocated"); @@ -88,8 +92,10 @@ public void testElectReplicaAsPrimaryDuringRelocation() { // we might have primary relocating, and the test is only for replicas, so only test in the case of replica allocation if (indexShardRoutingTable != null) { - logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId()); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); + logger.info("kill the node [{}] of the primary shard for the relocating replica", + indexShardRoutingTable.primaryShard().currentNodeId()); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); logger.info("make sure all the primary shards are active"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 106e95b677586..e649b8f6c180b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -67,13 +67,16 @@ public Long getShardSize(ShardRouting shardRouting) { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); - assertEquals(1, clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING)); - assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize()); + assertEquals(1, clusterState.getRoutingNodes().node("node1") + .numberOfShardsWithState(ShardRoutingState.INITIALIZING)); + assertEquals(byteSize, clusterState.getRoutingTable() + .shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize()); logger.info("Start the primary shard"); RoutingNodes routingNodes = 
clusterState.getRoutingNodes(); clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); @@ -82,11 +85,14 @@ public Long getShardSize(ShardRouting shardRouting) { assertEquals(1, clusterState.getRoutingNodes().unassigned().size()); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); - assertEquals(1, clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING)); - assertEquals(byteSize, clusterState.getRoutingTable().shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize()); + assertEquals(1, clusterState.getRoutingNodes() + .node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING)); + assertEquals(byteSize, clusterState.getRoutingTable() + .shardsWithState(ShardRoutingState.INITIALIZING).get(0).getExpectedShardSize()); } public void testExpectedSizeOnMove() { @@ -107,10 +113,12 @@ public Long getShardSize(ShardRouting shardRouting) { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); logger.info("start primary shard"); @@ -125,7 +133,8 @@ public Long getShardSize(ShardRouting shardRouting) { toNodeId = "node1"; } AllocationService.CommandsResult commandsResult = - allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false); + allocation.reroute(clusterState, new AllocationCommands( + new MoveAllocationCommand("test", 0, existingNodeId, toNodeId)), false, false); assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); clusterState = commandsResult.getClusterState(); assertEquals(clusterState.getRoutingNodes().node(existingNodeId).iterator().next().state(), ShardRoutingState.RELOCATING); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index b84c65e81673d..3e07d732db7b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -66,7 +66,8 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(FailedNodeRoutingTests.class); public void testSimpleFailedNodeTest() { - AllocationService 
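
The dead-node and expected-shard-size tests above both drive relocation through explicit allocation commands. A sketch of that step, with hypothetical node names:

---------------------------------------------------------------------------
// Sketch: move shard 0 of index "test" and apply the resulting state. The
// two trailing booleans are passed as false throughout these tests.
AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState,
    new AllocationCommands(new MoveAllocationCommand("test", 0, "node1", "node3")),
    false, false);
clusterState = commandsResult.getClusterState();
---------------------------------------------------------------------------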
strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + AllocationService strategy = createAllocationService(Settings.builder() + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() @@ -79,10 +80,12 @@ public void testSimpleFailedNodeTest() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("start 4 nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("start all the primary shards, replicas will start initializing"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 05e77c4cf4ba1..c2d6a67468f3f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -72,7 +72,8 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { RoutingTable routingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -109,23 +110,24 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(); logger.info("--> moving primary shard to node3"); - AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands( - new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), - false, false); + AllocationService.CommandsResult commandsResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand( + "test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), false, false); assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); clusterState = commandsResult.getClusterState(); 
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(RELOCATING)); assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING)); logger.info("--> fail primary shard recovering instance on node3 being initialized"); - clusterState = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node("node3").iterator().next(), randomBoolean()); + clusterState = allocation.applyFailedShard(clusterState, + clusterState.getRoutingNodes().node("node3").iterator().next(), randomBoolean()); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED)); assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(0)); logger.info("--> moving primary shard to node3"); commandsResult = allocation.reroute(clusterState, new AllocationCommands( - new MoveAllocationCommand("test", 0, clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), + new MoveAllocationCommand("test", 0, + clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3")), false, false); assertThat(commandsResult.getClusterState(), not(equalTo(clusterState))); clusterState = commandsResult.getClusterState(); @@ -133,12 +135,14 @@ public void testFailedShardPrimaryRelocatingToAndFrom() { assertThat(clusterState.getRoutingNodes().node("node3").iterator().next().state(), equalTo(INITIALIZING)); logger.info("--> fail primary shard recovering instance on node1 being relocated"); - clusterState = allocation.applyFailedShard(clusterState, clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next(), randomBoolean()); + clusterState = allocation.applyFailedShard(clusterState, + clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next(), randomBoolean()); // check promotion of replica to primary assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(origReplicaNodeId)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(origPrimaryNodeId), equalTo("node3"))); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + anyOf(equalTo(origPrimaryNodeId), equalTo("node3"))); } public void testFailPrimaryStartedCheckReplicaElected() { @@ -157,10 +161,12 @@ public void testFailPrimaryStartedCheckReplicaElected() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the shards (primaries)"); @@ -176,10 +182,12 @@ public void testFailPrimaryStartedCheckReplicaElected() { 
assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), + anyOf(equalTo("node1"), equalTo("node2"))); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING)); - assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1"))); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), + anyOf(equalTo("node2"), equalTo("node1"))); } logger.info("Start the shards (backups)"); @@ -193,10 +201,12 @@ public void testFailPrimaryStartedCheckReplicaElected() { assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), + anyOf(equalTo("node1"), equalTo("node2"))); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1"))); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), + anyOf(equalTo("node2"), equalTo("node1"))); } logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned"); @@ -207,9 +217,11 @@ public void testFailPrimaryStartedCheckReplicaElected() { assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2)); - assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId()))); + assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), + not(equalTo(shardToFail.currentNodeId()))); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); + assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), + anyOf(equalTo("node1"), equalTo("node2"))); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED)); } @@ -230,7 +242,8 @@ public void 
testFirstAllocationFailureSingleNode() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding single node and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); @@ -274,14 +287,16 @@ public void testSingleShardMultipleAllocationFailures() { logger.info("Building initial routing table"); int numberOfReplicas = scaledRandomIntBetween(2, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas)) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(numberOfReplicas)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1); DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(); @@ -344,10 +359,12 @@ public void testFirstAllocationFailureTwoNodes() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); ClusterState newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(clusterState)); clusterState = newState; @@ -399,10 +416,12 @@ public void testRebalanceFailure() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the shards (primaries)"); @@ -416,10 +435,12 @@ public void testRebalanceFailure() { assertThat(clusterState.routingTable().index("test").shard(i).size(), 
equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), + anyOf(equalTo("node1"), equalTo("node2"))); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING)); - assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1"))); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), + anyOf(equalTo("node2"), equalTo("node1"))); } logger.info("Start the shards (backups)"); @@ -433,14 +454,17 @@ public void testRebalanceFailure() { assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).shards().size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2"))); + assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), + anyOf(equalTo("node1"), equalTo("node2"))); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1"))); + assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), + anyOf(equalTo("node2"), equalTo("node1"))); } logger.info("Adding third node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node3"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -489,7 +513,8 @@ public void testFailAllReplicasInitializingOnPrimaryFail() { ShardId shardId = new ShardId(metaData.index("test").getIndex(), 0); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); @@ -532,10 +557,12 @@ public void 
testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")) + .add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); @@ -545,7 +572,8 @@ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); // start another replica shard, while keeping one initializing - clusterState = allocation.applyStartedShards(clusterState, Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))); + clusterState = allocation.applyStartedShards(clusterState, + Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); @@ -579,7 +607,8 @@ public void testReplicaOnNewestVersionIsPromoted() { DiscoveryNodes.builder() .add(newNode("node1-5.x", Version.fromId(5060099)))) .build(); - clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); + clusterState = ClusterState.builder(clusterState) + .routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(3)); @@ -641,8 +670,8 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.value.getVersion(); - assertTrue("expected node [" + cursor.value.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer)); + assertTrue("expected node [" + cursor.value.getId() + "] with version " + nodeVer + + " to be before " + replicaNodeVersion, replicaNodeVersion.onOrAfter(nodeVer)); startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); @@ -671,8 +700,8 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.value.getVersion(); - assertTrue("expected node [" + cursor.value.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer)); + assertTrue("expected node [" + cursor.value.getId() + "] with version " + + nodeVer + " to be before " + replicaNodeVersion, replicaNodeVersion.onOrAfter(nodeVer)); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index 6086482a442fc..a44f4c4f913cf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -51,12 +51,15 @@ public void testBalanceAllNodesStarted() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) + MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(3).numberOfReplicas(1)) .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); - RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); + RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -80,7 +83,8 @@ public void testBalanceAllNodesStarted() { logger.info("Adding three nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")) + .add(newNode("node3"))).build(); ClusterState newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); @@ -167,12 +171,16 @@ public void testBalanceIncrementallyStartNodes() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); + MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(3).numberOfReplicas(1)) + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)) + .numberOfShards(3).numberOfReplicas(1)).build(); - RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); + RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING +
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -311,11 +319,13 @@ public void testBalanceAllNodesStartedAddIndex() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); + MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(3).numberOfReplicas(1)).build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(3)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 994ee8f1438a1..2ce0b7b89bec2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -54,8 +54,8 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase { @Override public void setUp() throws Exception { super.setUp(); - strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + strategy = new AllocationService(new AllocationDeciders( + Collections.singleton(new MaxRetryAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); } @@ -175,7 +175,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.unassignedInfo().getNumFailedAllocations(), i+1); assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom" + i)); // MaxRetryAllocationDecider#canForceAllocatePrimary should return YES decisions because canAllocate returns YES here - assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( + assertEquals(Decision.YES, new MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } // now we go and check that we actually stick to unassigned on the next failure @@ -193,7 +193,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.state(), UNASSIGNED); assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom")); // MaxRetryAllocationDecider#canForceAllocatePrimary should return a NO decision because canAllocate returns NO here - assertEquals(Decision.NO, new
MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } @@ -215,7 +215,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.state(), INITIALIZING); assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom")); // bumped up the max retry count, so canForceAllocatePrimary should return a YES decision - assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( + assertEquals(Decision.YES, new MaxRetryAllocationDecider().canForceAllocatePrimary( routingTable.index("idx").shard(0).shards().get(0), null, new RoutingAllocation(null, null, clusterState, null, 0))); // now we start the shard @@ -242,7 +242,7 @@ public void testFailedAllocation() { assertEquals(unassignedPrimary.state(), UNASSIGNED); assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("ZOOOMG")); // Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a YES decision - assertEquals(Decision.YES, new MaxRetryAllocationDecider(Settings.EMPTY).canForceAllocatePrimary( + assertEquals(Decision.YES, new MaxRetryAllocationDecider().canForceAllocatePrimary( unassignedPrimary, null, new RoutingAllocation(null, null, clusterState, null, 0))); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 89d19e03957a5..7a1c901671c93 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -95,7 +95,8 @@ public void testDoNotAllocateFromPrimary() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -109,7 +110,8 @@ public void testDoNotAllocateFromPrimary() { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -187,7 +189,8 @@ public void testRandom() { RoutingTable.Builder rtBuilder = RoutingTable.builder(); int numIndices = between(1, 20); for (int i = 0; i < numIndices; i++) { - builder.put(IndexMetaData.builder("test_" + i).settings(settings(Version.CURRENT)).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2))); + builder.put(IndexMetaData.builder("test_" + i).settings(settings(Version.CURRENT)) + .numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2))); } MetaData metaData = 
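// A minimal sketch (not part of the patch) of the constructor change these hunks apply everywhere:
// AllocationDeciders and the individual decider classes no longer take Settings, and
// AllocationService drops its leading Settings parameter. The wiring below mirrors the setUp()
// method above and uses only types that already appear in the surrounding tests.
import java.util.Collections;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.gateway.TestGatewayAllocator;

class AllocationServiceWiringSketch {
    static AllocationService newAllocationService() {
        return new AllocationService(
            new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), // no Settings argument
            new TestGatewayAllocator(),
            new BalancedShardsAllocator(Settings.EMPTY), // BalancedShardsAllocator still takes Settings
            EmptyClusterInfoService.INSTANCE);
    }
}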
builder.build(); @@ -196,7 +199,8 @@ public void testRandom() { } RoutingTable routingTable = rtBuilder.build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(routingTable.allShards().size())); List nodes = new ArrayList<>(); int nodeIdx = 0; @@ -241,7 +245,8 @@ public void testRollingRestart() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -304,20 +309,26 @@ public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNode AllocationId allocationId2P = AllocationId.newInitializing(); AllocationId allocationId2R = AllocationId.newInitializing(); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shard1.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId()))) - .put(IndexMetaData.builder(shard2.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1).numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId()))) + .put(IndexMetaData.builder(shard1.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1) + .numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId()))) + .put(IndexMetaData.builder(shard2.getIndexName()).settings(settings(Version.CURRENT).put(Settings.EMPTY)).numberOfShards(1) + .numberOfReplicas(1).putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId()))) .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shard1.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shard1) - .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId1P)) - .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId1R)) + .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), newNode.getId(), + null, true, ShardRoutingState.STARTED, allocationId1P)) + .addShard(TestShardRouting.newShardRouting(shard1.getIndexName(), shard1.getId(), oldNode1.getId(), + null, false, ShardRoutingState.STARTED, allocationId1R)) .build()) ) .add(IndexRoutingTable.builder(shard2.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shard2) - .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), 
newNode.getId(), null, true, ShardRoutingState.STARTED, allocationId2P)) - .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), null, false, ShardRoutingState.STARTED, allocationId2R)) + .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), newNode.getId(), + null, true, ShardRoutingState.STARTED, allocationId2P)) + .addShard(TestShardRouting.newShardRouting(shard2.getIndexName(), shard2.getId(), oldNode1.getId(), + null, false, ShardRoutingState.STARTED, allocationId2R)) .build()) ) .build(); @@ -325,8 +336,9 @@ public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNode .metaData(metaData) .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); - AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Collections.singleton(new NodeVersionAllocationDecider(Settings.EMPTY))); - AllocationService strategy = new MockAllocationService(Settings.EMPTY, + AllocationDeciders allocationDeciders = new AllocationDeciders( + Collections.singleton(new NodeVersionAllocationDecider())); + AllocationService strategy = new MockAllocationService( allocationDeciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState(); @@ -357,10 +369,10 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()) .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); - AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList( - new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), - new NodeVersionAllocationDecider(Settings.EMPTY))); - AllocationService strategy = new MockAllocationService(Settings.EMPTY, + AllocationDeciders allocationDeciders = new AllocationDeciders(Arrays.asList( + new ReplicaAfterPrimaryActiveAllocationDecider(), + new NodeVersionAllocationDecider())); + AllocationService strategy = new MockAllocationService( allocationDeciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState(); @@ -401,16 +413,16 @@ private void assertRecoveryNodeVersions(RoutingNodes routingNodes) { String fromId = r.currentNodeId(); assertThat(fromId, notNullValue()); assertThat(toId, notNullValue()); - logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().getVersion(), - toId, routingNodes.node(toId).node().getVersion()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, + routingNodes.node(fromId).node().getVersion(), toId, routingNodes.node(toId).node().getVersion()); assertTrue(routingNodes.node(toId).node().getVersion().onOrAfter(routingNodes.node(fromId).node().getVersion())); } else { ShardRouting primary = routingNodes.activePrimary(r.shardId()); assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.relocatingNodeId(); - logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().getVersion(), - toId, routingNodes.node(toId).node().getVersion()); + logger.trace("From: {} with Version: {} to: {} 
with Version: {}", fromId, + routingNodes.node(fromId).node().getVersion(), toId, routingNodes.node(toId).node().getVersion()); assertTrue(routingNodes.node(toId).node().getVersion().onOrAfter(routingNodes.node(fromId).node().getVersion())); } } @@ -422,8 +434,8 @@ private void assertRecoveryNodeVersions(RoutingNodes routingNodes) { assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.currentNodeId(); - logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().getVersion(), - toId, routingNodes.node(toId).node().getVersion()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, + routingNodes.node(fromId).node().getVersion(), toId, routingNodes.node(toId).node().getVersion()); assertTrue(routingNodes.node(toId).node().getVersion().onOrAfter(routingNodes.node(fromId).node().getVersion())); } } @@ -450,10 +462,11 @@ public void testMessages() { final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().get(0); - RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, + null, 0); routingAllocation.debugDecision(true); - final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(Settings.EMPTY); + final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(); Decision decision = allocationDecider.canAllocate(primaryShard, newNode, routingAllocation); assertThat(decision.type(), is(Decision.Type.YES)); assertThat(decision.getExplanation(), is("the primary shard is new or already existed on the node")); @@ -469,9 +482,11 @@ public void testMessages() { newNode.node().getVersion() + "] to a node with older version [" + oldNode.node().getVersion() + "]")); final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource( - new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), newNode.node().getVersion(), "test"); + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), + newNode.node().getVersion(), "test"); final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource( - new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), oldNode.node().getVersion(), "test"); + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), + oldNode.node().getVersion(), "test"); decision = allocationDecider.canAllocate(ShardRoutingHelper.newWithRestoreSource(primaryShard, newVersionSnapshot), oldNode, routingAllocation); @@ -487,7 +502,8 @@ public void testMessages() { final RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver.AbstractRoutingChangesObserver(); final RoutingNodes routingNodes = new RoutingNodes(clusterState, false); - final ShardRouting startedPrimary = routingNodes.startShard(logger, routingNodes.initializeShard(primaryShard, "newNode", null, 0, + final ShardRouting startedPrimary = routingNodes.startShard(logger, + routingNodes.initializeShard(primaryShard, "newNode", null, 0, routingChangesObserver), routingChangesObserver); routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, 0); routingAllocation.debugDecision(true); @@ -497,8 +513,8 @@ public void 
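// The assertions being rewrapped in assertRecoveryNodeVersions all check a single invariant: a
// shard copy may only recover onto a node whose version is on or after the version of the node
// it recovers from. A sketch of that rule in isolation (DiscoveryNode is the type the tests use):
import org.elasticsearch.cluster.node.DiscoveryNode;

class RecoveryVersionRuleSketch {
    /** The target of a recovery must run the same or a newer version than its source. */
    static boolean recoveryAllowed(DiscoveryNode from, DiscoveryNode to) {
        return to.getVersion().onOrAfter(from.getVersion());
    }
}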
testMessages() { assertThat(decision.getExplanation(), is("cannot allocate replica shard to a node with version [" + oldNode.node().getVersion() + "] since this is older than the primary version [" + newNode.node().getVersion() + "]")); - routingNodes.startShard(logger, routingNodes.relocateShard(startedPrimary, "oldNode", 0, routingChangesObserver).v2(), - routingChangesObserver); + routingNodes.startShard(logger, routingNodes.relocateShard(startedPrimary, + "oldNode", 0, routingChangesObserver).v2(), routingChangesObserver); routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, 0); routingAllocation.debugDecision(true); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index cf26df9000220..b47638884e883 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -43,7 +43,8 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { int numberOfShards = randomIntBetween(5, 20); int totalNumberOfShards = numberOfShards * 2; - logger.info("create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries", primaryRecoveries, concurrentRecoveries); + logger.info("create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries", + primaryRecoveries, concurrentRecoveries); AllocationService strategy = createAllocationService(Settings.builder() .put("cluster.routing.allocation.node_concurrent_recoveries", concurrentRecoveries) .put("cluster.routing.allocation.node_initial_primaries_recoveries", primaryRecoveries) @@ -52,8 +53,10 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { logger.info("create 2 indices with [{}] no replicas, and wait till all are allocated", numberOfShards); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0)) - .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0)) + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards).numberOfReplicas(0)) + .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards).numberOfReplicas(0)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder() @@ -61,7 +64,8 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("adding two nodes and performing rerouting till all are allocated"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -88,10 +92,12 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { .put("index.routing.allocation.exclude._name", "node2") .build())) 
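// Many hunks in this patch do nothing but rewrap one long construction to satisfy the line-length
// check. For readability, this is the idiom being wrapped, shown once unwrapped (a sketch; the
// metaData and initialRoutingTable parameters stand in for the tests' local variables):
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.settings.Settings;

class InitialClusterStateSketch {
    static ClusterState initialState(MetaData metaData, RoutingTable initialRoutingTable) {
        return ClusterState
            .builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .metaData(metaData)
            .routingTable(initialRoutingTable)
            .build();
    }
}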
.build(); - clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); + clusterState = ClusterState.builder(clusterState).metaData(metaData) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); - logger.info("[{}] primaries should still be started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards); + logger.info("[{}] primaries should still be started but [{}] other primaries should be unassigned", + numberOfShards, numberOfShards); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(numberOfShards)); @@ -111,8 +117,10 @@ public void testPreferLocalPrimaryAllocationOverFiltered() { relocatingInitializations++; } } - int needToInitialize = totalNumberOfShards - clusterState.getRoutingNodes().shardsWithState(STARTED).size() - clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(); - logger.info("local initializations: [{}], relocating: [{}], need to initialize: {}", localInitializations, relocatingInitializations, needToInitialize); + int needToInitialize = totalNumberOfShards - clusterState.getRoutingNodes().shardsWithState(STARTED).size() + - clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(); + logger.info("local initializations: [{}], relocating: [{}], need to initialize: {}", + localInitializations, relocatingInitializations, needToInitialize); assertThat(localInitializations, equalTo(Math.min(primaryRecoveries, needToInitialize))); clusterState = startRandomInitializingShard(clusterState, strategy); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index cdd868c158eeb..d54d798544c84 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -57,10 +57,12 @@ public void testPreferPrimaryAllocationOverReplicas() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("adding two nodes and performing rerouting till all are allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); while (!clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index f306184c5764f..d6220ade9ef34 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -41,7 +41,8 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(PrimaryElectionRoutingTests.class); public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -53,13 +54,15 @@ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); logger.info("Start the primary shard (on node1)"); @@ -71,7 +74,8 @@ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() clusterState = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)); logger.info("Adding third node and reroute and kill first node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3")).remove("node1")).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node3")).remove("node1")).build(); RoutingTable prevRoutingTable = clusterState.routingTable(); clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); @@ -89,7 +93,8 @@ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() } public void testRemovingInitializingReplicasIfPrimariesFails() { - AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService allocation = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -101,10 +106,12 @@ public void testRemovingInitializingReplicasIfPrimariesFails() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")) + .add(newNode("node2"))).build(); clusterState = allocation.reroute(clusterState, "reroute"); logger.info("Start the primary shards"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index dcca97369e7f9..a3fddda7b6e33 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -55,7 +55,8 @@ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding two nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); @@ -68,14 +69,16 @@ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() { assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); logger.info("start another node, replica will start recovering from primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); assertThat(clusterState.routingTable().shardsWithState(INITIALIZING).size(), equalTo(5)); logger.info("start another node, make sure the primary is not relocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index ce26e41e053aa..fe7c4a89c9fa2 ---
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -58,10 +58,11 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { * balance.*/ public void testRandomDecisions() { RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random()); - AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, + AllocationService strategy = new AllocationService(new AllocationDeciders( new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), - randomAllocationDecider))), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + new ReplicaAfterPrimaryActiveAllocationDecider(), randomAllocationDecider))), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); int indices = scaledRandomIntBetween(1, 20); Builder metaBuilder = MetaData.builder(); int maxNumReplicas = 1; @@ -71,7 +72,8 @@ public void testRandomDecisions() { maxNumReplicas = Math.max(maxNumReplicas, replicas + 1); int numShards = scaledRandomIntBetween(1, 20); totalNumShards += numShards * (replicas + 1); - metaBuilder.put(IndexMetaData.builder("INDEX_" + i).settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(replicas)); + metaBuilder.put(IndexMetaData.builder("INDEX_" + i).settings(settings(Version.CURRENT)) + .numberOfShards(numShards).numberOfReplicas(replicas)); } MetaData metaData = metaBuilder.build(); @@ -81,7 +83,8 @@ public void testRandomDecisions() { } RoutingTable initialRoutingTable = routingTableBuilder.build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); int numIters = scaledRandomIntBetween(5, 15); int nodeIdCounter = 0; int atMostNodes = scaledRandomIntBetween(Math.max(1, maxNumReplicas), 15); @@ -175,7 +178,8 @@ public void testRandomDecisions() { continue; } assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf( - Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))), + Matchers.anyOf(equalTo((shards / numNodes) + 1), + equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))), Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound)))); } } @@ -185,7 +189,6 @@ public static final class RandomAllocationDecider extends AllocationDecider { private final Random random; public RandomAllocationDecider(Random random) { - super(Settings.EMPTY); this.random = random; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index 565f9c919d0e2..ff54fa06095f2 100644 
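// The RandomAllocationDecider hunk above drops its super(Settings.EMPTY) call: AllocationDecider
// itself no longer has a Settings-taking constructor, so a custom decider is now a plain subclass.
// A sketch of the same shape (the Random field mirrors the decider in the test above):
import java.util.Random;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;

class RandomDeciderSketch extends AllocationDecider {
    private final Random random;

    RandomDeciderSketch(Random random) {
        this.random = random; // no super(Settings.EMPTY) call needed any more
    }
}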
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -75,7 +75,8 @@ public Long getShardSize(ShardRouting shardRouting) { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(5)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -87,7 +88,8 @@ public Long getShardSize(ShardRouting shardRouting) { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -110,7 +112,8 @@ public Long getShardSize(ShardRouting shardRouting) { logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) + .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")) + .add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); clusterState = strategy.reroute(clusterState, "reroute"); routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index 7a90f93516ab1..5e61b35b5ec48 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -42,7 +42,8 @@ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(ReplicaAllocatedAfterPrimaryTests.class); public void testBackupIsAllocatedAfterPrimary() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -54,7 +55,8 @@ public void testBackupIsAllocatedAfterPrimary() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); assertThat(routingTable.index("test").shards().size(), equalTo(1)); assertThat(routingTable.index("test").shard(0).size(), equalTo(2)); @@ -65,7 +67,8 @@ public void testBackupIsAllocatedAfterPrimary() { assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -86,7 +89,8 @@ public void testBackupIsAllocatedAfterPrimary() { logger.info("Start all the primary shards"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); prevRoutingTable = routingTable; - routingTable = strategy.applyStartedShards(clusterState, routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable(); + routingTable = strategy.applyStartedShards(clusterState, + routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(); assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica))); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index eeec65f0e2e29..c64bc51bd5b7c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -55,8 +55,8 @@ public class ResizeAllocationDeciderTests extends ESAllocationTestCase { @Override public void setUp() throws Exception { super.setUp(); - strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new ResizeAllocationDecider(Settings.EMPTY))), + strategy = new AllocationService(new AllocationDeciders( + Collections.singleton(new ResizeAllocationDecider())), new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); } @@ -108,7 +108,7 @@ private ClusterState createInitialClusterState(boolean startShards, Version node public void testNonResizeRouting() { ClusterState clusterState = createInitialClusterState(true); - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); ShardRouting shardRouting = TestShardRouting.newShardRouting("non-resize", 0, null, true, ShardRoutingState.UNASSIGNED); assertEquals(Decision.ALWAYS, 
resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); @@ -132,7 +132,7 @@ public void testShrink() { // we don't handle shrink yet .metaData(metaData).build(); Index idx = clusterState.metaData().index("target").getIndex(); - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, ShardRoutingState.UNASSIGNED, RecoverySource.LocalShardsRecoverySource.INSTANCE); @@ -160,7 +160,7 @@ public void testSourceNotActive() { Index idx = clusterState.metaData().index("target").getIndex(); - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); @@ -200,7 +200,7 @@ public void testSourcePrimaryActive() { Index idx = clusterState.metaData().index("target").getIndex(); - ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(); RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index bc88158356c63..deed0b2b2fb72 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -51,12 +51,15 @@ public void testBalanceAllNodesStarted() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); - RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); + RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + 
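// The ResizeAllocationDeciderTests hunks repeat one pattern after the change: construct the
// decider without Settings and ask it for a decision. A sketch of that call sequence (the nulls
// stand in for collaborators the decider does not consult, exactly as in the tests):
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDecider;

class ResizeDecisionSketch {
    static Decision decideFor(ClusterState clusterState, ShardRouting shardRouting) {
        ResizeAllocationDecider decider = new ResizeAllocationDecider(); // was new ResizeAllocationDecider(Settings.EMPTY)
        RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, null, 0);
        return decider.canAllocate(shardRouting, allocation);
    }
}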
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding three nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState) @@ -111,12 +114,15 @@ public void testBalanceIncrementallyStartNodes() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) - .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)) + .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); - RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); + RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding one node and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); @@ -191,15 +197,17 @@ public void testBalanceAllNodesStartedAddIndex() { logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding three nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")) + .add(newNode("node2")).add(newNode("node3"))).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -324,7 +332,8 @@ public void testBalanceAllNodesStartedAddIndex() { logger.info("kill one node"); IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0); - clusterState =
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); clusterState = strategy.deassociateDeadNodes(clusterState, true, "reroute"); routingNodes = clusterState.getRoutingNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index f059125f1ea3d..9856bd064ca72 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -105,7 +105,7 @@ public void testForceAllocatePrimaryOnSameNodeNotAllowed() { Index index = clusterState.getMetaData().index("idx").getIndex(); ShardRouting primaryShard = clusterState.routingTable().index(index).shard(0).primaryShard(); RoutingNode routingNode = clusterState.getRoutingNodes().node(primaryShard.currentNodeId()); - RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), + RoutingAllocation routingAllocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime()); // can't force allocate same shard copy to the same node diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index 3cf53e60c4844..fb3f75aad5f21 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -41,7 +41,8 @@ public class ShardVersioningTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(ShardVersioningTests.class); public void testSimple() { - AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), + AllocationService strategy = createAllocationService(Settings.builder() + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() @@ -54,10 +55,12 @@ public void testSimple() { .addAsNew(metaData.index("test2")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git 
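// SameShardRoutingTests above shows the no-decider case: after this change an empty collection is
// the only argument AllocationDeciders needs. A sketch of that construction, passing the same
// collaborators the test passes:
import java.util.Collections;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;

class EmptyDecidersSketch {
    static RoutingAllocation noDeciders(ClusterState clusterState) {
        return new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), // was (Settings.EMPTY, ...)
            new RoutingNodes(clusterState, false), clusterState, ClusterInfo.EMPTY, System.nanoTime());
    }
}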
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 314318fc29f0d..87339868e4c2c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -44,7 +44,8 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(ShardsLimitAllocationTests.class); public void testIndexLevelShardsLimitAllocate() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -59,9 +60,11 @@ public void testIndexLevelShardsLimitAllocate() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2)); @@ -100,9 +103,11 @@ public void testClusterLevelShardsLimitAllocate() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1)); @@ -159,7 +164,8 @@ public void testIndexLevelShardsLimitRemain() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + 
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("Adding one node and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); @@ -184,7 +190,8 @@ public void testIndexLevelShardsLimitRemain() { clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(updatedRoutingTable).build(); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); routingNodes = clusterState.getRoutingNodes(); @@ -199,7 +206,8 @@ public void testIndexLevelShardsLimitRemain() { assertThat(shardRouting.getIndexName(), equalTo("test1")); } - logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()); + logger.info("update {} for test, see that things move", + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()); metaData = MetaData.builder(clusterState.metaData()) .put(IndexMetaData.builder(clusterState.metaData().index("test")).settings(settings(Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index 7bdad46d61c11..c674b8c3a292d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -53,7 +53,8 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(SingleShardNoReplicasRoutingTests.class); public void testSingleIndexStartedShard() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -63,7 +64,8 @@ public void testSingleIndexStartedShard() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1)); @@ -100,7 +102,8 @@ public void testSingleIndexStartedShard() { assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); logger.info("Starting another node and making sure nothing 
changed"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); clusterState = newState; @@ -126,7 +129,8 @@ public void testSingleIndexStartedShard() { logger.info("Bring node1 back, and see it's assinged"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node1"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -139,7 +143,8 @@ public void testSingleIndexStartedShard() { logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); @@ -157,7 +162,8 @@ public void testSingleIndexStartedShard() { } public void testSingleIndexShardFailed() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -168,7 +174,8 @@ public void testSingleIndexShardFailed() { RoutingTable.Builder routingTableBuilder = RoutingTable.builder() .addAsNew(metaData.index("test")); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(1)); @@ -191,7 +198,8 @@ public void testSingleIndexShardFailed() { logger.info("Marking the shard as failed"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); - newState = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0), randomBoolean()); + newState = strategy.applyFailedShard(clusterState, + routingNodes.node("node1").shardsWithState(INITIALIZING).get(0), randomBoolean()); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -214,7 +222,8 @@ public void testMultiIndexEvenDistribution() { MetaData.Builder metaDataBuilder = MetaData.builder(); for (int i = 0; i < numberOfIndices; i++) { - metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)); + metaDataBuilder.put(IndexMetaData.builder("test" + i) + 
.settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)); } MetaData metaData = metaDataBuilder.build(); @@ -222,7 +231,8 @@ public void testMultiIndexEvenDistribution() { for (int i = 0; i < numberOfIndices; i++) { routingTableBuilder.addAsNew(metaData.index("test" + i)); } - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); assertThat(clusterState.routingTable().indicesRouting().size(), equalTo(numberOfIndices)); for (int i = 0; i < numberOfIndices; i++) { @@ -291,7 +301,8 @@ public void testMultiIndexEvenDistribution() { assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false)); - assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING))); + assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), + anyOf(equalTo(STARTED), equalTo(RELOCATING))); if (clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state() == STARTED) { numberOfStartedShards++; } else if (clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state() == RELOCATING) { @@ -319,7 +330,8 @@ public void testMultiIndexUnevenNodes() { MetaData.Builder metaDataBuilder = MetaData.builder(); for (int i = 0; i < numberOfIndices; i++) { - metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)); + metaDataBuilder.put(IndexMetaData.builder("test" + i) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)); } MetaData metaData = metaDataBuilder.build(); @@ -328,7 +340,8 @@ public void testMultiIndexUnevenNodes() { routingTableBuilder.addAsNew(metaData.index("test" + i)); } - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); assertThat(clusterState.routingTable().indicesRouting().size(), equalTo(numberOfIndices)); @@ -369,7 +382,8 @@ public void testMultiIndexUnevenNodes() { assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1)); - assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED))); + assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), + anyOf(equalTo(RELOCATING), equalTo(STARTED))); } routingNodes = clusterState.getRoutingNodes(); assertThat("4 source shard routings are relocating", 
numberOfShardsOfType(routingNodes, RELOCATING), equalTo(4)); @@ -385,7 +399,8 @@ public void testMultiIndexUnevenNodes() { assertThat(clusterState.routingTable().index("test" + i).shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1)); assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().size(), equalTo(1)); - assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED))); + assertThat(clusterState.routingTable().index("test" + i).shard(0).shards().get(0).state(), + anyOf(equalTo(RELOCATING), equalTo(STARTED))); } routingNodes = clusterState.getRoutingNodes(); assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index ac8f1af219b3f..1bbbb2c8b91a3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -42,7 +42,8 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(SingleShardOneReplicaRoutingTests.class); public void testSingleIndexFirstStartPrimaryThenBackups() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -54,7 +55,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2)); @@ -81,7 +83,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue()); logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); @@ -97,9 +100,11 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED)); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), 
equalTo("node1")); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); - // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + // backup shards are initializing as well, we make sure that they recover + // from primary *started* shards in the IndicesClusterStateService assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2")); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + equalTo("node2")); logger.info("Reroute, nothing should change"); @@ -119,7 +124,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1")); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2")); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + equalTo("node2")); logger.info("Kill node1, backup shard should become primary"); @@ -134,13 +140,15 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED)); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2")); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); - // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + // backup shards are initializing as well, we make sure that they + // recover from primary *started* shards in the IndicesClusterStateService assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue()); logger.info("Start another node, backup shard should start initializing"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -151,8 +159,10 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED)); assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2")); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); - // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + // backup shards are initializing as well, we make sure that they + // recover from 
primary *started* shards in the IndicesClusterStateService assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3")); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + equalTo("node3")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index ee9d69a220593..d39912f0b1e6a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -62,8 +62,10 @@ public void testStartedShardsMatching() { .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .metaData(MetaData.builder().put(indexMetaData, false)); - final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING); - final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node1", "node2", true, ShardRoutingState.RELOCATING, allocationId); + final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", + true, ShardRoutingState.INITIALIZING); + final ShardRouting relocatingShard = TestShardRouting.newShardRouting(new ShardId(index, 1), "node1", + "node2", true, ShardRoutingState.RELOCATING, allocationId); stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) .addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build()); @@ -73,7 +75,8 @@ public void testStartedShardsMatching() { logger.info("--> test starting of shard"); ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(initShard)); - assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); + assertThat("failed to start " + initShard + "\ncurrent routing table:" + + newState.routingTable(), newState, not(equalTo(state))); assertTrue(initShard + "isn't started \ncurrent routing table:" + newState.routingTable(), newState.routingTable().index("test").shard(initShard.id()).allShardsStarted()); state = newState; @@ -107,11 +110,12 @@ public void testRelocatingPrimariesWithInitializingReplicas() { .build(); final Index index = indexMetaData.getIndex(); ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")) + .add(newNode("node3")).add(newNode("node4"))) .metaData(MetaData.builder().put(indexMetaData, false)); - final ShardRouting relocatingPrimary = TestShardRouting.newShardRouting( - new ShardId(index, 0), "node1", "node2", true, ShardRoutingState.RELOCATING, primaryId); + final ShardRouting relocatingPrimary = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", + "node2", true, ShardRoutingState.RELOCATING, primaryId); final 
ShardRouting replica = TestShardRouting.newShardRouting( new ShardId(index, 0), "node3", relocatingReplica ? "node4" : null, false, relocatingReplica ? ShardRoutingState.RELOCATING : ShardRoutingState.INITIALIZING, replicaId); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index a7179bfba7870..1a5127bcda501 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -65,7 +65,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(clusterState.routingTable().index("test").shards().size(), equalTo(10)); for (int i = 0; i < clusterState.routingTable().index("test").shards().size(); i++) { @@ -96,7 +97,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { } logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) + .add(newNode("node2"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, equalTo(clusterState)); @@ -113,7 +115,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED)); assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1")); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1)); - // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService + // backup shards are initializing as well, we make sure that they + // recover from primary *started* shards in the IndicesClusterStateService assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING)); assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2")); } @@ -143,7 +146,8 @@ public void testSingleIndexFirstStartPrimaryThenBackups() { assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10)); logger.info("Add another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 769399a804998..b67e0ccae22b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -157,7 +157,8 @@ public void testReplicaAndPrimaryRecoveryThrottling() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(5)); logger.info("start another node, replicas should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(5)); @@ -212,7 +213,8 @@ public void testThrottleIncomingAndOutgoing() { clusterState = strategy.applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)); logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(4)); @@ -266,7 +268,8 @@ public void testOutgoingThrottlesAllocation() { assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(2)); logger.info("start one more node, first non-primary should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(1)); @@ -282,7 +285,8 @@ public void testOutgoingThrottlesAllocation() { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); logger.info("start one more node, initializing second non-primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertThat(clusterState.routingTable().shardsWithState(STARTED).size(), equalTo(2)); @@ -291,7 +295,8 @@ public void testOutgoingThrottlesAllocation() { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); logger.info("start one more node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState) + 
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 44da514c0b5c5..0d063d7623410 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -43,7 +43,8 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { private final Logger logger = LogManager.getLogger(UpdateNumberOfReplicasTests.class); public void testUpdateNumberOfReplicas() { - AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10).build()); logger.info("Building initial routing table"); @@ -55,7 +56,8 @@ public void testUpdateNumberOfReplicas() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); assertThat(initialRoutingTable.index("test").shards().size(), equalTo(1)); assertThat(initialRoutingTable.index("test").shard(0).size(), equalTo(2)); @@ -67,7 +69,8 @@ public void testUpdateNumberOfReplicas() { logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = strategy.reroute(clusterState, "reroute"); @@ -91,7 +94,8 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica)); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + equalTo(nodeHoldingReplica)); logger.info("add another replica"); @@ -110,11 +114,13 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED)); - 
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica)); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + equalTo(nodeHoldingReplica)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED)); logger.info("Add another node and start the added replica"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); newState = strategy.reroute(clusterState, "reroute"); assertThat(newState, not(equalTo(clusterState))); clusterState = newState; @@ -125,9 +131,11 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica)); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), + equalTo(nodeHoldingReplica)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3")); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), + equalTo("node3")); routingNodes = clusterState.getRoutingNodes(); newState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); @@ -140,8 +148,10 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), + anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), + anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); logger.info("now remove a replica"); routingNodes = clusterState.getRoutingNodes(); @@ -157,7 +167,8 @@ public void testUpdateNumberOfReplicas() { assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary)); 
assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1)); assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED)); - assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); + assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), + anyOf(equalTo(nodeHoldingReplica), equalTo("node3"))); logger.info("do a reroute, should remain the same"); newState = strategy.reroute(clusterState, "reroute"); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index d2e86c13d4f1c..24838b22d47e2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -94,7 +94,7 @@ public void testDiskThreshold() { final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); @@ -106,11 +106,8 @@ public ClusterInfo getClusterInfo() { return clusterInfo; } }; - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) @@ -183,16 +180,13 @@ public ClusterInfo getClusterInfo() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.7) .build(); - deciders = new AllocationDeciders(Settings.EMPTY, + deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); - strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); clusterState = strategy.reroute(clusterState, "reroute"); logShardStates(clusterState); @@ -214,16 +208,13 @@ public ClusterInfo getClusterInfo() { 
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.6) .build(); - deciders = new AllocationDeciders(Settings.EMPTY, + deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); - strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); clusterState = strategy.reroute(clusterState, "reroute"); @@ -281,7 +272,7 @@ public void testDiskThresholdWithAbsoluteSizes() { final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); @@ -294,11 +285,8 @@ public ClusterInfo getClusterInfo() { } }; - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) @@ -346,11 +334,8 @@ public ClusterInfo getClusterInfo() { return clusterInfo2; } }; - strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); clusterState = strategy.reroute(clusterState, "reroute"); logShardStates(clusterState); @@ -406,16 +391,13 @@ public ClusterInfo getClusterInfo() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "20b") .build(); - deciders = new AllocationDeciders(Settings.EMPTY, + deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); - strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - 
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); clusterState = strategy.reroute(clusterState, "reroute"); logShardStates(clusterState); @@ -438,16 +420,13 @@ public ClusterInfo getClusterInfo() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "30b") .build(); - deciders = new AllocationDeciders(Settings.EMPTY, + deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), makeDecider(diskSettings)))); - strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); clusterState = strategy.reroute(clusterState, "reroute"); @@ -529,7 +508,7 @@ public void testDiskThresholdWithShardSizes() { ImmutableOpenMap shardSizes = shardSizesBuilder.build(); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -544,11 +523,8 @@ public ClusterInfo getClusterInfo() { } }; - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) @@ -596,7 +572,7 @@ public void testUnknownDiskUsage() { ImmutableOpenMap shardSizes = shardSizesBuilder.build(); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders deciders = new AllocationDeciders( new HashSet<>(Arrays.asList( new SameShardAllocationDecider( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) @@ -611,11 +587,8 @@ public ClusterInfo getClusterInfo() { } }; - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new 
BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) @@ -698,7 +671,7 @@ public void testShardRelocationsTakenIntoAccount() { final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); DiskThresholdDecider decider = makeDecider(diskSettings); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders deciders = new AllocationDeciders( new HashSet<>(Arrays.asList(new SameShardAllocationDecider( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ), decider))); @@ -711,11 +684,8 @@ public ClusterInfo getClusterInfo() { } }; - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) @@ -892,17 +862,14 @@ public ClusterInfo getClusterInfo() { return clusterInfo; } }; - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( + AllocationDeciders deciders = new AllocationDeciders(new HashSet<>(Arrays.asList( new SameShardAllocationDecider( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ), diskThresholdDecider ))); - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away // and therefore we will have sufficient disk space on node1. 
ClusterState result = strategy.reroute(clusterState, "reroute"); @@ -989,19 +956,15 @@ public ClusterInfo getClusterInfo() { } }; - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( + AllocationDeciders deciders = new AllocationDeciders(new HashSet<>(Arrays.asList( new SameShardAllocationDecider( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ), diskThresholdDecider ))); - AllocationService strategy = new AllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - - .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), cis); + AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), cis); ClusterState result = strategy.reroute(clusterState, "reroute"); assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index ec61439ee14e9..77967788adffb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -70,7 +70,8 @@ public void testCanAllocateUsesMaxAvailableSpace() { final Index index = metaData.index("test").getIndex(); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(), @@ -94,22 +95,25 @@ public void testCanAllocateUsesMaxAvailableSpace() { leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, 0)); // all full ImmutableOpenMap.Builder mostAvailableUsage = ImmutableOpenMap.builder(); - mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, randomIntBetween(20, 100))); // 20 - 99 percent since after allocation there must be at least 10% left and shard is 10 bytes - mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, randomIntBetween(0, 10))); // this is weird and smells like a bug! it should be up to 20%? + // 20 - 99 percent since after allocation there must be at least 10% left and shard is 10 bytes + mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, randomIntBetween(20, 100))); + // this is weird and smells like a bug! it should be up to 20%? 
+ mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, randomIntBetween(0, 10))); ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); shardSizes.put("[test][0][p]", 10L); // 10 bytes - final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of()); - RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); + final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), + mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of()); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)), + clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); allocation.debugDecision(true); Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation); assertEquals(mostAvailableUsage.toString(), Decision.Type.YES, decision.type()); assertThat(((Decision.Single) decision).getExplanation(), containsString("enough disk for shard on node")); decision = decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation); assertEquals(mostAvailableUsage.toString(), Decision.Type.NO, decision.type()); - assertThat(((Decision.Single) decision).getExplanation(), containsString( - "the node is above the high watermark cluster setting [cluster.routing.allocation.disk.watermark.high=90%], using more " + - "disk space than the maximum allowed [90.0%]")); + assertThat(((Decision.Single) decision).getExplanation(), containsString("the node is above the high watermark cluster " + + "setting [cluster.routing.allocation.disk.watermark.high=90%], using more disk space than the maximum allowed [90.0%]")); } public void testCannotAllocateDueToLackOfDiskResources() { @@ -153,8 +157,9 @@ public void testCannotAllocateDueToLackOfDiskResources() { // way bigger than available space final long shardSize = randomIntBetween(110, 1000); shardSizes.put("[test][0][p]", shardSize); - ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of()); - RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), + ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), + shardSizes.build(), ImmutableOpenMap.of()); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); allocation.debugDecision(true); Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation); @@ -182,22 +187,26 @@ public void testCanRemainUsesLeastAvailableSpace() { .build(); final IndexMetaData indexMetaData = metaData.index("test"); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 0), true, + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId()); test_0 = 
ShardRoutingHelper.moveToStarted(test_0); shardRoutingMap.put(test_0, "/node0/least"); - ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 1), true, + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId()); test_1 = ShardRoutingHelper.moveToStarted(test_1); shardRoutingMap.put(test_1, "/node1/least"); - ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 2), true, + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId()); test_2 = ShardRoutingHelper.moveToStarted(test_2); shardRoutingMap.put(test_2, "/node1/most"); - ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetaData.getIndex(), 3), true, + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId()); test_3 = ShardRoutingHelper.moveToStarted(test_3); // Intentionally not in the shardRoutingMap. We want to test what happens when we don't know where it is. 
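// A note on the refactor running through these hunks: AllocationDeciders, AllocationService
// and the individual decider constructors no longer take a Settings argument, so call sites
// simply drop Settings.EMPTY. A minimal sketch of the new call shape, assuming a test-local
// decider, clusterState, clusterInfo and a ClusterInfoService cis as in the tests above:
//
//     AllocationDeciders deciders = new AllocationDeciders(Collections.singleton(decider));
//     AllocationService strategy = new AllocationService(deciders, new TestGatewayAllocator(),
//         new BalancedShardsAllocator(Settings.EMPTY), cis);
//     RoutingAllocation allocation = new RoutingAllocation(deciders,
//         clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());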
@@ -206,7 +215,8 @@ public void testCanRemainUsesLeastAvailableSpace() { .addAsNew(indexMetaData) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -228,8 +238,10 @@ public void testCanRemainUsesLeastAvailableSpace() { shardSizes.put("[test][1][p]", 10L); shardSizes.put("[test][2][p]", 10L); - final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), shardRoutingMap.build()); - RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)), clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); + final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), + shardSizes.build(), shardRoutingMap.build()); + RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)), + clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime()); allocation.debugDecision(true); Decision decision = decider.canRemain(test_0, new RoutingNode("node_0", node_0), allocation); assertEquals(Decision.Type.YES, decision.type()); @@ -273,28 +285,33 @@ public void testShardSizeAndRelocatingSize() { shardSizes.put("[other][0][p]", 10000L); ClusterInfo info = new DevNullClusterInfo(ImmutableOpenMap.of(), ImmutableOpenMap.of(), shardSizes.build()); MetaData.Builder metaBuilder = MetaData.builder(); - metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put("index.uuid", "1234")).numberOfShards(3).numberOfReplicas(1)); - metaBuilder.put(IndexMetaData.builder("other").settings(settings(Version.CURRENT).put("index.uuid", "5678")).numberOfShards(1).numberOfReplicas(1)); + metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT) + .put("index.uuid", "1234")).numberOfShards(3).numberOfReplicas(1)); + metaBuilder.put(IndexMetaData.builder("other").settings(settings(Version.CURRENT) + .put("index.uuid", "5678")).numberOfShards(1).numberOfReplicas(1)); MetaData metaData = metaBuilder.build(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); routingTableBuilder.addAsNew(metaData.index("test")); routingTableBuilder.addAsNew(metaData.index("other")); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); RoutingAllocation allocation = new RoutingAllocation(null, null, clusterState, info, 0); final Index index = new Index("test", "1234"); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), 
false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_0 = ShardRoutingHelper.initialize(test_0, "node1"); test_0 = ShardRoutingHelper.moveToStarted(test_0); test_0 = ShardRoutingHelper.relocate(test_0, "node2"); - ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_1 = ShardRoutingHelper.initialize(test_1, "node2"); test_1 = ShardRoutingHelper.moveToStarted(test_1); test_1 = ShardRoutingHelper.relocate(test_1, "node1"); - ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_2 = ShardRoutingHelper.initialize(test_2, "node1"); test_2 = ShardRoutingHelper.moveToStarted(test_2); @@ -309,19 +326,21 @@ public void testShardSizeAndRelocatingSize() { assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/some/other/dev")); assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/some/other/dev")); - ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_3 = ShardRoutingHelper.initialize(test_3, "node1"); test_3 = ShardRoutingHelper.moveToStarted(test_3); assertEquals(0L, DiskThresholdDecider.getExpectedShardSize(test_3, allocation, 0)); - ShardRouting other_0 = ShardRouting.newUnassigned(new ShardId("other", "5678", 0), randomBoolean(), PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting other_0 = ShardRouting.newUnassigned(new ShardId("other", "5678", 0), randomBoolean(), + PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); other_0 = ShardRoutingHelper.initialize(other_0, "node2"); other_0 = ShardRoutingHelper.moveToStarted(other_0); other_0 = ShardRoutingHelper.relocate(other_0, "node1"); - node = new RoutingNode("node1", new DiscoveryNode("node1", buildNewFakeTransportAddress(), - emptyMap(), emptySet(), Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2, other_0.getTargetRelocatingShard()); + node = new RoutingNode("node1", new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), + Version.CURRENT), test_0, test_1.getTargetRelocatingShard(), test_2, other_0.getTargetRelocatingShard()); if (other_0.primary()) { assertEquals(10100L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, false, "/dev/null")); assertEquals(10090L, DiskThresholdDecider.sizeOfRelocatingShards(node, allocation, true, "/dev/null")); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 
20bd5957aeb1d..b5f68c3956f97 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -67,7 +67,8 @@ public void testClusterEnableNone() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -95,7 +96,8 @@ public void testClusterEnableOnlyPrimaries() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("--> adding two nodes do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -107,7 +109,8 @@ public void testClusterEnableOnlyPrimaries() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); logger.info("--> start the shards (primaries)"); - routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + routingTable = strategy.applyStartedShards(clusterState, + clusterState.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); @@ -129,7 +132,8 @@ public void testIndexEnableNone() { .addAsNew(metaData.index("enabled")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -152,18 +156,24 @@ public void testEnableClusterBalance() { final boolean useClusterSetting = randomBoolean(); final Rebalance allowedOnes = RandomPicks.randomFrom(random(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL)); Settings build = Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(random(), Rebalance.values())) // index settings override cluster settings + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + // index settings override cluster settings + useClusterSetting ? 
Rebalance.NONE: RandomPicks.randomFrom(random(), Rebalance.values())) .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 10) .build(); ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AllocationService strategy = createAllocationService(build, clusterSettings, random()); - Settings indexSettings = useClusterSetting ? Settings.EMPTY : Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build(); + Settings indexSettings = useClusterSetting ? Settings.EMPTY : Settings.builder() + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build(); logger.info("Building initial routing table"); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(3).numberOfReplicas(1)) - .put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1)) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)) + .numberOfShards(3).numberOfReplicas(1)) + .put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)) + .numberOfShards(1).numberOfReplicas(1)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder() @@ -171,7 +181,8 @@ public void testEnableClusterBalance() { .addAsNew(metaData.index("always_disabled")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -200,21 +211,23 @@ public void testEnableClusterBalance() { assertThat(clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(0)); if (useClusterSetting) { - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()).transientSettings(Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes) + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .transientSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes) .build())).build(); } else { IndexMetaData meta = clusterState.getMetaData().index("test"); IndexMetaData meta1 = clusterState.getMetaData().index("always_disabled"); - clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()).removeAllIndices().put(IndexMetaData.builder(meta1)) - .put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build()))) - .build(); + clusterState = 
ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()).removeAllIndices() + .put(IndexMetaData.builder(meta1)).put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build()))).build(); } clusterSettings.applySettings(clusterState.metaData().settings()); clusterState = strategy.reroute(clusterState, "reroute"); - assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); - assertThat("expected 2 shards to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2)); + assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, + clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); + assertThat("expected 2 shards to relocate useClusterSettings: " + useClusterSetting, + clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2)); List mutableShardRoutings = clusterState.getRoutingNodes().shardsWithState(RELOCATING); switch (allowedOnes) { case PRIMARIES: @@ -246,23 +259,26 @@ public void testEnableClusterBalance() { public void testEnableClusterBalanceNoReplicas() { final boolean useClusterSetting = randomBoolean(); Settings build = Settings.builder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(random(), Rebalance.values())) // index settings override cluster settings + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + // index settings override cluster settings + useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(random(), Rebalance.values())) .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AllocationService strategy = createAllocationService(build, clusterSettings, random()); - Settings indexSettings = useClusterSetting ? Settings.EMPTY : Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build(); + Settings indexSettings = useClusterSetting ? 
Settings.EMPTY : Settings.builder() + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build(); logger.info("Building initial routing table"); - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(6).numberOfReplicas(0)) - .build(); + MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test") + .settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(6).numberOfReplicas(0)).build(); RoutingTable initialRoutingTable = RoutingTable.builder() .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(initialRoutingTable).build(); logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() @@ -293,12 +309,16 @@ public void testEnableClusterBalanceNoReplicas() { } else { IndexMetaData meta = clusterState.getMetaData().index("test"); clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices() - .put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); + .put(IndexMetaData.builder(meta).settings(Settings.builder().put(meta.getSettings()) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomBoolean() ? 
Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); } clusterSettings.applySettings(clusterState.metaData().settings()); clusterState = strategy.reroute(clusterState, "reroute"); - assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); - assertThat("expected 2 primaries to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2)); + assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); + assertThat("expected 2 primaries to relocate useClusterSettings: " + useClusterSetting, + clusterState.getRoutingNodes().shardsWithState(RELOCATING).size(), equalTo(2)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 4d5639a05ea07..70b728487ae91 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -53,11 +53,11 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase { public void testFilterInitialRecovery() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY, clusterSettings); - AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, + AllocationDeciders allocationDeciders = new AllocationDeciders( Arrays.asList(filterAllocationDecider, new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), - new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY))); - AllocationService service = new AllocationService(Settings.builder().build(), allocationDeciders, + new ReplicaAfterPrimaryActiveAllocationDecider())); + AllocationService service = new AllocationService(allocationDeciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); ClusterState state = createInitialClusterState(service, Settings.builder().put("index.routing.allocation.initial_recovery._id", "node2").build()); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index 49d69272af629..86190b107e5e7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -186,8 +185,8 @@ private ClusterState 
createInitialClusterState() {
    }

    private Decision executeAllocation(final ClusterState clusterState, final ShardRouting shardRouting) {
-        final AllocationDecider decider = new RestoreInProgressAllocationDecider(Settings.EMPTY);
-        final RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)),
+        final AllocationDecider decider = new RestoreInProgressAllocationDecider();
+        final RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)),
            clusterState.getRoutingNodes(), clusterState, null, 0L);
        allocation.debugDecision(true);
diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
index b2878008f7055..0379b706c8280 100644
--- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -63,9 +63,11 @@ public void testClusterStateSerialization() throws Exception {
                .addAsNew(metaData.index("test"))
                .build();

-        DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
+        DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))
+            .add(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();

-        ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1")).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+        ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1"))
+            .nodes(nodes).metaData(metaData).routingTable(routingTable).build();

        AllocationService strategy = createAllocationService();
        clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build();
diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
index 4475aed7dc398..dd4f5bedc91e1 100644
--- a/server/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
@@ -196,11 +196,12 @@ protected MapDiff readDiff(StreamInput in) throws IOException {
         */
        public abstract class MapDriver {
            protected final Set<Integer> keys = randomPositiveIntSet();
-            protected final Set<Integer> keysToRemove = new HashSet<>(randomSubsetOf(randomInt(keys.size()), keys.toArray(new Integer[keys.size()])));
+            protected final Set<Integer> keysToRemove = new HashSet<>(randomSubsetOf(randomInt(keys.size()), keys.toArray(new Integer[0])));
            protected final Set<Integer> keysThatAreNotRemoved = Sets.difference(keys, keysToRemove);
            protected final Set<Integer> keysToOverride = new HashSet<>(randomSubsetOf(randomInt(keysThatAreNotRemoved.size()), keysThatAreNotRemoved.toArray(new Integer[keysThatAreNotRemoved.size()])));
-            protected final Set<Integer> keysToAdd = Sets.difference(randomPositiveIntSet(), keys); // make sure keysToAdd does not contain elements in keys
+            // make sure keysToAdd does not contain elements in keys
+            protected final Set<Integer> keysToAdd = Sets.difference(randomPositiveIntSet(), keys);
            protected final Set<Integer> keysUnchanged = Sets.difference(keysThatAreNotRemoved, keysToOverride);

            protected final DiffableUtils.KeySerializer<Integer> keySerializer =
randomBoolean() diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 0ca47377bdd0d..989b84f9f1a6b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -20,6 +20,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -128,7 +129,7 @@ public void testClusterStateUpdateLogging() throws Exception { Level.TRACE, "*failed to execute cluster state applier in [2s]*")); - Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); + Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(3); @@ -208,7 +209,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { Level.WARN, "*cluster state applier task [test3] took [34s] above the warn threshold of *")); - Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); + Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); @@ -412,7 +413,7 @@ static class TimedClusterApplierService extends ClusterApplierService { public volatile Long currentTimeOverride = null; TimedClusterApplierService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - super(settings, clusterSettings, threadPool); + super("test_node", settings, clusterSettings, threadPool); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 1168e1034fe6c..26e36afc551ee 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -20,6 +20,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -325,7 +326,7 @@ public void testClusterStateUpdateLogging() throws Exception { Level.DEBUG, "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); - Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); + Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); @@ -672,7 +673,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { Level.WARN, "*cluster state update task [test4] took [34s] above the warn threshold of *")); - Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); + Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = 
new CountDownLatch(5); @@ -906,7 +907,7 @@ static class TimedMasterService extends MasterService { public volatile Long currentTimeOverride = null; TimedMasterService(Settings settings, ThreadPool threadPool) { - super(settings, threadPool); + super("test_node", settings, threadPool); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 6d23866112d76..038d16f479df2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -87,7 +87,8 @@ public void testDeleteIsAppliedFirst() { response = client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().putNull((randomBoolean() ? "discovery.zen.*" : "*")).put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "2s")) + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? "discovery.zen.*" : "*")) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "2s")) .get(); assertEquals(response.getTransientSettings().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "2s"); assertNull(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); @@ -274,7 +275,8 @@ public void testUpdateDiscoveryPublishTimeout() { .get(); fail("bogus value"); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [discovery.zen.publish_timeout] with value [whatever]" + + " as a time value: unit is missing or unrecognized"); } assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L)); @@ -292,7 +294,9 @@ public void testUpdateDiscoveryPublishTimeout() { assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L)); } - private DiscoverySettings getDiscoverySettings() {return ((ZenDiscovery) internalCluster().getInstance(Discovery.class)).getDiscoverySettings();} + private DiscoverySettings getDiscoverySettings() { + return ((ZenDiscovery) internalCluster().getInstance(Discovery.class)).getDiscoverySettings(); + } public void testClusterUpdateSettingsWithBlocks() { String key1 = "cluster.routing.allocation.enable"; @@ -344,7 +348,8 @@ public void testMissingUnits() { assertAcked(prepareCreate("test")); try { - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "10")).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.refresh_interval", "10")).execute().actionGet(); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("[index.refresh_interval] with value [10]")); @@ -360,7 +365,8 @@ public void testLoggerLevelUpdate() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet()); + () -> client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet()); assertEquals("Unknown level constant [BOOM].", e.getMessage()); try { diff 
--git a/server/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index 6e1475d0aebdc..3f2e3677cb11b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -53,7 +53,8 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testSingleShardAllocation() throws Exception { client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", "1").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet(); + .put("index.number_of_shards", "1").put("index.number_of_replicas", 0) + .put("index.routing.allocation.include.tag", "A")).execute().actionGet(); ensureGreen(); ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet(); assertThat(response.getGroups().length, equalTo(1)); @@ -75,7 +76,8 @@ public void testSingleShardAllocation() throws Exception { public void testMultipleShardsSingleNodeAllocation() throws Exception { client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", "4").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet(); + .put("index.number_of_shards", "4").put("index.number_of_replicas", 0) + .put("index.routing.allocation.include.tag", "A")).execute().actionGet(); ensureGreen(); ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index c48b745743efe..e9bae772f4ea0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -60,28 +60,32 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testEmptyIterator() { ShardShuffler shuffler = new RotationShardShuffler(0); - ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); + ShardIterator shardIterator = new PlainShardIterator( + new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); + shardIterator = new PlainShardIterator( + new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); + shardIterator = new PlainShardIterator( + new ShardId("test1", "_na_", 0), 
shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); + shardIterator = new PlainShardIterator( + new ShardId("test1", "_na_", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); @@ -243,7 +247,8 @@ public void testAttributePreferenceRouting() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); Map node1Attributes = new HashMap<>(); node1Attributes.put("rack_id", "rack_1"); @@ -263,7 +268,8 @@ public void testAttributePreferenceRouting() { clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); // after all are started, check routing iteration - ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); + ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0) + .preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); ShardRouting shardRouting = shardIterator.nextOrNull(); assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node1")); @@ -271,7 +277,8 @@ public void testAttributePreferenceRouting() { assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node2")); - shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); + shardIterator = clusterState.routingTable().index("test").shard(0) + .preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); shardRouting = shardIterator.nextOrNull(); assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node1")); @@ -294,7 +301,8 @@ public void testNodeSelectorRouting(){ .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() .add(newNode("fred", "node1", singletonMap("disk", "ebs"))) @@ -306,19 +314,23 @@ public void testNodeSelectorRouting(){ clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); - ShardsIterator shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("disk:ebs",clusterState.nodes()); + ShardsIterator 
shardsIterator = clusterState.routingTable().index("test") + .shard(0).onlyNodeSelectorActiveInitializingShardsIt("disk:ebs",clusterState.nodes()); assertThat(shardsIterator.size(), equalTo(1)); assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node1")); - shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("dis*:eph*",clusterState.nodes()); + shardsIterator = clusterState.routingTable().index("test").shard(0) + .onlyNodeSelectorActiveInitializingShardsIt("dis*:eph*",clusterState.nodes()); assertThat(shardsIterator.size(), equalTo(1)); assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node2")); - shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("fred",clusterState.nodes()); + shardsIterator = clusterState.routingTable().index("test").shard(0) + .onlyNodeSelectorActiveInitializingShardsIt("fred",clusterState.nodes()); assertThat(shardsIterator.size(), equalTo(1)); assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node1")); - shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("bar*",clusterState.nodes()); + shardsIterator = clusterState.routingTable().index("test").shard(0) + .onlyNodeSelectorActiveInitializingShardsIt("bar*",clusterState.nodes()); assertThat(shardsIterator.size(), equalTo(1)); assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node2")); @@ -341,13 +353,15 @@ public void testNodeSelectorRouting(){ assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node1")); try { - shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("welma", clusterState.nodes()); + shardsIterator = clusterState.routingTable().index("test").shard(0) + .onlyNodeSelectorActiveInitializingShardsIt("welma", clusterState.nodes()); fail("should have raised illegalArgumentException"); } catch (IllegalArgumentException illegal) { //expected exception } - shardsIterator = clusterState.routingTable().index("test").shard(0).onlyNodeSelectorActiveInitializingShardsIt("fred",clusterState.nodes()); + shardsIterator = clusterState.routingTable().index("test").shard(0) + .onlyNodeSelectorActiveInitializingShardsIt("fred",clusterState.nodes()); assertThat(shardsIterator.size(), equalTo(1)); assertThat(shardsIterator.nextOrNull().currentNodeId(),equalTo("node1")); } @@ -366,7 +380,8 @@ public void testShardsAndPreferNodeRouting() { .addAsNew(metaData.index("test")) .build(); - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() .add(newNode("node1")) @@ -382,7 +397,8 @@ public void testShardsAndPreferNodeRouting() { OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0"); + GroupShardsIterator shardIterators = operationRouting + .searchShards(clusterState, new String[]{"test"}, null, "_shards:0"); assertThat(shardIterators.size(), equalTo(1)); 
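// For context on the preference strings exercised in this test: "_shards:0" restricts the
// search to copies of shard 0, and appending "|_prefer_nodes:node1,node2" additionally
// orders copies located on node1/node2 ahead of the remaining copies. A hedged usage
// sketch with the names from this test:
//
//     GroupShardsIterator shardIterators = operationRouting.searchShards(
//         clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1");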
assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); @@ -401,12 +417,14 @@ public void testShardsAndPreferNodeRouting() { assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId))); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, + null, "_shards:0|_prefer_nodes:node1"); assertThat(shardIterators.size(), equalTo(1)); assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0)); assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1")); - shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0|_prefer_nodes:node1,node2"); + shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, + null, "_shards:0|_prefer_nodes:node1,node2"); assertThat(shardIterators.size(), equalTo(1)); Iterator iterator = shardIterators.iterator(); final ShardIterator it = iterator.next(); diff --git a/server/src/test/java/org/elasticsearch/common/logging/PrefixLoggerTests.java b/server/src/test/java/org/elasticsearch/common/logging/PrefixLoggerTests.java new file mode 100644 index 0000000000000..b0b6182fa4629 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/PrefixLoggerTests.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class PrefixLoggerTests extends ESTestCase { + public void testNullPrefix() { + Exception e = expectThrows(IllegalArgumentException.class, () -> new PrefixLogger(logger, null)); + assertThat(e.getMessage(), containsString("use a regular logger")); + } + + public void testEmptyPrefix() { + Exception e = expectThrows(IllegalArgumentException.class, () -> new PrefixLogger(logger, "")); + assertThat(e.getMessage(), containsString("use a regular logger")); + } +} \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java index e6b31d95c85d1..9c94bfab7acd6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java @@ -20,6 +20,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting.Property; @@ -40,7 +41,7 @@ public class SettingsFilterTests extends ESTestCase { public void testAddingAndRemovingFilters() { HashSet hashSet = new HashSet<>(Arrays.asList("foo", "bar", "baz")); - SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY, hashSet); + SettingsFilter settingsFilter = new SettingsFilter(hashSet); assertEquals(settingsFilter.getPatterns(), hashSet); } @@ -132,7 +133,7 @@ public void testRegularSettingUpdateIsFullyLogged() throws Exception { private void assertExpectedLogMessages(Consumer consumer, MockLogAppender.LoggingExpectation ... expectations) throws IllegalAccessException { - Logger testLogger = Loggers.getLogger("org.elasticsearch.test"); + Logger testLogger = LogManager.getLogger("org.elasticsearch.test"); MockLogAppender appender = new MockLogAppender(); Loggers.addAppender(testLogger, appender); try { @@ -146,7 +147,7 @@ private void assertExpectedLogMessages(Consumer consumer, } private void testFiltering(Settings source, Settings filtered, String... 
patterns) throws IOException { - SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY, Arrays.asList(patterns)); + SettingsFilter settingsFilter = new SettingsFilter(Arrays.asList(patterns)); // Test using direct filtering Settings filteredSettings = settingsFilter.filter(source); diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 0bb72a4050de8..c91c58647b9fa 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery; -import java.nio.file.Path; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -27,15 +26,12 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenPing; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; @@ -56,7 +52,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -64,19 +59,13 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase { static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places. 
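// The hunks below replace the old configureCluster(...) / NodeConfigurationSource plumbing
// with a plain nodeSettings(...) override; minimum master node handling appears to be left
// to the test cluster itself. A minimal sketch of the resulting override, with
// DEFAULT_SETTINGS and TestZenDiscovery.USE_MOCK_PINGS taken from this class:
//
//     @Override
//     protected Settings nodeSettings(int nodeOrdinal) {
//         return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(DEFAULT_SETTINGS)
//             .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build();
//     }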
-    private NodeConfigurationSource discoveryConfig;

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder().put(discoveryConfig.nodeSettings(nodeOrdinal))
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(DEFAULT_SETTINGS)
            .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false).build();
    }

-    @Before
-    public void clearConfig() {
-        discoveryConfig = null;
-    }
-
    @Override
    protected int numberOfShards() {
        return 3;
@@ -119,11 +108,6 @@ protected void beforeIndexDeletion() throws Exception {
    }

    List<String> startCluster(int numberOfNodes) {
-        return startCluster(numberOfNodes, -1);
-    }
-
-    List<String> startCluster(int numberOfNodes, int minimumMasterNode) {
-        configureCluster(numberOfNodes, minimumMasterNode);
        InternalTestCluster internalCluster = internalCluster();
        List<String> nodes = internalCluster.startNodes(numberOfNodes);
        ensureStableCluster(numberOfNodes);
@@ -152,38 +136,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(MockTransportService.TestPlugin.class);
    }

-    void configureCluster(int numberOfNodes, int minimumMasterNode) {
-        configureCluster(DEFAULT_SETTINGS, numberOfNodes, minimumMasterNode);
-    }
-
-    void configureCluster(Settings settings, int numberOfNodes, int minimumMasterNode) {
-        if (minimumMasterNode < 0) {
-            minimumMasterNode = numberOfNodes / 2 + 1;
-        }
-        logger.info("---> configured unicast");
-        // TODO: Rarely use default settings form some of these
-        Settings nodeSettings = Settings.builder()
-            .put(settings)
-            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numberOfNodes)
-            .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode)
-            .putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "file")
-            .build();
-
-        if (discoveryConfig == null) {
-            discoveryConfig = new NodeConfigurationSource() {
-                @Override
-                public Settings nodeSettings(final int nodeOrdinal) {
-                    return nodeSettings;
-                }
-
-                @Override
-                public Path nodeConfigPath(final int nodeOrdinal) {
-                    return null;
-                }
-            };
-        }
-    }
-
    ClusterState getNodeClusterState(String node) {
        return client(node).admin().cluster().prepareState().setLocal(true).get().getState();
    }
diff --git a/server/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/server/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
index 9504344236b86..da9a4d6c2bf19 100644
--- a/server/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
@@ -47,7 +47,8 @@ private static class PublishResponder extends AbstractRunnable {
        final Logger logger;
        final BlockingClusterStatePublishResponseHandler handler;

-        PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, Logger logger, BlockingClusterStatePublishResponseHandler handler) {
+        PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, Logger logger,
+                         BlockingClusterStatePublishResponseHandler handler) {
            this.fail = fail;

            this.node = node;
@@ -80,7 +81,8 @@ public void testConcurrentAccess() throws InterruptedException {
            allNodes[i] = node;
        }

-        BlockingClusterStatePublishResponseHandler handler = new BlockingClusterStatePublishResponseHandler(new HashSet<>(Arrays.asList(allNodes)));
+        BlockingClusterStatePublishResponseHandler handler =
+            new BlockingClusterStatePublishResponseHandler(new HashSet<>(Arrays.asList(allNodes)));
        int firstRound =
randomIntBetween(5, nodeCount - 1); Thread[] threads = new Thread[firstRound]; diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java new file mode 100644 index 0000000000000..2d0604d8d2894 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionCleanSettingsIT.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +public class ClusterDisruptionCleanSettingsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + /** + * This test creates a scenario where a primary shard (0 replicas) relocates and is in POST_RECOVERY on the target + * node but already deleted on the source node. Search request should still work. 
+ */ + public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception { + // Don't use AbstractDisruptionTestCase.DEFAULT_SETTINGS as settings + // (which can cause node disconnects on a slow CI machine) + internalCluster().startMasterOnlyNode(); + final String node_1 = internalCluster().startDataOnlyNode(); + + logger.info("--> creating index [test] with one shard and no replicas"); + assertAcked(prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + ensureGreen("test"); + + final String node_2 = internalCluster().startDataOnlyNode(); + List indexRequestBuilderList = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("_doc") + .setSource("{\"int_field\":1}", XContentType.JSON)); + } + indexRandom(true, indexRequestBuilderList); + + IndicesStoreIntegrationIT.relocateAndBlockCompletion(logger, "test", 0, node_1, node_2); + // now search for the documents and see if we get a reply + assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits(), equalTo(100L)); + } +} diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index b35bf8444e95e..5dc9f537f320e 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -36,9 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; @@ -72,8 +69,8 @@ /** * Tests various cluster operations (e.g., indexing) during disruptions.
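 * <p>
 * Illustrative sketch (assumed shape, not taken verbatim from this change): these tests
 * typically partition the cluster with the test framework's {@code NetworkDisruption}:
 * <pre>{@code
 * List<String> nodes = startCluster(3);
 * NetworkDisruption partition = new NetworkDisruption(
 *         new NetworkDisruption.TwoPartitions(
 *                 new HashSet<>(nodes.subList(0, 1)),
 *                 new HashSet<>(nodes.subList(1, 3))),
 *         new NetworkDisruption.NetworkDisconnect());
 * internalCluster().setDisruptionScheme(partition);
 * partition.startDisrupting();
 * // ... exercise indexing while the partition is in place ...
 * partition.stopDisrupting();
 * ensureStableCluster(3);
 * }</pre>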
*/ -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class ClusterDisruptionIT extends AbstractDisruptionTestCase { /** @@ -289,7 +286,7 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { // simulate handling of sending shard failure during an isolation public void testSendingShardFailure() throws Exception { - List nodes = startCluster(3, 2); + List nodes = startCluster(3); String masterNode = internalCluster().getMasterName(); List nonMasterNodes = nodes.stream().filter(node -> !node.equals(masterNode)).collect(Collectors.toList()); String nonMasterNode = randomFrom(nonMasterNodes); @@ -357,43 +354,10 @@ public void onFailure(Exception e) { } } - /** - * This test creates a scenario where a primary shard (0 replicas) relocates and is in POST_RECOVERY on the target - * node but already deleted on the source node. Search request should still work. - */ - public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Exception { - // don't use DEFAULT settings (which can cause node disconnects on a slow CI machine) - configureCluster(Settings.EMPTY, 3, 1); - internalCluster().startMasterOnlyNode(); - final String node_1 = internalCluster().startDataOnlyNode(); - - logger.info("--> creating index [test] with one shard and on replica"); - assertAcked(prepareCreate("test").setSettings( - Settings.builder().put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) - ); - ensureGreen("test"); - - final String node_2 = internalCluster().startDataOnlyNode(); - List indexRequestBuilderList = new ArrayList<>(); - for (int i = 0; i < 100; i++) { - indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("_doc") - .setSource("{\"int_field\":1}", XContentType.JSON)); - } - indexRandom(true, indexRequestBuilderList); - - IndicesStoreIntegrationIT.relocateAndBlockCompletion(logger, "test", 0, node_1, node_2); - // now search for the documents and see if we get a reply - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - } - public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { // test for https://github.com/elastic/elasticsearch/issues/8823 - configureCluster(2, 1); String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); - ensureStableCluster(2); assertAcked(prepareCreate("index").setSettings(Settings.builder().put("index.number_of_replicas", 0))); index("index", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); @@ -416,14 +380,12 @@ public boolean clearData(String nodeName) { */ public void testIndicesDeleted() throws Exception { final Settings settings = Settings.builder() - .put(DEFAULT_SETTINGS) .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed .build(); final String idxName = "test"; - configureCluster(settings, 3, 2); - final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(2); - final String dataNode = internalCluster().startDataOnlyNode(); + final List allMasterEligibleNodes = 
internalCluster().startMasterOnlyNodes(2, settings); + final String dataNode = internalCluster().startDataOnlyNode(settings); ensureStableCluster(3); assertAcked(prepareCreate("test")); diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 2c7f17468ac3a..610965b5a519a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -54,13 +54,13 @@ /** * Tests for discovery during disruptions. */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase { public void testIsolatedUnicastNodes() throws Exception { internalCluster().setHostsListContainsOnlyFirstNode(true); - List nodes = startCluster(4, -1); + List nodes = startCluster(4); // Figure out what is the elected master node final String unicastTarget = nodes.get(0); @@ -100,7 +100,7 @@ public void testIsolatedUnicastNodes() throws Exception { */ public void testUnicastSinglePingResponseContainsMaster() throws Exception { internalCluster().setHostsListContainsOnlyFirstNode(true); - List nodes = startCluster(4, -1); + List nodes = startCluster(4); // Figure out what is the elected master node final String masterNode = internalCluster().getMasterName(); logger.info("---> legit elected master node={}", masterNode); @@ -138,15 +138,8 @@ public void testUnicastSinglePingResponseContainsMaster() throws Exception { * Test cluster join with issues in cluster state publishing * */ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { - List nodes = startCluster(2, 1); - - String masterNode = internalCluster().getMasterName(); - String nonMasterNode; - if (masterNode.equals(nodes.get(0))) { - nonMasterNode = nodes.get(1); - } else { - nonMasterNode = nodes.get(0); - } + String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY); + String nonMasterNode = internalCluster().startDataOnlyNode(Settings.EMPTY); DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes(); @@ -196,7 +189,6 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { } public void testClusterFormingWithASlowNode() throws Exception { - configureCluster(3, 2); SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(random(), 0, 0, 1000, 2000); @@ -212,7 +204,6 @@ public void testClusterFormingWithASlowNode() throws Exception { } public void testElectMasterWithLatestVersion() throws Exception { - configureCluster(3, 2); final Set nodes = new HashSet<>(internalCluster().startNodes(3)); ensureStableCluster(3); ServiceDisruptionScheme isolateAllNodes = diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index f7716c6f146ff..9050f95698fb4 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -67,8 +67,8 @@ /** * Tests relating to the loss of the master. 
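 * <p>
 * Note on the API change in this patch: {@code startCluster} no longer takes an explicit
 * minimum_master_nodes argument; the test framework now derives the default, e.g.:
 * <pre>{@code
 * List<String> nodes = startCluster(3);   // previously startCluster(3, 2)
 * String oldMaster = internalCluster().getMasterName();
 * }</pre>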
*/ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { /** @@ -153,8 +153,8 @@ public void testNodesFDAfterMasterReelection() throws Exception { */ @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE") public void testStaleMasterNotHijackingMajority() throws Exception { - // 3 node cluster with unicast discovery and minimum_master_nodes set to 2: - final List nodes = startCluster(3, 2); + // 3 node cluster with unicast discovery and minimum_master_nodes set to the default of 2: + final List nodes = startCluster(3); // Save the current master node as old master node, because that node will get frozen final String oldMasterNode = internalCluster().getMasterName(); @@ -267,7 +267,7 @@ public void onFailure(String source, Exception e) { * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one */ public void testMasterNodeGCs() throws Exception { - List nodes = startCluster(3, -1); + List nodes = startCluster(3); String oldMasterNode = internalCluster().getMasterName(); // a very long GC, but it's OK as we remove the disruption when it has had an effect diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index 4c9edf6e17eb1..b5ca74a35465f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.discovery; +import java.util.Arrays; +import java.util.Collection; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -28,10 +30,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -40,8 +44,8 @@ import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import org.elasticsearch.test.transport.MockTransportService; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; @@ -49,17 +53,26 @@ /** * Tests snapshot operations during disruptions. 
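 * <p>
 * Sketch of the configuration pattern introduced below, which replaces the removed
 * {@code configureCluster} helper (all names are taken from this change):
 * <pre>{@code
 * protected Settings nodeSettings(int nodeOrdinal) {   // overrides ESIntegTestCase#nodeSettings
 *     return Settings.builder().put(super.nodeSettings(nodeOrdinal))
 *             .put(AbstractDisruptionTestCase.DEFAULT_SETTINGS)
 *             .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false)
 *             .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s")
 *             .build();
 * }
 * }</pre>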
*/ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) @TestLogging("org.elasticsearch.snapshot:TRACE") -public class SnapshotDisruptionIT extends AbstractDisruptionTestCase { +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) +public class SnapshotDisruptionIT extends ESIntegTestCase { - public void testDisruptionOnSnapshotInitialization() throws Exception { - final Settings settings = Settings.builder() - .put(DEFAULT_SETTINGS) - .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(AbstractDisruptionTestCase.DEFAULT_SETTINGS) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") .build(); + } + + public void testDisruptionOnSnapshotInitialization() throws Exception { final String idxName = "test"; - configureCluster(settings, 4, 2); final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(4); @@ -159,7 +172,7 @@ public void clusterChanged(ClusterChangedEvent event) { } } - private void createRandomIndex(String idxName) throws ExecutionException, InterruptedException { + private void createRandomIndex(String idxName) throws InterruptedException { assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20)) .put("number_of_replicas", 0))); logger.info("--> indexing some data"); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index b45daaadfa576..8fba12197cabe 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -114,7 +114,7 @@ public void testEmptyUnicastHostsFile() throws Exception { public void testUnicastHostsDoesNotExist() { final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(createTempDir().toAbsolutePath()); final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); @@ -148,7 +148,7 @@ private List setupAndRunHostProvider(final List hostEn writer.write(String.join("\n", hostEntries)); } - return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) -> + return new FileBasedUnicastHostsProvider(configPath).buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); } diff --git 
a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 9e57382bb4bc8..a3ae6b07b19c9 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -142,7 +142,7 @@ private void setupMasterServiceAndNodeJoinController(ClusterState initialState) } masterService = ClusterServiceUtils.createMasterService(threadPool, initialState); nodeJoinController = new NodeJoinController(masterService, createAllocationService(Settings.EMPTY), - new ElectMasterService(Settings.EMPTY), Settings.EMPTY); + new ElectMasterService(Settings.EMPTY)); } public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java index ac1719269e7ae..23b118ebbed7e 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java @@ -267,7 +267,6 @@ private static MockPublishAction buildPublishClusterStateAction( new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); return new MockPublishAction( - settings, transportService, namedWriteableRegistry, listener, @@ -873,9 +872,9 @@ public static class MockPublishAction extends PublishClusterStateAction { AtomicBoolean timeoutOnCommit = new AtomicBoolean(); AtomicBoolean errorOnCommit = new AtomicBoolean(); - public MockPublishAction(Settings settings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, + public MockPublishAction(TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, IncomingClusterStateListener listener, DiscoverySettings discoverySettings) { - super(settings, transportService, namedWriteableRegistry, listener, discoverySettings); + super(transportService, namedWriteableRegistry, listener, discoverySettings); } @Override diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 2c4fb0c7e8db2..b82ddde7eca71 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -106,13 +106,16 @@ public void testShouldIgnoreNewClusterState() { currentState.version(2); newState.version(1); - assertTrue("should ignore, because new state's version is lower to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); + assertTrue("should ignore, because new state's version is lower than current state's version", + shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentState.version(1); newState.version(1); - assertTrue("should ignore, because new state's version is equal to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); + assertTrue("should ignore, because new state's version is equal to
current state's version", + shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentState.version(1); newState.version(2); - assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); + assertFalse("should not ignore, because new state's version is higher to current state's version", + shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentNodes = DiscoveryNodes.builder(); currentNodes.masterNodeId("b").add(new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); @@ -144,7 +147,8 @@ public void testShouldIgnoreNewClusterState() { currentState.version(1); newState.version(2); } - assertFalse("should not ignore, because current state doesn't have a master", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); + assertFalse("should not ignore, because current state doesn't have a master", + shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); } public void testFilterNonMasterPingResponse() { @@ -311,8 +315,10 @@ public void onNewClusterState(String source, Supplier clusterState listener.onSuccess(source); } }; - ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - masterService, clusterApplier, clusterSettings, hostsResolver -> Collections.emptyList(), ESAllocationTestCase.createAllocationService(), + ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, + new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), + masterService, clusterApplier, clusterSettings, hostsResolver -> Collections.emptyList(), + ESAllocationTestCase.createAllocationService(), Collections.emptyList()); zenDiscovery.start(); return zenDiscovery; @@ -341,8 +347,9 @@ public void testValidateOnUnsupportedIndexVersionCreated() throws Exception { (() -> localNode, ZenDiscovery.addBuiltInJoinValidators(Collections.emptyList())); final boolean incompatible = randomBoolean(); IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder() - .put(SETTING_VERSION_CREATED, incompatible ? VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) - : VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT)) + .put(SETTING_VERSION_CREATED, + incompatible ? 
VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) + : VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT)) .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_CREATION_DATE, System.currentTimeMillis())) .state(IndexMetaData.State.OPEN) diff --git a/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java index 4502e32e6d375..9593b58eae97c 100644 --- a/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java @@ -47,7 +47,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testCleanupWhenEmpty() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); assertTrue(danglingState.getDanglingIndices().isEmpty()); @@ -58,7 +58,7 @@ public void testCleanupWhenEmpty() throws Exception { } public void testDanglingIndicesDiscovery() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); assertTrue(danglingState.getDanglingIndices().isEmpty()); @@ -76,7 +76,7 @@ public void testDanglingIndicesDiscovery() throws Exception { public void testInvalidIndexFolder() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); MetaData metaData = MetaData.builder().build(); @@ -100,7 +100,7 @@ public void testInvalidIndexFolder() throws Exception { public void testDanglingProcessing() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); MetaData metaData = MetaData.builder().build(); @@ -144,7 +144,7 @@ public void testDanglingProcessing() throws Exception { public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID"); @@ -159,7 +159,6 @@ public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exceptio } private DanglingIndicesState 
createDanglingIndicesState(NodeEnvironment env, MetaStateService metaStateService) { - return new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null, - mock(ClusterService.class)); + return new DanglingIndicesState(env, metaStateService, null, mock(ClusterService.class)); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index 3e4a3dce09153..7cf8bf5e1644b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -58,7 +58,8 @@ public void testDefaultRecoverAfterTime() throws IOException { // ensure settings override default TimeValue timeValue = TimeValue.timeValueHours(3); // ensure default is set when setting expected_nodes - service = createService(Settings.builder().put("gateway.expected_nodes", 1).put("gateway.recover_after_time", timeValue.toString())); + service = createService(Settings.builder().put("gateway.expected_nodes", 1).put("gateway.recover_after_time", + timeValue.toString())); assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis())); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 8f89e59003cb1..330947b21e983 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -102,7 +102,8 @@ public void testReadWriteState() throws IOException { } final long id = addDummyFiles("foo-", dirs); Format format = new Format("foo-"); - DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); + DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), + randomDouble(), randomBoolean()); format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); @@ -116,7 +117,8 @@ public void testReadWriteState() throws IOException { DummyState read = format.read(NamedXContentRegistry.EMPTY, list[0]); assertThat(read, equalTo(state)); } - DummyState state2 = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); + DummyState state2 = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), + randomDouble(), randomBoolean()); format.write(state2, dirs); for (Path file : dirs) { @@ -142,7 +144,8 @@ public void testVersionMismatch() throws IOException { final long id = addDummyFiles("foo-", dirs); Format format = new Format("foo-"); - DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); + DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), + randomDouble(), randomBoolean()); format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); @@ -165,7 +168,8 @@ public void testCorruption() throws IOException { } final long id = addDummyFiles("foo-", dirs); Format format = new Format("foo-"); - DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), 
randomBoolean()); + DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), + randomDouble(), randomBoolean()); format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); @@ -207,7 +211,8 @@ public static void corruptFile(Path file, Logger logger) throws IOException { byte newValue = (byte) ~oldValue; bb.put(0, newValue); raf.write(bb, filePointer); - logger.debug("Corrupting file {} -- flipping at position {} from {} to {} ", fileToCorrupt.getFileName().toString(), filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue)); + logger.debug("Corrupting file {} -- flipping at position {} from {} to {} ", fileToCorrupt.getFileName().toString(), + filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue)); } long checksumAfterCorruption; long actualChecksumAfterCorruption; @@ -221,7 +226,8 @@ public static void corruptFile(Path file, Logger logger) throws IOException { msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]"); msg.append(" after: [").append(checksumAfterCorruption).append("]"); msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]"); - msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString())); + msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ") + .append(dir.fileLength(fileToCorrupt.getFileName().toString())); logger.debug("{}", msg.toString()); assumeTrue("Checksum collision - " + msg.toString(), checksumAfterCorruption != checksumBeforeCorruption // collision @@ -243,7 +249,8 @@ public void testLoadState() throws IOException { Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME)); for (int j = 0; j < numStates; j++) { format.write(meta.get(j), dirs[i]); - if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily need here.... + if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily + // need here.... Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j + ".st"); corruptedFiles.add(file); MetaDataStateFormatTests.corruptFile(file, logger); @@ -320,7 +327,8 @@ private MetaData randomMeta() throws IOException { private IndexMetaData.Builder indexBuilder(String index) throws IOException { return IndexMetaData.builder(index) - .settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5))); + .settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5))); } @@ -471,7 +479,8 @@ public long addDummyFiles(String prefix, Path... 
paths) throws IOException { } else { realId = Math.max(realId, id); } - try (OutputStream stream = Files.newOutputStream(stateDir.resolve(actualPrefix + id + MetaDataStateFormat.STATE_FILE_EXTENSION))) { + try (OutputStream stream = + Files.newOutputStream(stateDir.resolve(actualPrefix + id + MetaDataStateFormat.STATE_FILE_EXTENSION))) { stream.write(0); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java index f2bacc154bf46..81d1442727de7 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java @@ -63,7 +63,8 @@ public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { String node2 = nodeNames.get(1); String index = "index"; - assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1))); + assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0) + .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1))); index(index, "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); ensureGreen(); assertIndexInMetaState(node1, index); @@ -72,7 +73,8 @@ public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { assertIndexInMetaState(masterNode, index); logger.debug("relocating index..."); - client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get(); + client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder() + .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get(); client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get(); ensureGreen(); assertIndexDirectoryDeleted(node1, resolveIndex); @@ -109,11 +111,13 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { .endObject()).get(); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("_doc").get(); - assertNotNull(((Map) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))).get("integer_field")); + assertNotNull(((Map) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))) + .get("integer_field")); // make sure it was also written on red node although index is closed ImmutableOpenMap indicesMetaData = getIndicesMetaDataOnNode(dataNode); - assertNotNull(((Map) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))).get("integer_field")); + assertNotNull(((Map) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))) + .get("integer_field")); assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE)); /* Try the same and see if this also works if node was just restarted. 
@@ -134,11 +138,13 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { .endObject()).get(); getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("_doc").get(); - assertNotNull(((Map) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))).get("float_field")); + assertNotNull(((Map) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))) + .get("float_field")); // make sure it was also written on red node although index is closed indicesMetaData = getIndicesMetaDataOnNode(dataNode); - assertNotNull(((Map) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))).get("float_field")); + assertNotNull(((Map) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))) + .get("float_field")); assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE)); // finally check that meta data is also written of index opened again @@ -152,7 +158,8 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { protected void assertIndexDirectoryDeleted(final String nodeName, final Index index) throws Exception { assertBusy(() -> { logger.info("checking if index directory exists..."); - assertFalse("Expecting index directory of " + index + " to be deleted from node " + nodeName, indexDirectoryExists(nodeName, index)); + assertFalse("Expecting index directory of " + index + " to be deleted from node " + nodeName, + indexDirectoryExists(nodeName, index)); } ); } @@ -161,7 +168,8 @@ protected void assertIndexInMetaState(final String nodeName, final String indexN assertBusy(() -> { logger.info("checking if meta state exists..."); try { - assertTrue("Expecting meta state of index " + indexName + " to be on node " + nodeName, getIndicesMetaDataOnNode(nodeName).containsKey(indexName)); + assertTrue("Expecting meta state of index " + indexName + " to be on node " + nodeName, + getIndicesMetaDataOnNode(nodeName).containsKey(indexName)); } catch (Exception e) { logger.info("failed to load meta state", e); fail("could not load meta state"); diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index d0bf02e3c4e1f..938c28fe855c9 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -38,7 +38,7 @@ public class MetaStateServiceTests extends ESTestCase { public void testWriteLoadIndex() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build(); metaStateService.writeIndex("test_write", index); @@ -48,14 +48,14 @@ public void testWriteLoadIndex() throws Exception { public void testLoadMissingIndex() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); assertThat(metaStateService.loadIndexState(new Index("test1", "test1UUID")), nullValue()); } } public void testWriteLoadGlobal() 
throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); MetaData metaData = MetaData.builder() .persistentSettings(Settings.builder().put("test1", "value1").build()) @@ -67,7 +67,7 @@ public void testWriteLoadGlobal() throws Exception { public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); MetaData metaData = MetaData.builder() .persistentSettings(Settings.builder().put("test1", "value1").build()) @@ -83,7 +83,7 @@ public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { public void testLoadGlobal() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env, xContentRegistry()); + MetaStateService metaStateService = new MetaStateService(env, xContentRegistry()); IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build(); MetaData metaData = MetaData.builder() diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index ae643b7f094c2..e6fc0c535dfc4 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -118,7 +118,8 @@ public void testNoAllocationFound() { } /** - * Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore unassigned. + * Tests that when the node returns data with a shard allocation id that does not match the active allocation ids, the shard is + * moved to ignore unassigned.
*/ public void testNoMatchingAllocationIdFound() { RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "id2"); @@ -155,9 +156,11 @@ public void testShardLockObtainFailedException() { assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId())); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node1.getId())); // check that allocation id is reused - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1")); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo("allocId1")); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } @@ -177,9 +180,11 @@ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() { assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId())); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node2.getId())); // check that allocation id is reused - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo(allocId2)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo(allocId2)); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } @@ -187,16 +192,18 @@ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() { * Tests that when there is a node to allocate the shard to, it will be allocated to it. 
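 * <p>
 * Sketch of the decider wiring after this change (names appear in the hunks below; the
 * {@code Settings} constructor argument is gone):
 * <pre>{@code
 * AllocationDeciders deciders = new AllocationDeciders(Arrays.asList(
 *         new TestAllocateDecision(Decision.YES)));  // previously new AllocationDeciders(Settings.EMPTY, ...)
 * }</pre>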
*/ public void testFoundAllocationAndAllocating() { - final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED), - "allocId1"); + final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), + randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED), "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean()); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId())); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node1.getId())); // check that allocation id is reused - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1")); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), + equalTo("allocId1")); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } @@ -207,7 +214,7 @@ public void testFoundAllocationAndAllocating() { */ public void testForceAllocatePrimary() { testAllocator.addData(node1, "allocId1", randomBoolean()); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList( + AllocationDeciders deciders = new AllocationDeciders(Arrays.asList( // since the deciders return a NO decision for allocating a shard (due to the guaranteed NO decision from the second decider), // the allocator will see if it can force assign the primary, where the decision will be YES new TestAllocateDecision(randomBoolean() ? 
Decision.YES : Decision.NO), getNoDeciderThatAllowsForceAllocate() @@ -228,7 +235,7 @@ public void testForceAllocatePrimary() { public void testDontAllocateOnNoOrThrottleForceAllocationDecision() { testAllocator.addData(node1, "allocId1", randomBoolean()); boolean forceDecisionNo = randomBoolean(); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList( + AllocationDeciders deciders = new AllocationDeciders(Arrays.asList( // since both deciders here return a NO decision for allocating a shard, // the allocator will see if it can force assign the primary, where the decision will be either NO or THROTTLE, // so the shard will remain un-initialized @@ -251,7 +258,7 @@ public void testDontAllocateOnNoOrThrottleForceAllocationDecision() { */ public void testDontForceAllocateOnThrottleDecision() { testAllocator.addData(node1, "allocId1", randomBoolean()); - AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, Arrays.asList( + AllocationDeciders deciders = new AllocationDeciders(Arrays.asList( // since we have a NO decision for allocating a shard (because the second decider returns a NO decision), // the allocator will see if it can force assign the primary, and in this case, // the TestAllocateDecision's decision for force allocating is to THROTTLE (using @@ -284,7 +291,8 @@ public void testPreferAllocatingPreviousPrimary() { assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); DiscoveryNode allocatedNode = node1HasPrimaryShard ? node1 : node2; - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(allocatedNode.getId())); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(allocatedNode.getId())); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } @@ -315,7 +323,8 @@ public void testFoundAllocationButNoDecider() { assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId())); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), + equalTo(node1.getId())); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); } @@ -458,10 +467,6 @@ class TestAllocator extends PrimaryShardAllocator { private Map data; - TestAllocator() { - super(Settings.EMPTY); - } - public TestAllocator clear() { data = null; return this; @@ -475,12 +480,14 @@ public TestAllocator addData(DiscoveryNode node, String allocationId, boolean pr if (data == null) { data = new HashMap<>(); } - data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, allocationId, primary, storeException)); + data.put(node, + new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, allocationId, primary, storeException)); return this; } @Override - protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { + protected AsyncShardFetch.FetchResult + fetchData(ShardRouting shard, RoutingAllocation allocation) { return new 
AsyncShardFetch.FetchResult<>(shardId, data, Collections.emptySet()); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java index 3de96448a4a3e..d5ce6644f0ec5 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java @@ -111,7 +111,8 @@ public void testPriorityComparatorSort() { for (int i = 0; i < indices.length; i++) { if (frequently()) { - indices[i] = new IndexMeta("idx_2015_04_" + String.format(Locale.ROOT, "%02d", i), randomIntBetween(1, 1000), randomIntBetween(1, 10000)); + indices[i] = new IndexMeta("idx_2015_04_" + String.format(Locale.ROOT, "%02d", i), randomIntBetween(1, 1000), + randomIntBetween(1, 10000)); } else { // sometimes just use defaults indices[i] = new IndexMeta("idx_2015_04_" + String.format(Locale.ROOT, "%02d", i)); } @@ -121,7 +122,8 @@ public void testPriorityComparatorSort() { for (int i = 0; i < numShards; i++) { IndexMeta indexMeta = randomFrom(indices); shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null, - randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); + randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), + "foobar"))); } shards.sort(new PriorityComparator() { @Override @@ -138,13 +140,16 @@ protected Settings getIndexSettings(Index index) { if (prevMeta.priority == currentMeta.priority) { if (prevMeta.creationDate == currentMeta.creationDate) { if (prevMeta.name.equals(currentMeta.name) == false) { - assertTrue("indexName mismatch, expected:" + currentMeta.name + " after " + prevMeta.name + " " + prevMeta.name.compareTo(currentMeta.name), prevMeta.name.compareTo(currentMeta.name) > 0); + assertTrue("indexName mismatch, expected:" + currentMeta.name + " after " + prevMeta.name + " " + + prevMeta.name.compareTo(currentMeta.name), prevMeta.name.compareTo(currentMeta.name) > 0); } } else { - assertTrue("creationDate mismatch, expected:" + currentMeta.creationDate + " after " + prevMeta.creationDate, prevMeta.creationDate > currentMeta.creationDate); + assertTrue("creationDate mismatch, expected:" + currentMeta.creationDate + " after " + prevMeta.creationDate, + prevMeta.creationDate > currentMeta.creationDate); } } else { - assertTrue("priority mismatch, expected:" + currentMeta.priority + " after " + prevMeta.priority, prevMeta.priority > currentMeta.priority); + assertTrue("priority mismatch, expected:" + currentMeta.priority + " after " + prevMeta.priority, + prevMeta.priority > currentMeta.priority); } } previous = routing; diff --git a/server/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 3abaff3295924..b16e2e2f6c505 100644 --- a/server/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -75,12 +75,14 @@ public void doAfterNodes(int numNodes, final Client activeClient) throws Excepti if (numNodes == 1) { assertTrue(awaitBusy(() -> { logger.info("--> running cluster_health (wait for the shards to startup)"); - ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 
2)).actionGet(); + ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest() + .waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW; }, 30, TimeUnit.SECONDS)); logger.info("--> one node is closed -- index 1 document into the remaining nodes"); - activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get(); + activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3") + .endObject()).get(); assertNoFailures(activeClient.admin().indices().prepareRefresh().get()); for (int i = 0; i < 10; i++) { assertHitCount(activeClient.prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 3L); diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index b0b6c35f92a1a..a8f2cfab2b79b 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -145,7 +145,8 @@ private Map assertAndCapturePrimaryTerms(Map pre } else { assertThat("number of terms changed for index [" + index + "]", current.length, equalTo(previous.length)); for (int shard = 0; shard < current.length; shard++) { - assertThat("primary term didn't increase for [" + index + "][" + shard + "]", current[shard], greaterThan(previous[shard])); + assertThat("primary term didn't increase for [" + index + "][" + shard + "]", current[shard], + greaterThan(previous[shard])); } result.put(index, current); } @@ -158,7 +159,8 @@ public void testSingleNodeNoFlush() throws Exception { internalCluster().startNode(); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("properties").startObject("field").field("type", "text").endObject().startObject("num").field("type", "integer").endObject().endObject() + .startObject("properties").startObject("field").field("type", "text").endObject().startObject("num").field("type", "integer") + .endObject().endObject() .endObject().endObject()); // note: default replica settings are tied to #data nodes-1 which is 0 here. We can do with 1 in this test. 
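        // Illustrative note (assumption, not from this change): with a single data node the
        // default of one replica per shard could never be assigned, so single-node tests like
        // this one either create indices with zero replicas or wait only for yellow health.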
         int numberOfShards = numberOfShards();
@@ -243,9 +245,11 @@ public void testSingleNodeNoFlush() throws Exception {
     public void testSingleNodeWithFlush() throws Exception {
         internalCluster().startNode();
-        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute()
+            .actionGet();
         flush();
-        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute()
+            .actionGet();
         refresh();

         assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 2);
@@ -280,9 +284,11 @@ public void testTwoNodeFirstNodeCleared() throws Exception {
         final String firstNode = internalCluster().startNode();
         internalCluster().startNode();
-        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute()
+            .actionGet();
         flush();
-        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute()
+            .actionGet();
         refresh();

         logger.info("Running Cluster Health (wait for the shards to startup)");
@@ -321,9 +327,11 @@ public void testLatestVersionLoaded() throws Exception {
         internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());

         assertAcked(client().admin().indices().prepareCreate("test"));
-        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute()
+            .actionGet();
         client().admin().indices().prepareFlush().execute().actionGet();
-        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute()
+            .actionGet();
         client().admin().indices().prepareRefresh().execute().actionGet();

         logger.info("--> running cluster_health (wait for the shards to startup)");
@@ -340,7 +348,8 @@ public void testLatestVersionLoaded() throws Exception {
         internalCluster().stopRandomDataNode();

         logger.info("--> one node is closed - start indexing data into the second one");
-        client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute()
+            .actionGet();
         // TODO: remove once refresh doesn't fail immediately if there a master block:
         // https://github.com/elastic/elasticsearch/issues/9997
         // client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
@@ -361,7 +370,8 @@ public void testLatestVersionLoaded() throws Exception {
                 .startObject("field2").field("type", "keyword").field("store", true).endObject()
                 .endObject().endObject().endObject())
             .execute().actionGet();
-        client().admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet();
+        client().admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute()
+            .actionGet();

         logger.info("--> stopping the second node");
         internalCluster().stopRandomDataNode();
@@ -476,10 +486,13 @@ public Settings onNodeStopped(String nodeName) throws Exception {
             assertThat("bytes should have been recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
             assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L));
             // we have to recover the segments file since we commit the translog ID on engine startup
-            assertThat("all existing files should be reused, byte count mismatch", recoveryState.getIndex().reusedBytes(), equalTo(reused));
+            assertThat("all existing files should be reused, byte count mismatch", recoveryState.getIndex().reusedBytes(),
+                equalTo(reused));
             assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes() - recovered));
-            assertThat("the segment from the last round of indexing should be recovered", recoveryState.getIndex().recoveredFileCount(), equalTo(filesRecovered));
-            assertThat("all existing files should be reused, file count mismatch", recoveryState.getIndex().reusedFileCount(), equalTo(filesReused));
+            assertThat("the segment from the last round of indexing should be recovered", recoveryState.getIndex().recoveredFileCount(),
+                equalTo(filesRecovered));
+            assertThat("all existing files should be reused, file count mismatch", recoveryState.getIndex().reusedFileCount(),
+                equalTo(filesReused));
             assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount() - filesRecovered));
             assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0));
             assertThat("no translog ops should be recovered", recoveryState.getTranslog().recoveredOperations(), equalTo(0));
@@ -498,12 +511,14 @@ public void testRecoveryDifferentNodeOrderStartup() throws Exception {
         // we need different data paths so we make sure we start the second node fresh
         final Path pathNode1 = createTempDir();
-        final String node_1 = internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode1).build());
+        final String node_1 =
+            internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode1).build());

         client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();

         final Path pathNode2 = createTempDir();
-        final String node_2 = internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode2).build());
+        final String node_2 =
+            internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), pathNode2).build());

         ensureGreen();
         Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
index f53c8da2f2d96..d30f7eafce4a8 100644
--- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java
@@ -95,7 +95,8 @@ public void testNoAsyncFetchData() {
      * the shard allocator to allocate it. There isn't a copy around to find anyhow.
      */
     public void testNoAsyncFetchOnIndexCreation() {
-        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, UnassignedInfo.Reason.INDEX_CREATED);
+        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY,
+            UnassignedInfo.Reason.INDEX_CREATED);
         testAllocator.clean();
         testAllocator.allocateUnassigned(allocation);
         assertThat(testAllocator.getFetchDataCalledAndClean(), equalTo(false));
@@ -108,7 +109,8 @@ public void testNoAsyncFetchOnIndexCreation() {
      * and find a better copy for the shard.
      */
     public void testAsyncFetchOnAnythingButIndexCreation() {
-        UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED)));
+        UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(),
+            EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED)));
         RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason);
         testAllocator.clean();
         testAllocator.allocateUnassigned(allocation);
@@ -125,7 +127,8 @@ public void testSimpleFullMatchAllocation() {
             .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
         testAllocator.allocateUnassigned(allocation);
         assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
+        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
+            equalTo(nodeToMatch.getId()));
     }

     /**
@@ -138,7 +141,8 @@ public void testSyncIdMatch() {
             .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM" ,MIN_SUPPORTED_LUCENE_VERSION));
         testAllocator.allocateUnassigned(allocation);
         assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
+        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
+            equalTo(nodeToMatch.getId()));
     }

     /**
@@ -151,7 +155,8 @@ public void testFileChecksumMatch() {
             .addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
         testAllocator.allocateUnassigned(allocation);
         assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
+        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
+            equalTo(nodeToMatch.getId()));
     }

     /**
@@ -198,7 +203,8 @@ public void testNoMatchingFilesForReplicaOnAnyNode() {
      * moves to the ignore unassigned list.
      */
    public void testNoOrThrottleDecidersRemainsInUnassigned() {
-        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders());
+        RoutingAllocation allocation =
+            onePrimaryOnNode1And1Replica(randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders());
         testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
             .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
         testAllocator.allocateUnassigned(allocation);
@@ -211,11 +217,11 @@ public void testNoOrThrottleDecidersRemainsInUnassigned() {
      * to wait till throttling on it is done.
      */
     public void testThrottleWhenAllocatingToMatchingNode() {
-        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(new AllocationDeciders(Settings.EMPTY,
+        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(new AllocationDeciders(
             Arrays.asList(new TestAllocateDecision(Decision.YES),
                 new SameShardAllocationDecider(
                     Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
-                new AllocationDecider(Settings.EMPTY) {
+                new AllocationDecider() {
                     @Override
                     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                         if (node.node().equals(node2)) {
@@ -246,12 +252,14 @@ public void testDelayedAllocation() {
         assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));

         allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
-            Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
+            Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(),
+                TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
         testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
         testAllocator.allocateUnassigned(allocation);
         assertThat(allocation.routingNodesChanged(), equalTo(true));
         assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
-        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
+        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
+            equalTo(node2.getId()));
     }

     public void testCancelRecoveryBetterSyncId() {
@@ -330,7 +338,9 @@ private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDecid
             .add(IndexRoutingTable.builder(shardId.getIndex())
                 .addIndexShard(new IndexShardRoutingTable.Builder(shardId)
                     .addShard(primaryShard)
-                    .addShard(TestShardRouting.newShardRouting(shardId, node2.getId(), null, false, ShardRoutingState.INITIALIZING, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
+                    .addShard(TestShardRouting.newShardRouting(shardId, node2.getId(), null, false,
+                        ShardRoutingState.INITIALIZING,
+                        new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
                     .build())
             )
             .build();
@@ -346,10 +356,6 @@ class TestAllocator extends ReplicaShardAllocator {
         private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> data = null;
         private AtomicBoolean fetchDataCalled = new AtomicBoolean(false);

-        TestAllocator() {
-            super(Settings.EMPTY);
-        }
-
         public void clean() {
             data = null;
         }
@@ -380,13 +386,15 @@ public TestAllocator addData(DiscoveryNode node, String syncId, StoreFileMetaDat
         }

         @Override
-        protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation) {
+        protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>
+            fetchData(ShardRouting shard, RoutingAllocation allocation) {
             fetchDataCalled.set(true);
             Map<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> tData = null;
             if (data != null) {
                 tData = new HashMap<>();
                 for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> entry : data.entrySet()) {
-                    tData.put(entry.getKey(), new TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData(entry.getKey(), entry.getValue()));
+                    tData.put(entry.getKey(),
+                        new TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData(entry.getKey(), entry.getValue()));
                 }
             }
             return new AsyncShardFetch.FetchResult<>(shardId, tData, Collections.emptySet());
diff --git a/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
index 81be3057b0161..847c1801510a1 100644
--- a/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
+++ b/server/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java
@@ -87,7 +87,8 @@ public static void testCase(Settings indexSettings, Runnable restartCluster, Log
         // Disable allocations while we are closing nodes
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
-            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)).get();
+            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
+                EnableAllocationDecider.Allocation.NONE)).get();
         logger.info("--> full cluster restart");
         restartCluster.run();
@@ -102,7 +103,8 @@ public static void testCase(Settings indexSettings, Runnable restartCluster, Log
         logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time");
         // Disable allocations while we are closing nodes
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(
-            Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE))
+            Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
+                EnableAllocationDecider.Allocation.NONE))
             .get();
         logger.info("--> full cluster restart");
         restartCluster.run();
@@ -138,7 +140,8 @@ public static void testCase(Settings indexSettings, Runnable restartCluster, Log
             } else {
                 if (useSyncIds && !recoveryState.getPrimary()) {
                     logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}",
-                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(),
+                        recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(),
+                        recoveryState.getTargetNode().getName(),
                         recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
                 }
                 assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L));
diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java
index e9eb5d8b83d2e..c3c2a8176e3eb 100644
--- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java
@@ -21,6 +21,7 @@
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.core.LogEvent;
 import org.apache.logging.log4j.core.appender.AbstractAppender;
 import org.apache.logging.log4j.core.filter.RegexFilter;
@@ -70,7 +71,7 @@ public boolean ignoreExceptions() {
     public void testUpdateAutoThrottleSettings() throws Exception {
         MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
         mockAppender.start();
-        final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
+        final Logger settingsLogger = LogManager.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
         Loggers.addAppender(settingsLogger, mockAppender);
         Loggers.setLevel(settingsLogger, Level.TRACE);
         try {
@@ -101,7 +102,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception {
         MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
         mockAppender.start();
-        final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
+        final Logger settingsLogger = LogManager.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
         Loggers.addAppender(settingsLogger, mockAppender);
         Loggers.setLevel(settingsLogger, Level.TRACE);
         try {
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index efbce034de47e..379043fa93954 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -21,7 +21,6 @@
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
-
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -2368,7 +2367,7 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce
         MockAppender mockAppender = new MockAppender("testIndexWriterIFDInfoStream");
         mockAppender.start();

-        final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD");
+        final Logger iwIFDLogger = LogManager.getLogger("org.elasticsearch.index.engine.Engine.IFD");
         Loggers.addAppender(iwIFDLogger, mockAppender);
         Loggers.setLevel(iwIFDLogger, Level.DEBUG);
@@ -5049,6 +5048,60 @@ public void testLastRefreshCheckpoint() throws Exception {
         assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint()));
     }

+    public void testLuceneSnapshotRefreshesOnlyOnce() throws Exception {
+        final MapperService mapperService = createMapperService("test");
+        final long maxSeqNo = randomLongBetween(10, 50);
+        final AtomicLong refreshCounter = new AtomicLong();
+        try (Store store = createStore();
+             InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
+                 null,
+                 new ReferenceManager.RefreshListener() {
+                     @Override
+                     public void beforeRefresh() throws IOException {
+                         refreshCounter.incrementAndGet();
+                     }
+
+                     @Override
+                     public void afterRefresh(boolean didRefresh) throws IOException {
+
+                     }
+                 }, null, () -> SequenceNumbers.NO_OPS_PERFORMED))) {
+            for (long seqNo = 0; seqNo <= maxSeqNo; seqNo++) {
+                final ParsedDocument doc = testParsedDocument("id_" + seqNo, null, testDocumentWithTextField("test"),
+                    new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+                engine.index(replicaIndexForDoc(doc, 1, seqNo, randomBoolean()));
+            }
+
+            final long initialRefreshCount = refreshCounter.get();
+            final Thread[] snapshotThreads = new Thread[between(1, 3)];
+            CountDownLatch latch = new CountDownLatch(1);
+            for (int i = 0; i < snapshotThreads.length; i++) {
+                final long min = randomLongBetween(0, maxSeqNo - 5);
+                final long max = randomLongBetween(min, maxSeqNo);
+                snapshotThreads[i] = new Thread(new AbstractRunnable() {
+                    @Override
+                    public void onFailure(Exception e) {
+                        throw new AssertionError(e);
+                    }
+
+                    @Override
+                    protected void doRun() throws Exception {
+                        latch.await();
+                        Translog.Snapshot changes = engine.newChangesSnapshot("test", mapperService, min, max, true);
+                        changes.close();
+                    }
+                });
+                snapshotThreads[i].start();
+            }
+            latch.countDown();
+            for (Thread thread : snapshotThreads) {
+                thread.join();
+            }
+            assertThat(refreshCounter.get(), equalTo(initialRefreshCount + 1L));
+            assertThat(engine.lastRefreshedCheckpoint(), equalTo(maxSeqNo));
+        }
+    }
+
     public void testAcquireSearcherOnClosingEngine() throws Exception {
         engine.close();
         expectThrows(AlreadyClosedException.class, () -> engine.acquireSearcher("test"));
diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java
index d3f0db6204d4e..80f3285dfb6d2 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java
@@ -294,4 +294,14 @@ private List<Translog.Operation> drainAll(Translog.Snapshot snapshot) throws IOE
         }
         return operations;
     }
+
+    public void testOverFlow() throws Exception {
+        long fromSeqNo = randomLongBetween(0, 5);
+        long toSeqNo = randomLongBetween(Long.MAX_VALUE - 5, Long.MAX_VALUE);
+        try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
+            IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
+            assertThat(error.getMessage(),
+                containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found"));
+        }
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
index cc09ae16c0578..513ba039c955e 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java
@@ -69,7 +69,8 @@ public void testDefaultConfiguration() throws IOException {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
@@ -101,7 +102,8 @@ public void testCompletionAnalyzerSettings() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
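One addition above deserves a closer look: testLuceneSnapshotRefreshesOnlyOnce asserts that several concurrent changes snapshots cause exactly one engine refresh, and it coordinates its threads with a single-use start gate. Reduced to plain JDK types, the gating idiom looks like this (a sketch; the thread body stands in for engine.newChangesSnapshot, and the thread count is illustrative):

    import java.util.concurrent.CountDownLatch;

    public final class StartGateSketch {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch startGate = new CountDownLatch(1);
            Thread[] workers = new Thread[3]; // the test picks between(1, 3) threads
            for (int i = 0; i < workers.length; i++) {
                workers[i] = new Thread(() -> {
                    try {
                        startGate.await(); // park until every worker has been started
                        // ... open and close a changes snapshot here ...
                    } catch (InterruptedException e) {
                        throw new AssertionError(e);
                    }
                });
                workers[i].start();
            }
            startGate.countDown(); // release all workers at once so they race the same refresh
            for (Thread worker : workers) {
                worker.join();
            }
            // Despite the concurrent snapshots, the engine should refresh exactly once, hence
            // assertThat(refreshCounter.get(), equalTo(initialRefreshCount + 1L)) in the test.
        }
    }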
@@ -135,7 +137,8 @@ public void testTypeParsing() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
@@ -159,11 +162,12 @@ public void testParsingMinimal() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .field("completion", "suggestion")
                 .endObject()),
@@ -179,11 +183,12 @@ public void testParsingFailure() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         MapperParsingException e = expectThrows(MapperParsingException.class, () ->
-            defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+                BytesReference.bytes(XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", 1.0)
                     .endObject()),
@@ -212,10 +217,11 @@ public void testKeywordWithSubCompletionAndContext() throws Exception {
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .array("keywordfield", "key1", "key2", "key3")
                 .endObject()),
@@ -266,10 +272,11 @@ public void testCompletionWithContextAndSubCompletion() throws Exception {
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("suggest")
                 .array("input","timmy","starbucks")
@@ -321,10 +328,11 @@ public void testCompletionWithContextAndSubCompletionIndexByPath() throws Except
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .array("suggest", "timmy","starbucks")
                 .array("cat","cafe","food")
@@ -357,10 +365,11 @@ public void testKeywordWithSubCompletionAndStringInsert() throws Exception {
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .field("geofield", "drm3btev3e86")//"41.12,-71.34"
                 .endObject()),
@@ -387,10 +396,11 @@ public void testCompletionTypeWithSubCompletionFieldAndStringInsert() throws Exc
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .field("suggest", "suggestion")
                 .endObject()),
@@ -418,10 +428,11 @@ public void testCompletionTypeWithSubCompletionFieldAndObjectInsert() throws Exc
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .array("input","New York", "NY")
@@ -455,10 +466,11 @@ public void testCompletionTypeWithSubKeywordFieldAndObjectInsert() throws Except
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .array("input","New York", "NY")
@@ -494,10 +506,11 @@ public void testCompletionTypeWithSubKeywordFieldAndStringInsert() throws Except
             .endObject().endObject()
         );

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .field("completion", "suggestion")
                 .endObject()),
@@ -520,11 +533,12 @@ public void testParsingMultiValued() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .array("completion", "suggestion1", "suggestion2")
                 .endObject()),
@@ -543,11 +557,12 @@ public void testParsingWithWeight() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .field("input", "suggestion")
@@ -568,11 +583,12 @@ public void testParsingMultiValueWithWeight() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .array("input", "suggestion1", "suggestion2", "suggestion3")
@@ -635,11 +651,12 @@ public void testParsingFull() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startArray("completion")
                 .startObject()
@@ -672,11 +689,12 @@ public void testParsingMixed() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
-        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .startArray("completion")
                 .startObject()
@@ -712,10 +730,11 @@ public void testNonContextEnabledParsingWithContexts() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         try {
-            defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+                BytesReference.bytes(XContentFactory.jsonBuilder()
                     .startObject()
                     .startObject("field1")
                     .field("input", "suggestion1")
@@ -739,13 +758,14 @@ public void testFieldValueValidation() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         CharsRefBuilder charsRefBuilder = new CharsRefBuilder();
         charsRefBuilder.append("sugg");
         charsRefBuilder.setCharAt(2, '\u001F');
         try {
-            defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+                BytesReference.bytes(XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()),
@@ -759,8 +779,8 @@ public void testFieldValueValidation() throws Exception {
         charsRefBuilder.setCharAt(2, '\u0000');
         try {
-            defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+                BytesReference.bytes(XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()),
@@ -774,8 +794,8 @@ public void testFieldValueValidation() throws Exception {
         charsRefBuilder.setCharAt(2, '\u001E');
         try {
-            defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-                .bytes(XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+                BytesReference.bytes(XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()),
@@ -788,8 +808,8 @@ public void testFieldValueValidation() throws Exception {
         }

         // empty inputs are ignored
-        ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .array("completion", " ", "")
                 .endObject()),
@@ -801,8 +821,8 @@ public void testFieldValueValidation() throws Exception {
         assertThat(ignoredFields.stringValue(), equalTo("completion"));

         // null inputs are ignored
-        ParsedDocument nullDoc = defaultMapper.parse(SourceToParse.source("test", "type1", "1", BytesReference
-            .bytes(XContentFactory.jsonBuilder()
+        ParsedDocument nullDoc = defaultMapper.parse(SourceToParse.source("test", "type1", "1",
+            BytesReference.bytes(XContentFactory.jsonBuilder()
                 .startObject()
                 .nullField("completion")
                 .endObject()),
@@ -819,7 +839,8 @@ public void testPrefixQueryType() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
         Query prefixQuery = completionFieldMapper.fieldType().prefixQuery(new BytesRef("co"));
@@ -833,7 +854,8 @@ public void testFuzzyQueryType() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
         Query prefixQuery = completionFieldMapper.fieldType().fuzzyQuery("co",
@@ -850,7 +872,8 @@ public void testRegexQueryType() throws Exception {
             .endObject().endObject()
             .endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         Mapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
         Query prefixQuery = completionFieldMapper.fieldType()
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldTypeTests.java
index 587ac2e0605cc..1386d2e29a0fd 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldTypeTests.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.index.mapper;

-import org.elasticsearch.index.mapper.CompletionFieldMapper;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.search.suggest.completion.context.ContextBuilder;
 import org.elasticsearch.search.suggest.completion.context.ContextMappings;
 import org.junit.Before;
@@ -52,7 +50,8 @@ public void modify(MappedFieldType ft) {
             @Override
             public void modify(MappedFieldType ft) {
                 CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft;
-                ContextMappings contextMappings = new ContextMappings(Arrays.asList(ContextBuilder.category("foo").build(), ContextBuilder.geo("geo").build()));
+                ContextMappings contextMappings = new ContextMappings(Arrays.asList(ContextBuilder.category("foo").build(),
+                    ContextBuilder.geo("geo").build()));
                 cft.setContextMappings(contextMappings);
             }
         });
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
index c24b023a4d54b..7936b97fad4f2 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
@@ -145,7 +145,8 @@ public void testCopyToFieldsInnerObjectParsing() throws Exception {
             .endObject().endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));
         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
@@ -172,7 +173,8 @@ public void testCopyToDynamicInnerObjectParsing() throws Exception {
             .endObject()
             .endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
@@ -209,7 +211,8 @@ public void testCopyToDynamicInnerInnerObjectParsing() throws Exception {
             .endObject()
             .endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
@@ -239,7 +242,8 @@ public void testCopyToStrictDynamicInnerObjectParsing() throws Exception {
             .endObject()
             .endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
@@ -273,7 +277,8 @@ public void testCopyToInnerStrictDynamicInnerObjectParsing() throws Exception {
             .endObject()
             .endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
@@ -283,7 +288,8 @@ public void testCopyToInnerStrictDynamicInnerObjectParsing() throws Exception {
             docMapper.parse(SourceToParse.source("test", "type1", "1", json, XContentType.JSON)).rootDoc();
             fail();
         } catch (MapperParsingException ex) {
-            assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [field] within [very.far] is not allowed"));
+            assertThat(ex.getMessage(),
+                startsWith("mapping set to strict, dynamic introduction of [field] within [very.far] is not allowed"));
         }
     }
@@ -307,12 +313,14 @@ public void testCopyToFieldMerge() throws Exception {
             .endObject().endObject().endObject());

         MapperService mapperService = createIndex("test").mapperService();
-        DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore), MapperService.MergeReason.MAPPING_UPDATE);
+        DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore),
+            MapperService.MergeReason.MAPPING_UPDATE);

         FieldMapper fieldMapperBefore = (FieldMapper) docMapperBefore.mappers().getMapper("copy_test");
         assertEquals(Arrays.asList("foo", "bar"), fieldMapperBefore.copyTo().copyToFields());

-        DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter), MapperService.MergeReason.MAPPING_UPDATE);
+        DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter),
+            MapperService.MergeReason.MAPPING_UPDATE);

         FieldMapper fieldMapperAfter = (FieldMapper) docMapperAfter.mappers().getMapper("copy_test");
         assertEquals(Arrays.asList("baz", "bar"), fieldMapperAfter.copyTo().copyToFields());
@@ -385,7 +393,8 @@ public void testCopyToNestedField() throws Exception {
             .endArray()
             .endObject();

-        ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(jsonDoc), XContentType.JSON));
+        ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1",
+            BytesReference.bytes(jsonDoc), XContentType.JSON));
         assertEquals(6, doc.docs().size());

         Document nested = doc.docs().get(0);
@@ -544,7 +553,8 @@ public void testCopyToDynamicNestedObjectParsing() throws Exception {
             .endObject()
             .endObject().endObject());

-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type1", new CompressedXContent(mapping));

         BytesReference json = BytesReference.bytes(jsonBuilder().startObject()
             .field("copy_test", "foo")
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
index d16bdc444e6e7..97921f57ca592 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
@@ -77,7 +77,7 @@ public void testDefaults() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointIndexDimensionCount());
         assertEquals(8, pointField.fieldType().pointNumBytes());
         assertFalse(pointField.fieldType().stored());
         assertEquals(1457654400000L, pointField.numericValue().longValue());
@@ -128,7 +128,7 @@ public void testNoDocValues() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(1, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointIndexDimensionCount());
     }

     public void testStore() throws Exception {
@@ -150,7 +150,7 @@ public void testStore() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(3, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointIndexDimensionCount());
         IndexableField dvField = fields[1];
         assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
         IndexableField storedField = fields[2];
@@ -304,7 +304,7 @@ public void testNullValue() throws IOException {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointIndexDimensionCount());
         assertEquals(8, pointField.fieldType().pointNumBytes());
         assertFalse(pointField.fieldType().stored());
         assertEquals(1457654400000L, pointField.numericValue().longValue());
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
index 5a46b9a889fd0..b3bdd9f33cf18 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
@@ -135,19 +135,22 @@ public void testUnexpectedFieldMappingType() throws Exception {
             .endObject().endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
         {
-            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("foo", true).endObject());
+            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("foo", true)
+                .endObject());
             MapperException exception = expectThrows(MapperException.class,
                 () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)));
             assertThat(exception.getMessage(), containsString("failed to parse field [foo] of type [long]"));
         }
         {
-            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("bar", "bar").endObject());
+            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("bar", "bar")
+                .endObject());
             MapperException exception = expectThrows(MapperException.class,
                 () -> mapper.parse(SourceToParse.source("test", "type", "2", bytes, XContentType.JSON)));
             assertThat(exception.getMessage(), containsString("failed to parse field [bar] of type [boolean]"));
         }
         {
-            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("geo", 123).endObject());
+            BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("geo", 123)
+                .endObject());
             MapperException exception = expectThrows(MapperException.class,
                 () -> mapper.parse(SourceToParse.source("test", "type", "2", bytes, XContentType.JSON)));
             assertThat(exception.getMessage(), containsString("failed to parse field [geo] of type [geo_shape]"));
@@ -222,7 +225,8 @@ public void testNestedHaveIdAndTypeFields() throws Exception {
         doc.endObject();

         // Verify in the case where only a single type is allowed that the _id field is added to nested documents:
-        ParsedDocument result = mapper.parse(SourceToParse.source("index2", "type", "1", BytesReference.bytes(doc), XContentType.JSON));
+        ParsedDocument result = mapper.parse(SourceToParse.source("index2", "type", "1",
+            BytesReference.bytes(doc), XContentType.JSON));
         assertEquals(2, result.docs().size());
         // Nested document:
         assertNotNull(result.docs().get(0).getField(IdFieldMapper.NAME));
@@ -463,7 +467,8 @@ public void testDynamicLongArray() throws Exception {
     public void testDynamicFalseLongArray() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -478,7 +483,8 @@ public void testDynamicFalseLongArray() throws Exception {
     public void testDynamicStrictLongArray() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -495,7 +501,8 @@ public void testMappedGeoPointArray() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
         String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
-            .startObject("properties").startObject("foo").field("type", "geo_point").field("doc_values", false)
+            .startObject("properties").startObject("foo").field("type", "geo_point")
+            .field("doc_values", false)
             .endObject().endObject().endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -544,7 +551,8 @@ public void testDynamicObjectWithTemplate() throws Exception {
     public void testDynamicFalseObject() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -558,7 +566,8 @@ public void testDynamicFalseObject() throws Exception {
     public void testDynamicStrictObject() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -573,7 +582,8 @@ public void testDynamicStrictObject() throws Exception {
     public void testDynamicFalseValue() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -587,7 +597,8 @@ public void testDynamicFalseValue() throws Exception {
     public void testDynamicStrictValue() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -602,7 +613,8 @@ public void testDynamicStrictValue() throws Exception {
     public void testDynamicFalseNull() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -616,7 +628,8 @@ public void testDynamicFalseNull() throws Exception {
     public void testDynamicStrictNull() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -741,7 +754,8 @@ public void testDynamicDottedFieldNameLongArrayWithExistingParentWrongType() thr
     public void testDynamicFalseDottedFieldNameLongArray() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -756,7 +770,8 @@ public void testDynamicFalseDottedFieldNameLongArray() throws Exception {
     public void testDynamicStrictDottedFieldNameLongArray() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -860,7 +875,8 @@ public void testDynamicDottedFieldNameLongWithExistingParentWrongType() throws E
     public void testDynamicFalseDottedFieldNameLong() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -873,7 +889,8 @@ public void testDynamicFalseDottedFieldNameLong() throws Exception {
     public void testDynamicStrictDottedFieldNameLong() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -942,12 +959,12 @@ public void testDynamicDottedFieldNameObjectWithParentTemplate() throws Exceptio
     public void testDynamicDottedFieldNameObjectWithExistingParent() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("foo")
-            .field("type", "object").endObject().endObject().endObject().endObject());
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+            .startObject("foo").field("type", "object").endObject().endObject().endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));

-        BytesReference bytes = BytesReference
-            .bytes(XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz").field("a", 0).endObject().endObject());
+        BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz")
+            .field("a", 0).endObject().endObject());
         ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON));
         assertEquals(2, doc.rootDoc().getFields("foo.bar.baz.a").length);
         Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo");
@@ -972,8 +989,8 @@ public void testDynamicDottedFieldNameObjectWithExistingParentWrongType() throws
             .endObject().endObject().endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));

-        BytesReference bytes = BytesReference
-            .bytes(XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz").field("a", 0).endObject().endObject());
+        BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz")
+            .field("a", 0).endObject().endObject());
         MapperParsingException exception = expectThrows(MapperParsingException.class,
             () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)));
@@ -983,7 +1000,8 @@ public void testDynamicDottedFieldNameObjectWithExistingParentWrongType() throws
     public void testDynamicFalseDottedFieldNameObject() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "false")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "false")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -997,7 +1015,8 @@ public void testDynamicFalseDottedFieldNameObject() throws Exception {
     public void testDynamicStrictDottedFieldNameObject() throws Exception {
         DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict")
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
+            .field("dynamic", "strict")
             .endObject().endObject());
         DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
@@ -1020,7 +1039,8 @@ public void testDocumentContainsMetadataField() throws Exception {
             mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)));
         assertTrue(e.getMessage(), e.getMessage().contains("cannot be added inside a document"));

-        BytesReference bytes2 = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("foo._ttl", 0).endObject());
+        BytesReference bytes2 = BytesReference.bytes(XContentFactory.jsonBuilder().startObject()
+            .field("foo._ttl", 0).endObject());
         mapper.parse(SourceToParse.source("test", "type", "1", bytes2, XContentType.JSON)); // parses without error
     }
@@ -1028,7 +1048,8 @@ public void testSimpleMapper() throws Exception {
         IndexService indexService = createIndex("test");
         DocumentMapper docMapper = new DocumentMapper.Builder(
             new RootObjectMapper.Builder("person")
-                .add(new ObjectMapper.Builder("name").add(new TextFieldMapper.Builder("first").store(true).index(false))),
+                .add(new ObjectMapper.Builder("name")
+                    .add(new TextFieldMapper.Builder("first").store(true).index(false))),
             indexService.mapperService()).build(indexService.mapperService());

         BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
@@ -1053,7 +1074,8 @@ public void testParseToJsonAndParse() throws Exception {
     public void testSimpleParser() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("person", new CompressedXContent(mapping));

         assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
@@ -1065,7 +1087,8 @@ public void testSimpleParserNoTypeNoId() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("person", new CompressedXContent(mapping));
         BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json"));
         Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc();
         assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1")));
@@ -1088,7 +1111,8 @@ public void testNoDocumentSent() throws Exception {
         IndexService indexService = createIndex("test");
         DocumentMapper docMapper = new DocumentMapper.Builder(
             new RootObjectMapper.Builder("person")
-                .add(new ObjectMapper.Builder("name").add(new TextFieldMapper.Builder("first").store(true).index(false))),
+                .add(new ObjectMapper.Builder("name")
+                    .add(new TextFieldMapper.Builder("first").store(true).index(false))),
             indexService.mapperService()).build(indexService.mapperService());

         BytesReference json = new BytesArray("".getBytes(StandardCharsets.UTF_8));
@@ -1103,7 +1127,8 @@ public void testNoLevel() throws Exception {
         String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type", new CompressedXContent(defaultMapping));

         ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
             .bytes(XContentFactory.jsonBuilder()
@@ -1122,7 +1147,8 @@ public void testNoLevel() throws Exception {
     public void testTypeLevel() throws Exception {
         String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject());

-        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping));
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("type", new CompressedXContent(defaultMapping));

         ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
             .bytes(XContentFactory.jsonBuilder()
@@ -1141,7 +1167,8 @@ public void
testNoLevelWithFieldTypeAsValue() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1162,7 +1189,8 @@ public void testNoLevelWithFieldTypeAsValue() throws Exception { public void testTypeLevelWithFieldTypeAsValue() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1183,7 +1211,8 @@ public void testTypeLevelWithFieldTypeAsValue() throws Exception { public void testNoLevelWithFieldTypeAsObject() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1204,7 +1233,8 @@ public void testNoLevelWithFieldTypeAsObject() throws Exception { public void testTypeLevelWithFieldTypeAsObject() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1225,7 +1255,8 @@ public void testTypeLevelWithFieldTypeAsObject() throws Exception { public void testNoLevelWithFieldTypeAsValueNotFirst() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1246,7 +1277,8 @@ public void 
testNoLevelWithFieldTypeAsValueNotFirst() throws Exception { public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1267,7 +1299,8 @@ public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception { public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1289,7 +1322,8 @@ public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception { public void testTypeLevelWithFieldTypeAsObjectNotFirst() throws Exception { String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(defaultMapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -1380,7 +1414,7 @@ public void testDynamicFieldsStartingAndEndingWithDot() throws Exception { fail("should have failed to dynamically introduce a double-dot field"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), - containsString("object field starting or ending with a [.] makes object resolution ambiguous: [top..foo..bar]")); + containsString("object field starting or ending with a [.] 
makes object resolution ambiguous: [top..foo..bar]")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 95175af54214a..26c814f4dcdc3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -67,7 +67,8 @@ public void testDynamicTrue() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(jsonBuilder() @@ -89,7 +90,8 @@ public void testDynamicFalse() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(jsonBuilder() @@ -112,10 +114,12 @@ public void testDynamicStrict() throws IOException { .endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(jsonBuilder() + StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, + () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(jsonBuilder() .startObject() .field("field1", "value1") .field("field2", "value2") @@ -123,8 +127,9 @@ public void testDynamicStrict() throws IOException { XContentType.JSON))); assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [type] is not allowed")); - e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + e = expectThrows(StrictDynamicMappingException.class, + () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field1", "value1") .field("field2", (String) null) @@ -143,10 +148,11 @@ public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOExcept .endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(jsonBuilder() + ParsedDocument doc = 
defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(jsonBuilder() .startObject().startObject("obj1") .field("field1", "value1") .field("field2", "value2") @@ -168,11 +174,12 @@ public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOExcep .endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, () -> - defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(jsonBuilder() + defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(jsonBuilder() .startObject().startObject("obj1") .field("field1", "value1") .field("field2", "value2") @@ -200,7 +207,8 @@ private Mapper parse(DocumentMapper mapper, DocumentMapperParser parser, XConten .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards(1).numberOfReplicas(0).build(); IndexSettings settings = new IndexSettings(build, Settings.EMPTY); - SourceToParse source = SourceToParse.source("test", mapper.type(), "some_id", BytesReference.bytes(builder), builder.contentType()); + SourceToParse source = SourceToParse.source("test", mapper.type(), "some_id", + BytesReference.bytes(builder), builder.contentType()); try (XContentParser xContentParser = createParser(JsonXContent.jsonXContent, source.source())) { ParseContext.InternalParseContext ctx = new ParseContext.InternalParseContext(settings, parser, mapper, source, xContentParser); assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken()); @@ -264,7 +272,8 @@ public void testIncremental() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); - Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject()); + Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar") + .field("bar", "baz").endObject()); assertNotNull(update); // original mapping not modified assertEquals(mapping, serialize(mapper)); @@ -292,7 +301,8 @@ public void testIntroduceTwoFields() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); - Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject()); + Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar") + .field("bar", "baz").endObject()); assertNotNull(update); // original mapping not modified assertEquals(mapping, serialize(mapper)); @@ -327,14 +337,17 @@ public void testObject() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); - Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject()); + Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar") + .field("baz", 
"foo").endObject().endObject().endObject()); assertNotNull(update); // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text") - .startObject("fields").startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject() + .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz") + .field("type", "text") + .startObject("fields").startObject("keyword").field("type", "keyword") + .field("ignore_above", 256).endObject() .endObject().endObject().endObject().endObject().endObject().endObject() .endObject().endObject().endObject()), serialize(update)); } @@ -349,7 +362,8 @@ public void testArray() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); - Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo").value("bar").value("baz").endArray().endObject()); + Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject() + .startArray("foo").value("bar").value("baz").endArray().endObject()); assertNotNull(update); // original mapping not modified assertEquals(mapping, serialize(mapper)); @@ -377,13 +391,15 @@ public void testInnerDynamicMapping() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, serialize(mapper)); - Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject()); + Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo") + .startObject("bar").field("baz", "foo").endObject().endObject().endObject()); assertNotNull(update); // original mapping not modified assertEquals(mapping, serialize(mapper)); // but we have an update assertEquals(Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").startObject("fields") + .startObject("foo").startObject("properties").startObject("bar").startObject("properties") + .startObject("baz").field("type", "text").startObject("fields") .startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject() .endObject().endObject().endObject().endObject().endObject().endObject() .endObject().endObject().endObject()), serialize(update)); @@ -580,7 +596,8 @@ private void doTestDefaultFloatingPointMappings(DocumentMapper mapper, XContentB .field("baz", (double) 3.2f) // double that can be accurately represented as a float .field("quux", "3.2") // float detected through numeric detection .endObject()); - ParsedDocument parsedDocument = mapper.parse(SourceToParse.source("index", "type", "id", source, builder.contentType())); + ParsedDocument parsedDocument = mapper.parse(SourceToParse.source("index", "type", "id", + source, builder.contentType())); Mapping update = parsedDocument.dynamicMappingsUpdate(); assertNotNull(update); assertThat(((FieldMapper) update.root().getMapper("foo")).fieldType().typeName(), equalTo("float")); diff 
--git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 79f01288fa84f..3ffc19f10dee7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -92,8 +92,8 @@ public ExternalMapper build(BuilderContext context) { setupFieldType(context); - return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper, - context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, + shapeMapper, stringMapper, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } } @@ -156,7 +156,8 @@ public Query existsQuery(QueryShardContext context) { public ExternalMapper(String simpleName, MappedFieldType fieldType, String generatedValue, String mapperName, BinaryFieldMapper binMapper, BooleanFieldMapper boolMapper, GeoPointFieldMapper pointMapper, - GeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + GeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, new ExternalFieldType(), indexSettings, multiFields, copyTo); this.generatedValue = generatedValue; this.mapperName = mapperName; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java index 2ae6345d44e6e..e8c38bc1a5a93 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java @@ -24,13 +24,6 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.BooleanFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext; import java.io.IOException; import java.util.Collections; @@ -99,7 +92,8 @@ public ExternalMetadataMapper build(BuilderContext context) { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, + ParserContext parserContext) throws MapperParsingException { return new Builder(); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 8138d46e95689..6d7601357a336 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -67,7 +67,8 @@ public void testFieldType() throws Exception { 
.startObject("_field_names").endObject() .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); assertFalse(fieldNamesMapper.fieldType().hasDocValues()); assertEquals(IndexOptions.DOCS, fieldNamesMapper.fieldType().indexOptions()); @@ -78,10 +79,11 @@ public void testFieldType() throws Exception { public void testInjectIntoDocDuringParsing() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("a", "100") .startObject("b") @@ -96,14 +98,16 @@ public void testInjectIntoDocDuringParsing() throws Exception { public void testExplicitEnabled() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_field_names").field("enabled", true).endObject() - .startObject("properties").startObject("field").field("type", "keyword").field("doc_values", false).endObject().endObject() - .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + .startObject("properties") + .startObject("field").field("type", "keyword").field("doc_values", false).endObject() + .endObject().endObject().endObject()); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); assertTrue(fieldNamesMapper.fieldType().isEnabled()); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject()), @@ -116,12 +120,13 @@ public void testDisabled() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_field_names").field("enabled", false).endObject() .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); assertFalse(fieldNamesMapper.fieldType().isEnabled()); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", 
"type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject()), @@ -139,8 +144,10 @@ public void testMergingMappings() throws Exception { .endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); - DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), + MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), + MapperService.MergeReason.MAPPING_UPDATE); assertFalse(mapperDisabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index eabf0a849fa39..b3474fb5efbcd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -62,10 +62,11 @@ public void testGeoHashValue() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("point", stringEncode(1.3, 1.2)) .endObject()), @@ -78,10 +79,11 @@ public void testLatLonValuesStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.field("store", true).endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() 
.startObject("point").field("lat", 1.2).field("lon", 1.3).endObject() .endObject()), @@ -94,10 +96,11 @@ public void testArrayLatLonValues() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("doc_values", false); String mapping = Strings.toString(xContentBuilder.field("store", true).endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point") .startObject().field("lat", 1.2).field("lon", 1.3).endObject() @@ -115,11 +118,11 @@ public void testLatLonInOneValue() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", - new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3") .endObject()), @@ -133,11 +136,11 @@ public void testLatLonStringWithZValue() throws Exception { .startObject("properties").startObject("point").field("type", "geo_point") .field(IGNORE_Z_VALUE.getPreferredName(), true); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", - new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3,10.0") .endObject()), @@ -151,11 +154,11 @@ public void testLatLonStringWithZValueException() throws Exception { .startObject("properties").startObject("point").field("type", "geo_point") .field(IGNORE_Z_VALUE.getPreferredName(), false); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", - new CompressedXContent(mapping)); + DocumentMapper defaultMapper = 
createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - SourceToParse source = SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + SourceToParse source = SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3,10.0") .endObject()), @@ -169,11 +172,11 @@ public void testLatLonInOneValueStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.field("store", true).endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", - new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3") .endObject()), @@ -185,11 +188,11 @@ public void testLatLonInOneValueArray() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("doc_values", false); String mapping = Strings.toString(xContentBuilder.field("store", true).endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", - new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point") .value("1.2,1.3") @@ -207,10 +210,11 @@ public void testLonLatArray() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject()), @@ -224,10 +228,11 @@ public void testLonLatArrayDynamic() throws Exception { 
.startArray("dynamic_templates").startObject().startObject("point").field("match", "point*") .startObject("mapping").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endArray().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject()), @@ -240,10 +245,11 @@ public void testLonLatArrayStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); String mapping = Strings.toString(xContentBuilder.field("store", true).endObject().endObject().endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject()), @@ -256,12 +262,14 @@ public void testLonLatArrayStored() throws Exception { public void testLonLatArrayArrayStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); - String mapping = Strings.toString(xContentBuilder.field("store", true).field("doc_values", false).endObject().endObject() + String mapping = Strings.toString(xContentBuilder.field("store", true) + .field("doc_values", false).endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startArray("point") .startArray().value(1.3).value(1.2).endArray() @@ -311,7 +319,8 @@ public void testIgnoreZValue() throws IOException { public void testMultiField() throws Exception { int numDocs = randomIntBetween(10, 100); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location") + String mapping = 
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("pin") + .startObject("properties").startObject("location") .field("type", "geo_point") .startObject("fields") .startObject("geohash").field("type", "keyword").endObject() // test geohash as keyword @@ -326,13 +335,15 @@ public void testMultiField() throws Exception { client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); for (int i=0; i defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("location", "1234.333") .endObject()), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index 4c947a44a0a12..20e689e9d7e89 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -58,7 +58,8 @@ public void testDefaultConfiguration() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -82,7 +83,8 @@ public void testOrientationParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -99,7 +101,8 @@ public void testOrientationParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + defaultMapper = createIndex("test2").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -120,7 +123,8 @@ public void testCoerceParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -135,7 +139,8 @@ public void testCoerceParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new 
CompressedXContent(mapping)); + defaultMapper = createIndex("test2").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -171,7 +176,8 @@ public void testIgnoreZValue() throws IOException { .endObject().endObject() .endObject().endObject()); - defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + defaultMapper = createIndex("test2").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -190,7 +196,8 @@ public void testIgnoreMalformedParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -205,7 +212,8 @@ public void testIgnoreMalformedParsing() throws IOException { .endObject().endObject() .endObject().endObject()); - defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + defaultMapper = createIndex("test2").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -224,7 +232,8 @@ public void testGeohashConfiguration() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -247,7 +256,8 @@ public void testQuadtreeConfiguration() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -394,7 +404,8 @@ public void testPointsOnlyOption() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); @@ -455,14 +466,19 @@ public void 
testLevelDefaults() throws IOException { public void testGeoShapeMapperMerge() throws Exception { String stage1Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("shape").field("type", "geo_shape").field("tree", "geohash").field("strategy", "recursive") - .field("precision", "1m").field("tree_levels", 8).field("distance_error_pct", 0.01).field("orientation", "ccw") + .startObject("shape").field("type", "geo_shape").field("tree", "geohash") + .field("strategy", "recursive") + .field("precision", "1m").field("tree_levels", 8).field("distance_error_pct", 0.01) + .field("orientation", "ccw") .endObject().endObject().endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), + MapperService.MergeReason.MAPPING_UPDATE); String stage2Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("shape").field("type", "geo_shape").field("tree", "quadtree") - .field("strategy", "term").field("precision", "1km").field("tree_levels", 26).field("distance_error_pct", 26) + .startObject("properties").startObject("shape").field("type", "geo_shape") + .field("tree", "quadtree") + .field("strategy", "term").field("precision", "1km") + .field("tree_levels", 26).field("distance_error_pct", 26) .field("orientation", "cw").endObject().endObject().endObject().endObject()); try { mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); @@ -490,7 +506,8 @@ public void testGeoShapeMapperMerge() throws Exception { // correct mapping stage2Mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m") - .field("tree_levels", 8).field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject()); + .field("tree_levels", 8).field("distance_error_pct", 0.001) + .field("orientation", "cw").endObject().endObject().endObject().endObject()); docMapper = mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); fieldMapper = docMapper.mappers().getMapper("shape"); @@ -599,7 +616,8 @@ public void testPointsOnlyDefaultsWithTermStrategy() throws IOException { .endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); Mapper fieldMapper = defaultMapper.mappers().getMapper("location"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index 63656a2c2bf35..aab5e98ed0a12 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -46,7 +46,8 @@ protected Collection<Class<? extends Plugin>> getPlugins() { 
public void testIncludeInObjectNotAllowed() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); try { docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder() @@ -61,7 +62,8 @@ public void testDefaults() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); + ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", + new BytesArray("{}"), XContentType.JSON)); IndexableField[] fields = document.rootDoc().getFields(IdFieldMapper.NAME); assertEquals(1, fields.length); assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java index 5e60e248927d7..e60b097aaca36 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java @@ -44,10 +44,11 @@ protected Collection<Class<? extends Plugin>> getPlugins() { public void testDefaultDisabledIndexMapper() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject()), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index a20c88fe69366..c5eded8f5ab11 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -78,7 +78,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(16, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); @@ -129,7 +129,7 @@ public void testNoDocValues() throws Exception { 
IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); } @@ -152,7 +152,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_SET, dvField.fieldType().docValuesType()); IndexableField storedField = fields[2]; @@ -240,7 +240,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(16, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java index e16b04748a18b..2c70b25d6a446 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java @@ -71,7 +71,7 @@ public void testStoreCidr() throws Exception { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); String strVal = diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index cfd92db37f0af..b5eeef0fa2847 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; @@ -77,7 +76,8 @@ public void testTypeNameTooLong() { .addMapping(type, field, "type=text") .execute().actionGet(); }); - assertTrue(e.getMessage(), e.getMessage().contains("mapping type name [" + type + "] is too long; limit is length 255 but was [256]")); + assertTrue(e.getMessage(), e.getMessage().contains("mapping type name [" + type + + "] is too long; limit is length 255 but was [256]")); } public void testTypeValidation() { @@ -92,9 +92,9 @@ public void testTypeValidation() { public void testIndexIntoDefaultMapping() throws Throwable { // 1. 
test implicit index creation - ExecutionException e = expectThrows(ExecutionException.class, () -> { - client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1").setSource("{}", XContentType.JSON).execute().get(); - }); + ExecutionException e = expectThrows(ExecutionException.class, + () -> client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1") + .setSource("{}", XContentType.JSON).execute().get()); Throwable throwable = ExceptionsHelper.unwrapCause(e.getCause()); if (throwable instanceof IllegalArgumentException) { assertEquals("It is forbidden to index into the default mapping [_default_]", throwable.getMessage()); @@ -122,14 +122,15 @@ public void testIndexIntoDefaultMapping() throws Throwable { */ public void testTotalFieldsLimit() throws Throwable { int totalFieldsLimit = randomIntBetween(1, 10); - Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldsLimit).build(); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldsLimit) + .build(); createIndex("test1", settings).mapperService().merge("type", createMappingSpecifyingNumberOfFields(totalFieldsLimit), MergeReason.MAPPING_UPDATE); // adding one more field should trigger exception IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - createIndex("test2", settings).mapperService().merge("type", createMappingSpecifyingNumberOfFields(totalFieldsLimit + 1), - MergeReason.MAPPING_UPDATE); + createIndex("test2", settings).mapperService().merge("type", + createMappingSpecifyingNumberOfFields(totalFieldsLimit + 1), MergeReason.MAPPING_UPDATE); }); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [" + totalFieldsLimit + "] in index [test2] has been exceeded")); @@ -148,7 +149,8 @@ private CompressedXContent createMappingSpecifyingNumberOfFields(int numberOfFie } public void testMappingDepthExceedsLimit() throws Throwable { - IndexService indexService1 = createIndex("test1", Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build()); + IndexService indexService1 = createIndex("test1", + Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build()); // no exception indexService1.mapperService().merge("type", createMappingSpecifyingNumberOfFields(1), MergeReason.MAPPING_UPDATE); @@ -310,7 +312,7 @@ public void testForbidMultipleTypes() throws IOException { String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); + assertThat(e.getMessage(), startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } /** @@ -319,15 +321,17 @@ public void testForbidMultipleTypes() throws IOException { */ public void testForbidMultipleTypesWithConflictingMappings() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field1").field("type", "integer_range").endObject().endObject().endObject().endObject()); + .startObject("properties").startObject("field1").field("type", 
"integer_range") + .endObject().endObject().endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2") - .startObject("properties").startObject("field1").field("type", "integer").endObject().endObject().endObject().endObject()); + .startObject("properties").startObject("field1").field("type", "integer") + .endObject().endObject().endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); + assertThat(e.getMessage(), startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } public void testDefaultMappingIsRejectedOn7() throws IOException { @@ -335,8 +339,8 @@ public void testDefaultMappingIsRejectedOn7() throws IOException { MapperService mapperService = createIndex("test").mapperService(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); - assertEquals("The [default] mapping cannot be updated on index [test]: defaults mappings are not useful anymore now that indices " + - "can have at most one type.", e.getMessage()); + assertEquals("The [default] mapping cannot be updated on index [test]: defaults mappings are not useful anymore now" + + " that indices can have at most one type.", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java index b17abcc17b359..6ecf4b6408be3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java @@ -43,7 +43,8 @@ public void testExceptionForCopyToInMultiFields() throws IOException { mapperService.parse("type", new CompressedXContent(Strings.toString(mapping)), true); fail("Parsing should throw an exception because the mapping contains a copy_to in a multi field"); } catch (MapperParsingException e) { - assertThat(e.getMessage(), equalTo("copy_to in multi fields is not allowed. Found the copy_to in field [c] which is within a multi field.")); + assertThat(e.getMessage(), equalTo("copy_to in multi fields is not allowed. 
Found the copy_to in field [c]" + " which is within a multi field.")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java index 0f3d5193c285b..8e350cfe1c77d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java @@ -135,7 +135,8 @@ public void testBuildThenParse() throws Exception { String builtMapping = builderDocMapper.mappingSource().string(); // reparse it - DocumentMapper docMapper = indexService.mapperService().documentMapperParser().parse("person", new CompressedXContent(builtMapping)); + DocumentMapper docMapper = indexService.mapperService().documentMapperParser() + .parse("person", new CompressedXContent(builtMapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json")); @@ -177,13 +178,15 @@ public void testMultiFieldsInConsistentOrder() throws Exception { } builder = builder.endObject().endObject().endObject().endObject().endObject(); String mapping = Strings.toString(builder); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); Arrays.sort(multiFieldNames); Map<String, Object> sourceAsMap = XContentHelper.convertToMap(docMapper.mappingSource().compressedReference(), true, builder.contentType()).v2(); @SuppressWarnings("unchecked") - Map<String, Object> multiFields = (Map<String, Object>) XContentMapValues.extractValue("type.properties.my_field.fields", sourceAsMap); + Map<String, Object> multiFields = + (Map<String, Object>) XContentMapValues.extractValue("type.properties.my_field.fields", sourceAsMap); assertThat(multiFields.size(), equalTo(multiFieldNames.length)); int i = 0; @@ -195,7 +198,8 @@ public void testMultiFieldsInConsistentOrder() throws Exception { } public void testObjectFieldNotAllowed() throws Exception { String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") - .field("type", "text").startObject("fields").startObject("multi").field("type", "object").endObject().endObject() + .field("type", "text").startObject("fields").startObject("multi").field("type", "object") + .endObject().endObject() .endObject().endObject().endObject().endObject()); final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); try { @@ -208,7 +212,8 @@ public void testObjectFieldNotAllowed() throws Exception { public void testNestedFieldNotAllowed() throws Exception { String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") - .field("type", "text").startObject("fields").startObject("multi").field("type", "nested").endObject().endObject() + .field("type", "text").startObject("fields").startObject("multi").field("type", "nested") + .endObject().endObject() .endObject().endObject().endObject().endObject()); final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); try { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 01bdcd362fcf3..1d339aa9bbb00 100644 ---
a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -60,7 +60,8 @@ public void testEmptyNested() throws Exception { .startObject("nested1").field("type", "nested").endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -88,7 +89,8 @@ public void testSingleNested() throws Exception { .startObject("nested1").field("type", "nested").endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); @@ -139,7 +141,8 @@ public void testMultiNested() throws Exception { .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); @@ -151,13 +154,21 @@ public void testMultiNested() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") - .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject() - .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() + .startObject().field("field1", "1").startArray("nested2") + .startObject().field("field2", "2").endObject() + .startObject().field("field2", "3").endObject() + .endArray() + .endObject() + .startObject().field("field1", "4") + .startArray("nested2") + .startObject().field("field2", "5").endObject() + .startObject().field("field2", "6").endObject() + .endArray().endObject() .endArray() .endObject()), XContentType.JSON)); @@ -191,7 +202,8 @@ public void testMultiObjectAndNested1() throws Exception { .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = 
createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); @@ -203,13 +215,21 @@ public void testMultiObjectAndNested1() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") - .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject() - .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() + .startObject().field("field1", "1") + .startArray("nested2") + .startObject().field("field2", "2").endObject() + .startObject().field("field2", "3").endObject() + .endArray().endObject() + .startObject().field("field1", "4") + .startArray("nested2") + .startObject().field("field2", "5").endObject() + .startObject().field("field2", "6").endObject() + .endArray().endObject() .endArray() .endObject()), XContentType.JSON)); @@ -237,13 +257,16 @@ public void testMultiObjectAndNested1() throws Exception { } public void testMultiObjectAndNested2() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("nested1").field("type", "nested").field("include_in_parent", true).startObject("properties") + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("nested1").field("type", "nested").field("include_in_parent", true) + .startObject("properties") .startObject("nested2").field("type", "nested").field("include_in_parent", true) .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); @@ -255,13 +278,21 @@ public void testMultiObjectAndNested2() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") - .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject() - .startObject().field("field1", 
"4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() + .startObject().field("field1", "1") + .startArray("nested2") + .startObject().field("field2", "2").endObject() + .startObject().field("field2", "3").endObject() + .endArray().endObject() + .startObject().field("field1", "4") + .startArray("nested2") + .startObject().field("field2", "5").endObject() + .startObject().field("field2", "6").endObject() + .endArray().endObject() .endArray() .endObject()), XContentType.JSON)); @@ -295,7 +326,8 @@ public void testMultiRootAndNested1() throws Exception { .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); @@ -307,13 +339,21 @@ public void testMultiRootAndNested1() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(true)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") - .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject() - .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() + .startObject().field("field1", "1") + .startArray("nested2") + .startObject().field("field2", "2").endObject() + .startObject().field("field2", "3").endObject() + .endArray().endObject() + .startObject().field("field1", "4") + .startArray("nested2") + .startObject().field("field2", "5").endObject() + .startObject().field("field2", "6").endObject() + .endArray().endObject() .endArray() .endObject()), XContentType.JSON)); @@ -348,15 +388,18 @@ public void testMultiRootAndNested1() throws Exception { public void testMultipleLevelsIncludeRoot1() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject().startObject("type").startObject("properties") - .startObject("nested1").field("type", "nested").field("include_in_root", true).field("include_in_parent", true).startObject("properties") - .startObject("nested2").field("type", "nested").field("include_in_root", true).field("include_in_parent", true) + .startObject("nested1").field("type", "nested").field("include_in_root", true) + .field("include_in_parent", true).startObject("properties") + .startObject("nested2").field("type", "nested").field("include_in_root", true) + .field("include_in_parent", true) .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + 
.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().startArray("nested1") .startObject().startArray("nested2").startObject().field("foo", "bar") .endObject().endArray().endObject().endArray() @@ -386,10 +429,11 @@ public void testMultipleLevelsIncludeRoot2() throws Exception { .endObject().endObject().endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().startArray("nested1") .startObject().startArray("nested2") .startObject().startArray("nested3").startObject().field("foo", "bar") @@ -408,15 +452,16 @@ public void testNestedArrayStrict() throws Exception { .endObject().endObject().endObject() .endObject().endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); assertThat(docMapper.hasNestedObjects(), equalTo(true)); ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); assertThat(nested1Mapper.nested().isNested(), equalTo(true)); assertThat(nested1Mapper.dynamic(), equalTo(Dynamic.STRICT)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -448,22 +493,26 @@ public void testLimitOfNestedFieldsPerIndex() throws Exception { }; // default limit allows at least two nested fields - createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); + createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), + MergeReason.MAPPING_UPDATE); // explicitly setting limit to 0 prevents nested fields Exception e = expectThrows(IllegalArgumentException.class, () -> - createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) + createIndex("test2", Settings.builder() + .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded")); // setting limit to 1 with 2 nested fields fails e = expectThrows(IllegalArgumentException.class, () -> - createIndex("test3", 
Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build()) + createIndex("test3", Settings.builder() + .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build()) .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded")); // do not check nested fields limit if mapping is not updated - createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) + createIndex("test4", Settings.builder() + .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY); } @@ -519,7 +568,8 @@ public void testLimitNestedDocsDefaultSettings() throws Exception{ docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = SourceToParse.source("test1", "type", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = SourceToParse.source("test1", "type", "1", + BytesReference.bytes(docBuilder), XContentType.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source1)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" + defaultMaxNoNestedDocs @@ -551,7 +601,8 @@ public void testLimitNestedDocs() throws Exception{ docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = SourceToParse.source("test1", "type", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = SourceToParse.source("test1", "type", "1", + BytesReference.bytes(docBuilder), XContentType.JSON); ParsedDocument doc = docMapper.parse(source1); assertThat(doc.docs().size(), equalTo(3)); @@ -568,7 +619,8 @@ public void testLimitNestedDocs() throws Exception{ docBuilder2.endArray(); } docBuilder2.endObject(); - SourceToParse source2 = SourceToParse.source("test1", "type", "2", BytesReference.bytes(docBuilder2), XContentType.JSON); + SourceToParse source2 = SourceToParse.source("test1", "type", "2", + BytesReference.bytes(docBuilder2), XContentType.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" + maxNoNestedDocs @@ -605,7 +657,8 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception{ docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = SourceToParse.source("test1", "type", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = SourceToParse.source("test1", "type", "1", + BytesReference.bytes(docBuilder), XContentType.JSON); ParsedDocument doc = docMapper.parse(source1); assertThat(doc.docs().size(), equalTo(3)); @@ -627,7 +680,8 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception{ } docBuilder2.endObject(); - SourceToParse source2 = SourceToParse.source("test1", "type", "2", BytesReference.bytes(docBuilder2), XContentType.JSON); + SourceToParse source2 = SourceToParse.source("test1", "type", "2", + BytesReference.bytes(docBuilder2), XContentType.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" + maxNoNestedDocs @@ 
-660,8 +714,8 @@ public void testReorderParentBWC() throws IOException { ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); assertThat(nested1Mapper.nested().isNested(), equalTo(true)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java index 815388eeffc55..550802f6c9dfd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java @@ -36,10 +36,11 @@ public void testNullValueObject() throws IOException { .startObject("properties").startObject("obj1").field("type", "object").endObject().endObject() .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startObject("obj1").endObject() .field("value1", "test1") @@ -48,8 +49,8 @@ public void testNullValueObject() throws IOException { assertThat(doc.rootDoc().get("value1"), equalTo("test1")); - doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .nullField("obj1") .field("value1", "test1") @@ -58,8 +59,8 @@ public void testNullValueObject() throws IOException { assertThat(doc.rootDoc().get("value1"), equalTo("test1")); - doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() .startObject("obj1").field("field", "value").endObject() .field("value1", "test1") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index 9167c0d5a7d97..8b8e174dba83c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -66,7 +66,7 @@ public void doTestDefaults(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField 
dvField = fields[1]; @@ -117,7 +117,7 @@ public void doTestNoDocValues(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); } @@ -141,7 +141,7 @@ public void doTestStore(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -170,7 +170,7 @@ public void doTestCoerce(String type) throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -344,7 +344,7 @@ protected void doTestNullValue(String type) throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 94e63cdb85936..676cefda36559 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -40,7 +40,8 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject()); - DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { defaultMapper.parse(SourceToParse.source("test", "type", "1", new BytesArray(" {\n" + " \"object\": {\n" + @@ -68,38 +69,37 @@ public void testEmptyArrayProperties() throws Exception { } public void testEmptyFieldsArrayMultiFields() throws Exception { - String mapping = Strings - .toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startArray("fields") - .endArray() - .endObject() - .endObject() - .endObject() - .endObject()); + String mapping = + Strings.toString(XContentFactory.jsonBuilder() + .startObject() + 
.startObject("tweet") + .startObject("properties") + .startObject("name") + .field("type", "text") + .startArray("fields") + .endArray() + .endObject() + .endObject() + .endObject() + .endObject()); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } public void testFieldsArrayMultiFieldsShouldThrowException() throws Exception { - String mapping = Strings - .toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startArray("fields") - .startObject().field("test", "string").endObject() - .startObject().field("test2", "string").endObject() - .endArray() - .endObject() - .endObject() + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startObject("name") + .field("type", "text") + .startArray("fields") + .startObject().field("test", "string").endObject() + .startObject().field("test2", "string").endObject() + .endArray() .endObject() - .endObject()); + .endObject() + .endObject() + .endObject()); try { createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); fail("Expected MapperParsingException"); @@ -110,32 +110,30 @@ public void testFieldsArrayMultiFieldsShouldThrowException() throws Exception { } public void testEmptyFieldsArray() throws Exception { - String mapping = Strings - .toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startArray("fields") - .endArray() - .endObject() - .endObject() - .endObject()); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startArray("fields") + .endArray() + .endObject() + .endObject() + .endObject()); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } public void testFieldsWithFilledArrayShouldThrowException() throws Exception { - String mapping = Strings - .toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startArray("fields") - .startObject().field("test", "string").endObject() - .startObject().field("test2", "string").endObject() - .endArray() - .endObject() - .endObject() - .endObject()); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startArray("fields") + .startObject().field("test", "string").endObject() + .startObject().field("test2", "string").endObject() + .endArray() + .endObject() + .endObject() + .endObject()); try { createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); fail("Expected MapperParsingException"); @@ -145,22 +143,21 @@ public void testFieldsWithFilledArrayShouldThrowException() throws Exception { } public void testFieldPropertiesArray() throws Exception { - String mapping = Strings - .toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject()); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + 
.startObject("properties") + .startObject("name") + .field("type", "text") + .startObject("fields") + .startObject("raw") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject()); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } @@ -185,7 +182,8 @@ public void testMerge() throws IOException { } public void testEmptyName() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() .startObject("") .startObject("properties") .startObject("name") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java index 6bb15432b1fd3..c96b8c44ee707 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/PathMapperTests.java @@ -31,7 +31,8 @@ public class PathMapperTests extends ESSingleNodeTestCase { public void testPathMapping() throws IOException { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/path/test-mapping.json"); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("person", new CompressedXContent(mapping)); // test full name assertThat(docMapper.mappers().getMapper("first1"), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 00068f76e753d..1f8b0b58af813 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -136,7 +136,7 @@ public void doTestDefaults(String type) throws Exception { assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); } @@ -188,7 +188,7 @@ protected void doTestNoDocValues(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); } @Override @@ -216,7 +216,7 @@ protected void doTestStore(String type) throws Exception { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); String strVal = "5"; @@ -255,7 +255,7 @@ public void doTestCoerce(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, 
pointField.fieldType().pointIndexDimensionCount()); // date_range ignores the coerce parameter and epoch_millis date format truncates floats (see issue: #14641) if (type.equals("date_range") == false) { @@ -353,7 +353,7 @@ protected void doTestNullValue(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); storedField = fields[2]; assertTrue(storedField.fieldType().stored()); @@ -406,7 +406,7 @@ public void doTestNoBounds(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java index 1b83b1bcb5b67..11c858545d03f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESSingleNodeTestCase; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class RoutingFieldMapperTests extends ESSingleNodeTestCase { @@ -33,7 +34,8 @@ public class RoutingFieldMapperTests extends ESSingleNodeTestCase { public void testRoutingMapper() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -48,14 +50,16 @@ public void testRoutingMapper() throws Exception { public void testIncludeInObjectNotAllowed() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); try { docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().field("_routing", "foo").endObject()),XContentType.JSON)); fail("Expected failure to parse metadata field"); } catch (MapperParsingException e) { - assertTrue(e.getMessage(), e.getMessage().contains("Field [_routing] is a metadata field and cannot be added inside a document")); + assertThat(e.getMessage(), e.getMessage(), + containsString("Field 
[_routing] is a metadata field and cannot be added inside a document")); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index ec3c548cda4d4..baccb4f474ecc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -54,7 +54,8 @@ public void testNoFormat() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject() .field("field", "value") .endObject()), XContentType.JSON)); @@ -62,7 +63,8 @@ public void testNoFormat() throws Exception { assertThat(XContentFactory.xContentType(doc.source().toBytesRef().bytes), equalTo(XContentType.JSON)); documentMapper = parser.parse("type", new CompressedXContent(mapping)); - doc = documentMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.smileBuilder().startObject() + doc = documentMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.smileBuilder().startObject() .field("field", "value") .endObject()), XContentType.SMILE)); @@ -75,9 +77,11 @@ public void testIncludes() throws Exception { .startObject("_source").array("includes", new String[]{"path1*"}).endObject() .endObject().endObject()); - DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject() .startObject("path1").field("field1", "value1").endObject() .startObject("path2").field("field2", "value2").endObject() .endObject()), @@ -97,9 +101,11 @@ public void testExcludes() throws Exception { .startObject("_source").array("excludes", new String[]{"path1*"}).endObject() .endObject().endObject()); - DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject() .startObject("path1").field("field1", "value1").endObject() .startObject("path2").field("field2", "value2").endObject() .endObject()), @@ -206,10 +212,12 @@ public void testComplete() throws Exception { public void 
testSourceObjectContainsExtraTokens() throws Exception { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); + DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); try { - documentMapper.parse(SourceToParse.source("test", "type", "1", new BytesArray("{}}"), XContentType.JSON)); // extra end object (invalid JSON) + documentMapper.parse(SourceToParse.source("test", "type", "1", + new BytesArray("{}}"), XContentType.JSON)); // extra end object (invalid JSON) fail("Expected parse exception"); } catch (MapperParsingException e) { assertNotNull(e.getRootCause()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index d8650331d2323..84f0dc31093a6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -80,7 +80,8 @@ protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mappi CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping try { - indexService.mapperService().merge("type", new CompressedXContent(BytesReference.bytes(mappingUpdate)), MapperService.MergeReason.MAPPING_UPDATE); + indexService.mapperService().merge("type", new CompressedXContent(BytesReference.bytes(mappingUpdate)), + MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 69bb5943a7c98..1067ed62db46e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -21,9 +21,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedBinaryTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -52,7 +49,6 @@ import org.hamcrest.Matcher; import java.io.IOException; -import java.io.Reader; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -69,7 +65,7 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuilder> { protected Map<String, MatchQueryBuilder> getAlternateVersions() { Map<String, MatchQueryBuilder> alternateVersions = new HashMap<>(); MatchQueryBuilder matchQuery = new MatchQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)); String contentString = "{\n" + - " \"match\" : {\n" + - " \"" + matchQuery.fieldName() + "\" : \"" + matchQuery.value() + "\"\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"" + matchQuery.fieldName() + "\" : \"" + matchQuery.value() + "\"\n" + + " }\n" + + "}"; alternateVersions.put(contentString, matchQuery); return alternateVersions; } @@ -238,7 +234,7 @@ public void testIllegalValues() { { IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, - () -> matchQuery.maxExpansions(randomIntBetween(-10, 0))); + () -> matchQuery.maxExpansions(randomIntBetween(-10, 0))); assertEquals("[match] requires maxExpansions to be positive.", e.getMessage()); } @@ -261,20 +257,20 @@ public void testIllegalValues() { public void testSimpleMatchQuery() throws IOException { String json = "{\n" + - " \"match\" : {\n" + - " \"message\" : {\n" + - " \"query\" : \"to be or not to be\",\n" + - " \"operator\" : \"AND\",\n" + - " \"prefix_length\" : 0,\n" + - " \"max_expansions\" : 50,\n" + - " \"fuzzy_transpositions\" : true,\n" + - " \"lenient\" : false,\n" + - " \"zero_terms_query\" : \"ALL\",\n" + - " \"auto_generate_synonyms_phrase_query\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message\" : {\n" + + " \"query\" : \"to be or not to be\",\n" + + " \"operator\" : \"AND\",\n" + + " \"prefix_length\" : 0,\n" + + " \"max_expansions\" : 50,\n" + + " \"fuzzy_transpositions\" : true,\n" + + " \"lenient\" : false,\n" + + " \"zero_terms_query\" : \"ALL\",\n" + + " \"auto_generate_synonyms_phrase_query\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}"; MatchQueryBuilder qb = (MatchQueryBuilder) parseQuery(json); checkGeneratedJson(json, qb); @@ -287,14 +283,14 @@ public void testFuzzinessOnNonStringField() throws Exception { query.fuzziness(randomFuzziness(INT_FIELD_NAME)); QueryShardContext context = createShardContext(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> query.toQuery(context)); + () -> query.toQuery(context)); assertEquals("Can only use fuzzy queries on keyword and text fields - not on [mapped_int] which is of type [integer]", - e.getMessage()); + e.getMessage()); query.analyzer("keyword"); // triggers a different code path e = expectThrows(IllegalArgumentException.class, - () -> query.toQuery(context)); + () -> query.toQuery(context)); assertEquals("Can only use fuzzy queries on keyword and text fields - not on [mapped_int] which is of type [integer]", - e.getMessage()); + e.getMessage()); query.lenient(true); query.toQuery(context); // no exception @@ -313,43 +309,43 @@ public void testExactOnUnsupportedField() throws Exception { public void testParseFailsWithMultipleFields() throws IOException { String json = "{\n" + - " \"match\" : {\n" + - " \"message1\" : {\n" + - " \"query\" : \"this is a test\"\n" + - " },\n" + - " \"message2\" : {\n" + - " \"query\" : \"this is a test\"\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : {\n" + + " \"query\" : \"this is a test\"\n" + + " },\n" + + " \"message2\" : {\n" + + " \"query\" : \"this is a test\"\n" + + " }\n" + + " }\n" + + "}"; ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); String shortJson = "{\n" + - " \"match\" : {\n" + - " \"message1\" : \"this is a test\",\n" + - " \"message2\" : \"this is a test\"\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : \"this is a test\",\n" + + " \"message2\" : \"this is a test\"\n" + + " }\n" + + "}"; e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } public void testParseFailsWithTermsArray() throws Exception { String json1 = "{\n" + - " \"match\" : {\n" + - " \"message1\" : {\n" + - " 
\"query\" : [\"term1\", \"term2\"]\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : {\n" + + " \"query\" : [\"term1\", \"term2\"]\n" + + " }\n" + + " }\n" + + "}"; expectThrows(ParsingException.class, () -> parseQuery(json1)); String json2 = "{\n" + - " \"match\" : {\n" + - " \"message1\" : [\"term1\", \"term2\"]\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : [\"term1\", \"term2\"]\n" + + " }\n" + + "}"; expectThrows(IllegalStateException.class, () -> parseQuery(json2)); } @@ -364,9 +360,9 @@ public void testExceptionUsingAnalyzerOnNumericField() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef( - "_doc", - "string_boost", "type=text,boost=4", "string_no_pos", - "type=text,index_options=docs")) + "_doc", + "string_boost", "type=text,boost=4", "string_no_pos", + "type=text,index_options=docs")) ), MapperService.MergeReason.MAPPING_UPDATE); } @@ -408,26 +404,18 @@ public void testMaxBooleanClause() { query.setAnalyzer(new MockGraphAnalyzer(createGiantGraphMultiTerms())); expectThrows(BooleanQuery.TooManyClauses.class, () -> query.parse(Type.PHRASE, STRING_FIELD_NAME, "")); } - + private static class MockGraphAnalyzer extends Analyzer { - final CannedBinaryTokenStream.BinaryToken[] tokens; - private MockGraphAnalyzer(CannedBinaryTokenStream.BinaryToken[] tokens ) { - this.tokens = tokens; + CannedBinaryTokenStream tokenStream; + + MockGraphAnalyzer(CannedBinaryTokenStream.BinaryToken[] tokens) { + this.tokenStream = new CannedBinaryTokenStream(tokens); } + @Override protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = new MockTokenizer(MockTokenizer.SIMPLE, true); - return new TokenStreamComponents(tokenizer) { - @Override - public TokenStream getTokenStream() { - return new CannedBinaryTokenStream(tokens); - } - - @Override - protected void setReader(final Reader reader) { - } - }; + return new TokenStreamComponents(r -> {}, tokenStream); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 72898dd3911cd..1a4e69af25342 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.index.Fields; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -256,7 +255,7 @@ private static Fields generateFields(String[] fieldNames, String text) throws IO for (String fieldName : fieldNames) { index.addField(fieldName, text, new WhitespaceAnalyzer()); } - return MultiFields.getFields(index.createSearcher().getIndexReader()); + return index.createSearcher().getIndexReader().getTermVectors(0); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 6be12cc841a59..1fe157255b6b7 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -444,7 +444,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); query.from(queryFromValue); query.to(queryToValue); - query.timeZone(randomFrom(DateTimeZone.getAvailableIDs())); + query.timeZone(randomDateTimeZone().getID()); query.format("yyyy-MM-dd"); QueryShardContext queryShardContext = createShardContext(); QueryBuilder rewritten = query.rewrite(queryShardContext); diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 36da37c44c66f..2d76b524ce603 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -104,6 +104,12 @@ protected SimpleQueryStringBuilder doCreateTestQueryBuilder() { fields.put(STRING_FIELD_NAME_2, 2.0f / randomIntBetween(1, 20)); } } + // special handling if the query is "now" and no field is specified. This hits the "mapped_date" field, which leads to the query not being + // cacheable and triggers later test failures (see https://github.com/elastic/elasticsearch/issues/35183) + if (fieldCount == 0 && result.value().equalsIgnoreCase("now")) { + fields.put(STRING_FIELD_NAME_2, 2.0f / randomIntBetween(1, 20)); + } + result.fields(fields); if (randomBoolean()) { result.autoGenerateSynonymsPhraseQuery(randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 163386c12b6c9..ba673cf2ea4a9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query.functionscore; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -672,6 +671,29 @@ public void testRewriteWithFunction() throws IOException { assertSame(rewrite.filterFunctionBuilders()[1].getFilter(), secondFunction); } + /** + * Please see https://github.com/elastic/elasticsearch/issues/35123 for context. + */ + public void testSingleScriptFunction() throws IOException { + QueryBuilder queryBuilder = RandomQueryBuilder.createQuery(random()); + ScoreFunctionBuilder functionBuilder = new ScriptScoreFunctionBuilder( + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "1", Collections.emptyMap())); + + FunctionScoreQueryBuilder builder = functionScoreQuery(queryBuilder, functionBuilder); + if (randomBoolean()) { + builder.boostMode(randomFrom(CombineFunction.values())); + } + + Query query = builder.toQuery(createShardContext()); + assertThat(query, instanceOf(FunctionScoreQuery.class)); + + CombineFunction expectedBoostMode = builder.boostMode() != null + ?
builder.boostMode() + : FunctionScoreQueryBuilder.DEFAULT_BOOST_MODE; + CombineFunction actualBoostMode = ((FunctionScoreQuery) query).getCombineFunction(); + assertEquals(expectedBoostMode, actualBoostMode); + } + public void testQueryMalformedArrayNotSupported() throws IOException { String json = "{\n" + diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 0888dfd3c40c5..21439e3aa40ae 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -60,7 +60,7 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); - shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); + shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); } public void tearDown() throws Exception { @@ -110,7 +110,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { threadPool, shardStateAction, new ActionFilters(Collections.emptySet()), - new IndexNameExpressionResolver(Settings.EMPTY)); + new IndexNameExpressionResolver()); final GlobalCheckpointSyncAction.Request primaryRequest = new GlobalCheckpointSyncAction.Request(indexShard.shardId()); if (randomBoolean()) { action.shardOperationOnPrimary(primaryRequest, indexShard); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java b/server/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java index f835cff3f4656..9a279feddb973 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/SequenceNumbersTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.seqno; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -57,4 +58,11 @@ public void testMax() { assertThat(e, hasToString(containsString("sequence number must be assigned"))); } + public void testSeqNoStatsEqualsAndHashCode() { + final long maxSeqNo = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, Long.MAX_VALUE); + final long localCheckpoint = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, maxSeqNo); + final long globalCheckpoint = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, localCheckpoint); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint), + stats -> new SeqNoStats(stats.getMaxSeqNo(), stats.getLocalCheckpoint(), stats.getGlobalCheckpoint())); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 487ac7e0694ef..23380f9c171f8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -928,6 +928,7 @@ public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, final long maxSeqNoOfUpdatesOrDeletes = 
randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo); final Set docsBeforeRollback = getShardDocUIDs(indexShard); final CountDownLatch latch = new CountDownLatch(1); + final boolean shouldRollback = Math.max(globalCheckpointOnReplica, globalCheckpoint) < maxSeqNo; indexShard.acquireReplicaOperationPermit( indexShard.getPendingPrimaryTerm() + 1, globalCheckpoint, @@ -947,10 +948,13 @@ public void onFailure(Exception e) { ThreadPool.Names.SAME, ""); latch.await(); - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Collections.max( - Arrays.asList(maxSeqNoOfUpdatesOrDeletes, globalCheckpoint, globalCheckpointOnReplica)) - )); - + if (shouldRollback) { + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Collections.max( + Arrays.asList(maxSeqNoOfUpdatesOrDeletes, globalCheckpoint, globalCheckpointOnReplica)) + )); + } else { + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(maxSeqNoOfUpdatesOrDeletes, currentMaxSeqNoOfUpdates))); + } final ShardRouting newRouting = indexShard.routingEntry().moveActiveReplicaToPrimary(); final CountDownLatch resyncLatch = new CountDownLatch(1); indexShard.updateShardState( @@ -965,9 +969,13 @@ public void onFailure(Exception e) { assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); assertThat(getShardDocUIDs(indexShard), equalTo(docsBeforeRollback)); - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Collections.max( - Arrays.asList(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes, globalCheckpoint, globalCheckpointOnReplica)) - )); + if (shouldRollback) { + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Collections.max( + Arrays.asList(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes, globalCheckpoint, globalCheckpointOnReplica)) + )); + } else { + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes))); + } closeShard(indexShard, false); } @@ -1050,7 +1058,7 @@ public void testConcurrentTermIncreaseOnReplicaShard() throws BrokenBarrierExcep @Override public void onResponse(Releasable releasable) { counter.incrementAndGet(); - assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + increment)); + assertThat(indexShard.getOperationPrimaryTerm(), equalTo(primaryTerm + increment)); latch.countDown(); releasable.close(); } @@ -1095,6 +1103,7 @@ public void onFailure(Exception e) { } assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + Math.max(firstIncrement, secondIncrement))); + assertThat(indexShard.getOperationPrimaryTerm(), equalTo(indexShard.getPendingPrimaryTerm())); closeShards(indexShard); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index 28e625b34dfd6..62f6a7b234588 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -68,7 +68,7 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { assertThat(parentTask, instanceOf(PrimaryReplicaSyncer.ResyncTask.class)); listener.onResponse(new ResyncReplicationResponse()); }; - PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, taskManager, syncAction); + PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(taskManager, syncAction); 
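Editorial note on the IndexShardTests hunk above: it replaces an unconditional assertion with a branch on shouldRollback, because the engine is only reset when the incoming global checkpoint is still below the shard's max sequence number. As a reading aid only, the expected max-seq-no-of-updates marker after the term bump can be restated in one place; the class and method names below are invented for this sketch and are not part of the change.

    import java.util.Arrays;
    import java.util.Collections;

    // Hypothetical summary of the test's expectation; not production code.
    final class ExpectedMaxSeqNoOfUpdates {
        static long afterTermBump(boolean shouldRollback, long currentMsu, long msuFromPrimary,
                                  long globalCheckpoint, long globalCheckpointOnReplica) {
            if (shouldRollback) {
                // the replica rolled back its engine, so it conservatively advances
                // to the maximum value observed from any source
                return Collections.max(
                    Arrays.asList(msuFromPrimary, globalCheckpoint, globalCheckpointOnReplica));
            }
            // no rollback: only the advisory value from the primary can advance the marker
            return Math.max(currentMsu, msuFromPrimary);
        }
    }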
syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10))); int numDocs = randomInt(10); @@ -136,7 +136,7 @@ public void testSyncerOnClosingShard() throws Exception { syncActionCalled.set(true); threadPool.generic().execute(() -> listener.onResponse(new ResyncReplicationResponse())); }; - PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, + PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer( new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), syncAction); syncer.setChunkSize(new ByteSizeValue(1)); // every document is sent off separately diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 4411d3f3e934a..e27aefdf13fff 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -48,7 +47,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.Collection; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -501,11 +499,6 @@ public long ramBytesUsed() { return 0; } - @Override - public Collection getChildResources() { - return null; - } - @Override public boolean isFragment() { return false; diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 6f8689a9664ba..2159d8ed976b2 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -117,10 +117,12 @@ public CircuitBreaker getBreaker(String name) { @Override public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException { // Parent will trip right before regular breaker would trip - if (getBreaker(CircuitBreaker.REQUEST).getUsed() > parentLimit) { + long requestBreakerUsed = getBreaker(CircuitBreaker.REQUEST).getUsed(); + if (requestBreakerUsed > parentLimit) { parentTripped.incrementAndGet(); logger.info("--> parent tripped"); - throw new CircuitBreakingException("parent tripped"); + throw new CircuitBreakingException("parent tripped", requestBreakerUsed + newBytesReserved, + parentLimit, CircuitBreaker.Durability.PERMANENT); } } }; @@ -201,6 +203,7 @@ public void testBorrowingSiblingBreakerMemory() throws Exception { assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); assertThat(exception.getMessage(), containsString("usages [request=157286400/150mb, fielddata=54001664/51.5mb, in_flight_requests=0/0b, accounting=0/0b]")); + assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } } @@ -244,6 +247,7 @@ long currentMemoryUsage() { assertThat(exception.getMessage(), containsString("real usage: [181/181b], new bytes reserved: [" + (reservationInBytes * 2) + "/" + new ByteSizeValue(reservationInBytes * 2) + "]")); + 
assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); assertEquals(0, requestBreaker.getTrippedCount()); assertEquals(1, service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount()); @@ -252,4 +256,41 @@ long currentMemoryUsage() { requestBreaker.addEstimateBytesAndMaybeBreak(reservationInBytes, "request"); assertEquals(0, requestBreaker.getTrippedCount()); } + + public void testTrippedCircuitBreakerDurability() { + Settings clusterSettings = Settings.builder() + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), Boolean.FALSE) + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") + .build(); + try (CircuitBreakerService service = new HierarchyCircuitBreakerService(clusterSettings, + new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + CircuitBreaker requestCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.REQUEST); + CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.FIELDDATA); + + CircuitBreaker.Durability expectedDurability; + if (randomBoolean()) { + fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(100), "should not break"); + requestCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(70), "should not break"); + expectedDurability = CircuitBreaker.Durability.PERMANENT; + } else { + fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(70), "should not break"); + requestCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(120), "should not break"); + expectedDurability = CircuitBreaker.Durability.TRANSIENT; + } + + CircuitBreakingException exception = expectThrows(CircuitBreakingException.class, () -> + fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(mb(40), "should break")); + + assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); + assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); + assertThat("Expected [" + expectedDurability + "] due to [" + exception.getMessage() + "]", + exception.getDurability(), equalTo(expectedDurability)); + } + } + + private long mb(long size) { + return new ByteSizeValue(size, ByteSizeUnit.MB).getBytes(); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 3d8b1decea41e..76918a916124c 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -106,6 +106,9 @@ import static org.mockito.Mockito.when; public class ClusterStateChanges extends AbstractComponent { + private static final Settings SETTINGS = Settings.builder() + .put(PATH_HOME_SETTING.getKey(), "dummy") + .build(); private final AllocationService allocationService; private final ClusterService clusterService; @@ -124,21 +127,19 @@ public class ClusterStateChanges extends AbstractComponent { private final NodeJoinController.JoinTaskExecutor joinTaskExecutor; public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool threadPool) { - super(Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build()); - - ClusterSettings clusterSettings = new 
ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - allocationService = new AllocationService(settings, new AllocationDeciders(settings, - new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings, clusterSettings), - new ReplicaAfterPrimaryActiveAllocationDecider(settings), + ClusterSettings clusterSettings = new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + allocationService = new AllocationService(new AllocationDeciders( + new HashSet<>(Arrays.asList(new SameShardAllocationDecider(SETTINGS, clusterSettings), + new ReplicaAfterPrimaryActiveAllocationDecider(), new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), - new TestGatewayAllocator(), new BalancedShardsAllocator(settings), + new TestGatewayAllocator(), new BalancedShardsAllocator(SETTINGS), EmptyClusterInfoService.INSTANCE); shardFailedClusterStateTaskExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); shardStartedClusterStateTaskExecutor = new ShardStateAction.ShardStartedClusterStateTaskExecutor(allocationService, logger); ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); - IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); - DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings); - Environment environment = TestEnvironment.newEnvironment(settings); + IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + DestructiveOperations destructiveOperations = new DestructiveOperations(SETTINGS, clusterSettings); + Environment environment = TestEnvironment.newEnvironment(SETTINGS); Transport transport = mock(Transport.class); // it's not used // mocks @@ -165,11 +166,11 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th } // services - TransportService transportService = new TransportService(settings, transport, threadPool, + TransportService transportService = new TransportService(SETTINGS, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, + boundAddress -> DiscoveryNode.createLocal(SETTINGS, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, Collections.emptySet()); - MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, xContentRegistry, null, null, + MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(SETTINGS, xContentRegistry, null, null, null) { // metaData upgrader should do nothing @Override @@ -177,29 +178,29 @@ public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version m return indexMetaData; } }; - MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(settings, clusterService, allocationService, + MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(clusterService, allocationService, metaDataIndexUpgradeService, indicesService, threadPool); - MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(settings, clusterService, allocationService); - MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService, + MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(SETTINGS, clusterService, 
allocationService); + MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(clusterService, allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, indicesService, threadPool); - MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService, - allocationService, new AliasValidator(settings), environment, + MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(SETTINGS, clusterService, indicesService, + allocationService, new AliasValidator(), environment, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, xContentRegistry, true); - transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool, + transportCloseIndexAction = new TransportCloseIndexAction(SETTINGS, transportService, clusterService, threadPool, indexStateService, clusterSettings, actionFilters, indexNameExpressionResolver, destructiveOperations); - transportOpenIndexAction = new TransportOpenIndexAction(settings, transportService, + transportOpenIndexAction = new TransportOpenIndexAction(transportService, clusterService, threadPool, indexStateService, actionFilters, indexNameExpressionResolver, destructiveOperations); - transportDeleteIndexAction = new TransportDeleteIndexAction(settings, transportService, + transportDeleteIndexAction = new TransportDeleteIndexAction(transportService, clusterService, threadPool, deleteIndexService, actionFilters, indexNameExpressionResolver, destructiveOperations); - transportUpdateSettingsAction = new TransportUpdateSettingsAction(settings, + transportUpdateSettingsAction = new TransportUpdateSettingsAction( transportService, clusterService, threadPool, metaDataUpdateSettingsService, actionFilters, indexNameExpressionResolver); - transportClusterRerouteAction = new TransportClusterRerouteAction(settings, + transportClusterRerouteAction = new TransportClusterRerouteAction( transportService, clusterService, threadPool, allocationService, actionFilters, indexNameExpressionResolver); - transportCreateIndexAction = new TransportCreateIndexAction(settings, + transportCreateIndexAction = new TransportCreateIndexAction( transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver); - ElectMasterService electMasterService = new ElectMasterService(settings); + ElectMasterService electMasterService = new ElectMasterService(SETTINGS); nodeRemovalExecutor = new ZenDiscovery.NodeRemovalClusterStateTaskExecutor(allocationService, electMasterService, s -> { throw new AssertionError("rejoin not implemented"); }, logger); joinTaskExecutor = new NodeJoinController.JoinTaskExecutor(allocationService, electMasterService, logger); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 39091ce04ec6e..4625aa04be372 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -462,7 +462,7 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, 
transportService, null, threadPool); - final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool, + final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(threadPool, transportService, null, clusterService); final ShardStateAction shardStateAction = mock(ShardStateAction.class); final PrimaryReplicaSyncer primaryReplicaSyncer = mock(PrimaryReplicaSyncer.class); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java index 524795bfa2480..72eb2baeca942 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -37,7 +37,7 @@ public class PeerRecoverySourceServiceTests extends IndexShardTestCase { public void testDuplicateRecoveries() throws IOException { IndexShard primary = newStartedShard(true); - PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService(Settings.EMPTY, + PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService( mock(TransportService.class), mock(IndicesService.class), new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))); StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest(primary.shardId(), randomAlphaOfLength(10), diff --git a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 6972656d386d0..54750933ecd90 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -126,14 +126,12 @@ public static class TransportUpdateInternalOrPrivateAction @Inject public TransportUpdateInternalOrPrivateAction( - final Settings settings, final TransportService transportService, final ClusterService clusterService, final ThreadPool threadPool, final ActionFilters actionFilters, final IndexNameExpressionResolver indexNameExpressionResolver) { super( - settings, UpdateInternalOrPrivateAction.NAME, transportService, clusterService, diff --git a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index fdff68510af84..fb3eac28b6793 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -445,7 +445,8 @@ public void testEngineGCDeletesSetting() throws InterruptedException { client().prepareDelete("test", "type", "1").get(); // sets version to 4 Thread.sleep(300); // wait for cache time to change TODO: this needs to be solved better. To be discussed. 
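For context on the testEngineGCDeletesSetting hunk that continues below: with index.gc_deletes set to a very small value, the tombstone left by a delete is garbage-collected almost immediately, after which the engine can no longer validate a write against the deleted document's version, so the versioned write in the assertion that follows must fail. A condensed sketch of that flow, simplified from the test; the concrete version numbers here are assumptions for illustration.

    // assumes an index "test" whose index.gc_deletes is effectively zero,
    // so delete tombstones become eligible for collection almost immediately
    client().prepareIndex("test", "type", "1").setSource("f", 1).get(); // version 1
    client().prepareDelete("test", "type", "1").get();                  // version 2, leaves a tombstone
    Thread.sleep(300); // crude wait for the tombstone to be garbage-collected
    // the engine has forgotten the delete, so a write conditioned on the
    // tombstone's version now raises a version conflict
    assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 2).setVersion(2),
        VersionConflictEngineException.class);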
// delete is should not be in cache - assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setVersion(4), VersionConflictEngineException.class); + assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3) + .setVersion(4), VersionConflictEngineException.class); } diff --git a/server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java index 234524f16f454..873ebf17f2382 100644 --- a/server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment.NodePath; import org.elasticsearch.test.ESTestCase; @@ -51,7 +50,7 @@ public class FsProbeTests extends ESTestCase { public void testFsInfo() throws IOException { try (NodeEnvironment env = newNodeEnvironment()) { - FsProbe probe = new FsProbe(Settings.EMPTY, env); + FsProbe probe = new FsProbe(env); FsInfo stats = probe.stats(null, null); assertNotNull(stats); @@ -166,7 +165,7 @@ public void testIoStats() { " 253 1 dm-1 112 0 4624 13 0 0 0 0 0 5 13", " 253 2 dm-2 47802 0 710658 49312 1371977 0 64126096 33730596 0 1058193 33781827")); - final FsProbe probe = new FsProbe(Settings.EMPTY, null) { + final FsProbe probe = new FsProbe(null) { @Override List readProcDiskStats() throws IOException { return diskStats.get(); diff --git a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java index 8aa0f3ec5bad6..5fedfa7869e8b 100644 --- a/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/node/ResponseCollectorServiceTests.java @@ -53,7 +53,7 @@ public void setUp() throws Exception { clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadpool); - collector = new ResponseCollectorService(Settings.EMPTY, clusterService); + collector = new ResponseCollectorService(clusterService); } @After diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index f13a35613d530..3eec748808ecf 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -640,8 +640,8 @@ private void changeRoutingTable(MetaData.Builder metaData, RoutingTable.Builder /** Creates a PersistentTasksClusterService with a single PersistentTasksExecutor implemented by a BiFunction **/ private

<P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) { - PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, - singleton(new PersistentTasksExecutor<P>(Settings.EMPTY, TestPersistentTasksExecutor.NAME, null) { + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry( + singleton(new PersistentTasksExecutor<P>
      (TestPersistentTasksExecutor.NAME, null) { @Override public Assignment getAssignment(P params, ClusterState clusterState) { return fn.apply(params, clusterState); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java index 655a21a5f5390..7f2dada7c4cb7 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -58,10 +58,10 @@ public static void setUpThreadPool() { public void setUp() throws Exception { super.setUp(); clusterService = createClusterService(threadPool); - PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(clusterService.getSettings(), emptyList()) { + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(emptyList()) { @Override public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { - return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { + return new PersistentTasksExecutor(taskName, null) { @Override protected void nodeOperation(AllocatedPersistentTask task, Params params, PersistentTaskState state) { logger.debug("Executing task {}", task); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 50bcf5949267e..8aa553639ccde 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -106,10 +106,10 @@ public void testStartTask() { when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn( new TestPersistentTasksPlugin.TestTask(i, "persistent", "test", "", parentId, Collections.emptyMap())); } - PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action)); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Collections.singletonList(action)); MockExecutor executor = new MockExecutor(); - PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService, + PersistentTasksNodeService coordinator = new PersistentTasksNodeService(persistentTasksService, registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor); ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY); @@ -202,10 +202,10 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { AllocatedPersistentTask nodeTask = new TestPersistentTasksPlugin.TestTask(0, "persistent", "test", "", parentId, Collections.emptyMap()); when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn(nodeTask); - PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action)); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Collections.singletonList(action)); MockExecutor executor = new MockExecutor(); - PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService, + PersistentTasksNodeService coordinator = new 
PersistentTasksNodeService(persistentTasksService, registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor); ClusterState state = createInitialClusterState(1, Settings.EMPTY); @@ -231,7 +231,7 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { public void testTaskCancellation() { AtomicLong capturedTaskId = new AtomicLong(); AtomicReference> capturedListener = new AtomicReference<>(); - PersistentTasksService persistentTasksService = new PersistentTasksService(Settings.EMPTY, null, null, null) { + PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, null) { @Override void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { capturedTaskId.set(taskId); @@ -250,12 +250,12 @@ public void sendCompletionRequest(final String taskId, final long taskAllocation when(action.createTask(anyLong(), anyString(), anyString(), any(), any(), any())) .thenReturn(new TestPersistentTasksPlugin.TestTask(1, "persistent", "test", "", new TaskId("cluster", 1), Collections.emptyMap())); - PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action)); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Collections.singletonList(action)); int nonLocalNodesCount = randomInt(10); MockExecutor executor = new MockExecutor(); TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService, + PersistentTasksNodeService coordinator = new PersistentTasksNodeService(persistentTasksService, registry, taskManager, executor); ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY); diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 745b883656958..7722a8ad66527 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -44,7 +44,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -91,7 +90,7 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P @Override public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, Client client) { - return Collections.singletonList(new TestPersistentTasksExecutor(Settings.EMPTY, clusterService)); + return Collections.singletonList(new TestPersistentTasksExecutor(clusterService)); } @Override @@ -292,8 +291,8 @@ public static class TestPersistentTasksExecutor extends PersistentTasksExecutor< public static final String NAME = "cluster:admin/persistent/test"; private final ClusterService clusterService; - public TestPersistentTasksExecutor(Settings settings, ClusterService clusterService) { - super(settings, NAME, ThreadPool.Names.GENERIC); + public TestPersistentTasksExecutor(ClusterService clusterService) { + super(NAME, 
ThreadPool.Names.GENERIC); this.clusterService = clusterService; } @@ -510,9 +509,8 @@ public static class TransportTestTaskAction extends TransportTasksAction { @Inject - public TransportTestTaskAction(Settings settings, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters) { - super(settings, TestTaskAction.NAME, clusterService, transportService, actionFilters, + public TransportTestTaskAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { + super(TestTaskAction.NAME, clusterService, transportService, actionFilters, TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT); } diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index d35a8b5d24955..29cbb8f48ec9f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; -import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; @@ -85,12 +84,12 @@ public void setup() { .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - usageService = new UsageService(settings); + usageService = new UsageService(); // we can do this here only because we know that we don't adjust breaker settings dynamically in the test inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); HttpServerTransport httpServerTransport = new TestHttpServerTransport(); - restController = new RestController(settings, Collections.emptySet(), null, null, circuitBreakerService, usageService); + restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService); restController.registerHandler(RestRequest.Method.GET, "/", (request, channel, client) -> channel.sendResponse( new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY))); @@ -104,7 +103,7 @@ public void setup() { public void testApplyRelevantHeaders() throws Exception { final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); Set headers = new HashSet<>(Arrays.asList("header.1", "header.2")); - final RestController restController = new RestController(Settings.EMPTY, headers, null, null, circuitBreakerService, usageService); + final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService); Map> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("true")); restHeaders.put("header.2", Collections.singletonList("true")); @@ -137,8 +136,7 @@ public MethodHandlers next() { } public void testCanTripCircuitBreaker() throws Exception { - RestController controller = new RestController(Settings.EMPTY, Collections.emptySet(), null, null, circuitBreakerService, - usageService); + RestController controller = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService); // trip circuit breaker by default controller.registerHandler(RestRequest.Method.GET, "/trip", new 
FakeRestHandler(true)); controller.registerHandler(RestRequest.Method.GET, "/do-not-trip", new FakeRestHandler(false)); @@ -210,8 +208,8 @@ public void testRestHandlerWrapper() throws Exception { assertSame(handler, h); return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true); }; - final RestController restController = new RestController(Settings.EMPTY, Collections.emptySet(), wrapper, null, - circuitBreakerService, usageService); + final RestController restController = new RestController(Collections.emptySet(), wrapper, null, + circuitBreakerService, usageService); restController.dispatchRequest(new FakeRestRequest.Builder(xContentRegistry()).build(), null, null, Optional.of(handler)); assertTrue(wrapperCalled.get()); assertFalse(handlerCalled.get()); @@ -279,7 +277,7 @@ public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, XContentType.JSON); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.SERVICE_UNAVAILABLE); + AssertingChannel channel = new AssertingChannel(request, true, RestStatus.TOO_MANY_REQUESTS); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); @@ -291,9 +289,7 @@ public void testDispatchRequiresContentTypeForRequestsWithContent() { String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, null); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); - restController = new RestController( - Settings.builder().put(HttpTransportSettings.SETTING_HTTP_CONTENT_TYPE_REQUIRED.getKey(), true).build(), - Collections.emptySet(), null, null, circuitBreakerService, usageService); + restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService); restController.registerHandler(RestRequest.Method.GET, "/", (r, c, client) -> c.sendResponse( new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY))); diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index e5e8bce6d6da1..6a4a8749397ab 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -87,8 +87,8 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); final Settings settings = Settings.EMPTY; - UsageService usageService = new UsageService(settings); - RestController restController = new RestController(settings, Collections.emptySet(), + UsageService usageService = new UsageService(); + RestController restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService); // A basic RestHandler handles requests to the endpoint diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java index 29b19739e7587..79d04e2816117 100644 --- 
a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java @@ -52,7 +52,7 @@ private void runTestFilterSettingsTest( final Settings settings = Settings.builder().put("foo.filtered", "bar").put("foo.non_filtered", "baz").build(); md.accept(mdBuilder, settings); final ClusterState.Builder builder = new ClusterState.Builder(ClusterState.EMPTY_STATE).metaData(mdBuilder); - final SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered")); + final SettingsFilter filter = new SettingsFilter(Collections.singleton("foo.filtered")); final Setting.Property[] properties = {Setting.Property.Dynamic, Setting.Property.Filtered, Setting.Property.NodeScope}; final Set> settingsSet = Stream.concat( ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java index 640b97605af15..330dee49f9f06 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java @@ -44,9 +44,9 @@ public class RestNodesStatsActionTests extends ESTestCase { @Override public void setUp() throws Exception { super.setUp(); - UsageService usageService = new UsageService(Settings.EMPTY); + UsageService usageService = new UsageService(); action = new RestNodesStatsAction(Settings.EMPTY, - new RestController(Settings.EMPTY, Collections.emptySet(), null, null, null, usageService)); + new RestController(Collections.emptySet(), null, null, null, usageService)); } public void testUnrecognizedMetric() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java index 26c1e1fa17779..1eda721f53b25 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java @@ -42,9 +42,9 @@ public class RestIndicesStatsActionTests extends ESTestCase { @Override public void setUp() throws Exception { super.setUp(); - UsageService usageService = new UsageService(Settings.EMPTY); + UsageService usageService = new UsageService(); action = new RestIndicesStatsAction(Settings.EMPTY, - new RestController(Settings.EMPTY, Collections.emptySet(), null, null, null, usageService)); + new RestController(Collections.emptySet(), null, null, null, usageService)); } public void testUnrecognizedMetric() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 980a2c2e34ed3..da31e3ad13e6e 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -55,8 +55,8 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase { private static ThreadPool threadPool = new 
TestThreadPool(RestValidateQueryActionTests.class.getName()); private static NodeClient client = new NodeClient(Settings.EMPTY, threadPool); - private static UsageService usageService = new UsageService(Settings.EMPTY); - private static RestController controller = new RestController(Settings.EMPTY, emptySet(), null, client, null, usageService); + private static UsageService usageService = new UsageService(); + private static RestController controller = new RestController(emptySet(), null, client, null, usageService); private static RestValidateQueryAction action = new RestValidateQueryAction(Settings.EMPTY, controller); /** @@ -68,7 +68,7 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase { public static void stubValidateQueryAction() { final TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); - final TransportAction transportAction = new TransportAction(Settings.EMPTY, ValidateQueryAction.NAME, + final TransportAction transportAction = new TransportAction(ValidateQueryAction.NAME, new ActionFilters(Collections.emptySet()), taskManager) { @Override protected void doExecute(Task task, ActionRequest request, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java index 4535bf7a91b0d..13e94f7fe5368 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestIndicesActionTests.java @@ -75,9 +75,9 @@ public class RestIndicesActionTests extends ESTestCase { public void testBuildTable() { final Settings settings = Settings.EMPTY; - UsageService usageService = new UsageService(settings); - final RestController restController = new RestController(settings, Collections.emptySet(), null, null, null, usageService); - final RestIndicesAction action = new RestIndicesAction(settings, restController, new IndexNameExpressionResolver(settings)); + UsageService usageService = new UsageService(); + final RestController restController = new RestController(Collections.emptySet(), null, null, null, usageService); + final RestIndicesAction action = new RestIndicesAction(settings, restController, new IndexNameExpressionResolver()); // build a (semi-)random table final int numIndices = randomIntBetween(0, 5); diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java index 32993a6b7c720..bf3d40af5e0e9 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java @@ -47,9 +47,9 @@ public class RestNodesActionTests extends ESTestCase { @Before public void setUpAction() { - UsageService usageService = new UsageService(Settings.EMPTY); + UsageService usageService = new UsageService(); action = new RestNodesAction(Settings.EMPTY, - new RestController(Settings.EMPTY, Collections.emptySet(), null, null, null, usageService)); + new RestController(Collections.emptySet(), null, null, null, usageService)); } public void testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index 
e99fb4cc1f258..25f04532ac8ce 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -51,8 +51,8 @@ public class RestRecoveryActionTests extends ESTestCase { public void testRestRecoveryAction() { final Settings settings = Settings.EMPTY; - UsageService usageService = new UsageService(settings); - final RestController restController = new RestController(settings, Collections.emptySet(), null, null, null, usageService); + UsageService usageService = new UsageService(); + final RestController restController = new RestController(Collections.emptySet(), null, null, null, usageService); final RestRecoveryAction action = new RestRecoveryAction(settings, restController); final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); diff --git a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 22a250710ba3c..271007f99787d 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; @@ -54,6 +55,7 @@ public class ScriptServiceTests extends ESTestCase { private Map> contexts; private ScriptService scriptService; private Settings baseSettings; + private ClusterSettings clusterSettings; @Before public void setup() throws IOException { @@ -78,12 +80,22 @@ public void setup() throws IOException { private void buildScriptService(Settings additionalSettings) throws IOException { Settings finalSettings = Settings.builder().put(baseSettings).put(additionalSettings).build(); scriptService = new ScriptService(finalSettings, engines, contexts) { + @Override + Map getScriptsFromClusterState() { + Map scripts = new HashMap<>(); + scripts.put("test1", new StoredScriptSource("test", "1+1", Collections.emptyMap())); + scripts.put("test2", new StoredScriptSource("test", "1", Collections.emptyMap())); + return scripts; + } + @Override StoredScriptSource getScriptFromClusterState(String id) { //mock the script that gets retrieved from an index return new StoredScriptSource("test", "1+1", Collections.emptyMap()); } }; + clusterSettings = new ClusterSettings(finalSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + scriptService.registerClusterSettingsListeners(clusterSettings); } // even though circuit breaking is allowed to be configured per minute, we actually weigh this over five minutes @@ -305,6 +317,24 @@ public void testGetStoredScript() throws Exception { assertNull(scriptService.getStoredScript(cs, new GetStoredScriptRequest("_id"))); } + public void testMaxSizeLimit() throws Exception { + buildScriptService(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 4).build()); + scriptService.compile(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(contexts.values())); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> { + scriptService.compile(new 
Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); + }); + assertEquals("exceeded max allowed inline script size in bytes [4] with size [5] for script [10+10]", iae.getMessage()); + clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 6).build()); + scriptService.compile(new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); + clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 5).build()); + scriptService.compile(new Script(ScriptType.INLINE, "test", "10+10", Collections.emptyMap()), randomFrom(contexts.values())); + iae = expectThrows(IllegalArgumentException.class, () -> { + clusterSettings.applySettings(Settings.builder().put(ScriptService.SCRIPT_MAX_SIZE_IN_BYTES.getKey(), 2).build()); + }); + assertEquals("script.max_size_in_bytes cannot be set to [2], stored script [test1] exceeds the new value with a size of [3]", + iae.getMessage()); + } + private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { try { scriptService.compile(new Script(scriptType, lang, script, Collections.emptyMap()), scriptContext); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index ed9b8992577d8..37fc9cac029fe 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; @@ -85,6 +86,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -552,4 +554,17 @@ public void testSetSearchThrottled() { // we still make sure can match is executed on the network thread service.canMatch(req, ActionListener.wrap(r -> assertSame(Thread.currentThread(), currentThread), e -> fail("unexpected"))); } + + public void testExpandSearchThrottled() { + createIndex("throttled_threadpool_index"); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request("throttled_threadpool_index", + IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), "true")) + .actionGet(); + + client().prepareIndex("throttled_threadpool_index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(client().prepareSearch().get(), 0L); + assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED).get(), 1L); + } } diff --git 
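The new `testMaxSizeLimit` above exercises the dynamic `script.max_size_in_bytes` limit from both directions: compiling an inline script larger than the limit fails, and lowering the limit below the size of an already-stored script is itself rejected when the cluster setting is applied. A minimal sketch of those two checks, assuming UTF-8 byte length and illustrative names (this is not the actual ScriptService code; only the error-message shapes are taken from the test's assertions):

--------------------------------------------------
import java.nio.charset.StandardCharsets;
import java.util.Map;

// Sketch only: class, method, and field names are assumptions.
final class ScriptSizeLimit {

    static void checkInlineSize(int maxSizeInBytes, String source) {
        final int size = source.getBytes(StandardCharsets.UTF_8).length;
        if (size > maxSizeInBytes) {
            // message shape matches the assertion in testMaxSizeLimit
            throw new IllegalArgumentException("exceeded max allowed inline script size in bytes ["
                + maxSizeInBytes + "] with size [" + size + "] for script [" + source + "]");
        }
    }

    static void validateNewLimit(int newMax, Map<String, String> storedScriptSources) {
        for (Map.Entry<String, String> entry : storedScriptSources.entrySet()) {
            final int size = entry.getValue().getBytes(StandardCharsets.UTF_8).length;
            if (size > newMax) {
                throw new IllegalArgumentException("script.max_size_in_bytes cannot be set to ["
                    + newMax + "], stored script [" + entry.getKey()
                    + "] exceeds the new value with a size of [" + size + "]");
            }
        }
    }
}
--------------------------------------------------

Because the test registers the service with `ClusterSettings` via `registerClusterSettingsListeners`, the second check runs as a settings-update validator, which is why `clusterSettings.applySettings(...)` is the call that throws.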
a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index ac34a96f0d992..097a3949fc27a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.StringTermsTests; import org.elasticsearch.search.aggregations.metrics.InternalExtendedStatsTests; import org.elasticsearch.search.aggregations.metrics.InternalMaxTests; +import org.elasticsearch.search.aggregations.metrics.InternalMedianAbsoluteDeviationTests; import org.elasticsearch.search.aggregations.metrics.InternalMinTests; import org.elasticsearch.search.aggregations.metrics.InternalStatsBucketTests; import org.elasticsearch.search.aggregations.metrics.InternalStatsTests; @@ -148,6 +149,7 @@ private static List> getAggsTests() { aggsTests.add(new InternalBinaryRangeTests()); aggsTests.add(new InternalTopHitsTests()); aggsTests.add(new InternalCompositeTests()); + aggsTests.add(new InternalMedianAbsoluteDeviationTests()); return Collections.unmodifiableList(aggsTests); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviationTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviationTests.java new file mode 100644 index 0000000000000..1825afa2a3f5f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviationTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.test.InternalAggregationTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class InternalMedianAbsoluteDeviationTests extends InternalAggregationTestCase { + + @Override + protected InternalMedianAbsoluteDeviation createTestInstance(String name, + List pipelineAggregators, + Map metaData) { + + final TDigestState valuesSketch = new TDigestState(randomDoubleBetween(20, 1000, true)); + final int numberOfValues = frequently() + ? 
randomIntBetween(0, 1000) + : 0; + for (int i = 0; i < numberOfValues; i++) { + valuesSketch.add(randomDouble()); + } + + return new InternalMedianAbsoluteDeviation(name, pipelineAggregators, metaData, randomNumericDocValueFormat(), valuesSketch); + } + + @Override + protected void assertReduced(InternalMedianAbsoluteDeviation reduced, List inputs) { + final TDigestState expectedValuesSketch = new TDigestState(reduced.getValuesSketch().compression()); + + long totalCount = 0; + for (InternalMedianAbsoluteDeviation input : inputs) { + expectedValuesSketch.add(input.getValuesSketch()); + totalCount += input.getValuesSketch().size(); + } + + assertEquals(totalCount, reduced.getValuesSketch().size()); + if (totalCount > 0) { + assertEquals(expectedValuesSketch.quantile(0), reduced.getValuesSketch().quantile(0), 0d); + assertEquals(expectedValuesSketch.quantile(1), reduced.getValuesSketch().quantile(1), 0d); + } + } + + @Override + protected void assertFromXContent(InternalMedianAbsoluteDeviation internalMAD, ParsedAggregation parsedAggregation) throws IOException { + assertTrue(parsedAggregation instanceof ParsedMedianAbsoluteDeviation); + ParsedMedianAbsoluteDeviation parsedMAD = (ParsedMedianAbsoluteDeviation) parsedAggregation; + // Double.compare handles NaN, which we use for no result + assertEquals(internalMAD.getMedianAbsoluteDeviation(), parsedMAD.getMedianAbsoluteDeviation(), 0); + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalMedianAbsoluteDeviation::new; + } + + @Override + protected InternalMedianAbsoluteDeviation mutateInstance(InternalMedianAbsoluteDeviation instance) throws IOException { + String name = instance.getName(); + TDigestState valuesSketch = instance.getValuesSketch(); + Map metaData = instance.getMetaData(); + + switch (between(0, 2)) { + case 0: + name += randomAlphaOfLengthBetween(2, 10); + break; + case 1: + final TDigestState newValuesSketch = new TDigestState(instance.getValuesSketch().compression()); + final int numberOfValues = between(10, 100); + for (int i = 0; i < numberOfValues; i++) { + newValuesSketch.add(randomDouble()); + } + valuesSketch = newValuesSketch; + break; + case 2: + if (metaData == null) { + metaData = new HashMap<>(1); + } else { + metaData = new HashMap<>(metaData); + } + metaData.put(randomAlphaOfLengthBetween(2, 10), randomInt()); + break; + } + + return new InternalMedianAbsoluteDeviation(name, instance.pipelineAggregators(), metaData, instance.format, valuesSketch); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java new file mode 100644 index 0000000000000..d47d9006b7f38 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -0,0 +1,251 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
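The `assertReduced` override above pins down what reducing `InternalMedianAbsoluteDeviation` shard results must do: merge the per-shard `TDigestState` sketches so their sizes add up and the merged quantiles agree. In sketch form, using only the `TDigestState` calls that appear in this test:

--------------------------------------------------
// Reduce contract reconstructed from the assertions in assertReduced.
TDigestState merged = new TDigestState(shardResults.get(0).getValuesSketch().compression());
for (InternalMedianAbsoluteDeviation shard : shardResults) {
    merged.add(shard.getValuesSketch());  // merge centroids; size() values accumulate
}
// After reduce: reduced.getValuesSketch().size() == merged.size(), and when that
// size is non-zero the reduced sketch's quantile(0) and quantile(1) match merged's.
--------------------------------------------------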
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.hamcrest.Description; +import org.hamcrest.TypeSafeMatcher; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.IntStream; + +import static java.util.Collections.singleton; +import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.ExactMedianAbsoluteDeviation.calculateMAD; +import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; +import static org.hamcrest.Matchers.equalTo; + +public class MedianAbsoluteDeviationAggregatorTests extends AggregatorTestCase { + + private static final int SAMPLE_MIN = -1000000; + private static final int SAMPLE_MAX = 1000000; + + private static CheckedConsumer randomSample( + int size, + Function> field) { + + return writer -> { + for (int i = 0; i < size; i++) { + final long point = randomLongBetween(SAMPLE_MIN, SAMPLE_MAX); + Iterable document = field.apply(point); + writer.addDocument(document); + } + }; + } + + // intentionally not writing any docs + public void testNoDocs() throws IOException { + testCase(new MatchAllDocsQuery(), writer -> {}, agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN))); + } + + public void testNoMatchingField() throws IOException { + testCase( + new MatchAllDocsQuery(), + writer -> { + writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); + writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 2))); + }, + agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + ); + } + + public void testSomeMatchesSortedNumericDocValues() throws IOException { + final int size = randomIntBetween(100, 1000); + final List sample = new ArrayList<>(size); + testCase( + new DocValuesFieldExistsQuery("number"), + randomSample(size, point -> { + sample.add(point); + return singleton(new SortedNumericDocValuesField("number", point)); + }), + agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + ); + } + + public void 
testSomeMatchesNumericDocValues() throws IOException { + final int size = randomIntBetween(100, 1000); + final List sample = new ArrayList<>(size); + testCase( + new DocValuesFieldExistsQuery("number"), + randomSample(size, point -> { + sample.add(point); + return singleton(new NumericDocValuesField("number", point)); + }), + agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + ); + } + + public void testQueryFiltering() throws IOException { + final int lowerRange = 1; + final int upperRange = 500; + final int[] sample = IntStream.rangeClosed(1, 1000).toArray(); + final int[] filteredSample = Arrays.stream(sample).filter(point -> point >= lowerRange && point <= upperRange).toArray(); + testCase( + IntPoint.newRangeQuery("number", lowerRange, upperRange), + writer -> { + for (int point : sample) { + writer.addDocument(Arrays.asList(new IntPoint("number", point), new SortedNumericDocValuesField("number", point))); + } + }, + agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))) + ); + } + + public void testQueryFiltersAll() throws IOException { + testCase( + IntPoint.newRangeQuery("number", -1, 0), + writer -> { + writer.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); + writer.addDocument(Arrays.asList(new IntPoint("number", 2), new SortedNumericDocValuesField("number", 2))); + }, + agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + ); + } + + private void testCase(Query query, + CheckedConsumer buildIndex, + Consumer verify) throws IOException { + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + buildIndex.accept(indexWriter); + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + MedianAbsoluteDeviationAggregationBuilder builder = new MedianAbsoluteDeviationAggregationBuilder("mad") + .field("number") + .compression(randomDoubleBetween(20, 1000, true)); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + + MedianAbsoluteDeviationAggregator aggregator = createAggregator(builder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(query, aggregator); + aggregator.postCollection(); + + verify.accept((InternalMedianAbsoluteDeviation) aggregator.buildAggregation(0L)); + } + } + + } + + public static class IsCloseToRelative extends TypeSafeMatcher { + + private final double expected; + private final double error; + + public IsCloseToRelative(double expected, double error) { + this.expected = expected; + this.error = error; + } + + @Override + protected boolean matchesSafely(Double actual) { + final double deviation = Math.abs(actual - expected); + final double observedError = deviation / Math.abs(expected); + return observedError <= error; + } + + @Override + public void describeTo(Description description) { + description + .appendText("within ") + .appendValue(error * 100) + .appendText(" percent of ") + .appendValue(expected); + } + + public static IsCloseToRelative closeToRelative(double expected, double error) { + return new IsCloseToRelative(expected, error); + } + + public static IsCloseToRelative closeToRelative(double expected) { + return closeToRelative(expected, 0.1); + } + } + + /** + * This class is an implementation of median absolute 
deviation that computes an exact value, rather than the approximation used in the
+ * aggregation. It's used to verify that the aggregation's approximate results are close enough to the exact result.
+ */
+    public static class ExactMedianAbsoluteDeviation {
+
+        public static double calculateMAD(int[] sample) {
+            return calculateMAD(Arrays.stream(sample)
+                .mapToDouble(point -> (double) point)
+                .toArray());
+        }
+
+        public static double calculateMAD(long[] sample) {
+            return calculateMAD(Arrays.stream(sample)
+                .mapToDouble(point -> (double) point)
+                .toArray());
+        }
+
+        public static double calculateMAD(List<Long> sample) {
+            return calculateMAD(sample.stream()
+                .mapToDouble(Long::doubleValue)
+                .toArray());
+        }
+
+        public static double calculateMAD(double[] sample) {
+            final double median = calculateMedian(sample);
+
+            final double[] deviations = Arrays.stream(sample)
+                .map(point -> Math.abs(median - point))
+                .toArray();
+
+            final double mad = calculateMedian(deviations);
+            return mad;
+        }
+
+        private static double calculateMedian(double[] sample) {
+            final double[] sorted = Arrays.copyOf(sample, sample.length);
+            Arrays.sort(sorted);
+
+            final int halfway = (int) Math.ceil(sorted.length / 2d);
+            final double median = (sorted[halfway - 1] + sorted[halfway]) / 2d;
+            return median;
+        }
+
+    }
+}

diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java
new file mode 100644
index 0000000000000..4e056ceb1b897
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java
@@ -0,0 +1,621 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
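For reference, `ExactMedianAbsoluteDeviation` above computes the textbook statistic MAD(x) = median(|x_i - median(x)|). One subtlety: `calculateMedian` always averages two neighbouring elements (`sorted[halfway - 1]` and `sorted[halfway]`), which is exact for even-length samples but slightly off for odd-length ones, and would throw on a single-element array; the 10% tolerance of `closeToRelative` absorbs the difference. A conventional median, shown for comparison only and not part of this change:

--------------------------------------------------
// Conventional median, for comparison with calculateMedian above.
static double textbookMedian(double[] sample) {
    final double[] sorted = Arrays.copyOf(sample, sample.length);
    Arrays.sort(sorted);
    final int mid = sorted.length / 2;
    return (sorted.length % 2 == 1)
        ? sorted[mid]                               // odd length: the middle element
        : (sorted[mid - 1] + sorted[mid]) / 2d;     // even length: mean of the two middle elements
}
--------------------------------------------------

Note also that MAD is translation-invariant (adding a constant to every value leaves it unchanged), which is why the value-script tests in the IT below that apply `_value + 1` expect, up to sketch error, the same result as the raw sample.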
+ */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; +import org.elasticsearch.search.aggregations.bucket.global.Global; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.range.Range; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.LongStream; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; +import static org.elasticsearch.search.aggregations.AggregationBuilders.global; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.range; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; +import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.ExactMedianAbsoluteDeviation.calculateMAD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.sameInstance; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNull.notNullValue; + +public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase { + + private static final int MIN_SAMPLE_VALUE = -1000000; + private static final int MAX_SAMPLE_VALUE = 1000000; + private static final int NUMBER_OF_DOCS = 1000; + private static final Supplier sampleSupplier = () -> randomLongBetween(MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE); + + private static long[] singleValueSample; + private static long[] multiValueSample; + private static double singleValueExactMAD; + private static double multiValueExactMAD; + + @Override + public void setupSuiteScopeCluster() throws Exception { + final Settings settings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + 
createIndex("idx", settings); + createIndex("idx_unmapped", settings); + + minValue = MIN_SAMPLE_VALUE; + minValues = MIN_SAMPLE_VALUE; + maxValue = MAX_SAMPLE_VALUE; + maxValues = MAX_SAMPLE_VALUE; + + singleValueSample = new long[NUMBER_OF_DOCS]; + multiValueSample = new long[NUMBER_OF_DOCS * 2]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < NUMBER_OF_DOCS; i++) { + final long singleValueDatapoint = sampleSupplier.get(); + final long firstMultiValueDatapoint = sampleSupplier.get(); + final long secondMultiValueDatapoint = sampleSupplier.get(); + + singleValueSample[i] = singleValueDatapoint; + multiValueSample[i * 2] = firstMultiValueDatapoint; + multiValueSample[(i * 2) + 1] = secondMultiValueDatapoint; + + IndexRequestBuilder builder = client().prepareIndex("idx", "_doc", String.valueOf(i)) + .setSource(jsonBuilder() + .startObject() + .field("value", singleValueDatapoint) + .startArray("values") + .value(firstMultiValueDatapoint) + .value(secondMultiValueDatapoint) + .endArray() + .endObject()); + + builders.add(builder); + } + + singleValueExactMAD = calculateMAD(singleValueSample); + multiValueExactMAD = calculateMAD(multiValueSample); + + indexRandom(true, builders); + + prepareCreate("empty_bucket_idx") + .addMapping("type", "value", "type=integer") + .execute() + .actionGet(); + + builders = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", String.valueOf(i)).setSource(jsonBuilder() + .startObject() + .field("value", i*2) + .endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(AggregationTestScriptsPlugin.class); + } + + private static MedianAbsoluteDeviationAggregationBuilder randomBuilder() { + final MedianAbsoluteDeviationAggregationBuilder builder = new MedianAbsoluteDeviationAggregationBuilder("mad"); + if (randomBoolean()) { + builder.compression(randomDoubleBetween(20, 1000, false)); + } + return builder; + } + + @Override + public void testEmptyAggregation() throws Exception { + final SearchResponse response = client() + .prepareSearch("empty_bucket_idx") + .addAggregation( + histogram("histogram") + .field("value") + .interval(1) + .minDocCount(0) + .subAggregation( + randomBuilder() + .field("value"))) + .execute() + .actionGet(); + + assertHitCount(response, 2); + + final Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + final Histogram.Bucket bucket = histogram.getBuckets().get(1); + assertThat(bucket, notNullValue()); + + final MedianAbsoluteDeviation mad = bucket.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + } + + @Override + public void testUnmapped() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("value")) + .execute() + .actionGet(); + + assertHitCount(response, 0); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), is(Double.NaN)); + } + + @Override + public void testSingleValuedField() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + 
randomBuilder() + .field("value")) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + + @Override + public void testSingleValuedFieldGetProperty() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + global("global") + .subAggregation( + randomBuilder() + .field("value"))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final Global global = response.getAggregations().get("global"); + assertThat(global, notNullValue()); + assertThat(global.getName(), is("global")); + assertThat(global.getDocCount(), is((long) NUMBER_OF_DOCS)); + assertThat(global.getAggregations(), notNullValue()); + assertThat(global.getAggregations().asMap().entrySet(), hasSize(1)); + + final MedianAbsoluteDeviation mad = global.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(((InternalAggregation) global).getProperty("mad"), sameInstance(mad)); + } + + @Override + public void testSingleValuedFieldPartiallyUnmapped() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("value")) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + + @Override + public void testSingleValuedFieldWithValueScript() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) + .map(point -> point + 1) + .toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + @Override + public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { + final Map params = new HashMap<>(); + params.put("inc", 1); + + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) + .map(point -> point + 1) + .toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + @Override + 
public void testMultiValuedField() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("values")) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + } + + @Override + public void testMultiValuedFieldWithValueScript() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample) + .map(point -> point + 1) + .toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + @Override + public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { + final Map params = new HashMap<>(); + params.put("inc", 1); + + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample) + .map(point -> point + 1) + .toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + @Override + public void testScriptSingleValued() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(singleValueExactMAD)); + } + + @Override + public void testScriptSingleValuedWithParams() throws Exception { + final Map params = new HashMap<>(); + params.put("inc", 1); + + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) + .map(point -> point + 1) + .toArray()); + 
assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + @Override + public void testScriptMultiValued() throws Exception { + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .script(new Script( + ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, + "doc['values'].values", + Collections.emptyMap()))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(multiValueExactMAD)); + } + + @Override + public void testScriptMultiValuedWithParams() throws Exception { + final Map params = new HashMap<>(); + params.put("inc", 1); + + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + randomBuilder() + .script(new Script( + ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, + "[ doc['value'].value, doc['value'].value + inc ]", + params))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getName(), is("mad")); + + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) + .flatMap(point -> LongStream.of(point, point + 1)) + .toArray()); + assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); + } + + public void testAsSubAggregation() throws Exception { + final int rangeBoundary = (MAX_SAMPLE_VALUE + MIN_SAMPLE_VALUE) / 2; + final SearchResponse response = client() + .prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + range("range") + .field("value") + .addRange(MIN_SAMPLE_VALUE, rangeBoundary) + .addRange(rangeBoundary, MAX_SAMPLE_VALUE) + .subAggregation( + randomBuilder() + .field("value"))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final long[] lowerBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= MIN_SAMPLE_VALUE && point < rangeBoundary) + .toArray(); + final long[] upperBucketSample = Arrays.stream(singleValueSample) + .filter(point -> point >= rangeBoundary && point < MAX_SAMPLE_VALUE) + .toArray(); + + final Range range = response.getAggregations().get("range"); + assertThat(range, notNullValue()); + List buckets = range.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(2)); + + final Range.Bucket lowerBucket = buckets.get(0); + assertThat(lowerBucket, notNullValue()); + + final MedianAbsoluteDeviation lowerBucketMAD = lowerBucket.getAggregations().get("mad"); + assertThat(lowerBucketMAD, notNullValue()); + assertThat(lowerBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(lowerBucketSample))); + + final Range.Bucket upperBucket = buckets.get(1); + assertThat(upperBucket, notNullValue()); + + final MedianAbsoluteDeviation upperBucketMAD = upperBucket.getAggregations().get("mad"); + assertThat(upperBucketMAD, notNullValue()); + assertThat(upperBucketMAD.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(upperBucketSample))); + + } + + @Override + public void testOrderByEmptyAggregation() throws Exception { + final int numberOfBuckets = 10; + final SearchResponse response = client() + .prepareSearch("idx") + 
.setQuery(matchAllQuery()) + .addAggregation( + terms("terms") + .field("value") + .size(numberOfBuckets) + .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) + .subAggregation( + filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)) + .subAggregation( + randomBuilder() + .field("value")))) + .execute() + .actionGet(); + + assertHitCount(response, NUMBER_OF_DOCS); + + final Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets, hasSize(numberOfBuckets)); + + for (int i = 0; i < numberOfBuckets; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + + MedianAbsoluteDeviation mad = filter.getAggregations().get("mad"); + assertThat(mad, notNullValue()); + assertThat(mad.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + } + } + + /** + * Make sure that a request using a script does not get cached and a request + * not using a script does get cached. + */ + public void testDontCacheScripts() throws Exception { + assertAcked( + prepareCreate("cache_test_idx") + .addMapping("type", "d", "type=long") + .setSettings(Settings.builder() + .put("requests.cache.enable", true) + .put("number_of_shards", 1) + .put("number_of_replicas", 1)) + .get()); + + indexRandom(true, + client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), + client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); + + // Make sure we are starting with a clear cache + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // Test that a request using a script does not get cached + SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) + .addAggregation(randomBuilder() + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(0L)); + + // To make sure that the cache is working test that a request not using + // a script is cached + r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d")).get(); + assertSearchResponse(r); + + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getHitCount(), equalTo(0L)); + assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() + .getMissCount(), equalTo(1L)); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationTests.java new file mode 100644 index 0000000000000..61d696985861a --- /dev/null +++ 
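`testDontCacheScripts` above fixes the request-cache contract for the new aggregation: with an inline script the request is never considered cacheable (hit and miss counts both stay at zero), while the script-free request registers one miss, meaning an entry was written. As a usage note rather than an assertion the test makes, repeating that second request would then be served from the cache:

--------------------------------------------------
// Hypothetical follow-up to the test above (not asserted there): repeating the
// script-free, size-0 request should hit the entry cached by the first run.
SearchResponse again = client().prepareSearch("cache_test_idx").setSize(0)
    .addAggregation(randomBuilder().field("d")).get();
// request-cache stats would then show hitCount == 1 alongside missCount == 1
--------------------------------------------------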
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +public class MedianAbsoluteDeviationTests extends AbstractNumericMetricTestCase { + + @Override + protected MedianAbsoluteDeviationAggregationBuilder doCreateTestAggregatorFactory() { + MedianAbsoluteDeviationAggregationBuilder builder = + new MedianAbsoluteDeviationAggregationBuilder(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + builder.compression(randomDoubleBetween(0, 1000.0, false)); + } + + if (randomBoolean()) { + builder.missing("MISSING"); + } + + if (randomBoolean()) { + builder.format("###.00"); + } + + return builder; + } +} diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java index e3ae802baba9b..72bd6d1fe2c87 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java @@ -56,7 +56,7 @@ public void setUp() throws Exception { when(mapperService.fullName("alias")).thenReturn(fieldType); FieldInfo mockFieldInfo = new FieldInfo("field", 1, false, false, true, - IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, false); + IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); LeafReader leafReader = mock(LeafReader.class); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index da7f5f8a4cf2c..f5cc517dd47de 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -107,6 +107,7 @@ public void testProfileQuery() throws Exception { * search for each query. 
It then does some basic sanity checking of score and hits * to make sure the profiling doesn't interfere with the hits being returned */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32492") public void testProfileMatchesRegular() throws Exception { createIndex("test"); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java index 171bb0bf1697f..d819d880c86d3 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java @@ -36,7 +36,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.SuggestMode; import org.apache.lucene.store.Directory; @@ -110,7 +110,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + WordScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); @@ -135,7 +135,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE)); suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections; @@ -159,7 +159,7 @@ protected TokenStreamComponents createComponents(String fieldName) { // Test some of the highlighting corner cases suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections; @@ -196,7 +196,7 @@ protected TokenStreamComponents createComponents(String fieldName) { spellchecker.setMinPrefix(1); spellchecker.setMinQueryLength(1); suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; @@ -204,7 +204,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain america")); generator = new DirectCandidateGenerator(spellchecker, "body", 
SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); @@ -212,7 +212,7 @@ protected TokenStreamComponents createComponents(String fieldName) { // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); @@ -280,7 +280,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); DirectSpellChecker spellchecker = new DirectSpellChecker(); @@ -288,7 +288,7 @@ protected TokenStreamComponents createComponents(String fieldName) { DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10); DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, - 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse")); + 0.95, 10, wrapper, wrapper, MultiTerms.getTerms(ir, "body_reverse")); CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse); Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, @@ -388,7 +388,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); @@ -406,7 +406,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(corrections.length, equalTo(0)); // assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape")); - wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections; @@ -457,20 +457,20 @@ protected TokenStreamComponents createComponents(String fieldName) { spellchecker.setMinPrefix(1); spellchecker.setMinQueryLength(1); suggester = new 
NoisyChannelSpellChecker(0.95); - wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); - wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections; @@ -492,7 +492,7 @@ public void testFewDocsEgdeCase() throws Exception { } try (DirectoryReader ir = DirectoryReader.open(dir)) { - WordScorer wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.95d, + WordScorer wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "field"), "field", 0.95d, new BytesRef(" "), 0.4f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); DirectSpellChecker spellchecker = new DirectSpellChecker(); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java index 5923cd3332e5e..a65c75817a816 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java @@ -28,7 +28,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lucene.BytesRefs; @@ -118,7 +118,7 @@ public void testBuildWordScorer() throws IOException { writer.addDocument(doc); DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d, + WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiTerms.getTerms(ir, "field"), "field", 0.9d, BytesRefs.toBytesRef(" ")); assertWordScorer(wordScorer, testModel); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 7f008d8721a9e..e67b981bf81ed 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
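The repeated `MultiFields.getTerms` → `MultiTerms.getTerms` edits above are a mechanical follow-up to the Lucene upgrade: as of Lucene 8 the cross-segment `Terms` accessor lives on `MultiTerms`, with the same arguments and semantics, so the call shape is unchanged:

--------------------------------------------------
// Same arguments, new home for the accessor:
// Terms terms = MultiFields.getTerms(indexReader, "body_ngram");  // before (older Lucene)
Terms terms = MultiTerms.getTerms(indexReader, "body_ngram");      // after (Lucene 8)
--------------------------------------------------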
b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -977,7 +977,7 @@ public void testSnapshotWithDateMath() { final String repo = "repo"; final AdminClient admin = client().admin(); - final IndexNameExpressionResolver nameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); + final IndexNameExpressionResolver nameExpressionResolver = new IndexNameExpressionResolver(); final String snapshotName = ""; logger.info("--> creating repository"); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 6c27680d74162..72722ff10b50d 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -18,9 +18,6 @@ */ package org.elasticsearch.transport; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -68,7 +65,9 @@ import java.util.Arrays; import java.util.Base64; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -79,6 +78,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -163,7 +163,7 @@ public void testRemoteProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -204,7 +204,7 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -256,7 +256,7 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); 
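// Note on the hunks in this file: every RemoteClusterConnection constructed in
// these tests gains a trailing null argument, i.e. the constructor grew one
// extra parameter in this change, and passing null preserves the tests'
// previous behaviour. Schematically (parameter names here are descriptive,
// not the declared ones):
//   before: new RemoteClusterConnection(settings, alias, seeds, service, connectionManager, maxConnections, nodePredicate)
//   after:  new RemoteClusterConnection(settings, alias, seeds, service, connectionManager, maxConnections, nodePredicate, null)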
assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -285,7 +285,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { updateSeedNodes(connection, seedNodes); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -312,7 +312,7 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -362,7 +362,7 @@ public void testFilterDiscoveredNodes() throws Exception { service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, - n -> n.equals(rejectedNode) == false)) { + n -> n.equals(rejectedNode) == false, null)) { updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); @@ -422,7 +422,7 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); @@ -485,7 +485,7 @@ public void close() { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -528,7 +528,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) { ActionListener listener = 
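Almost every hunk in RemoteClusterConnectionTests is the same mechanical edit: the constructor grows one trailing argument, which the tests pass as `null`. When a constructor gains a parameter like this, a forwarding overload is the usual way to keep existing call sites compiling; a generic sketch (class and parameter names are illustrative, not the actual RemoteClusterConnection signature):

```java
public class Connection {
    private final int maxConnections;
    private final String proxyAddress; // hypothetical trailing parameter, may be null

    // New primary constructor with the extra trailing parameter.
    public Connection(int maxConnections, String proxyAddress) {
        this.maxConnections = maxConnections;
        this.proxyAddress = proxyAddress;
    }

    // Forwarding overload: old call sites keep compiling and implicitly
    // pass the default (null) for the new parameter.
    public Connection(int maxConnections) {
        this(maxConnections, null);
    }
}
```

Here the tests chose the other option and updated every call site instead, which keeps the production class free of test-only overloads.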
@@ -485,7 +485,7 @@ public void close() {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 connection.addConnectedNode(seedNode);
                 for (DiscoveryNode node : knownNodes) {
                     final Transport.Connection transportConnection = connection.getConnection(node);
@@ -528,7 +528,7 @@ public void run() {
             CountDownLatch listenerCalled = new CountDownLatch(1);
             AtomicReference exceptionReference = new AtomicReference<>();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 ActionListener listener = ActionListener.wrap(x -> {
                     listenerCalled.countDown();
                     fail("expected exception");
@@ -565,7 +565,7 @@ public void testFetchShards() throws Exception {
             service.acceptIncomingRequests();
             List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 if (randomBoolean()) {
                     updateSeedNodes(connection, nodes);
                 }
@@ -605,7 +605,7 @@ public void testFetchShardsThreadContextHeader() throws Exception {
             service.acceptIncomingRequests();
             List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                nodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 SearchRequest request = new SearchRequest("test-index");
                 Thread[] threads = new Thread[10];
                 for (int i = 0; i < threads.length; i++) {
@@ -659,7 +659,8 @@ public void testFetchShardsSkipUnavailable() throws Exception {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Collections.singletonList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Collections.singletonList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE,
+                n -> true, null)) {
                 SearchRequest request = new SearchRequest("test-index");

                 ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index")
@@ -671,7 +672,7 @@ public void testFetchShardsSkipUnavailable() throws Exception {
                 AtomicReference failReference = new AtomicReference<>();
                 connection.fetchSearchShards(searchShardsRequest,
                     new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch));
-                assertTrue(responseLatch.await(5, TimeUnit.SECONDS));
+                assertTrue(responseLatch.await(10, TimeUnit.SECONDS));
                 assertNull(failReference.get());
                 assertNotNull(reference.get());
                 ClusterSearchShardsResponse response = reference.get();
@@ -702,7 +703,7 @@ public void onNodeDisconnected(DiscoveryNode node) {
                     new LatchedActionListener<>(ActionListener.wrap((s) -> {
                         reference.set(s);
                     }, failReference::set), responseLatch));
-                assertTrue(responseLatch.await(1, TimeUnit.SECONDS));
+                assertTrue(responseLatch.await(10, TimeUnit.SECONDS));
                 assertNotNull(failReference.get());
                 assertNull(reference.get());
                 assertThat(failReference.get(), instanceOf(TransportException.class));
@@ -715,7 +716,7 @@ public void onNodeDisconnected(DiscoveryNode node) {
                 AtomicReference failReference = new AtomicReference<>();
                 connection.fetchSearchShards(searchShardsRequest,
                     new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch));
-                assertTrue(responseLatch.await(1, TimeUnit.SECONDS));
+                assertTrue(responseLatch.await(10, TimeUnit.SECONDS));
                 assertNull(failReference.get());
                 assertNotNull(reference.get());
                 ClusterSearchShardsResponse response = reference.get();
@@ -724,7 +725,7 @@ public void onNodeDisconnected(DiscoveryNode node) {
                 //give transport service enough time to realize that the node is down, and to notify the connection listeners
                 //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next
-                assertTrue(disconnectedLatch.await(1, TimeUnit.SECONDS));
+                assertTrue(disconnectedLatch.await(10, TimeUnit.SECONDS));

                 if (randomBoolean()) {
                     connection.updateSkipUnavailable(false);
@@ -738,7 +739,7 @@ public void onNodeDisconnected(DiscoveryNode node) {
                 AtomicReference failReference = new AtomicReference<>();
                 connection.fetchSearchShards(searchShardsRequest,
                     new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch));
-                assertTrue(responseLatch.await(1, TimeUnit.SECONDS));
+                assertTrue(responseLatch.await(10, TimeUnit.SECONDS));
                 assertNull(failReference.get());
                 assertNotNull(reference.get());
                 ClusterSearchShardsResponse response = reference.get();
@@ -769,7 +770,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 int numThreads = randomIntBetween(4, 10);
                 Thread[] threads = new Thread[numThreads];
                 CyclicBarrier barrier = new CyclicBarrier(numThreads);
@@ -848,7 +849,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 int numThreads = randomIntBetween(4, 10);
                 Thread[] threads = new Thread[numThreads];
                 CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
@@ -937,7 +938,7 @@ public void testGetConnectionInfo() throws Exception {
             service.acceptIncomingRequests();
             int maxNumConnections = randomIntBetween(1, 5);
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                seedNodes, service, service.connectionManager(), maxNumConnections, n -> true)) {
+                seedNodes, service, service.connectionManager(), maxNumConnections, n -> true, null)) {
                 // test no nodes connected
                 RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo());
                 assertNotNull(remoteConnectionInfo);
@@ -1084,7 +1085,7 @@ public void testEnsureConnected() throws IOException, InterruptedException {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 assertFalse(service.nodeConnected(seedNode));
                 assertFalse(service.nodeConnected(discoverableNode));
                 assertTrue(connection.assertNoRunningConnections());
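Several hunks in this file also relax latch timeouts from 1 or 5 seconds to 10. Short `await()` windows are a classic source of flaky tests on loaded CI workers: the assertion fails even though the code under test is merely slow. The pattern in isolation, with plain JDK types standing in for the test's `LatchedActionListener`:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class LatchedCallbackExample {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch responseLatch = new CountDownLatch(1);
        AtomicReference<String> response = new AtomicReference<>();
        AtomicReference<Exception> failure = new AtomicReference<>();

        // Simulated async operation: the callback counts the latch down
        // whether it succeeds or fails, so the waiter never hangs forever.
        new Thread(() -> {
            try {
                response.set("shard response");
            } catch (Exception e) {
                failure.set(e);
            } finally {
                responseLatch.countDown();
            }
        }).start();

        // A generous timeout (10s rather than 1s) keeps the assertion
        // meaningful while tolerating slow CI machines.
        if (!responseLatch.await(10, TimeUnit.SECONDS)) {
            throw new AssertionError("timed out waiting for response");
        }
        if (failure.get() != null) {
            throw new AssertionError(failure.get());
        }
        System.out.println(response.get());
    }
}
```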
@@ -1133,7 +1134,7 @@ public void testCollectNodes() throws Exception {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList(() -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 if (randomBoolean()) {
                     updateSeedNodes(connection, Arrays.asList(() -> seedNode));
                 }
@@ -1181,7 +1182,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                seedNodes, service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 final int numGetThreads = randomIntBetween(4, 10);
                 final Thread[] getThreads = new Thread[numGetThreads];
                 final int numModifyingThreads = randomIntBetween(4, 10);
@@ -1271,7 +1272,7 @@ public void testClusterNameIsChecked() throws Exception {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList( () -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList( () -> seedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 updateSeedNodes(connection, Arrays.asList(() -> seedNode));
                 assertTrue(service.nodeConnected(seedNode));
                 assertTrue(service.nodeConnected(discoverableNode));
@@ -1351,7 +1352,8 @@ public void close() {
             service.start();
             service.acceptIncomingRequests();
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Collections.singletonList(() -> connectedNode), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Collections.singletonList(() -> connectedNode), service, service.getConnectionManager(),
+                Integer.MAX_VALUE, n -> true, null)) {
                 connection.addConnectedNode(connectedNode);
                 for (int i = 0; i < 10; i++) {
                     //always a direct connection as the remote node is already connected
@@ -1393,7 +1395,7 @@ public void testLazyResolveTransportAddress() throws Exception {
                 return seedNode;
             };
             try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-                Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true)) {
+                Arrays.asList(seedSupplier), service, service.getConnectionManager(), Integer.MAX_VALUE, n -> true, null)) {
                 updateSeedNodes(connection, Arrays.asList(seedSupplier));
                 // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes
                 // being called again so we try to resolve the same seed node's host twice
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
index 94ac7e963c1da..e2a0827c14d41 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
@@ -97,6 +98,7 @@ public void testSettingsAreRegistered() {
         assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER));
         assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING));
         assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_NODE_ATTRIBUTE));
+        assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE));
     }

     public void testRemoteClusterSeedSetting() {
@@ -194,12 +196,12 @@ public void testBuildRemoteClustersDynamicConfigWithDuplicates() {

     public void testGroupClusterIndices() throws IOException {
         List knownNodes = new CopyOnWriteArrayList<>();
-        try (MockTransportService seedTransport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
-            MockTransportService otherSeedTransport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
-            DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
-            DiscoveryNode otherSeedNode = otherSeedTransport.getLocalDiscoNode();
-            knownNodes.add(seedTransport.getLocalDiscoNode());
-            knownNodes.add(otherSeedTransport.getLocalDiscoNode());
+        try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
+            MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode();
+            DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode();
+            knownNodes.add(cluster1Transport.getLocalDiscoNode());
+            knownNodes.add(cluster2Transport.getLocalDiscoNode());
             Collections.shuffle(knownNodes, random());

             try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool,
@@ -207,8 +209,8 @@ public void testGroupClusterIndices() throws IOException {
                 transportService.start();
                 transportService.acceptIncomingRequests();
                 Settings.Builder builder = Settings.builder();
-                builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
-                builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
+                builder.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString());
+                builder.putList("cluster.remote.cluster_2.seeds", cluster2Seed.getAddress().toString());
                 try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) {
                     assertFalse(service.isCrossClusterSearchEnabled());
                     service.initializeRemoteClusters();
@@ -239,12 +241,12 @@ public void testGroupIndices() throws IOException {
         List knownNodes = new CopyOnWriteArrayList<>();
-        try (MockTransportService seedTransport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
-            MockTransportService otherSeedTransport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
-            DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
-            DiscoveryNode otherSeedNode = otherSeedTransport.getLocalDiscoNode();
-            knownNodes.add(seedTransport.getLocalDiscoNode());
-            knownNodes.add(otherSeedTransport.getLocalDiscoNode());
+        try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
+            MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode();
+            DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode();
+            knownNodes.add(cluster1Transport.getLocalDiscoNode());
+            knownNodes.add(cluster2Transport.getLocalDiscoNode());
             Collections.shuffle(knownNodes, random());

             try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool,
@@ -252,8 +254,8 @@ public void testGroupIndices() throws IOException {
                 transportService.start();
                 transportService.acceptIncomingRequests();
                 Settings.Builder builder = Settings.builder();
-                builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
-                builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
+                builder.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString());
+                builder.putList("cluster.remote.cluster_2.seeds", cluster2Seed.getAddress().toString());
                 try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) {
                     assertFalse(service.isCrossClusterSearchEnabled());
                     service.initializeRemoteClusters();
@@ -301,12 +303,12 @@ public void testIncrementallyAddClusters() throws IOException {
         List knownNodes = new CopyOnWriteArrayList<>();
-        try (MockTransportService seedTransport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
-            MockTransportService otherSeedTransport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
-            DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
-            DiscoveryNode otherSeedNode = otherSeedTransport.getLocalDiscoNode();
-            knownNodes.add(seedTransport.getLocalDiscoNode());
-            knownNodes.add(otherSeedTransport.getLocalDiscoNode());
+        try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
+            MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode();
+            DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode();
+            knownNodes.add(cluster1Transport.getLocalDiscoNode());
+            knownNodes.add(cluster2Transport.getLocalDiscoNode());
             Collections.shuffle(knownNodes, random());

             try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool,
@@ -314,16 +316,16 @@ public void testIncrementallyAddClusters() throws IOException {
                 transportService.start();
                 transportService.acceptIncomingRequests();
                 Settings.Builder builder = Settings.builder();
-                builder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
-                builder.putList("cluster.remote.cluster_2.seeds", otherSeedNode.getAddress().toString());
+                builder.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString());
+                builder.putList("cluster.remote.cluster_2.seeds", cluster2Seed.getAddress().toString());
                 try (RemoteClusterService service = new RemoteClusterService(Settings.EMPTY, transportService)) {
                     assertFalse(service.isCrossClusterSearchEnabled());
                     service.initializeRemoteClusters();
                     assertFalse(service.isCrossClusterSearchEnabled());
-                    service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()), null);
+                    service.updateRemoteCluster("cluster_1", Collections.singletonList(cluster1Seed.getAddress().toString()), null);
                     assertTrue(service.isCrossClusterSearchEnabled());
                     assertTrue(service.isRemoteClusterRegistered("cluster_1"));
-                    service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()), null);
+                    service.updateRemoteCluster("cluster_2", Collections.singletonList(cluster2Seed.getAddress().toString()), null);
                     assertTrue(service.isCrossClusterSearchEnabled());
                     assertTrue(service.isRemoteClusterRegistered("cluster_1"));
                     assertTrue(service.isRemoteClusterRegistered("cluster_2"));
@@ -337,6 +339,81 @@ public void testIncrementallyAddClusters() throws IOException {
         }
     }

+    public void testDefaultPingSchedule() throws IOException {
+        List knownNodes = new CopyOnWriteArrayList<>();
+        try (MockTransportService seedTransport = startTransport("cluster_1_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
+            knownNodes.add(seedTransport.getLocalDiscoNode());
+            TimeValue pingSchedule;
+            Settings.Builder settingsBuilder = Settings.builder();
+            settingsBuilder.putList("cluster.remote.cluster_1.seeds", seedNode.getAddress().toString());
+            if (randomBoolean()) {
+                pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(1, 10));
+                settingsBuilder.put(TcpTransport.PING_SCHEDULE.getKey(), pingSchedule).build();
+            } else {
+                pingSchedule = TimeValue.MINUS_ONE;
+            }
+            Settings settings = settingsBuilder.build();
+            try (MockTransportService transportService = MockTransportService.createNewService(settings,
+                Version.CURRENT, threadPool, null)) {
+                transportService.start();
+                transportService.acceptIncomingRequests();
+                try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) {
+                    assertFalse(service.isCrossClusterSearchEnabled());
+                    service.initializeRemoteClusters();
+                    assertTrue(service.isCrossClusterSearchEnabled());
+                    service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()), null);
+                    assertTrue(service.isCrossClusterSearchEnabled());
+                    assertTrue(service.isRemoteClusterRegistered("cluster_1"));
+                    RemoteClusterConnection remoteClusterConnection = service.getRemoteClusterConnection("cluster_1");
+                    assertEquals(pingSchedule, remoteClusterConnection.getConnectionManager().getPingSchedule());
+                }
+            }
+        }
+    }
+
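testDefaultPingSchedule above falls back to the transport-wide `TcpTransport.PING_SCHEDULE` setting, while testCustomPingSchedule below overrides it per cluster via `cluster.remote.<name>.transport.ping_schedule`. A sketch of how such a namespaced setting is typically declared with an affix key (the declaration here is illustrative; the real one is `REMOTE_CLUSTER_PING_SCHEDULE` in RemoteClusterService, whose default handling may differ):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class PingScheduleSettingSketch {
    // Hypothetical re-declaration of a per-cluster ping schedule setting:
    // the namespace (the cluster alias) sits between prefix and suffix.
    public static final Setting.AffixSetting<TimeValue> PING_SCHEDULE =
        Setting.affixKeySetting(
            "cluster.remote.",
            "transport.ping_schedule",
            key -> Setting.timeSetting(key, TimeValue.MINUS_ONE, Setting.Property.NodeScope, Setting.Property.Dynamic));

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("cluster.remote.cluster_1.transport.ping_schedule", "5s")
            .build();
        // Resolve the concrete setting for one remote cluster.
        TimeValue schedule = PING_SCHEDULE.getConcreteSettingForNamespace("cluster_1").get(settings);
        System.out.println(schedule); // 5s
    }
}
```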
+    public void testCustomPingSchedule() throws IOException {
+        List knownNodes = new CopyOnWriteArrayList<>();
+        try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT);
+            MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode();
+            DiscoveryNode cluster2Seed = cluster2Transport.getLocalDiscoNode();
+            knownNodes.add(cluster1Transport.getLocalDiscoNode());
+            knownNodes.add(cluster2Transport.getLocalDiscoNode());
+            Collections.shuffle(knownNodes, random());
+            Settings.Builder settingsBuilder = Settings.builder();
+            if (randomBoolean()) {
+                settingsBuilder.put(TcpTransport.PING_SCHEDULE.getKey(), TimeValue.timeValueSeconds(randomIntBetween(1, 10)));
+            }
+            Settings transportSettings = settingsBuilder.build();
+
+            try (MockTransportService transportService = MockTransportService.createNewService(transportSettings, Version.CURRENT,
+                threadPool, null)) {
+                transportService.start();
+                transportService.acceptIncomingRequests();
+                Settings.Builder builder = Settings.builder();
+                builder.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString());
+                builder.putList("cluster.remote.cluster_2.seeds", cluster2Seed.getAddress().toString());
+                TimeValue pingSchedule1 = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueSeconds(randomIntBetween(1, 10));
+                builder.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule1);
+                TimeValue pingSchedule2 = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueSeconds(randomIntBetween(1, 10));
+                builder.put("cluster.remote.cluster_2.transport.ping_schedule", pingSchedule2);
+                try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) {
+                    assertFalse(service.isCrossClusterSearchEnabled());
+                    service.initializeRemoteClusters();
+                    assertTrue(service.isCrossClusterSearchEnabled());
+                    service.updateRemoteCluster("cluster_1", Collections.singletonList(cluster1Seed.getAddress().toString()), null);
+                    assertTrue(service.isCrossClusterSearchEnabled());
+                    assertTrue(service.isRemoteClusterRegistered("cluster_1"));
+                    RemoteClusterConnection remoteClusterConnection1 = service.getRemoteClusterConnection("cluster_1");
+                    assertEquals(pingSchedule1, remoteClusterConnection1.getConnectionManager().getPingSchedule());
+                    RemoteClusterConnection remoteClusterConnection2 = service.getRemoteClusterConnection("cluster_2");
+                    assertEquals(pingSchedule2, remoteClusterConnection2.getConnectionManager().getPingSchedule());
+                }
+            }
+        }
+    }
+
     public void testRemoteNodeAttribute() throws IOException, InterruptedException {
         final Settings settings =
             Settings.builder().put("cluster.remote.node.attr", "gateway").build();
diff --git a/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java b/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java
index c6347b014bfb8..c38030afbe973 100644
--- a/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java
@@ -52,7 +52,7 @@ public void testRestUsage() throws Exception {
         BaseRestHandler handlerD = new MockRestHandler("d", settings);
         BaseRestHandler handlerE = new MockRestHandler("e", settings);
         BaseRestHandler handlerF = new MockRestHandler("f", settings);
-        UsageService usageService = new UsageService(settings);
+        UsageService usageService = new UsageService();
         usageService.addRestHandler(handlerA);
         usageService.addRestHandler(handlerB);
         usageService.addRestHandler(handlerC);
diff --git a/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json b/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json
new file mode 100644
index 0000000000000..2edb45742b749
--- /dev/null
+++ b/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json
@@ -0,0 +1,5 @@
+{ "index":{"_id":"1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
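The new simple-bulk-missing-index-type.json fixture exercises bulk action lines that carry only `_id`, leaving `_index` (and `_type`) to be supplied by the request URL. For reference, sending such a body over the low-level REST client might look like this (host, index, and type names are made up, and the URL shape assumes a version where the bulk endpoint accepts index and type defaults in the path):

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class BulkWithoutIndexExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // The URL supplies the default index and type, so the action
            // lines may omit _index and _type, as in the new test fixture.
            Request bulk = new Request("POST", "/test-index/doc/_bulk");
            bulk.setJsonEntity(
                "{ \"index\":{\"_id\":\"1\"} }\n" +
                "{ \"field1\" : \"value1\" }\n" +
                "{ \"delete\" : { \"_id\" : \"2\" } }\n" +
                "{ \"create\" : { \"_id\" : \"3\" } }\n" +
                "{ \"field1\" : \"value3\" }\n");
            Response response = client.performRequest(bulk);
            System.out.println(response.getStatusLine());
        }
    }
}
```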
diff --git a/settings.gradle b/settings.gradle
index dedf3520bbbcd..c5acf583000d5 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -24,10 +24,10 @@ List projects = [
   'distribution:packages:deb',
   'distribution:packages:oss-rpm',
   'distribution:packages:rpm',
-  'distribution:bwc:next-minor-snapshot',
-  'distribution:bwc:staged-minor-snapshot',
-  'distribution:bwc:next-bugfix-snapshot',
-  'distribution:bwc:maintenance-bugfix-snapshot',
+  'distribution:bwc:bugfix',
+  'distribution:bwc:maintenance',
+  'distribution:bwc:minor',
+  'distribution:bwc:staged',
   'distribution:tools:java-version-checker',
   'distribution:tools:launchers',
   'distribution:tools:plugin-cli',
diff --git a/test/framework/build.gradle b/test/framework/build.gradle
index 5e5c53f4406c9..12653cc6489ae 100644
--- a/test/framework/build.gradle
+++ b/test/framework/build.gradle
@@ -71,4 +71,5 @@ precommit.dependsOn namingConventionsMain
 test.configure {
   systemProperty 'tests.gradle_index_compat_versions', bwcVersions.indexCompatible.join(',')
   systemProperty 'tests.gradle_wire_compat_versions', bwcVersions.wireCompatible.join(',')
+  systemProperty 'tests.gradle_unreleased_versions', bwcVersions.unreleased.join(',')
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java
index e1205ba846b36..b3681247b4e9c 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java
@@ -70,19 +70,19 @@ public static MockAllocationService createAllocationService(Settings settings, R
     }

     public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) {
-        return new MockAllocationService(settings,
+        return new MockAllocationService(
             randomAllocationDeciders(settings, clusterSettings, random),
             new TestGatewayAllocator(), new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
     }

     public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) {
-        return new MockAllocationService(settings,
+        return new MockAllocationService(
             randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
             new TestGatewayAllocator(), new BalancedShardsAllocator(settings), clusterInfoService);
     }

     public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator gatewayAllocator) {
-        return new MockAllocationService(settings,
+        return new MockAllocationService(
             randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()),
             gatewayAllocator, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
     }
@@ -91,7 +91,7 @@ public static AllocationDeciders randomAllocationDeciders(Settings settings, Clu
         List deciders = new ArrayList<>(
             ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()));
         Collections.shuffle(deciders, random);
-        return new AllocationDeciders(settings, deciders);
+        return new AllocationDeciders(deciders);
     }

     protected static Set MASTER_DATA_ROLES =
@@ -127,18 +127,18 @@ protected static ClusterState startRandomInitializingShard(ClusterState cluster
     }

     protected static AllocationDeciders yesAllocationDeciders() {
-        return new AllocationDeciders(Settings.EMPTY, Arrays.asList(
+        return new AllocationDeciders(Arrays.asList(
             new TestAllocateDecision(Decision.YES),
             new SameShardAllocationDecider(Settings.EMPTY,
                 new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))));
     }

     protected static AllocationDeciders noAllocationDeciders() {
-        return new AllocationDeciders(Settings.EMPTY, Collections.singleton(new TestAllocateDecision(Decision.NO)));
+        return new AllocationDeciders(Collections.singleton(new TestAllocateDecision(Decision.NO)));
     }

     protected static AllocationDeciders throttleAllocationDeciders() {
-        return new AllocationDeciders(Settings.EMPTY, Arrays.asList(
+        return new AllocationDeciders(Arrays.asList(
             new TestAllocateDecision(Decision.THROTTLE),
             new SameShardAllocationDecider(Settings.EMPTY,
                 new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))));
@@ -159,7 +159,6 @@ public static class TestAllocateDecision extends AllocationDecider {
         private final Decision decision;

         public TestAllocateDecision(Decision decision) {
-            super(Settings.EMPTY);
             this.decision = decision;
         }

@@ -184,9 +183,9 @@ protected static class MockAllocationService extends AllocationService {

         private volatile long nanoTimeOverride = -1L;

-        public MockAllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
+        public MockAllocationService(AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
                                      ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
-            super(settings, allocationDeciders, gatewayAllocator, shardsAllocator, clusterInfoService);
+            super(allocationDeciders, gatewayAllocator, shardsAllocator, clusterInfoService);
         }

         public void setNanoTimeOverride(long nanoTime) {
@@ -203,10 +202,7 @@ protected long currentNanoTime() {
      * Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet.
      */
     protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator {
-
-        public DelayedShardsMockGatewayAllocator() {
-            super(Settings.EMPTY);
-        }
+        public DelayedShardsMockGatewayAllocator() {}

         @Override
         public void applyStartedShards(RoutingAllocation allocation, List startedShards) {
diff --git a/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java
index d4d2d78789b8e..e8a554ca4aaae 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java
@@ -82,6 +82,9 @@ public static AliasActions randomAliasAction(boolean useStringAsFilter) {
                 action.indexRouting(randomRouting().toString());
             }
         }
+        if (randomBoolean()) {
+            action.writeIndex(randomBoolean());
+        }
     }
     return action;
 }
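randomAliasAction now also randomly flips the `writeIndex` flag on generated alias actions. The flag marks which index behind an alias receives writes; a hedged usage sketch (index and alias names invented):

```java
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;

public class WriteIndexAliasExample {
    public static IndicesAliasesRequest buildRequest() {
        // Point the alias at two indices; only one may be the write index.
        return new IndicesAliasesRequest()
            .addAliasAction(AliasActions.add().index("logs-000001").alias("logs").writeIndex(false))
            .addAliasAction(AliasActions.add().index("logs-000002").alias("logs").writeIndex(true));
    }
}
```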
diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
index 3e563e6d5382e..bbe4dd268e5ef 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
@@ -573,7 +573,14 @@ public EngineConfig config(IndexSettings indexSettings, Store store, Path transl
     public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
                                ReferenceManager.RefreshListener refreshListener, Sort indexSort,
                                LongSupplier globalCheckpointSupplier) {
-        IndexWriterConfig iwc = newIndexWriterConfig();
+        return config(indexSettings, store, translogPath, mergePolicy, refreshListener, null, indexSort, globalCheckpointSupplier);
+    }
+
+    public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
+                               ReferenceManager.RefreshListener externalRefreshListener,
+                               ReferenceManager.RefreshListener internalRefreshListener,
+                               Sort indexSort, LongSupplier globalCheckpointSupplier) {
+        IndexWriterConfig iwc = newIndexWriterConfig();
         TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
         Engine.EventListener listener = new Engine.EventListener() {
             @Override
@@ -581,12 +588,14 @@ public void onFailedEngine(String reason, @Nullable Exception e) {
                 // we don't need to notify anybody in this test
             }
         };
-        final List refreshListenerList =
-            refreshListener == null ? emptyList() : Collections.singletonList(refreshListener);
+        final List extRefreshListenerList =
+            externalRefreshListener == null ? emptyList() : Collections.singletonList(externalRefreshListener);
+        final List intRefreshListenerList =
+            internalRefreshListener == null ? emptyList() : Collections.singletonList(internalRefreshListener);
         EngineConfig config = new EngineConfig(shardId, allocationId.getId(), threadPool, indexSettings, null, store,
             mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener,
             IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
-            TimeValue.timeValueMinutes(5), refreshListenerList, Collections.emptyList(), indexSort,
+            TimeValue.timeValueMinutes(5), extRefreshListenerList, intRefreshListenerList, indexSort,
             new NoneCircuitBreakerService(),
             globalCheckpointSupplier == null ?
                 new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED, update -> {}) :
diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index 60a7655e9ed46..c396cdfe84570 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -167,7 +167,7 @@ protected class ReplicationGroup implements AutoCloseable, Iterable
         boolean closed = false;
         private ReplicationTargets replicationTargets;

-        private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer(Settings.EMPTY,
+        private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer(
             new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()),
             (request, parentTask, primaryAllocationId, primaryTerm, listener) -> {
                 try {
diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
index 70a42032ea469..63ec090dcc65b 100644
--- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
@@ -77,7 +77,6 @@ private static String toCamelCase(String s) {
         .put("edgengram", MovedToAnalysisCommon.class)
         .put("keyword", MovedToAnalysisCommon.class)
         .put("letter", MovedToAnalysisCommon.class)
-        .put("lowercase", MovedToAnalysisCommon.class)
         .put("ngram", MovedToAnalysisCommon.class)
         .put("pathhierarchy", MovedToAnalysisCommon.class)
         .put("pattern", MovedToAnalysisCommon.class)
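config() can now wire up internal refresh listeners separately from external ones. Lucene's `ReferenceManager.RefreshListener` is a two-method interface; a minimal counting implementation, as a sketch of what such a listener can look like (not part of the test framework):

```java
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.search.ReferenceManager;

public class CountingRefreshListener implements ReferenceManager.RefreshListener {
    private final AtomicInteger refreshes = new AtomicInteger();

    @Override
    public void beforeRefresh() {
        // called just before the searcher reference is refreshed
    }

    @Override
    public void afterRefresh(boolean didRefresh) {
        // didRefresh is false when the refresh was a no-op
        if (didRefresh) {
            refreshes.incrementAndGet();
        }
    }

    public int count() {
        return refreshes.get();
    }
}
```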
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index 4c030ddb28518..cebd63dfa324c 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -413,7 +413,6 @@ protected boolean builderGeneratesCacheableQueries() {
      * Test creates the {@link Query} from the {@link QueryBuilder} under test and delegates the
      * assertions being made on the result to the implementing subclass.
      */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34188")
     public void testToQuery() throws IOException {
         for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) {
             QueryShardContext context = createShardContext();
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
index fff29dff3f680..33682b976ca0f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java
@@ -50,7 +50,7 @@ public class ClusterServiceUtils {

     public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) {
-        MasterService masterService = new MasterService(Settings.EMPTY, threadPool);
+        MasterService masterService = new MasterService("test_master_node", Settings.EMPTY, threadPool);
         AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState);
         masterService.setClusterStatePublisher((event, ackListener) -> clusterStateRef.set(event.state()));
         masterService.setClusterStateSupplier(clusterStateRef::get);
@@ -131,8 +131,11 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove
     }

     public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode, ClusterSettings clusterSettings) {
-        ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
-            clusterSettings, threadPool);
+        Settings settings = Settings.builder()
+            .put("node.name", "test")
+            .put("cluster.name", "ClusterServiceTests")
+            .build();
+        ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool);
         clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
             @Override
             public void connectToNodes(DiscoveryNodes discoveryNodes) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 4b32745b62a9c..f5c9b50d7b8fa 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -29,7 +29,6 @@
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
 import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
-
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -70,6 +69,7 @@
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.DateUtils;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.MockPageCacheRecycler;
@@ -131,8 +131,8 @@
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.time.ZoneId;
 import java.security.Security;
+import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -185,9 +185,9 @@
 @LuceneTestCase.SuppressReproduceLine
 public abstract class ESTestCase extends LuceneTestCase {

-    private static final List JODA_TIMEZONE_IDS;
-    private static final List JAVA_TIMEZONE_IDS;
-    private static final List JAVA_ZONE_IDS;
+    protected static final List JODA_TIMEZONE_IDS;
+    protected static final List JAVA_TIMEZONE_IDS;
+    protected static final List JAVA_ZONE_IDS;

     private static final AtomicInteger portGenerator = new AtomicInteger();

@@ -228,8 +228,9 @@ public void append(LogEvent event) {

         BootstrapForTesting.ensureInitialized();

-        List jodaTZIds = new ArrayList<>(DateTimeZone.getAvailableIDs());
-        Collections.sort(jodaTZIds);
+        // filter out joda timezones that are deprecated for the java time migration
+        List jodaTZIds = DateTimeZone.getAvailableIDs().stream()
+            .filter(s -> DateUtils.DEPRECATED_SHORT_TZ_IDS.contains(s) == false).sorted().collect(Collectors.toList());
         JODA_TIMEZONE_IDS = Collections.unmodifiableList(jodaTZIds);

         List javaTZIds = Arrays.asList(TimeZone.getAvailableIDs());
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
index fad2b4e1dff29..6e4dfc8fe254c 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
@@ -95,6 +95,7 @@
 import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks;
 import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles;
 import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.ParsedAvg;
 import org.elasticsearch.search.aggregations.metrics.ParsedCardinality;
@@ -104,6 +105,7 @@
 import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks;
 import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles;
 import org.elasticsearch.search.aggregations.metrics.ParsedMax;
+import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation;
 import org.elasticsearch.search.aggregations.metrics.ParsedMin;
 import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric;
 import org.elasticsearch.search.aggregations.metrics.ParsedStats;
@@ -178,6 +180,7 @@ public abstract class InternalAggregationTestCase
         map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c));
         map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c));
         map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c));
+        map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c));
         map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c));
         map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c));
         map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c));
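ESTestCase now strips Joda zone IDs that are deprecated for the java-time migration before handing the list to randomized tests. The stream idiom in isolation (the DEPRECATED_IDS set here is a stand-in for `DateUtils.DEPRECATED_SHORT_TZ_IDS`, not its real contents):

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class TimezoneFilterExample {
    // Stand-in for DateUtils.DEPRECATED_SHORT_TZ_IDS.
    private static final Set<String> DEPRECATED_IDS =
        new HashSet<>(Arrays.asList("EST", "HST", "MST"));

    public static void main(String[] args) {
        List<String> all = Arrays.asList("EST", "Europe/Berlin", "UTC", "HST");
        // Keep only IDs that are not deprecated, sorted for reproducibility.
        List<String> usable = all.stream()
            .filter(s -> DEPRECATED_IDS.contains(s) == false)
            .sorted()
            .collect(Collectors.toList());
        System.out.println(usable); // [Europe/Berlin, UTC]
    }
}
```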
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index de4226bf2755b..daaa134211fac 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -1308,12 +1303,7 @@ public void assertSeqNos() throws Exception {
                 } catch (AlreadyClosedException e) {
                     continue; // shard is closed - just ignore
                 }
-                assertThat(replicaShardRouting + " local checkpoint mismatch",
-                    seqNoStats.getLocalCheckpoint(), equalTo(primarySeqNoStats.getLocalCheckpoint()));
-                assertThat(replicaShardRouting + " global checkpoint mismatch",
-                    seqNoStats.getGlobalCheckpoint(), equalTo(primarySeqNoStats.getGlobalCheckpoint()));
-                assertThat(replicaShardRouting + " max seq no mismatch",
-                    seqNoStats.getMaxSeqNo(), equalTo(primarySeqNoStats.getMaxSeqNo()));
+                assertThat(replicaShardRouting + " seq_no_stats mismatch", seqNoStats, equalTo(primarySeqNoStats));
                 // the local knowledge on the primary of the global checkpoint equals the global checkpoint on the shard
                 assertThat(replicaShardRouting + " global checkpoint syncs mismatch", seqNoStats.getGlobalCheckpoint(),
                     equalTo(syncGlobalCheckpoints.get(replicaShardRouting.allocationId().getId())));
@@ -1562,6 +1557,7 @@ private synchronized void startAndPublishNodesAndClients(List nod
             // if we're adding too many master-eligible nodes at once, we can't update the min master setting before adding the nodes.
             updateMinMasterNodes(currentMasters + newMasters);
         }
+        rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start
         List<Future<?>> futures = nodeAndClients.stream().map(node -> executor.submit(node::startNode)).collect(Collectors.toList());
         try {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
index ae62acff9b734..37db06a15e6a9 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java
@@ -21,7 +21,6 @@
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.discovery.zen.PingContextProvider;
@@ -45,8 +44,7 @@ public final class MockZenPing extends AbstractComponent implements ZenPing {

     private final PingContextProvider contextProvider;

-    public MockZenPing(Settings settings, PingContextProvider contextProvider) {
-        super(settings);
+    public MockZenPing(PingContextProvider contextProvider) {
         this.contextProvider = contextProvider;
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java
index 2c8305b4e12bb..a42ee370ece43 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java
@@ -99,7 +99,7 @@ private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportServ
     protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
                                  UnicastHostsProvider hostsProvider) {
         if (USE_MOCK_PINGS.get(settings)) {
-            return new MockZenPing(settings, this);
+            return new MockZenPing(this);
         } else {
             return super.newZenPing(settings, threadPool, transportService, hostsProvider);
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
index d3e05d36f6ea5..9966bfb47fa47 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.FailedShard;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayAllocator;

 import java.util.List;
@@ -34,10 +33,6 @@ public class NoopGatewayAllocator extends GatewayAllocator {

     public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();

-    protected NoopGatewayAllocator() {
-        super(Settings.EMPTY);
-    }
-
     @Override
     public void applyStartedShards(RoutingAllocation allocation, List startedShards) {
         // noop
diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java
index 2bbf2ce4c2caf..bcbe52e32cd43 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.FailedShard;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.AsyncShardFetch;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
@@ -60,7 +59,7 @@ public class TestGatewayAllocator extends GatewayAllocator {
     Map<String, Map<ShardId, ShardRouting>> knownAllocations = new HashMap<>();
     DiscoveryNodes currentNodes = DiscoveryNodes.EMPTY_NODES;

-    PrimaryShardAllocator primaryShardAllocator = new PrimaryShardAllocator(Settings.EMPTY) {
+    PrimaryShardAllocator primaryShardAllocator = new PrimaryShardAllocator() {
         @Override
         protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) {
             // for now always return immediately what we know
@@ -81,7 +80,7 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR
         }
     };

-    ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
+    ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator() {
         @Override
         protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) {
             // for now, just pretend no node has data
@@ -95,10 +94,6 @@ protected boolean hasInitiatedFetching(ShardRouting shard) {
         }
     };

-    public TestGatewayAllocator() {
-        super(Settings.EMPTY);
-    }
-
     @Override
     public void applyStartedShards(RoutingAllocation allocation, List startedShards) {
         currentNodes = allocation.nodes();
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 65ed746accacb..1a7e1c16f7b6f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -58,6 +58,7 @@
 import org.elasticsearch.test.NotEqualMessageBuilder;
 import org.hamcrest.CoreMatchers;
 import org.hamcrest.Matcher;
+import org.hamcrest.core.CombinableMatcher;

 import java.io.IOException;
 import java.nio.file.Files;
@@ -70,6 +71,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Function;

 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.is;
@@ -472,6 +474,14 @@ public static Matcher hasScore(final float score) {
         return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score);
     }

+    public static CombinableMatcher hasProperty(Function property, Matcher valueMatcher) {
+        return ElasticsearchMatchers.HasPropertyLambdaMatcher.hasProperty(property, valueMatcher);
+    }
+
+    public static Function fieldFromSource(String fieldName) {
+        return (response) -> response.getSourceAsMap().get(fieldName);
+    }
+
     public static T assertBooleanSubQuery(Query query, Class subqueryType, int i) {
         assertThat(query, instanceOf(BooleanQuery.class));
         BooleanQuery q = (BooleanQuery) query;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
index f49cc3bd39ee7..3332058648106 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
@@ -20,7 +20,12 @@

 import org.elasticsearch.search.SearchHit;
 import org.hamcrest.Description;
+import org.hamcrest.FeatureMatcher;
+import org.hamcrest.Matcher;
 import org.hamcrest.TypeSafeMatcher;
+import org.hamcrest.core.CombinableMatcher;
+
+import java.util.function.Function;

 public class ElasticsearchMatchers {

@@ -115,4 +120,27 @@ public void describeTo(final Description description) {
             description.appendText("searchHit score should be ").appendValue(score);
         }
     }
+
+    public static class HasPropertyLambdaMatcher extends FeatureMatcher {
+
+        private final Function property;
+
+        private HasPropertyLambdaMatcher(Matcher subMatcher, Function property) {
+            super(subMatcher, "object with", "lambda");
+            this.property = property;
+        }
+
+        @Override
+        protected V featureValueOf(T actual) {
+            return property.apply(actual);
+        }
+
+        /**
+         * @param valueMatcher The matcher to apply to the property
+         * @param property     The lambda to fetch property
+         */
+        public static CombinableMatcher hasProperty(Function property, Matcher valueMatcher) {
+            return new CombinableMatcher<>(new HasPropertyLambdaMatcher<>(valueMatcher, property));
+        }
+    }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
index eb300ba302b01..56f77bcefc81a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -19,8 +19,8 @@

 package org.elasticsearch.test.junit.listeners;

-import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.junit.runner.Description;
@@ -80,7 +80,7 @@ private static Logger resolveLogger(String loggerName) {
         if (loggerName.equalsIgnoreCase("_root")) {
             return LogManager.getRootLogger();
         }
-        return Loggers.getLogger(loggerName);
+        return LogManager.getLogger(loggerName);
     }

     /**
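The new `hasProperty`/`fieldFromSource` helpers let hit assertions be written declaratively, and because the matcher is a `CombinableMatcher` they chain with `and()`. A usage sketch (field values invented; the explicit Object cast sidesteps generics inference and may be unnecessary depending on the declared signatures):

```java
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.fieldFromSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertThat;

import org.elasticsearch.search.SearchHit;

public class HasPropertyUsageExample {
    void assertHit(SearchHit hit) {
        // Matches when the hit's _source contains field1=value1 and the id is "1";
        // and() comes from CombinableMatcher, so conditions chain fluently.
        assertThat(hit, hasProperty(fieldFromSource("field1"), equalTo((Object) "value1"))
            .and(hasProperty(SearchHit::getId, equalTo("1"))));
    }
}
```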
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index f903536ece6be..7f2caa06c8e7e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -282,7 +282,8 @@ public static void waitForPendingTasks(final RestClient adminClient, final Predi
     /**
      * Returns whether to preserve the state of the cluster upon completion of this test. Defaults to false. If true, overrides the value of
      * {@link #preserveIndicesUponCompletion()}, {@link #preserveTemplatesUponCompletion()}, {@link #preserveReposUponCompletion()},
-     * {@link #preserveSnapshotsUponCompletion()}, and {@link #preserveRollupJobsUponCompletion()}.
+     * {@link #preserveSnapshotsUponCompletion()}, {@link #preserveRollupJobsUponCompletion()},
+     * and {@link #preserveILMPoliciesUponCompletion()}.
      *
      * @return true if the state of the cluster should be preserved
      */
@@ -347,6 +348,15 @@ protected boolean preserveRollupJobsUponCompletion() {
         return false;
     }

+    /**
+     * Returns whether to preserve ILM Policies of this test. Defaults to not
+     * preserving them. Only runs at all if xpack is installed on the cluster
+     * being tested.
+     */
+    protected boolean preserveILMPoliciesUponCompletion() {
+        return false;
+    }
+
     private void wipeCluster() throws Exception {
         if (preserveIndicesUponCompletion() == false) {
             // wipe indices
@@ -399,6 +409,10 @@ private void wipeCluster() throws Exception {
             wipeRollupJobs();
             waitForPendingRollupTasks();
         }
+
+        if (hasXPack && false == preserveILMPoliciesUponCompletion()) {
+            deleteAllPolicies();
+        }
     }

     /**
@@ -508,6 +522,29 @@ private void waitForPendingRollupTasks() throws Exception {
         waitForPendingTasks(adminClient(), taskName -> taskName.startsWith("xpack/rollup/job") == false);
     }

+    private static void deleteAllPolicies() throws IOException {
+        Map<String, Object> policies;
+
+        try {
+            Response response = adminClient().performRequest(new Request("GET", "/_ilm/policy"));
+            policies = entityAsMap(response);
+        } catch (ResponseException e) {
+            if (RestStatus.METHOD_NOT_ALLOWED.getStatus() == e.getResponse().getStatusLine().getStatusCode()) {
+                // If bad request returned, ILM is not enabled.
+                return;
+            }
+            throw e;
+        }
+
+        if (policies == null || policies.isEmpty()) {
+            return;
+        }
+
+        for (String policyName : policies.keySet()) {
+            adminClient().performRequest(new Request("DELETE", "/_ilm/policy/" + policyName));
+        }
+    }
+
     /**
      * Logs a message if there are still running tasks. The reasoning is that any tasks still running are state the is trying to bleed into
      * other tests.
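deleteAllPolicies drives everything through the low-level Request/Response client API. Outside the test framework the same calls look roughly like this (localhost endpoint assumed; parsing the body into a map is left to the framework's entityAsMap helper and omitted here):

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class IlmPolicyCleanupExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // List the ILM policies; a 405 here would mean ILM is not available.
            Response response = client.performRequest(new Request("GET", "/_ilm/policy"));
            System.out.println(response.getStatusLine());
            // Each policy name from the parsed body would then be deleted with:
            // client.performRequest(new Request("DELETE", "/_ilm/policy/" + policyName));
        }
    }
}
```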
@@ -702,6 +739,14 @@ protected static void createIndex(String name, Settings settings, String mapping client().performRequest(request); } + protected static void createIndex(String name, Settings settings, String mapping, String aliases) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings) + + ", \"mappings\" : {" + mapping + "}" + + ", \"aliases\": {" + aliases + "} }"); + client().performRequest(request); + } + protected static void deleteIndex(String name) throws IOException { Request request = new Request("DELETE", "/" + name); client().performRequest(request); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 011da53384d5d..14a843d086916 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -38,7 +38,6 @@ import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite; -import org.elasticsearch.test.rest.yaml.section.DoSection; import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.junit.AfterClass; import org.junit.Before; @@ -184,19 +183,44 @@ public static Iterable createParameters() throws Exception { */ public static Iterable createParameters(NamedXContentRegistry executeableSectionRegistry) throws Exception { String[] paths = resolvePathsProperty(REST_TESTS_SUITE, ""); // default to all tests under the test root - List tests = new ArrayList<>(); Map> yamlSuites = loadSuites(paths); + List suites = new ArrayList<>(); + IllegalArgumentException validationException = null; // yaml suites are grouped by directory (effectively by api) for (String api : yamlSuites.keySet()) { List yamlFiles = new ArrayList<>(yamlSuites.get(api)); for (Path yamlFile : yamlFiles) { - ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile); - for (ClientYamlTestSection testSection : restTestSuite.getTestSections()) { - tests.add(new Object[]{ new ClientYamlTestCandidate(restTestSuite, testSection) }); + ClientYamlTestSuite suite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile); + suites.add(suite); + try { + suite.validate(); + } catch(IllegalArgumentException e) { + if (validationException == null) { + validationException = new IllegalArgumentException("Validation errors for the following test suites:\n- " + + e.getMessage()); + } else { + String previousMessage = validationException.getMessage(); + Throwable[] suppressed = validationException.getSuppressed(); + validationException = new IllegalArgumentException(previousMessage + "\n- " + e.getMessage()); + for (Throwable t : suppressed) { + validationException.addSuppressed(t); + } + } + validationException.addSuppressed(e); } } } + if (validationException != null) { + throw validationException; + } + + List tests = new ArrayList<>(); + for (ClientYamlTestSuite yamlTestSuite : suites) { + for (ClientYamlTestSection testSection : yamlTestSuite.getTestSections()) { + tests.add(new Object[]{ new ClientYamlTestCandidate(yamlTestSuite, testSection) }); + } + } //sort the candidates so they will always be in the same order 
before being shuffled, for repeatability tests.sort(Comparator.comparing(o -> ((ClientYamlTestCandidate) o[0]).getTestPath())); return tests; @@ -361,7 +385,7 @@ public void test() throws IOException { } } finally { logger.debug("start teardown test [{}]", testCandidate.getTestPath()); - for (DoSection doSection : testCandidate.getTeardownSection().getDoSections()) { + for (ExecutableSection doSection : testCandidate.getTeardownSection().getDoSections()) { executeSection(doSection); } logger.debug("end teardown test [{}]", testCandidate.getTestPath()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java index 1ec2382fac596..48e7fc031139b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java @@ -18,14 +18,15 @@ */ package org.elasticsearch.test.rest.yaml.section; -import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Objects; /** * Represents a test section, which is composed of a skip section and multiple executable sections. @@ -33,34 +34,37 @@ public class ClientYamlTestSection implements Comparable { public static ClientYamlTestSection parse(XContentParser parser) throws IOException { ParserUtils.advanceToFieldName(parser); - ClientYamlTestSection testSection = new ClientYamlTestSection(parser.getTokenLocation(), parser.currentName()); + XContentLocation sectionLocation = parser.getTokenLocation(); + String sectionName = parser.currentName(); + List executableSections = new ArrayList<>(); try { parser.nextToken(); - testSection.setSkipSection(SkipSection.parseIfNext(parser)); + SkipSection skipSection = SkipSection.parseIfNext(parser); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); - testSection.addExecutableSection(ExecutableSection.parse(parser)); + executableSections.add(ExecutableSection.parse(parser)); } if (parser.nextToken() != XContentParser.Token.END_OBJECT) { - throw new IllegalArgumentException("malformed section [" + testSection.getName() + "] expected [" + throw new IllegalArgumentException("malformed section [" + sectionName + "] expected [" + XContentParser.Token.END_OBJECT + "] but was [" + parser.currentToken() + "]"); } parser.nextToken(); - return testSection; + return new ClientYamlTestSection(sectionLocation, sectionName, skipSection, executableSections); } catch (Exception e) { - throw new ParsingException(parser.getTokenLocation(), "Error parsing test named [" + testSection.getName() + "]", e); + throw new ParsingException(parser.getTokenLocation(), "Error parsing test named [" + sectionName + "]", e); } } private final XContentLocation location; private final String name; - private SkipSection skipSection; + private final SkipSection skipSection; private final List executableSections; - public ClientYamlTestSection(XContentLocation location, String name) { + ClientYamlTestSection(XContentLocation location, String name, SkipSection skipSection, List executableSections) { this.location = location; this.name = name; - 
this.executableSections = new ArrayList<>(); + this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + this.executableSections = Collections.unmodifiableList(executableSections); } public XContentLocation getLocation() { @@ -75,33 +79,10 @@ public SkipSection getSkipSection() { return skipSection; } - public void setSkipSection(SkipSection skipSection) { - this.skipSection = skipSection; - } - public List getExecutableSections() { return executableSections; } - public void addExecutableSection(ExecutableSection executableSection) { - if (executableSection instanceof DoSection) { - DoSection doSection = (DoSection) executableSection; - if (false == doSection.getExpectedWarningHeaders().isEmpty() - && false == skipSection.getFeatures().contains("warnings")) { - throw new IllegalArgumentException("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so " - + "runners that do not support the [warnings] section can skip the test at line [" - + doSection.getLocation().lineNumber + "]"); - } - if (NodeSelector.ANY != doSection.getApiCallSection().getNodeSelector() - && false == skipSection.getFeatures().contains("node_selector")) { - throw new IllegalArgumentException("Attempted to add a [do] with a [node_selector] section without a corresponding " - + "[skip] so runners that do not support the [node_selector] section can skip the test at line [" - + doSection.getLocation().lineNumber + "]"); - } - } - this.executableSections.add(executableSection); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 85796494ba964..eccf99a22607e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.section; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -32,9 +33,13 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.TreeSet; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Holds a REST test suite loaded from a specific yaml file. @@ -79,11 +84,9 @@ public static ClientYamlTestSuite parse(String api, String suiteName, XContentPa "expected token to be START_OBJECT but was " + parser.currentToken()); } - ClientYamlTestSuite restTestSuite = new ClientYamlTestSuite(api, suiteName); - - restTestSuite.setSetupSection(SetupSection.parseIfNext(parser)); - restTestSuite.setTeardownSection(TeardownSection.parseIfNext(parser)); - + SetupSection setupSection = SetupSection.parseIfNext(parser); + TeardownSection teardownSection = TeardownSection.parseIfNext(parser); + Set testSections = new TreeSet<>(); while(true) { //the "---" section separator is not understood by the yaml parser. 
null is returned, same as when the parser is closed //we need to somehow distinguish between a null in the middle of a test ("---") @@ -93,27 +96,28 @@ public static ClientYamlTestSuite parse(String api, String suiteName, XContentPa break; } } - ClientYamlTestSection testSection = ClientYamlTestSection.parse(parser); - if (!restTestSuite.addTestSection(testSection)) { + if (testSections.add(testSection) == false) { throw new ParsingException(testSection.getLocation(), "duplicate test section [" + testSection.getName() + "]"); } } - return restTestSuite; + return new ClientYamlTestSuite(api, suiteName, setupSection, teardownSection, new ArrayList<>(testSections)); } private final String api; private final String name; + private final SetupSection setupSection; + private final TeardownSection teardownSection; + private final List testSections; - private SetupSection setupSection; - private TeardownSection teardownSection; - - private Set testSections = new TreeSet<>(); - - public ClientYamlTestSuite(String api, String name) { + ClientYamlTestSuite(String api, String name, SetupSection setupSection, TeardownSection teardownSection, + List testSections) { this.api = api; this.name = name; + this.setupSection = Objects.requireNonNull(setupSection, "setup section cannot be null"); + this.teardownSection = Objects.requireNonNull(teardownSection, "teardown section cannot be null"); + this.testSections = Collections.unmodifiableList(testSections); } public String getApi() { @@ -132,27 +136,71 @@ public SetupSection getSetupSection() { return setupSection; } - public void setSetupSection(SetupSection setupSection) { - this.setupSection = setupSection; - } - public TeardownSection getTeardownSection() { return teardownSection; } - public void setTeardownSection(TeardownSection teardownSection) { - this.teardownSection = teardownSection; + public void validate() { + Stream errors = validateExecutableSections(setupSection.getExecutableSections(), null, setupSection, null); + errors = Stream.concat(errors, validateExecutableSections(teardownSection.getDoSections(), null, null, teardownSection)); + errors = Stream.concat(errors, testSections.stream() + .flatMap(section -> validateExecutableSections(section.getExecutableSections(), section, setupSection, teardownSection))); + String errorMessage = errors.collect(Collectors.joining(",\n")); + if (errorMessage.isEmpty() == false) { + throw new IllegalArgumentException(getPath() + ":\n" + errorMessage); + } + } + + private static Stream validateExecutableSections(List sections, + ClientYamlTestSection testSection, + SetupSection setupSection, TeardownSection teardownSection) { + + Stream errors = sections.stream().filter(section -> section instanceof DoSection) + .map(section -> (DoSection) section) + .filter(section -> false == section.getExpectedWarningHeaders().isEmpty()) + .filter(section -> false == hasSkipFeature("warnings", testSection, setupSection, teardownSection)) + .map(section -> "attempted to add a [do] with a [warnings] section " + + "without a corresponding [\"skip\": \"features\": \"warnings\"] so runners that do not support the [warnings] " + + "section can skip the test at line [" + section.getLocation().lineNumber + "]"); + + errors = Stream.concat(errors, sections.stream().filter(section -> section instanceof DoSection) + .map(section -> (DoSection) section) + .filter(section -> NodeSelector.ANY != section.getApiCallSection().getNodeSelector()) + .filter(section -> false == hasSkipFeature("node_selector", testSection, setupSection, 
teardownSection)) + .map(section -> "attempted to add a [do] with a [node_selector] " + + "section without a corresponding [\"skip\": \"features\": \"node_selector\"] so runners that do not support the " + + "[node_selector] section can skip the test at line [" + section.getLocation().lineNumber + "]")); + + errors = Stream.concat(errors, sections.stream() + .filter(section -> section instanceof ContainsAssertion) + .filter(section -> false == hasSkipFeature("contains", testSection, setupSection, teardownSection)) + .map(section -> "attempted to add a [contains] assertion " + + "without a corresponding [\"skip\": \"features\": \"contains\"] so runners that do not support the " + + "[contains] assertion can skip the test at line [" + section.getLocation().lineNumber + "]")); + + errors = Stream.concat(errors, sections.stream().filter(section -> section instanceof DoSection) + .map(section -> (DoSection) section) + .filter(section -> false == section.getApiCallSection().getHeaders().isEmpty()) + .filter(section -> false == hasSkipFeature("headers", testSection, setupSection, teardownSection)) + .map(section -> "attempted to add a [do] with a [headers] section without a corresponding " + + "[\"skip\": \"features\": \"headers\"] so runners that do not support the [headers] section can skip the test at " + + "line [" + section.getLocation().lineNumber + "]")); + + return errors; + } + + private static boolean hasSkipFeature(String feature, ClientYamlTestSection testSection, + SetupSection setupSection, TeardownSection teardownSection) { + return (testSection != null && hasSkipFeature(feature, testSection.getSkipSection())) || + (setupSection != null && hasSkipFeature(feature, setupSection.getSkipSection())) || + (teardownSection != null && hasSkipFeature(feature, teardownSection.getSkipSection())); } - /** - * Adds a {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection} to the REST suite - * @return true if the test section was not already present, false otherwise - */ - public boolean addTestSection(ClientYamlTestSection testSection) { - return this.testSections.add(testSection); + private static boolean hasSkipFeature(String feature, SkipSection skipSection) { + return skipSection != null && skipSection.getFeatures().contains(feature); } public List getTestSections() { - return new ArrayList<>(testSections); + return testSections; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 5fb5c1d003dd4..dcda4a2a025e8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -182,7 +182,6 @@ public static DoSection parse(XContentParser parser) throws IOException { return doSection; } - private static final Logger logger = LogManager.getLogger(DoSection.class); private final XContentLocation location; @@ -206,7 +205,7 @@ public ApiCallSection getApiCallSection() { return apiCallSection; } - public void setApiCallSection(ApiCallSection apiCallSection) { + void setApiCallSection(ApiCallSection apiCallSection) { this.apiCallSection = apiCallSection; } @@ -214,7 +213,7 @@ public void setApiCallSection(ApiCallSection apiCallSection) { * Warning headers that we expect from this response. If the headers don't match exactly this request is considered to have failed. * Defaults to emptyList. 
*/ - public List getExpectedWarningHeaders() { + List getExpectedWarningHeaders() { return expectedWarningHeaders; } @@ -222,7 +221,7 @@ public List getExpectedWarningHeaders() { * Set the warning headers that we expect from this response. If the headers don't match exactly this request is considered to have * failed. Defaults to emptyList. */ - public void setExpectedWarningHeaders(List expectedWarningHeaders) { + void setExpectedWarningHeaders(List expectedWarningHeaders) { this.expectedWarningHeaders = expectedWarningHeaders; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java index 692888a003857..38a034d47e769 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java @@ -22,7 +22,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Objects; /** * Represents a setup section. Holds a skip section and multiple do sections. @@ -31,7 +33,7 @@ public class SetupSection { /** * Parse a {@link SetupSection} if the next field is {@code skip}, otherwise returns {@link SetupSection#EMPTY}. */ - public static SetupSection parseIfNext(XContentParser parser) throws IOException { + static SetupSection parseIfNext(XContentParser parser) throws IOException { ParserUtils.advanceToFieldName(parser); if ("setup".equals(parser.currentName())) { @@ -45,58 +47,42 @@ public static SetupSection parseIfNext(XContentParser parser) throws IOException } public static SetupSection parse(XContentParser parser) throws IOException { - SetupSection setupSection = new SetupSection(); - setupSection.setSkipSection(SkipSection.parseIfNext(parser)); - + SkipSection skipSection = SkipSection.parseIfNext(parser); + List executableSections = new ArrayList<>(); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); if ("do".equals(parser.currentName())) { - setupSection.addDoSection(DoSection.parse(parser)); + executableSections.add(DoSection.parse(parser)); } else if ("set".equals(parser.currentName())) { - setupSection.addSetSection(SetSection.parse(parser)); + executableSections.add(SetSection.parse(parser)); } else { throw new IllegalArgumentException("section [" + parser.currentName() + "] not supported within setup section"); } parser.nextToken(); } - parser.nextToken(); - - return setupSection; + return new SetupSection(skipSection, executableSections); } - public static final SetupSection EMPTY; + public static final SetupSection EMPTY = new SetupSection(SkipSection.EMPTY, Collections.emptyList()); - static { - EMPTY = new SetupSection(); - EMPTY.setSkipSection(SkipSection.EMPTY); - } - - private SkipSection skipSection; + private final SkipSection skipSection; + private final List executableSections; - private List executableSections = new ArrayList<>(); + SetupSection(SkipSection skipSection, List executableSections) { + this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + this.executableSections = Collections.unmodifiableList(executableSections); + } public SkipSection getSkipSection() { return skipSection; } - public void setSkipSection(SkipSection skipSection) { - this.skipSection = skipSection; - } - public List getExecutableSections() { return executableSections; } - public 
void addDoSection(DoSection doSection) { - this.executableSections.add(doSection); - } - - public void addSetSection(SetSection setSection) { - this.executableSections.add(setSection); - } - public boolean isEmpty() { return EMPTY.equals(this); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java index 3b272fe673c76..5eecaa758f0bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java @@ -24,13 +24,15 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Objects; public class TeardownSection { /** * Parse a {@link TeardownSection} if the next field is {@code skip}, otherwise returns {@link TeardownSection#EMPTY}. */ - public static TeardownSection parseIfNext(XContentParser parser) throws IOException { + static TeardownSection parseIfNext(XContentParser parser) throws IOException { ParserUtils.advanceToFieldName(parser); if ("teardown".equals(parser.currentName())) { @@ -44,50 +46,40 @@ public static TeardownSection parseIfNext(XContentParser parser) throws IOExcept } public static TeardownSection parse(XContentParser parser) throws IOException { - TeardownSection teardownSection = new TeardownSection(); - teardownSection.setSkipSection(SkipSection.parseIfNext(parser)); - + SkipSection skipSection = SkipSection.parseIfNext(parser); + List executableSections = new ArrayList<>(); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); if (!"do".equals(parser.currentName())) { throw new ParsingException(parser.getTokenLocation(), "section [" + parser.currentName() + "] not supported within teardown section"); } - - teardownSection.addDoSection(DoSection.parse(parser)); + executableSections.add(DoSection.parse(parser)); parser.nextToken(); } parser.nextToken(); - return teardownSection; + return new TeardownSection(skipSection, executableSections); } - public static final TeardownSection EMPTY; + public static final TeardownSection EMPTY = new TeardownSection(SkipSection.EMPTY, Collections.emptyList()); - static { - EMPTY = new TeardownSection(); - EMPTY.setSkipSection(SkipSection.EMPTY); - } + private final SkipSection skipSection; + private final List doSections; - private SkipSection skipSection; - private List doSections = new ArrayList<>(); + TeardownSection(SkipSection skipSection, List doSections) { + this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + this.doSections = Collections.unmodifiableList(doSections); + } public SkipSection getSkipSection() { return skipSection; } - public void setSkipSection(SkipSection skipSection) { - this.skipSection = skipSection; - } - - public List getDoSections() { + public List getDoSections() { return doSections; } - public void addDoSection(DoSection doSection) { - this.doSections.add(doSection); - } - public boolean isEmpty() { return EMPTY.equals(this); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index d6c4f30a885d5..3fc4d030da046 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.transport; import com.carrotsearch.randomizedtesting.SysGlobals; +import java.util.concurrent.TimeUnit; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; @@ -599,21 +600,21 @@ public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile Transport.Connection connection = super.openConnection(node, profile); synchronized (openConnections) { - List connections = openConnections.computeIfAbsent(node, - (n) -> new CopyOnWriteArrayList<>()); - connections.add(connection); - } - - connection.addCloseListener(ActionListener.wrap(() -> { - synchronized (openConnections) { - List connections = openConnections.get(node); - boolean remove = connections.remove(connection); - assert remove : "Should have removed connection"; - if (connections.isEmpty()) { - openConnections.remove(node); + openConnections.computeIfAbsent(node, n -> new CopyOnWriteArrayList<>()).add(connection); + connection.addCloseListener(ActionListener.wrap(() -> { + synchronized (openConnections) { + List connections = openConnections.get(node); + boolean remove = connections.remove(connection); + assert remove : "Should have removed connection"; + if (connections.isEmpty()) { + openConnections.remove(node); + } + if (openConnections.isEmpty()) { + openConnections.notifyAll(); + } } - } - })); + })); + } return connection; } @@ -621,8 +622,15 @@ public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile @Override protected void doClose() throws IOException { super.doClose(); - synchronized (openConnections) { - assert openConnections.size() == 0 : "still open connections: " + openConnections; + try { + synchronized (openConnections) { + if (openConnections.isEmpty() == false) { + openConnections.wait(TimeUnit.SECONDS.toMillis(30L)); + } + assert openConnections.size() == 0 : "still open connections: " + openConnections; + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index b36685d564593..dc08fbf257d66 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -46,7 +46,6 @@ import org.elasticsearch.transport.TcpServerChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.Transports; import java.io.IOException; import java.net.InetSocketAddress; @@ -65,8 +64,6 @@ public class MockNioTransport extends TcpTransport { - private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; - private final PageCacheRecycler pageCacheRecycler; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private volatile NioGroup nioGroup; @@ -97,7 +94,7 @@ protected MockSocketChannel initiateChannel(DiscoveryNode node, ActionListener new TestingSocketEventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index b61182415eeb4..df13f2ba3b186 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -96,4 +96,9 @@ protected boolean preserveClusterSettings() { protected boolean preserveRollupJobsUponCompletion() { return true; } + + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 59aa53a3453de..b96dd11be2a65 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -24,8 +24,10 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import static java.util.stream.Collectors.toCollection; import static java.util.stream.Collectors.toList; @@ -389,13 +391,19 @@ private class VersionsFromProperty { private final List unreleased = new ArrayList<>(); private VersionsFromProperty(String property) { + Set allUnreleased = new HashSet<>(Arrays.asList( + System.getProperty("tests.gradle_unreleased_versions", "").split(",") + )); + if (allUnreleased.contains("")) { // splitting the default "" yields [""], so the set is never empty + fail("[tests.gradle_unreleased_versions] not set or empty. Gradle should set this before running."); + } String versions = System.getProperty(property); - assertNotNull("Couldn't find [" + property + "]. Gradle should set these before running the tests.", versions); + assertNotNull("Couldn't find [" + property + "]. 
Gradle should set this before running the tests.", versions); logger.info("Looked up versions [{}={}]", property, versions); for (String version : versions.split(",")) { - if (version.endsWith("-SNAPSHOT")) { - unreleased.add(version.replace("-SNAPSHOT", "")); + if (allUnreleased.contains(version)) { + unreleased.add(version); } else { released.add(version); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 500cff893cb1f..7d7263699be88 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -20,78 +20,19 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; -import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; import java.util.Map; -import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - public void testAddingDoWithoutSkips() { - int lineNumber = between(1, 10000); - ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); - section.setSkipSection(SkipSection.EMPTY); - DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); - doSection.setApiCallSection(new ApiCallSection("test")); - section.addExecutableSection(doSection); - } - - public void testAddingDoWithWarningWithSkip() { - int lineNumber = between(1, 10000); - ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); - section.setSkipSection(new SkipSection(null, singletonList("warnings"), null)); - DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); - doSection.setExpectedWarningHeaders(singletonList("foo")); - doSection.setApiCallSection(new ApiCallSection("test")); - section.addExecutableSection(doSection); - } - - public void testAddingDoWithWarningWithSkipButNotWarnings() { - int lineNumber = between(1, 10000); - ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); - section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); - DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); - doSection.setExpectedWarningHeaders(singletonList("foo")); - doSection.setApiCallSection(new ApiCallSection("test")); - Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); - assertEquals("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so runners that do not support the" - + " [warnings] section can skip the test at line [" + lineNumber + "]", e.getMessage()); - } - - public void testAddingDoWithNodeSelectorWithSkip() { - int lineNumber = between(1, 10000); - ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); - section.setSkipSection(new 
SkipSection(null, singletonList("node_selector"), null)); - DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); - ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); - doSection.setApiCallSection(apiCall); - section.addExecutableSection(doSection); - } - - public void testAddingDoWithNodeSelectorWithSkipButNotWarnings() { - int lineNumber = between(1, 10000); - ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test"); - section.setSkipSection(new SkipSection(null, singletonList("yaml"), null)); - DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); - ApiCallSection apiCall = new ApiCallSection("test"); - apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); - doSection.setApiCallSection(apiCall); - Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection)); - assertEquals("Attempted to add a [do] with a [node_selector] section without a corresponding" - + " [skip] so runners that do not support the [node_selector] section can skip the test at" - + " line [" + lineNumber + "]", e.getMessage()); - } - public void testWrongIndentation() throws Exception { { XContentParser parser = createParser(YamlXContent.yamlXContent, @@ -297,7 +238,7 @@ public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception { LengthAssertion lengthAssertion = (LengthAssertion) testSection.getExecutableSections().get(6); assertThat(lengthAssertion.getField(), equalTo("_index")); assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class)); - assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(6)); + assertThat(lengthAssertion.getExpectedValue(), equalTo(6)); IsFalseAssertion falseAssertion = (IsFalseAssertion)testSection.getExecutableSections().get(7); assertThat(falseAssertion.getField(), equalTo("whatever")); @@ -305,12 +246,12 @@ public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception { GreaterThanAssertion greaterThanAssertion = (GreaterThanAssertion) testSection.getExecutableSections().get(8); assertThat(greaterThanAssertion.getField(), equalTo("size")); assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class)); - assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(5)); + assertThat(greaterThanAssertion.getExpectedValue(), equalTo(5)); LessThanAssertion lessThanAssertion = (LessThanAssertion) testSection.getExecutableSections().get(9); assertThat(lessThanAssertion.getField(), equalTo("size")); assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class)); - assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(10)); + assertThat(lessThanAssertion.getExpectedValue(), equalTo(10)); } public void testSmallSection() throws Exception { @@ -327,5 +268,4 @@ public void testSmallSection() throws Exception { assertThat(testSection.getName(), equalTo("node_info test")); assertThat(testSection.getExecutableSections().size(), equalTo(3)); } - } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 873702f7c68ce..da485a8430e28 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ 
b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -20,11 +20,18 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; +import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Map; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -106,9 +113,11 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTeardownSection().getSkipSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTeardownSection().getDoSections().size(), equalTo(1)); - assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.delete")); - assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1)); - assertThat(restTestSuite.getTeardownSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), + assertThat(((DoSection)restTestSuite.getTeardownSection().getDoSections().get(0)).getApiCallSection().getApi(), + equalTo("indices.delete")); + assertThat(((DoSection)restTestSuite.getTeardownSection().getDoSections().get(0)).getApiCallSection().getParams().size(), + equalTo(1)); + assertThat(((DoSection)restTestSuite.getTeardownSection().getDoSections().get(0)).getApiCallSection().getParams().get("index"), equalTo("test_index")); } else { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(true)); @@ -378,4 +387,188 @@ public void testParseTestDuplicateTestSections() throws Exception { ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), parser)); assertThat(e.getMessage(), containsString("duplicate test section")); } + + public void testAddingDoWithoutSkips() { + int lineNumber = between(1, 10000); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setApiCallSection(new ApiCallSection("test")); + ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test", + SkipSection.EMPTY, Collections.singletonList(doSection)); + ClientYamlTestSuite clientYamlTestSuite = new ClientYamlTestSuite("api", "name", SetupSection.EMPTY, TeardownSection.EMPTY, + Collections.singletonList(section)); + clientYamlTestSuite.validate(); + } + + public void testAddingDoWithWarningWithoutSkipWarnings() { + int lineNumber = between(1, 10000); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); + ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); + assertThat(e.getMessage(), containsString("api/name:\nattempted to add a [do] with a [warnings] section without a corresponding " + + "[\"skip\": \"features\": \"warnings\"] so runners that do not support the [warnings] section can skip the test " + + "at line [" + 
lineNumber + "]")); + } + + public void testAddingDoWithHeaderWithoutSkipHeaders() { + int lineNumber = between(1, 10000); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCallSection = new ApiCallSection("test"); + apiCallSection.addHeaders(Collections.singletonMap("header", "value")); + doSection.setApiCallSection(apiCallSection); + ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); + assertThat(e.getMessage(), containsString("api/name:\nattempted to add a [do] with a [headers] section without a corresponding " + + "[\"skip\": \"features\": \"headers\"] so runners that do not support the [headers] section can skip the test at line [" + + lineNumber + "]")); + } + + public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { + int lineNumber = between(1, 10000); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + doSection.setApiCallSection(apiCall); + ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); + assertThat(e.getMessage(), containsString("api/name:\nattempted to add a [do] with a [node_selector] section without a " + + "corresponding [\"skip\": \"features\": \"node_selector\"] so runners that do not support the [node_selector] section can " + + "skip the test at line [" + lineNumber + "]")); + } + + public void testAddingContainsWithoutSkipContains() { + int lineNumber = between(1, 10000); + ContainsAssertion containsAssertion = new ContainsAssertion( + new XContentLocation(lineNumber, 0), + randomAlphaOfLength(randomIntBetween(3, 30)), + randomDouble()); + ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, containsAssertion); + Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); + assertThat(e.getMessage(), containsString("api/name:\nattempted to add a [contains] assertion without a corresponding " + + "[\"skip\": \"features\": \"contains\"] so runners that do not support the [contains] assertion " + + "can skip the test at line [" + lineNumber + "]")); + } + + public void testMultipleValidationErrors() { + int firstLineNumber = between(1, 10000); + List sections = new ArrayList<>(); + { + ContainsAssertion containsAssertion = new ContainsAssertion( + new XContentLocation(firstLineNumber, 0), + randomAlphaOfLength(randomIntBetween(3, 30)), + randomDouble()); + sections.add(new ClientYamlTestSection( + new XContentLocation(0, 0), "section1", SkipSection.EMPTY, Collections.singletonList(containsAssertion))); + } + int secondLineNumber = between(1, 10000); + int thirdLineNumber = between(1, 10000); + List doSections = new ArrayList<>(); + { + DoSection doSection = new DoSection(new XContentLocation(secondLineNumber, 0)); + doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); + doSections.add(doSection); + } + { + DoSection doSection = new DoSection(new XContentLocation(thirdLineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + doSection.setApiCallSection(apiCall); + doSections.add(doSection); + } + sections.add(new ClientYamlTestSection(new XContentLocation(0, 0), "section2", 
SkipSection.EMPTY, doSections)); + + ClientYamlTestSuite testSuite = new ClientYamlTestSuite("api", "name", SetupSection.EMPTY, TeardownSection.EMPTY, sections); + Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); + assertEquals("api/name:\n" + + "attempted to add a [contains] assertion without a corresponding [\"skip\": \"features\": \"contains\"] so runners " + + "that do not support the [contains] assertion can skip the test at line [" + firstLineNumber + "],\n" + + "attempted to add a [do] with a [warnings] section without a corresponding [\"skip\": \"features\": \"warnings\"] so " + + "runners that do not support the [warnings] section can skip the test at line [" + secondLineNumber + "],\n" + + "attempted to add a [do] with a [node_selector] section without a corresponding [\"skip\": \"features\": \"node_selector\"] " + + "so runners that do not support the [node_selector] section can skip the test at line [" + thirdLineNumber + "]", + e.getMessage()); + } + + public void testAddingDoWithWarningWithSkip() { + int lineNumber = between(1, 10000); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + doSection.setExpectedWarningHeaders(singletonList("foo")); + doSection.setApiCallSection(new ApiCallSection("test")); + SkipSection skipSection = new SkipSection(null, singletonList("warnings"), null); + createTestSuite(skipSection, doSection).validate(); + } + + public void testAddingDoWithNodeSelectorWithSkip() { + int lineNumber = between(1, 10000); + SkipSection skipSection = new SkipSection(null, singletonList("node_selector"), null); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCall = new ApiCallSection("test"); + apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); + doSection.setApiCallSection(apiCall); + createTestSuite(skipSection, doSection).validate(); + } + + public void testAddingDoWithHeadersWithSkip() { + int lineNumber = between(1, 10000); + SkipSection skipSection = new SkipSection(null, singletonList("headers"), null); + DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); + ApiCallSection apiCallSection = new ApiCallSection("test"); + apiCallSection.addHeaders(singletonMap("foo", "bar")); + doSection.setApiCallSection(apiCallSection); + createTestSuite(skipSection, doSection).validate(); + } + + public void testAddingContainsWithSkip() { + int lineNumber = between(1, 10000); + SkipSection skipSection = new SkipSection(null, singletonList("contains"), null); + ContainsAssertion containsAssertion = new ContainsAssertion( + new XContentLocation(lineNumber, 0), + randomAlphaOfLength(randomIntBetween(3, 30)), + randomDouble()); + createTestSuite(skipSection, containsAssertion).validate(); + } + + private static ClientYamlTestSuite createTestSuite(SkipSection skipSection, ExecutableSection executableSection) { + final SetupSection setupSection; + final TeardownSection teardownSection; + final ClientYamlTestSection clientYamlTestSection; + switch(randomIntBetween(0, 4)) { + case 0: + setupSection = new SetupSection(skipSection, Collections.emptyList()); + teardownSection = TeardownSection.EMPTY; + clientYamlTestSection = new ClientYamlTestSection(new XContentLocation(0, 0), "test", + SkipSection.EMPTY, Collections.singletonList(executableSection)); + break; + case 1: + setupSection = SetupSection.EMPTY; + teardownSection = new TeardownSection(skipSection, Collections.emptyList()); + clientYamlTestSection = new ClientYamlTestSection(new 
XContentLocation(0, 0), "test", + SkipSection.EMPTY, Collections.singletonList(executableSection)); + break; + case 2: + setupSection = SetupSection.EMPTY; + teardownSection = TeardownSection.EMPTY; + clientYamlTestSection = new ClientYamlTestSection(new XContentLocation(0, 0), "test", + skipSection, Collections.singletonList(executableSection)); + break; + case 3: + setupSection = new SetupSection(skipSection, Collections.singletonList(executableSection)); + teardownSection = TeardownSection.EMPTY; + clientYamlTestSection = new ClientYamlTestSection(new XContentLocation(0, 0), "test", + SkipSection.EMPTY, randomBoolean() ? Collections.emptyList() : Collections.singletonList(executableSection)); + break; + case 4: + setupSection = SetupSection.EMPTY; + teardownSection = new TeardownSection(skipSection, Collections.singletonList(executableSection)); + clientYamlTestSection = new ClientYamlTestSection(new XContentLocation(0, 0), "test", + SkipSection.EMPTY, randomBoolean() ? Collections.emptyList() : Collections.singletonList(executableSection)); + break; + default: + throw new UnsupportedOperationException(); + } + return new ClientYamlTestSuite("api", "name", setupSection, teardownSection, + Collections.singletonList(clientYamlTestSection)); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 07afa9f33b5b1..96bff85389c8a 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -49,8 +49,8 @@ public void testParseTeardownSection() throws Exception { assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(true)); assertThat(section.getDoSections().size(), equalTo(2)); - assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); - assertThat(section.getDoSections().get(1).getApiCallSection().getApi(), equalTo("delete2")); + assertThat(((DoSection)section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); + assertThat(((DoSection)section.getDoSections().get(1)).getApiCallSection().getApi(), equalTo("delete2")); } public void testParseWithSkip() throws Exception { @@ -79,7 +79,7 @@ public void testParseWithSkip() throws Exception { assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_6_3_0)); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); - assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); - assertThat(section.getDoSections().get(1).getApiCallSection().getApi(), equalTo("delete2")); + assertThat(((DoSection)section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); + assertThat(((DoSection)section.getDoSections().get(1)).getApiCallSection().getApi(), equalTo("delete2")); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index fe5dedb2d5a7e..2c031d9e3acff 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -20,9 +20,8 @@ package 
org.elasticsearch.test.test; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.logging.Loggers; +import org.apache.logging.log4j.Logger; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.listeners.LoggingListener; @@ -48,8 +47,8 @@ public void testCustomLevelPerMethod() throws Exception { Description suiteDescription = Description.createSuiteDescription(TestClass.class); - Logger xyzLogger = Loggers.getLogger("xyz"); - Logger abcLogger = Loggers.getLogger("abc"); + Logger xyzLogger = LogManager.getLogger("xyz"); + Logger abcLogger = LogManager.getLogger("abc"); final Level level = LogManager.getRootLogger().getLevel(); @@ -80,13 +79,13 @@ public void testCustomLevelPerClass() throws Exception { Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); - Logger abcLogger = Loggers.getLogger("abc"); - Logger xyzLogger = Loggers.getLogger("xyz"); + Logger abcLogger = LogManager.getLogger("abc"); + Logger xyzLogger = LogManager.getLogger("xyz"); // we include foo and foo.bar to maintain that logging levels are applied from the top of the hierarchy down; this ensures that // setting the logging level for a parent logger and a child logger applies the parent level first and then the child as otherwise // setting the parent level would overwrite the child level - Logger fooLogger = Loggers.getLogger("foo"); - Logger fooBarLogger = Loggers.getLogger("foo.bar"); + Logger fooLogger = LogManager.getLogger("foo"); + Logger fooBarLogger = LogManager.getLogger("foo.bar"); final Level level = LogManager.getRootLogger().getLevel(); @@ -125,8 +124,8 @@ public void testCustomLevelPerClassAndPerMethod() throws Exception { Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class); - Logger abcLogger = Loggers.getLogger("abc"); - Logger xyzLogger = Loggers.getLogger("xyz"); + Logger abcLogger = LogManager.getLogger("abc"); + Logger xyzLogger = LogManager.getLogger("xyz"); final Level level = LogManager.getRootLogger().getLevel(); diff --git a/x-pack/build.gradle b/x-pack/build.gradle index d2a19be2136fb..e1c72c734798e 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -16,19 +16,17 @@ subprojects { project.esplugin.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') project.esplugin.noticeFile = xpackRootProject.file('NOTICE.txt') } + + tasks.withType(LicenseHeadersTask.class) { + approvedLicenses = ['Elastic License', 'Generated'] + additionalLicense 'ELAST', 'Elastic License', 'Licensed under the Elastic License' + } - if (project.name != 'protocol') { - tasks.withType(LicenseHeadersTask.class) { - approvedLicenses = ['Elastic License', 'Generated'] - additionalLicense 'ELAST', 'Elastic License', 'Licensed under the Elastic License' - } - - ext.licenseName = 'Elastic License' - ext.licenseUrl = ext.elasticLicenseUrl + ext.licenseName = 'Elastic License' + ext.licenseUrl = ext.elasticLicenseUrl - project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') - project.ext.noticeFile = xpackRootProject.file('NOTICE.txt') - } + project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') + project.ext.noticeFile = xpackRootProject.file('NOTICE.txt') } subprojects { @@ -36,6 +34,7 @@ subprojects { ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] 
ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ilm:${version}": xpackModule('ilm')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-logstash:${version}": xpackModule('logstash')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ml:${version}": xpackModule('ml')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-monitoring:${version}": xpackModule('monitoring')] diff --git a/x-pack/docs/en/rest-api/watcher/stats.asciidoc b/x-pack/docs/en/rest-api/watcher/stats.asciidoc index 38f8ede925e4b..3f875485ba006 100644 --- a/x-pack/docs/en/rest-api/watcher/stats.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stats.asciidoc @@ -25,7 +25,7 @@ currently being executed by {watcher}. Additional information is shared per watch that is currently executing. This information includes the `watch_id`, the time its execution started and its current execution phase. -To include this metric, the `metric` option should be set to `executing_watches` +To include this metric, the `metric` option should be set to `current_watches` or `_all`. In addition you can also specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being executed. These stack traces can give you more insight into an execution of a watch. @@ -51,7 +51,7 @@ To include this metric, the `metric` option should include `queued_watches` or `metric`:: (enum) Defines which additional metrics are included in the response. - `executing_watches`::: Includes the current executing watches in the response. + `current_watches`::: Includes the currently executing watches in the response. `queued_watches`::: Includes the watches queued for execution in the response. `_all`::: Includes all metrics in the response. @@ -98,7 +98,7 @@ and will include the basic metrics and metrics about the current executing watch [source,js] -------------------------------------------------- -GET _xpack/watcher/stats?metric=executing_watches +GET _xpack/watcher/stats?metric=current_watches -------------------------------------------------- // CONSOLE diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index ba554eb8595dd..ab8830a64f179 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -11,9 +11,9 @@ For more information about Active Directory realms, see {xpack-ref}/active-directory-realm.html[Active Directory User Authentication]. . Add a realm configuration of type `active_directory` to `elasticsearch.yml` -under the `xpack.security.authc.realms` namespace. At a minimum, you must set -the realm `type` to `active_directory` and specify the Active Directory -`domain_name`. If you are configuring multiple realms, you should also +under the `xpack.security.authc.realms.active_directory` namespace. +At a minimum, you must specify the Active Directory `domain_name`. +If you are configuring multiple realms, you should also explicitly set the `order` attribute to control the order in which the realms are consulted during authentication. 
+ @@ -35,10 +35,10 @@ xpack: authc: realms: active_directory: - type: active_directory - order: 0 <1> - domain_name: ad.example.com - url: ldaps://ad.example.com:636 <2> + my_ad: + order: 0 <1> + domain_name: ad.example.com + url: ldaps://ad.example.com:636 <2> ------------------------------------------------------------ <1> The realm order controls the order in which the configured realms are checked when authenticating a user. @@ -71,12 +71,12 @@ xpack: authc: realms: active_directory: - type: active_directory - order: 0 - domain_name: example.com <1> - url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> - load_balance: - type: "round_robin" <3> + my_ad: + order: 0 + domain_name: example.com <1> + url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> + load_balance: + type: "round_robin" <3> ------------------------------------------------------------ <1> The `domain_name` is set to the name of the root domain in the forest. <2> The `url` value used in this example has URLs for two different Domain Controllers, @@ -135,11 +135,11 @@ xpack: authc: realms: active_directory: - type: active_directory - order: 0 - domain_name: ad.example.com - url: ldaps://ad.example.com:636 - bind_dn: es_svc_user@ad.example.com <1> + my_ad: + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + bind_dn: es_svc_user@ad.example.com <1> ------------------------------------------------------------ <1> This is the user that all Active Directory search requests are executed as. Without a bind user configured, all requests run as the user that is authenticating @@ -152,7 +152,7 @@ the following command adds the password for the example realm above: [source, shell] ------------------------------------------------------------ bin/elasticsearch-keystore add \ -xpack.security.authc.realms.active_directory.secure_bind_password +xpack.security.authc.realms.active_directory.my_ad.secure_bind_password ------------------------------------------------------------ When a bind user is configured, connection pooling is enabled by default. diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc index fbf823dae7060..ac596c11e0e5a 100644 --- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -25,10 +25,9 @@ explicitly configure a `file` realm. For more information about file realms, see {xpack-ref}/file-realm.html[File-based user authentication]. -. (Optional) Add a realm configuration of type `file` to `elasticsearch.yml` -under the `xpack.security.authc.realms` namespace. At a minimum, you must set -the realm `type` to `file`. If you are configuring multiple realms, you should -also explicitly set the `order` attribute. +. (Optional) Add a realm configuration to `elasticsearch.yml` under the +`xpack.security.authc.realms.file` namespace. At a minimum, you must set +the realm's `order` attribute. + -- //See <> for all of the options you can set for a `file` realm. 
@@ -42,9 +41,9 @@ xpack: security: authc: realms: - file1: - type: file - order: 0 + file: + file1: + order: 0 ------------------------------------------------------------ -- diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc index cc0863112c7e4..25245b69cbea7 100644 --- a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -113,15 +113,14 @@ NOTE: You can configure only one Kerberos realm on {es} nodes. To configure a Kerberos realm, there are a few mandatory realm settings and other optional settings that you need to configure in the `elasticsearch.yml` -configuration file. Add a realm of type `kerberos` under the -`xpack.security.authc.realms` namespace. +configuration file. Add a realm configuration under the +`xpack.security.authc.realms.kerberos` namespace. The most common configuration for a Kerberos realm is as follows: [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.kerb1: - type: kerberos +xpack.security.authc.realms.kerberos.kerb1: order: 3 keytab.path: es.keytab remove_realm_name: false diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index a5f8c3e441205..9cc54effa1f78 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -27,12 +27,12 @@ However, multiple bind operations might be needed to find the correct user DN. . To configure an `ldap` realm with user search: -.. Add a realm configuration of type `ldap` to `elasticsearch.yml` under the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `ldap`, specify the `url` of the LDAP server, and set -`user_search.base_dn` to the container DN where the users are searched for. If -you are configuring multiple realms, you should also explicitly set the `order` -attribute to control the order in which the realms are consulted during +.. Add a realm configuration to `elasticsearch.yml` under the +`xpack.security.authc.realms.ldap` namespace. At a minimum, you must specify +the `url` of the LDAP server, and set `user_search.base_dn` to the container DN +where the users are searched for. +If you are configuring multiple realms, you should also explicitly set the +`order` attribute to control the order in which the realms are consulted during authentication. See <> for all of the options you can set for an `ldap` realm.
+ @@ -45,19 +45,19 @@ xpack: security: authc: realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" - user_search: - base_dn: "dc=example,dc=com" - attribute: cn - group_search: - base_dn: "dc=example,dc=com" - files: - role_mapping: "ES_PATH_CONF/role_mapping.yml" - unmapped_groups_as_roles: false + ldap: + ldap1: + order: 0 + url: "ldaps://ldap.example.com:636" + bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" + user_search: + base_dn: "dc=example,dc=com" + attribute: cn + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "ES_PATH_CONF/role_mapping.yml" + unmapped_groups_as_roles: false ------------------------------------------------------------ The password for the `bind_dn` user should be configured by adding the appropriate @@ -67,7 +67,7 @@ For example, the following command adds the password for the example realm above [source, shell] ------------------------------------------------------------ bin/elasticsearch-keystore add \ -xpack.security.authc.realms.ldap1.secure_bind_password +xpack.security.authc.realms.ldap.ldap1.secure_bind_password ------------------------------------------------------------ IMPORTANT: When you configure realms in `elasticsearch.yml`, only the @@ -78,13 +78,13 @@ realms you specify are used for authentication. If you also want to use the . To configure an `ldap` realm with user DN templates: -.. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `ldap`, specify the `url` of the LDAP server, and specify at least one -template with the `user_dn_templates` option. If you are configuring multiple -realms, you should also explicitly set the `order` attribute to control the -order in which the realms are consulted during authentication. See -<> for all of the options you can set for an `ldap` realm. +.. Add a realm configuration to `elasticsearch.yml` in the +`xpack.security.authc.realms.ldap` namespace. At a minimum, you must specify +the `url` of the LDAP server, and specify at least one template with the +`user_dn_templates` option. If you are configuring multiple realms, you should +also explicitly set the `order` attribute to control the order in which the +realms are consulted during authentication. +See <> for all of the options you can set for an `ldap` realm. + -- For example, the following snippet shows an LDAP realm configured with user DN @@ -96,18 +96,18 @@ xpack: security: authc: realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - user_dn_templates: - - "cn={0}, ou=users, o=marketing, dc=example, dc=com" - - "cn={0}, ou=users, o=engineering, dc=example, dc=com" - group_search: - base_dn: "dc=example,dc=com" - files: - role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" - unmapped_groups_as_roles: false + ldap: + ldap1: + order: 0 + url: "ldaps://ldap.example.com:636" + user_dn_templates: + - "cn={0}, ou=users, o=marketing, dc=example, dc=com" + - "cn={0}, ou=users, o=engineering, dc=example, dc=com" + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" + unmapped_groups_as_roles: false ------------------------------------------------------------ IMPORTANT: The `bind_dn` setting is not used in template mode. 
@@ -212,8 +212,8 @@ xpack: security: authc: realms: - ldap1: - type: ldap - metadata: cn + ldap: + ldap1: + metadata: cn -------------------------------------------------- -- diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc index e9fb9cd0eb8a0..55d5f361132c9 100644 --- a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc @@ -9,15 +9,15 @@ The native realm is available by default when no other realms are configured. If other realm settings have been configured in `elasticsearch.yml`, you must add the native realm to the realm chain. -You can configure options for the `native` realm in the -`xpack.security.authc.realms` namespace in `elasticsearch.yml`. Explicitly -configuring a native realm enables you to set the order in which it appears in -the realm chain, temporarily disable the realm, and control its cache options. - -. Add a realm configuration of type `native` to `elasticsearch.yml` under the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `native`. If you are configuring multiple realms, you should also -explicitly set the `order` attribute. +You can configure a `native` realm in the `xpack.security.authc.realms.native` +namespace in `elasticsearch.yml`. +Explicitly configuring a native realm enables you to set the order in which it +appears in the realm chain, temporarily disable the realm, and control its +cache options. + +. Add a realm configuration to `elasticsearch.yml` under the +`xpack.security.authc.realms.native` namespace. It is recommended that you +explicitly set the `order` attribute for the realm. + -- See <> for all of the options you can set for the `native` realm. @@ -30,9 +30,9 @@ xpack: security: authc: realms: - native1: - type: native - order: 0 + native: + native1: + order: 0 ------------------------------------------------------------ NOTE: To limit exposure to credential theft and mitigate credential compromise, diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 9a4d5fcf18bf6..587592b9f2001 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -24,9 +24,9 @@ IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI. For more information, see {xpack-ref}/pki-realm.html[PKI User Authentication]. -. Add a realm configuration of type `pki` to `elasticsearch.yml` under the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `pki`. If you are configuring multiple realms, you should also +. Add a realm configuration for a `pki` realm to `elasticsearch.yml` under the +`xpack.security.authc.realms.pki` namespace. +If you are configuring multiple realms, you should explicitly set the `order` attribute. See <> for all of the options you can set for a `pki` realm. 
+ @@ -39,8 +39,9 @@ xpack: security: authc: realms: - pki1: - type: pki + pki: + pki1: + order: 1 ------------------------------------------------------------ With this configuration, any certificate trusted by the SSL/TLS layer is accepted @@ -61,9 +62,9 @@ xpack: security: authc: realms: - pki1: - type: pki - username_pattern: "EMAILADDRESS=(.*?)(?:,|$)" + pki: + pki1: + username_pattern: "EMAILADDRESS=(.*?)(?:,|$)" ------------------------------------------------------------ -- @@ -112,11 +113,11 @@ xpack: security: authc: realms: - pki1: - type: pki - truststore: - path: "/path/to/pki_truststore.jks" - password: "x-pack-test-password" + pki: + pki1: + truststore: + path: "/path/to/pki_truststore.jks" + password: "x-pack-test-password" ------------------------------------------------------------ The `certificate_authorities` option can be used as an alternative to the diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc index d16e13025509d..81859c4fd0490 100644 --- a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -84,18 +84,19 @@ configuration file) are the most common settings: [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml1: <1> - type: saml <2> - order: 2 <3> - idp.metadata.path: saml/idp-metadata.xml <4> - idp.entity_id: "https://sso.example.com/" <5> - sp.entity_id: "https://kibana.example.com/" <6> - sp.acs: "https://kibana.example.com/api/security/v1/saml" <7> - sp.logout: "https://kibana.example.com/logout" <8> +xpack.security.authc.realms: + saml: <1> + saml1: <2> + order: 2 <3> + idp.metadata.path: saml/idp-metadata.xml <4> + idp.entity_id: "https://sso.example.com/" <5> + sp.entity_id: "https://kibana.example.com/" <6> + sp.acs: "https://kibana.example.com/api/security/v1/saml" <7> + sp.logout: "https://kibana.example.com/logout" <8> ------------------------------------------------------------ -<1> This setting defines a new authentication realm named "saml1". For an +<1> The realm must be within the `xpack.security.authc.realms.saml` namespace. +<2> This setting defines a new authentication realm named "saml1". For an introduction to realms, see {stack-ov}/realms.html[Realms]. -<2> The `type` must be `saml`. <3> You should define a unique order on each realm in your authentication chain. It is recommended that the SAML realm be at the bottom of your authentication chain (that is, it has the _highest_ order). @@ -169,7 +170,7 @@ file: [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml1: +xpack.security.authc.realms.saml.saml1: ... attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc index 0ae33d434a1f5..43a6195385559 100644 --- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -85,13 +85,15 @@ bin/elasticsearch-plugin install file:////my-realm-1.0.zip ---------------------------------------- . Add a realm configuration of the appropriate realm type to `elasticsearch.yml` -under the `xpack.security.authc.realms` namespace. 
The options you can set depend -on the settings exposed by the custom realm. At a minimum, you must set the realm -`type` to the type defined by the extension. If you are configuring multiple -realms, you should also explicitly set the `order` attribute to control the -order in which the realms are consulted during authentication. You should make -sure each configured realm has a distinct `order` setting. In the event that -two or more realms have the same `order`, they will be processed in realm `name` order. +under the `xpack.security.authc.realms` namespace. +You must define your realm within the namespace that matches the type defined +by the extension. +The options you can set depend on the settings exposed by the custom realm. +If you are configuring multiple realms, you should also explicitly set the +`order` attribute to control the order in which the realms are consulted during +authentication. You should make sure each configured realm has a distinct +`order` setting. In the event that two or more realms have the same `order`, +they will be processed in realm `name` order. + IMPORTANT: When you configure realms in `elasticsearch.yml`, only the realms you specify are used for authentication. If you also want to use the diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index b0077dc1ba9d4..97bbd483a0311 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -120,8 +120,7 @@ configuration file. Each configuration value is explained below. [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml1: - type: saml +xpack.security.authc.realms.saml.saml1: order: 2 idp.metadata.path: saml/idp-metadata.xml idp.entity_id: "https://sso.example.com/" @@ -140,11 +139,10 @@ clients. The configuration values used in the example above are: -xpack.security.authc.realms.saml:: - This defines a new authentication realm named "saml1". +xpack.security.authc.realms.saml.saml1:: + This defines a new `saml` authentication realm named "saml1". See <> for more explanation of realms. -type:: The `type` must be `saml` order:: You should define a unique order on each realm in your authentication chain. It is recommended that the SAML realm be at the bottom of your authentication chain (that is, it has the _highest_ order). @@ -281,8 +279,7 @@ and the attribute with the friendlyName "roles" for the user's groups. [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml1: - type: saml +xpack.security.authc.realms.saml.saml1: order: 2 idp.metadata.path: saml/idp-metadata.xml idp.entity_id: "https://sso.example.com/" @@ -327,8 +324,7 @@ realm, as demonstrated in the realm configuration below: [source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml1: - type: saml +xpack.security.authc.realms.saml.saml1: order: 2 idp.metadata.path: saml/idp-metadata.xml idp.entity_id: "https://sso.example.com/" @@ -830,8 +826,7 @@ use the same internal IdP, and another which uses a different IdP.
[source, yaml] ------------------------------------------------------------ -xpack.security.authc.realms.saml_finance: - type: saml +xpack.security.authc.realms.saml.saml_finance: order: 2 idp.metadata.path: saml/idp-metadata.xml idp.entity_id: "https://sso.example.com/" @@ -840,8 +835,7 @@ xpack.security.authc.realms.saml_finance: sp.logout: "https://kibana.finance.example.com/logout" attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." -xpack.security.authc.realms.saml_sales: - type: saml +xpack.security.authc.realms.saml.saml_sales: order: 3 idp.metadata.path: saml/idp-metadata.xml idp.entity_id: "https://sso.example.com/" @@ -850,8 +844,7 @@ xpack.security.authc.realms.saml_sales: sp.logout: "https://kibana.sales.example.com/logout" attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." -xpack.security.authc.realms.saml_eng: - type: saml +xpack.security.authc.realms.saml.saml_eng: order: 4 idp.metadata.path: saml/idp-external.xml idp.entity_id: "https://engineering.sso.example.net/" diff --git a/x-pack/docs/en/security/authorization/set-security-user.asciidoc b/x-pack/docs/en/security/authorization/set-security-user.asciidoc index 92b9ae275aec8..c0f28ec0465e8 100644 --- a/x-pack/docs/en/security/authorization/set-security-user.asciidoc +++ b/x-pack/docs/en/security/authorization/set-security-user.asciidoc @@ -9,53 +9,18 @@ To guarantee that a user reads only their own documents, it makes sense to set u document level security. In this scenario, each document must have the username or role name associated with it, so that this information can be used by the role query for document level security. This is a situation where the -`set_security_user` ingest processor can help. +{ref}/ingest-node-set-security-user-processor.html[Set Security User Processor] ingest processor can help. NOTE: Document level security doesn't apply to write APIs. You must use unique ids for each user that uses the same index, otherwise they might overwrite other users' documents. The ingest processor just adds properties for the current authenticated user to the documents that are being indexed. -The `set_security_user` processor attaches user-related details (such as +The {ref}/ingest-node-set-security-user-processor.html[set security user processor] attaches user-related details (such as `username`, `roles`, `email`, `full_name` and `metadata` ) from the current authenticated user to the current document by pre-processing the ingest. When you index data with an ingest pipeline, user details are automatically attached -to the document. For example: +to the document. -[source,js] --------------------------------------------------- -PUT shared-logs/log/1?pipeline=my_pipeline_id -{ - ... -} --------------------------------------------------- -// NOTCONSOLE +For more information see {ref}/ingest.html[Ingest node] and {ref}/ingest-node-set-security-user-processor.html[Set security user processor]. -For more information about setting up a pipeline and other processors, see -{ref}/ingest.html[ingest node]. - -[[set-security-user-options]] -.Set Security User Options -[options="header"] -|====== -| Name | Required | Default | Description -| `field` | yes | - | The field to store the user information into. -| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. 
-|====== - -The following example adds all user details for the current authenticated user -to the `user` field for all documents that are processed by this pipeline: - -[source,js] --------------------------------------------------- -{ - "processors" : [ - { - "set_security_user": { - "field": "user" - } - } - ] -} --------------------------------------------------- -// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index f408e6a78b603..d3e95d997c3fb 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -15,14 +15,3 @@ subprojects { } } } - -/* Remove assemble on all qa projects because we don't need to publish - * artifacts for them. */ -gradle.projectsEvaluated { - subprojects { - Task assemble = project.tasks.findByName('assemble') - if (assemble) { - assemble.enabled = false - } - } -} diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index ed3a03f0b17f5..8b1ed236c566b 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -102,8 +102,9 @@ public void testAutoFollowPatterns() throws Exception { } assertBusy(() -> { - Request statsRequest = new Request("GET", "/_ccr/auto_follow/stats"); - Map response = toMap(client().performRequest(statsRequest)); + Request statsRequest = new Request("GET", "/_ccr/stats"); + Map response = toMap(client().performRequest(statsRequest)); + response = (Map) response.get("auto_follow_stats"); assertThat(response.get("number_of_successful_follow_indices"), equalTo(1)); ensureYellow("logs-20190101"); diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow_stats.yml deleted file mode 100644 index 4d26eb1ff2460..0000000000000 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow_stats.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -"Test autofollow stats": - - do: - ccr.auto_follow_stats: {} - - - match: { number_of_successful_follow_indices: 0 } - - match: { number_of_failed_follow_indices: 0 } - - match: { number_of_failed_remote_cluster_state_requests: 0 } - - length: { recent_auto_follow_errors: 0 } - diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index 97c538b60bc4e..aa63c804aba21 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -45,7 +45,7 @@ # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents - do: - ccr.stats: + ccr.follow_stats: index: bar - match: { indices.0.index: "bar" } - match: { indices.0.shards.0.leader_index: "foo" } diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/stats.yml new file mode 100644 index 0000000000000..e9f5c0306df54 --- /dev/null +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/stats.yml @@ -0,0 
+1,11 @@ +--- +"Test stats": + - do: + ccr.stats: {} + + - match: { auto_follow_stats.number_of_successful_follow_indices: 0 } + - match: { auto_follow_stats.number_of_failed_follow_indices: 0 } + - match: { auto_follow_stats.number_of_failed_remote_cluster_state_requests: 0 } + - length: { auto_follow_stats.recent_auto_follow_errors: 0 } + - length: { follow_stats.indices: 0 } + diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 68a6310dcaa97..085d58bce83da 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -43,10 +43,10 @@ import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportUnfollowAction; import org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.TransportAutoFollowStatsAction; -import org.elasticsearch.xpack.ccr.rest.RestAutoFollowStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; +import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; @@ -150,7 +150,7 @@ public Collection createComponents( @Override public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, Client client) { - return Collections.singletonList(new ShardFollowTasksExecutor(settings, client, threadPool, clusterService)); + return Collections.singletonList(new ShardFollowTasksExecutor(client, threadPool, clusterService)); } public List> getActions() { @@ -164,7 +164,7 @@ public List> getPersistentTasksExecutor(ClusterServic new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class), // stats action new ActionHandler<>(FollowStatsAction.INSTANCE, TransportFollowStatsAction.class), - new ActionHandler<>(AutoFollowStatsAction.INSTANCE, TransportAutoFollowStatsAction.class), + new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), // follow actions new ActionHandler<>(PutFollowAction.INSTANCE, TransportPutFollowAction.class), new ActionHandler<>(ResumeFollowAction.INSTANCE, TransportResumeFollowAction.class), @@ -187,7 +187,7 @@ public List getRestHandlers(Settings settings, RestController restC return Arrays.asList( // stats API new RestFollowStatsAction(settings, restController), - new RestAutoFollowStatsAction(settings, restController), + new RestCcrStatsAction(settings, restController), // follow APIs new RestPutFollowAction(settings, restController), new RestResumeFollowAction(settings, restController), diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index cf54a236a0451..8dacb7c745444 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -6,6 +6,8 
@@ package org.elasticsearch.xpack.ccr.action; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -20,10 +22,10 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotStartedException; @@ -31,6 +33,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -298,15 +301,14 @@ public static class TransportAction extends TransportSingleShardAction listener) { + ActionListener wrappedListener = ActionListener.wrap(listener::onResponse, e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IllegalStateException && cause.getMessage().contains("Not all operations between from_seqno [")) { + String message = "Operations are no longer available for replicating. Maybe increase the retention setting [" + + IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey() + "]?"; + listener.onFailure(new ElasticsearchException(message, e)); + } else { + listener.onFailure(e); + } + }); + super.doExecute(task, request, wrappedListener); + } + private void globalCheckpointAdvanced( final ShardId shardId, final long globalCheckpoint, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 8c302344ad86d..13dc736722e99 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.NodeDisconnectedException; @@ -416,6 +417,7 @@ static boolean shouldRetry(Exception e) { actual instanceof IndexClosedException || // If follow index is closed actual instanceof NodeDisconnectedException || actual instanceof NodeNotConnectedException || + actual instanceof NodeClosedException || (actual.getMessage() != null && actual.getMessage().contains("TransportService is closed")); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 88d07566c74bd..446e3aaee41d3 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.XContentType; @@ -58,8 +57,8 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor { +public class TransportCcrStatsAction extends TransportMasterNodeAction { + private final Client client; private final CcrLicenseChecker ccrLicenseChecker; private final AutoFollowCoordinator autoFollowCoordinator; @Inject - public TransportAutoFollowStatsAction( - Settings settings, + public TransportCcrStatsAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, AutoFollowCoordinator autoFollowCoordinator, - CcrLicenseChecker ccrLicenseChecker + CcrLicenseChecker ccrLicenseChecker, + Client client ) { super( - settings, - AutoFollowStatsAction.NAME, + CcrStatsAction.NAME, transportService, clusterService, threadPool, actionFilters, - AutoFollowStatsAction.Request::new, + CcrStatsAction.Request::new, indexNameExpressionResolver ); + this.client = client; this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); this.autoFollowCoordinator = Objects.requireNonNull(autoFollowCoordinator); } @@ -64,12 +68,17 @@ protected String executor() { } @Override - protected AutoFollowStatsAction.Response newResponse() { - return new AutoFollowStatsAction.Response(); + protected CcrStatsAction.Response newResponse() { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override - protected void doExecute(Task task, AutoFollowStatsAction.Request request, ActionListener listener) { + protected CcrStatsAction.Response read(StreamInput in) throws IOException { + return new CcrStatsAction.Response(in); + } + + @Override + protected void doExecute(Task task, CcrStatsAction.Request request, ActionListener listener) { if (ccrLicenseChecker.isCcrAllowed() == false) { listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; @@ -79,16 +88,20 @@ protected void doExecute(Task task, AutoFollowStatsAction.Request request, Actio @Override protected void masterOperation( - AutoFollowStatsAction.Request request, + CcrStatsAction.Request request, ClusterState state, - ActionListener listener + ActionListener listener ) throws Exception { - AutoFollowStats stats = autoFollowCoordinator.getStats(); - listener.onResponse(new AutoFollowStatsAction.Response(stats)); + CheckedConsumer handler = statsResponse -> { + AutoFollowStats stats = autoFollowCoordinator.getStats(); + listener.onResponse(new CcrStatsAction.Response(stats, statsResponse)); + }; + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + client.execute(FollowStatsAction.INSTANCE, statsRequest, ActionListener.wrap(handler, listener::onFailure)); } @Override - protected ClusterBlockException checkBlock(AutoFollowStatsAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(CcrStatsAction.Request 
request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index 5d5885294f357..3062aa46d05d6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; @@ -33,11 +32,11 @@ public class TransportDeleteAutoFollowPatternAction extends TransportMasterNodeAction { @Inject - public TransportDeleteAutoFollowPatternAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportDeleteAutoFollowPatternAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, DeleteAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver, DeleteAutoFollowPatternAction.Request::new); + super(DeleteAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters, + DeleteAutoFollowPatternAction.Request::new, indexNameExpressionResolver); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 5ef790d3406e8..96b0cb018da16 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; @@ -40,13 +39,11 @@ public class TransportFollowStatsAction extends TransportTasksAction< @Inject public TransportFollowStatsAction( - final Settings settings, final ClusterService clusterService, final TransportService transportService, final ActionFilters actionFilters, final CcrLicenseChecker ccrLicenseChecker) { super( - settings, FollowStatsAction.NAME, clusterService, transportService, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java index 9f738026ef640..fc7c0c4920601 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java @@ -17,13 +17,14 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; +import java.io.IOException; import java.util.Collections; import java.util.Map; @@ -31,13 +32,12 @@ public class TransportGetAutoFollowPatternAction extends TransportMasterNodeReadAction { @Inject - public TransportGetAutoFollowPatternAction(Settings settings, - TransportService transportService, + public TransportGetAutoFollowPatternAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetAutoFollowPatternAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAutoFollowPatternAction.Request::new, indexNameExpressionResolver); } @@ -48,7 +48,12 @@ protected String executor() { @Override protected GetAutoFollowPatternAction.Response newResponse() { - return new GetAutoFollowPatternAction.Response(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + protected GetAutoFollowPatternAction.Response read(StreamInput in) throws IOException { + return new GetAutoFollowPatternAction.Response(in); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java index 02f483cc843a8..2c420f83d6e15 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.threadpool.ThreadPool; @@ -32,14 +31,13 @@ public class TransportPauseFollowAction extends TransportMasterNodeAction handler = ActionListener.wrap( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index cdbe3b25f1d6a..d3f184b0f4759 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -52,7 +52,7 @@ public class TransportResumeFollowAction extends HandledTransportAction { - static final 
ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB); static final ByteSizeValue DEFAULT_MAX_WRITE_REQUEST_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); private static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); private static final int DEFAULT_MAX_OUTSTANDING_WRITE_REQUESTS = 9; @@ -72,7 +72,6 @@ public class TransportResumeFollowAction extends HandledTransportAction new ResumeFollowAction.Request(in)); this.client = client; this.threadPool = threadPool; this.clusterService = clusterService; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index 1ce01f7ab0953..a42ff658dc9e1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -30,10 +31,10 @@ public class TransportUnfollowAction extends TransportMasterNodeAction { @Inject - public TransportUnfollowAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportUnfollowAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, UnfollowAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(UnfollowAction.NAME, transportService, clusterService, threadPool, actionFilters, UnfollowAction.Request::new, indexNameExpressionResolver); } @@ -78,6 +79,18 @@ protected ClusterBlockException checkBlock(UnfollowAction.Request request, Clust static ClusterState unfollow(String followerIndex, ClusterState current) { IndexMetaData followerIMD = current.metaData().index(followerIndex); + if (followerIMD == null) { + throw new IndexNotFoundException(followerIndex); + } + + if (followerIMD.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY) == null) { + throw new IllegalArgumentException("index [" + followerIndex + "] is not a follower index"); + } + + if (followerIMD.getState() != IndexMetaData.State.CLOSE) { + throw new IllegalArgumentException("cannot convert the follower index [" + followerIndex + + "] to a non-follower, because it has not been closed"); + } PersistentTasksCustomMetaData persistentTasks = current.metaData().custom(PersistentTasksCustomMetaData.TYPE); if (persistentTasks != null) { @@ -92,11 +105,6 @@ static ClusterState unfollow(String followerIndex, ClusterState current) { } } - if (followerIMD.getState() != IndexMetaData.State.CLOSE) { - throw new IllegalArgumentException("cannot convert the follower index [" + followerIndex + - "] to a non-follower, because it has not been closed"); - } - IndexMetaData.Builder newIMD = IndexMetaData.builder(followerIMD); // Remove index.xpack.ccr.following_index setting Settings.Builder builder = Settings.builder(); 
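The `TransportUnfollowAction` change above makes an unfollow request fail fast unless the target index exists, carries the CCR custom metadata (that is, it really is a follower index), and has been closed. A minimal test-style sketch of the call order this validation implies; it assumes the low-level REST client helper (`client()`) used by `FollowIndexIT` elsewhere in this change, and the index name is illustrative only:

[source,java]
--------------------------------------------------
// Sketch only: inside an ESRestTestCase subclass such as FollowIndexIT.
public void testUnfollowRequiresClosedIndex() throws Exception {
    // Stop the shard-follow task first; an actively following index still
    // has a persistent task that the unfollow validation would reject.
    client().performRequest(new Request("POST", "/logs-20190101/_ccr/pause_follow"));

    // TransportUnfollowAction.unfollow() throws "cannot convert the follower
    // index ... because it has not been closed" while the index is open.
    client().performRequest(new Request("POST", "/logs-20190101/_close"));

    // With the index closed, unfollow strips the CCR custom metadata and
    // the index becomes a regular index again.
    client().performRequest(new Request("POST", "/logs-20190101/_ccr/unfollow"));
}
--------------------------------------------------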
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestAutoFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java similarity index 66% rename from x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestAutoFollowStatsAction.java rename to x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 924bc2f831d23..943f85010e2fb 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestAutoFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -12,15 +12,15 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import java.io.IOException; -public class RestAutoFollowStatsAction extends BaseRestHandler { +public class RestCcrStatsAction extends BaseRestHandler { - public RestAutoFollowStatsAction(final Settings settings, final RestController controller) { + public RestCcrStatsAction(final Settings settings, final RestController controller) { super(settings); - controller.registerHandler(RestRequest.Method.GET, "/_ccr/auto_follow/stats", this); + controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats", this); } @Override @@ -30,8 +30,8 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - final AutoFollowStatsAction.Request request = new AutoFollowStatsAction.Request(); - return channel -> client.execute(AutoFollowStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + final CcrStatsAction.Request request = new CcrStatsAction.Request(); + return channel -> client.execute(CcrStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java index 72c44a48f985e..9bfd260ce63b4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java @@ -31,8 +31,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - Request request = new Request(); - request.setName(restRequest.param("name")); + Request request = new Request(restRequest.param("name")); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java index 25572894af3e9..15b7329297644 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java @@ -21,7 +21,6 @@ public class RestFollowStatsAction extends BaseRestHandler { public RestFollowStatsAction(final Settings settings, final RestController controller) { 
super(settings); - controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats", this); controller.registerHandler(RestRequest.Method.GET, "/{index}/_ccr/stats", this); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index c3cbc7436d987..9e5137d3cd1af 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; @@ -14,6 +15,9 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; @@ -22,9 +26,11 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -35,11 +41,19 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.license.LicenseService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; @@ -50,6 +64,7 @@ import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.LocalStateCcr; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -67,11 +82,15 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; 
+import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; @@ -80,6 +99,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; public abstract class CcrIntegTestCase extends ESTestCase { @@ -89,6 +109,8 @@ public abstract class CcrIntegTestCase extends ESTestCase { @Before public final void startClusters() throws Exception { if (clusterGroup != null && reuseClusters()) { + clusterGroup.leaderCluster.ensureAtMostNumDataNodes(numberOfNodesPerCluster()); + clusterGroup.followerCluster.ensureAtMostNumDataNodes(numberOfNodesPerCluster()); return; } @@ -222,10 +244,12 @@ protected final ClusterHealthStatus ensureLeaderYellow(String... indices) { } protected final ClusterHealthStatus ensureLeaderGreen(String... indices) { + logger.info("ensure green leader indices {}", Arrays.toString(indices)); return ensureColor(clusterGroup.leaderCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(30), false, indices); } protected final ClusterHealthStatus ensureFollowerGreen(String... indices) { + logger.info("ensure green follower indices {}", Arrays.toString(indices)); return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(30), false, indices); } @@ -373,6 +397,138 @@ public static ResumeFollowAction.Request resumeFollow(String followerIndex) { return request; } + /** + * This asserts the index is fully replicated from the leader index to the follower index. It first verifies that the seq_no_stats + * on the follower equal the leader's; then verifies the existing pairs of (docId, seqNo) on the follower also equal the leader. 
+ */ + protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String followerIndex) throws Exception { + logger.info("--> asserting seq_no_stats between {} and {}", leaderIndex, followerIndex); + assertBusy(() -> { + Map leaderStats = new HashMap<>(); + for (ShardStats shardStat : leaderClient().admin().indices().prepareStats(leaderIndex).clear().get().getShards()) { + if (shardStat.getSeqNoStats() == null) { + throw new AssertionError("leader seq_no_stats is not available [" + Strings.toString(shardStat) + "]"); + } + leaderStats.put(shardStat.getShardRouting().shardId().id(), shardStat.getSeqNoStats()); + } + Map followerStats = new HashMap<>(); + for (ShardStats shardStat : followerClient().admin().indices().prepareStats(followerIndex).clear().get().getShards()) { + if (shardStat.getSeqNoStats() == null) { + throw new AssertionError("follower seq_no_stats is not available [" + Strings.toString(shardStat) + "]"); + } + followerStats.put(shardStat.getShardRouting().shardId().id(), shardStat.getSeqNoStats()); + } + assertThat(leaderStats, equalTo(followerStats)); + }, 60, TimeUnit.SECONDS); + logger.info("--> asserting <> between {} and {}", leaderIndex, followerIndex); + assertBusy(() -> { + assertThat(getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex), + equalTo(getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex))); + }, 60, TimeUnit.SECONDS); + } + + private Map> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException { + final ClusterState state = cluster.client().admin().cluster().prepareState().get().getState(); + List shardRoutings = state.routingTable().allShards(index); + Randomness.shuffle(shardRoutings); + final Map> docs = new HashMap<>(); + for (ShardRouting shardRouting : shardRoutings) { + if (shardRouting == null || shardRouting.assignedToNode() == false || docs.containsKey(shardRouting.shardId().id())) { + continue; + } + IndexShard indexShard = cluster.getInstance(IndicesService.class, state.nodes().get(shardRouting.currentNodeId()).getName()) + .indexServiceSafe(shardRouting.index()).getShard(shardRouting.id()); + docs.put(shardRouting.shardId().id(), IndexShardTestCase.getDocIdAndSeqNos(indexShard).stream() + .map(d -> new DocIdSeqNoAndTerm(d.getId(), d.getSeqNo(), 1L)) // normalize primary term as the follower use its own term + .collect(Collectors.toList())); + } + return docs; + } + + protected void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws InterruptedException { + logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); + awaitBusy(() -> { + refresh(client, index); + SearchRequest request = new SearchRequest(index); + request.source(new SearchSourceBuilder().size(0)); + SearchResponse response = client.search(request).actionGet(); + return response.getHits().getTotalHits() >= numDocsReplicated; + }, 60, TimeUnit.SECONDS); + } + + protected void awaitGlobalCheckpointAtLeast(Client client, ShardId shardId, long minimumGlobalCheckpoint) throws Exception { + logger.info("waiting for the global checkpoint on [{}] at least [{}]", shardId, minimumGlobalCheckpoint); + assertBusy(() -> { + ShardStats stats = client.admin().indices().prepareStats(shardId.getIndexName()).clear().get() + .asMap().entrySet().stream().filter(e -> e.getKey().shardId().equals(shardId)) + .map(Map.Entry::getValue).findFirst().orElse(null); + if (stats == null || stats.getSeqNoStats() == null) { + throw new AssertionError("seq_no_stats for shard [" + 
shardId + "] is not found"); // causes assertBusy to retry + } + assertThat(Strings.toString(stats.getSeqNoStats()), + stats.getSeqNoStats().getGlobalCheckpoint(), greaterThanOrEqualTo(minimumGlobalCheckpoint)); + }, 60, TimeUnit.SECONDS); + } + + protected void assertMaxSeqNoOfUpdatesIsTransferred(Index leaderIndex, Index followerIndex, int numberOfShards) throws Exception { + assertBusy(() -> { + long[] msuOnLeader = new long[numberOfShards]; + for (int i = 0; i < msuOnLeader.length; i++) { + msuOnLeader[i] = SequenceNumbers.UNASSIGNED_SEQ_NO; + } + Set leaderNodes = getLeaderCluster().nodesInclude(leaderIndex.getName()); + for (String leaderNode : leaderNodes) { + IndicesService indicesService = getLeaderCluster().getInstance(IndicesService.class, leaderNode); + for (int i = 0; i < numberOfShards; i++) { + IndexShard shard = indicesService.getShardOrNull(new ShardId(leaderIndex, i)); + if (shard != null) { + try { + msuOnLeader[i] = SequenceNumbers.max(msuOnLeader[i], shard.getMaxSeqNoOfUpdatesOrDeletes()); + } catch (AlreadyClosedException ignored) { + return; + } + } + } + } + + Set followerNodes = getFollowerCluster().nodesInclude(followerIndex.getName()); + for (String followerNode : followerNodes) { + IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, followerNode); + for (int i = 0; i < numberOfShards; i++) { + IndexShard shard = indicesService.getShardOrNull(new ShardId(leaderIndex, i)); + if (shard != null) { + try { + assertThat(shard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(msuOnLeader[i])); + } catch (AlreadyClosedException ignored) { + + } + } + } + } + }); + } + + protected void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numberOfShards, long expectedTotal) throws Exception { + assertBusy(() -> { + long[] numOfOptimizedOps = new long[numberOfShards]; + for (int shardId = 0; shardId < numberOfShards; shardId++) { + for (String node : getFollowerCluster().nodesInclude(followerIndex.getName())) { + IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, node); + IndexShard shard = indicesService.getShardOrNull(new ShardId(followerIndex, shardId)); + if (shard != null && shard.routingEntry().primary()) { + try { + FollowingEngine engine = ((FollowingEngine) IndexShardTestCase.getEngine(shard)); + numOfOptimizedOps[shardId] = engine.getNumberOfOptimizedIndexing(); + } catch (AlreadyClosedException e) { + throw new AssertionError(e); // causes assertBusy to retry + } + } + } + } + assertThat(Arrays.stream(numOfOptimizedOps).sum(), equalTo(expectedTotal)); + }); + } + static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 305daa34d3010..9f22dc320cbdd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; 
import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; @@ -137,9 +137,6 @@ public void testAutoFollowParameterAreDelegated() throws Exception { if (randomBoolean()) { request.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); } - if (randomBoolean()) { - request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); - } if (randomBoolean()) { request.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } @@ -155,6 +152,9 @@ public void testAutoFollowParameterAreDelegated() throws Exception { if (randomBoolean()) { request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } + if (randomBoolean()) { + request.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); createLeaderIndex("logs-201901", leaderIndexSettings); @@ -254,14 +254,13 @@ private void putAutoFollowPatterns(String name, String[] patterns) { } private void deleteAutoFollowPatternSetting() { - DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(); - request.setName("my-pattern"); + DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request("my-pattern"); assertTrue(followerClient().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged()); } private AutoFollowStats getAutoFollowStats() { - AutoFollowStatsAction.Request request = new AutoFollowStatsAction.Request(); - return followerClient().execute(AutoFollowStatsAction.INSTANCE, request).actionGet().getStats(); + CcrStatsAction.Request request = new CcrStatsAction.Request(); + return followerClient().execute(CcrStatsAction.INSTANCE, request).actionGet().getAutoFollowStats(); } private void createLeaderIndex(String index, Settings settings) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java new file mode 100644 index 0000000000000..6685776e9805d --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; + +import java.util.Locale; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class FollowerFailOverIT extends CcrIntegTestCase { + + @Override + protected boolean reuseClusters() { + return false; + } + + public void testFailOverOnFollower() throws Exception { + int numberOfReplicas = between(1, 2); + getFollowerCluster().startMasterOnlyNode(); + getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(1, 2)); + String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); + AtomicBoolean stopped = new AtomicBoolean(); + Thread[] threads = new Thread[between(1, 8)]; + AtomicInteger docID = new AtomicInteger(); + Semaphore availableDocs = new Semaphore(0); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + while (stopped.get() == false) { + try { + if (availableDocs.tryAcquire(10, TimeUnit.MILLISECONDS) == false) { + continue; + } + } catch (InterruptedException e) { + throw new AssertionError(e); + } + if (frequently()) { + String id = Integer.toString(frequently() ? 
docID.incrementAndGet() : between(0, 10)); // sometimes update + IndexResponse indexResponse = leaderClient().prepareIndex("leader-index", "doc", id) + .setSource("{\"f\":" + id + "}", XContentType.JSON).get(); + logger.info("--> index id={} seq_no={}", indexResponse.getId(), indexResponse.getSeqNo()); + } else { + String id = Integer.toString(between(0, docID.get())); + DeleteResponse deleteResponse = leaderClient().prepareDelete("leader-index", "doc", id).get(); + logger.info("--> delete id={} seq_no={}", deleteResponse.getId(), deleteResponse.getSeqNo()); + } + } + }); + threads[i].start(); + } + availableDocs.release(between(100, 200)); + PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); + follow.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); + follow.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); + follow.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); + follow.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); + logger.info("--> follow params {}", Strings.toString(follow.getFollowRequest())); + followerClient().execute(PutFollowAction.INSTANCE, follow).get(); + ensureFollowerGreen("follower-index"); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), between(30, 80)); + final ClusterState clusterState = getFollowerCluster().clusterService().state(); + for (ShardRouting shardRouting : clusterState.routingTable().allShards("follower-index")) { + if (shardRouting.primary()) { + DiscoveryNode assignedNode = clusterState.nodes().get(shardRouting.currentNodeId()); + getFollowerCluster().restartNode(assignedNode.getName(), new InternalTestCluster.RestartCallback()); + break; + } + } + availableDocs.release(between(50, 200)); + ensureFollowerGreen("follower-index"); + availableDocs.release(between(50, 200)); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), between(100, 150)); + stopped.set(true); + for (Thread thread : threads) { + thread.join(); + } + assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); + pauseFollow("follower-index"); + } + + public void testFollowIndexAndCloseNode() throws Exception { + getFollowerCluster().ensureAtLeastNumDataNodes(3); + String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen("index1"); + + AtomicBoolean run = new AtomicBoolean(true); + Semaphore availableDocs = new Semaphore(0); + Thread thread = new Thread(() -> { + int counter = 0; + while (run.get()) { + try { + if (availableDocs.tryAcquire(10, TimeUnit.MILLISECONDS) == false) { + continue; + } + } catch (InterruptedException e) { + throw new AssertionError(e); + } + final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); + IndexResponse indexResp = leaderClient().prepareIndex("index1", "doc") + .setSource(source, XContentType.JSON) + .setTimeout(TimeValue.timeValueSeconds(1)) + .get(); + logger.info("--> index id={} seq_no={}", indexResp.getId(), indexResp.getSeqNo()); + } + 
}); + thread.start(); + + PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followRequest.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); + followRequest.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); + followRequest.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); + followRequest.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + logger.info("--> follow params {}", Strings.toString(followRequest.getFollowRequest())); + + int maxOpsPerRead = followRequest.getFollowRequest().getMaxReadRequestOperationCount(); + int maxNumDocsReplicated = Math.min(between(50, 500), between(maxOpsPerRead, maxOpsPerRead * 10)); + availableDocs.release(maxNumDocsReplicated / 2 + 1); + atLeastDocsIndexed(followerClient(), "index2", maxNumDocsReplicated / 3); + getFollowerCluster().stopRandomNonMasterNode(); + availableDocs.release(maxNumDocsReplicated / 2 + 1); + atLeastDocsIndexed(followerClient(), "index2", maxNumDocsReplicated * 2 / 3); + run.set(false); + thread.join(); + + assertIndexFullyReplicatedToFollower("index1", "index2"); + pauseFollow("index2"); + assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 3); + } + + public void testAddNewReplicasOnFollower() throws Exception { + int numberOfReplicas = between(0, 1); + String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); + PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); + followerClient().execute(PutFollowAction.INSTANCE, follow).get(); + getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(2, 3)); + ensureFollowerGreen("follower-index"); + AtomicBoolean stopped = new AtomicBoolean(); + AtomicInteger docID = new AtomicInteger(); + boolean appendOnly = randomBoolean(); + Thread indexingOnLeader = new Thread(() -> { + while (stopped.get() == false) { + try { + if (appendOnly) { + String id = Integer.toString(docID.incrementAndGet()); + leaderClient().prepareIndex("leader-index", "doc", id).setSource("{\"f\":" + id + "}", XContentType.JSON).get(); + } else if (frequently()) { + String id = Integer.toString(frequently() ? 
docID.incrementAndGet() : between(0, 100)); + leaderClient().prepareIndex("leader-index", "doc", id).setSource("{\"f\":" + id + "}", XContentType.JSON).get(); + } else { + String id = Integer.toString(between(0, docID.get())); + leaderClient().prepareDelete("leader-index", "doc", id).get(); + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + }); + indexingOnLeader.start(); + Thread flushingOnFollower = new Thread(() -> { + while (stopped.get() == false) { + try { + if (rarely()) { + followerClient().admin().indices().prepareFlush("follower-index").get(); + } + if (rarely()) { + followerClient().admin().indices().prepareRefresh("follower-index").get(); + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + }); + flushingOnFollower.start(); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), 50); + followerClient().admin().indices().prepareUpdateSettings("follower-index") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas + 1).build()).get(); + ensureFollowerGreen("follower-index"); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), 100); + stopped.set(true); + flushingOnFollower.join(); + indexingOnLeader.join(); + assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); + pauseFollow("follower-index"); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index d755e495b7d08..8f42787010c16 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ccr; -import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -20,15 +19,10 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -38,22 +32,13 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; -import 
org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; -import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction.StatsRequest; @@ -65,7 +50,6 @@ import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -73,10 +57,9 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.Set; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -228,14 +211,23 @@ public void afterBulk(long executionId, BulkRequest request, BulkResponse respon @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {} }; + int bulkSize = between(1, 20); BulkProcessor bulkProcessor = BulkProcessor.builder(leaderClient(), listener) - .setBulkActions(100) + .setBulkActions(bulkSize) .setConcurrentRequests(4) .build(); AtomicBoolean run = new AtomicBoolean(true); + Semaphore availableDocs = new Semaphore(0); Thread thread = new Thread(() -> { int counter = 0; while (run.get()) { + try { + if (availableDocs.tryAcquire(10, TimeUnit.MILLISECONDS) == false) { + continue; + } + } catch (InterruptedException e) { + throw new AssertionError(e); + } final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); IndexRequest indexRequest = new IndexRequest("index1", "doc") .source(source, XContentType.JSON) @@ -246,77 +238,31 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) thread.start(); // Waiting for some document being index before following the index: - int maxReadSize = randomIntBetween(128, 2048); - long numDocsIndexed = Math.min(3000 * 2, randomLongBetween(maxReadSize, maxReadSize * 10)); + int maxOpsPerRead = randomIntBetween(10, 100); + int numDocsIndexed = Math.min(between(20, 300), between(maxOpsPerRead, maxOpsPerRead * 10)); + availableDocs.release(numDocsIndexed / 2 + bulkSize); atLeastDocsIndexed(leaderClient(), "index1", numDocsIndexed / 3); PutFollowAction.Request followRequest = putFollow("index1", "index2"); - followRequest.getFollowRequest().setMaxReadRequestOperationCount(maxReadSize); - followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(2, 10)); - followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(2, 10)); + followRequest.getFollowRequest().setMaxReadRequestOperationCount(maxOpsPerRead); + followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); + followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); followRequest.getFollowRequest().setMaxWriteBufferCount(randomIntBetween(1024, 10240)); 
followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - + availableDocs.release(numDocsIndexed * 2 + bulkSize); atLeastDocsIndexed(leaderClient(), "index1", numDocsIndexed); run.set(false); thread.join(); assertThat(bulkProcessor.awaitClose(1L, TimeUnit.MINUTES), is(true)); - assertSameDocCount("index1", "index2"); + assertIndexFullyReplicatedToFollower("index1", "index2"); + pauseFollow("index2"); + leaderClient().admin().indices().prepareRefresh("index1").get(); assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfShards, leaderClient().prepareSearch("index1").get().getHits().totalHits); - pauseFollow("index2"); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfShards); } - public void testFollowIndexAndCloseNode() throws Exception { - getFollowerCluster().ensureAtLeastNumDataNodes(3); - String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); - assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); - ensureLeaderGreen("index1"); - - AtomicBoolean run = new AtomicBoolean(true); - Thread thread = new Thread(() -> { - int counter = 0; - while (run.get()) { - final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); - try { - leaderClient().prepareIndex("index1", "doc") - .setSource(source, XContentType.JSON) - .setTimeout(TimeValue.timeValueSeconds(1)) - .get(); - } catch (Exception e) { - logger.error("Error while indexing into leader index", e); - } - } - }); - thread.start(); - - PutFollowAction.Request followRequest = putFollow("index1", "index2"); - followRequest.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); - followRequest.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - followRequest.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); - followRequest.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); - followRequest.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - followRequest.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); - followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - - long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getFollowRequest().getMaxReadRequestOperationCount(), - followRequest.getFollowRequest().getMaxReadRequestOperationCount() * 10)); - long minNumDocsReplicated = maxNumDocsReplicated / 3L; - logger.info("waiting for at least [{}] documents to be indexed and then stop a random data node", minNumDocsReplicated); - atLeastDocsIndexed(followerClient(), "index2", minNumDocsReplicated); - getFollowerCluster().stopRandomNonMasterNode(); - logger.info("waiting for at least [{}] documents to be indexed", maxNumDocsReplicated); - atLeastDocsIndexed(followerClient(), "index2", maxNumDocsReplicated); - run.set(false); - thread.join(); - - assertSameDocCount("index1", "index2"); - pauseFollow("index2"); - assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), 3); - } - public void testFollowIndexWithNestedField() throws Exception { final String leaderIndexSettings = getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); @@ -587,61 +533,6 
@@ public void testUnfollowIndex() throws Exception { assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits(), equalTo(2L)); } - public void testFailOverOnFollower() throws Exception { - int numberOfReplicas = between(1, 2); - getFollowerCluster().startMasterOnlyNode(); - getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(1, 2)); - String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, - singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); - assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); - AtomicBoolean stopped = new AtomicBoolean(); - Thread[] threads = new Thread[between(1, 8)]; - AtomicInteger docID = new AtomicInteger(); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - while (stopped.get() == false) { - try { - if (frequently()) { - String id = Integer.toString(frequently() ? docID.incrementAndGet() : between(0, 10)); // sometimes update - leaderClient().prepareIndex("leader-index", "doc", id).setSource("{\"f\":" + id + "}", XContentType.JSON).get(); - } else { - String id = Integer.toString(between(0, docID.get())); - leaderClient().prepareDelete("leader-index", "doc", id).get(); - } - } catch (NodeClosedException ignored) { - } - } - }); - threads[i].start(); - } - PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); - follow.getFollowRequest().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); - follow.getFollowRequest().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - follow.getFollowRequest().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); - follow.getFollowRequest().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); - follow.getFollowRequest().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); - follow.getFollowRequest().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); - followerClient().execute(PutFollowAction.INSTANCE, follow).get(); - ensureFollowerGreen("follower-index"); - atLeastDocsIndexed(followerClient(), "follower-index", between(20, 60)); - final ClusterState clusterState = getFollowerCluster().clusterService().state(); - for (ShardRouting shardRouting : clusterState.routingTable().allShards("follower-index")) { - if (shardRouting.primary()) { - DiscoveryNode assignedNode = clusterState.nodes().get(shardRouting.currentNodeId()); - getFollowerCluster().restartNode(assignedNode.getName(), new InternalTestCluster.RestartCallback()); - break; - } - } - ensureFollowerGreen("follower-index"); - atLeastDocsIndexed(followerClient(), "follower-index", between(80, 150)); - stopped.set(true); - for (Thread thread : threads) { - thread.join(); - } - assertSameDocCount("leader-index", "follower-index"); - pauseFollow("follower-index"); - } - public void testUnknownClusterAlias() throws Exception { String leaderIndexSettings = getIndexSettings(1, 0, Collections.singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); @@ -661,64 +552,6 @@ public void testUnknownClusterAlias() throws Exception { assertThat(e.getMessage(), equalTo("unknown cluster alias [another_cluster]")); } - public void testAddNewReplicasOnFollower() throws Exception { - int numberOfReplicas = between(0, 1); - String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, - singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); - 
assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); - PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); - followerClient().execute(PutFollowAction.INSTANCE, follow).get(); - getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(2, 3)); - ensureFollowerGreen("follower-index"); - AtomicBoolean stopped = new AtomicBoolean(); - AtomicInteger docID = new AtomicInteger(); - boolean appendOnly = randomBoolean(); - Thread indexingOnLeader = new Thread(() -> { - while (stopped.get() == false) { - try { - if (appendOnly) { - String id = Integer.toString(docID.incrementAndGet()); - leaderClient().prepareIndex("leader-index", "doc", id).setSource("{\"f\":" + id + "}", XContentType.JSON).get(); - } else if (frequently()) { - String id = Integer.toString(frequently() ? docID.incrementAndGet() : between(0, 100)); - leaderClient().prepareIndex("leader-index", "doc", id).setSource("{\"f\":" + id + "}", XContentType.JSON).get(); - } else { - String id = Integer.toString(between(0, docID.get())); - leaderClient().prepareDelete("leader-index", "doc", id).get(); - } - } catch (Exception ex) { - throw new AssertionError(ex); - } - } - }); - indexingOnLeader.start(); - Thread flushingOnFollower = new Thread(() -> { - while (stopped.get() == false) { - try { - if (rarely()) { - followerClient().admin().indices().prepareFlush("follower-index").get(); - } - if (rarely()) { - followerClient().admin().indices().prepareRefresh("follower-index").get(); - } - } catch (Exception ex) { - throw new AssertionError(ex); - } - } - }); - flushingOnFollower.start(); - atLeastDocsIndexed(followerClient(), "follower-index", 50); - followerClient().admin().indices().prepareUpdateSettings("follower-index") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas + 1).build()).get(); - ensureFollowerGreen("follower-index"); - atLeastDocsIndexed(followerClient(), "follower-index", 100); - stopped.set(true); - flushingOnFollower.join(); - indexingOnLeader.join(); - assertSameDocCount("leader-index", "follower-index"); - pauseFollow("follower-index"); - } - private CheckedRunnable<Exception> assertTask(final int numberOfPrimaryShards, final Map<ShardId, Long> numDocsPerShard) { return () -> { final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); @@ -817,88 +650,4 @@ private String getIndexSettingsWithNestedMapping(final int numberOfShards, final return settings; } - private void atLeastDocsIndexed(Client client, String index, long numDocsReplicated) throws InterruptedException { - logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); - awaitBusy(() -> { - refresh(client, index); - SearchRequest request = new SearchRequest(index); - request.source(new SearchSourceBuilder().size(0)); - SearchResponse response = client.search(request).actionGet(); - return response.getHits().getTotalHits() >= numDocsReplicated; - }, 60, TimeUnit.SECONDS); - } - - private void assertSameDocCount(String leaderIndex, String followerIndex) throws Exception { - refresh(leaderClient(), leaderIndex); - SearchRequest request1 = new SearchRequest(leaderIndex); - request1.source(new SearchSourceBuilder().size(0)); - SearchResponse response1 = leaderClient().search(request1).actionGet(); - assertBusy(() -> { - refresh(followerClient(), followerIndex); - SearchRequest request2 = new SearchRequest(followerIndex); -
request2.source(new SearchSourceBuilder().size(0)); - SearchResponse response2 = followerClient().search(request2).actionGet(); - assertThat(response2.getHits().getTotalHits(), equalTo(response1.getHits().getTotalHits())); - }, 60, TimeUnit.SECONDS); - } - - private void assertMaxSeqNoOfUpdatesIsTransferred(Index leaderIndex, Index followerIndex, int numberOfShards) throws Exception { - assertBusy(() -> { - long[] msuOnLeader = new long[numberOfShards]; - for (int i = 0; i < msuOnLeader.length; i++) { - msuOnLeader[i] = SequenceNumbers.UNASSIGNED_SEQ_NO; - } - Set<String> leaderNodes = getLeaderCluster().nodesInclude(leaderIndex.getName()); - for (String leaderNode : leaderNodes) { - IndicesService indicesService = getLeaderCluster().getInstance(IndicesService.class, leaderNode); - for (int i = 0; i < numberOfShards; i++) { - IndexShard shard = indicesService.getShardOrNull(new ShardId(leaderIndex, i)); - if (shard != null) { - try { - msuOnLeader[i] = SequenceNumbers.max(msuOnLeader[i], shard.getMaxSeqNoOfUpdatesOrDeletes()); - } catch (AlreadyClosedException ignored) { - return; - } - } - } - } - - Set<String> followerNodes = getFollowerCluster().nodesInclude(followerIndex.getName()); - for (String followerNode : followerNodes) { - IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, followerNode); - for (int i = 0; i < numberOfShards; i++) { - IndexShard shard = indicesService.getShardOrNull(new ShardId(leaderIndex, i)); - if (shard != null) { - try { - assertThat(shard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(msuOnLeader[i])); - } catch (AlreadyClosedException ignored) { - - } - } - } - } - }); - } - - private void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numberOfShards, long expectedTotal) throws Exception { - assertBusy(() -> { - long[] numOfOptimizedOps = new long[numberOfShards]; - for (int shardId = 0; shardId < numberOfShards; shardId++) { - for (String node : getFollowerCluster().nodesInclude(followerIndex.getName())) { - IndicesService indicesService = getFollowerCluster().getInstance(IndicesService.class, node); - IndexShard shard = indicesService.getShardOrNull(new ShardId(followerIndex, shardId)); - if (shard != null && shard.routingEntry().primary()) { - try { - FollowingEngine engine = ((FollowingEngine) IndexShardTestCase.getEngine(shard)); - numOfOptimizedOps[shardId] = engine.getNumberOfOptimizedIndexing(); - } catch (AlreadyClosedException e) { - throw new AssertionError(e); // causes assertBusy to retry - } - } - } - } - assertThat(Arrays.stream(numOfOptimizedOps).sum(), equalTo(expectedTotal)); - }); - } - } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsResponseTests.java index 742eb761005e8..c651cca5b6a71 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowStatsResponseTests.java @@ -5,27 +5,31 @@ */ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import static org.elasticsearch.xpack.ccr.action.AutoFollowStatsTests.randomReadExceptions; +import static org.elasticsearch.xpack.ccr.action.StatsResponsesTests.createStatsResponse; -public class AutoFollowStatsResponseTests extends AbstractStreamableTestCase<AutoFollowStatsAction.Response> { +public class AutoFollowStatsResponseTests extends AbstractWireSerializingTestCase<CcrStatsAction.Response> { @Override - protected AutoFollowStatsAction.Response createBlankInstance() { - return new AutoFollowStatsAction.Response(); + protected Writeable.Reader<CcrStatsAction.Response> instanceReader() { + return CcrStatsAction.Response::new; } @Override - protected AutoFollowStatsAction.Response createTestInstance() { + protected CcrStatsAction.Response createTestInstance() { AutoFollowStats autoFollowStats = new AutoFollowStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomReadExceptions() ); - return new AutoFollowStatsAction.Response(autoFollowStats); + FollowStatsAction.StatsResponses statsResponse = createStatsResponse(); + return new CcrStatsAction.Response(autoFollowStats, statsResponse); } }
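The serialization-test migrations in this change all follow the pattern visible here: AbstractStreamableTestCase required a no-arg "blank" instance that was later filled in by readFrom, whereas AbstractWireSerializingTestCase asks for a Writeable.Reader, typically a deserializing constructor reference such as CcrStatsAction.Response::new. A minimal sketch of the shape a Writeable class takes (MyRequest is a hypothetical name, used only for illustration):

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    final class MyRequest implements Writeable {
        private final String name;

        MyRequest(String name) {
            this.name = name;
        }

        // Deserializing constructor; a reference to it (MyRequest::new) is exactly
        // what instanceReader() returns in the migrated tests.
        MyRequest(StreamInput in) throws IOException {
            this.name = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
        }
    }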
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java index b993132bcfa6c..8b8cd534c814d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/DeleteAutoFollowPatternRequestTests.java @@ -5,20 +5,19 @@ */ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; -public class DeleteAutoFollowPatternRequestTests extends AbstractStreamableTestCase<DeleteAutoFollowPatternAction.Request> { +public class DeleteAutoFollowPatternRequestTests extends AbstractWireSerializingTestCase<DeleteAutoFollowPatternAction.Request> { @Override - protected DeleteAutoFollowPatternAction.Request createBlankInstance() { - return new DeleteAutoFollowPatternAction.Request(); + protected Writeable.Reader<DeleteAutoFollowPatternAction.Request> instanceReader() { + return DeleteAutoFollowPatternAction.Request::new; } @Override protected DeleteAutoFollowPatternAction.Request createTestInstance() { - DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(); - request.setName(randomAlphaOfLength(4)); - return request; + return new DeleteAutoFollowPatternAction.Request(randomAlphaOfLength(4)); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java index c74afd6075c95..7130c830baa01 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java @@ -5,10 +5,11 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.AbstractStreamableTestCase; +import
org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; @@ -16,11 +17,11 @@ import java.util.HashMap; import java.util.Map; -public class GetAutoFollowPatternResponseTests extends AbstractStreamableTestCase<GetAutoFollowPatternAction.Response> { +public class GetAutoFollowPatternResponseTests extends AbstractWireSerializingTestCase<GetAutoFollowPatternAction.Response> { @Override - protected GetAutoFollowPatternAction.Response createBlankInstance() { - return new GetAutoFollowPatternAction.Response(); + protected Writeable.Reader<GetAutoFollowPatternAction.Response> instanceReader() { + return GetAutoFollowPatternAction.Response::new; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java index 3814e561b42c4..47da4aee1677e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java @@ -6,11 +6,12 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import java.io.IOException; @@ -21,7 +22,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContentTestCase<PutAutoFollowPatternAction.Request> { +public class PutAutoFollowPatternRequestTests extends AbstractSerializingTestCase<PutAutoFollowPatternAction.Request> { @Override protected boolean supportsUnknownFields() { @@ -34,8 +35,8 @@ protected PutAutoFollowPatternAction.Request doParseInstance(XContentParser pars } @Override - protected PutAutoFollowPatternAction.Request createBlankInstance() { - return new PutAutoFollowPatternAction.Request(); + protected Writeable.Reader<PutAutoFollowPatternAction.Request> instanceReader() { + return PutAutoFollowPatternAction.Request::new; } @Override @@ -53,6 +54,15 @@ protected PutAutoFollowPatternAction.Request createTestInstance() { if (randomBoolean()) { request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } + if (randomBoolean()) { + request.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } if (randomBoolean()) { request.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); }
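For context on what these base classes exercise: the wire-serializing test cases repeatedly round-trip randomized instances through the transport format and compare the result using equals. A sketch of that round trip, assuming T implements Writeable with a meaningful equals (an illustration, not the framework source):

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.Writeable;

    import static org.junit.Assert.assertEquals;

    final class RoundTrip {
        // Serialize the instance, read it back through the reader the test supplies
        // (e.g. PutAutoFollowPatternAction.Request::new), and expect an equal object.
        static <T extends Writeable> void assertRoundTrip(T original, Writeable.Reader<T> reader) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                original.writeTo(out);
                try (StreamInput in = out.bytes().streamInput()) {
                    assertEquals(original, reader.read(in));
                }
            }
        }
    }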
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java index 1385b383b940c..726a1c9893a50 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java @@ -5,17 +5,18 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import java.io.IOException; -public class PutFollowActionRequestTests extends AbstractStreamableXContentTestCase<PutFollowAction.Request> { +public class PutFollowActionRequestTests extends AbstractSerializingTestCase<PutFollowAction.Request> { @Override - protected PutFollowAction.Request createBlankInstance() { - return new PutFollowAction.Request(); + protected Writeable.Reader<PutFollowAction.Request> instanceReader() { + return PutFollowAction.Request::new; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionResponseTests.java index 506a5e6ffbb22..71b0074307eb4 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionResponseTests.java @@ -5,14 +5,15 @@ */ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; -public class PutFollowActionResponseTests extends AbstractStreamableTestCase<PutFollowAction.Response> { +public class PutFollowActionResponseTests extends AbstractWireSerializingTestCase<PutFollowAction.Response> { @Override - protected PutFollowAction.Response createBlankInstance() { - return new PutFollowAction.Response(); + protected Writeable.Reader<PutFollowAction.Response> instanceReader() { + return PutFollowAction.Response::new; } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java index ae9bc1bbd3339..3d3e869f53e8a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java @@ -6,11 +6,12 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import java.io.IOException; @@ -19,11 +20,11 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class ResumeFollowActionRequestTests extends AbstractStreamableXContentTestCase<ResumeFollowAction.Request> { +public class ResumeFollowActionRequestTests extends AbstractSerializingTestCase<ResumeFollowAction.Request> { @Override - protected ResumeFollowAction.Request createBlankInstance() { - return new ResumeFollowAction.Request(); + protected
Writeable.Reader<ResumeFollowAction.Request> instanceReader() { + return ResumeFollowAction.Request::new; } @Override @@ -59,6 +60,12 @@ static ResumeFollowAction.Request createTestRequest() { if (randomBoolean()) { request.setMaxWriteBufferCount(randomIntBetween(1, Integer.MAX_VALUE)); } + if (randomBoolean()) { + request.setMaxWriteRequestOperationCount(randomIntBetween(1, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } if (randomBoolean()) { request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java index 422122dcbe400..f66a673e19dfa 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java @@ -5,6 +5,9 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; @@ -84,4 +87,33 @@ public void testGetOperationsBasedOnGlobalSequenceId() throws Exception { assertThat(operation.id(), equalTo("5")); } + public void testMissingOperations() { + client().admin().indices().prepareCreate("index") + .setSettings(Settings.builder() + .put("index.soft_deletes.enabled", true) + .put("index.soft_deletes.retention.operations", 0) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0)) + .get(); + + for (int i = 0; i < 16; i++) { + client().prepareIndex("index", "_doc", "1").setSource("{}", XContentType.JSON).get(); + client().prepareDelete("index", "_doc", "1").get(); + } + client().admin().indices().refresh(new RefreshRequest("index")).actionGet(); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest("index"); + forceMergeRequest.maxNumSegments(1); + client().admin().indices().forceMerge(forceMergeRequest).actionGet(); + + ShardStats shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0]; + String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId(), historyUUID); + request.setFromSeqNo(0L); + request.setMaxOperationCount(1); + + Exception e = expectThrows(ElasticsearchException.class, () -> client().execute(ShardChangesAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), equalTo("Operations are no longer available for replicating. Maybe increase the retention setting " + + "[index.soft_deletes.retention.operations]?")); + } + }
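testMissingOperations provokes the failure deliberately: with index.soft_deletes.retention.operations set to 0, a force-merge can prune all soft-deleted history, after which a ShardChanges request for those sequence numbers can only fail with the error asserted on. A small sketch of the settings involved (values are illustrative):

    import org.elasticsearch.common.settings.Settings;

    final class SoftDeletesRetentionExample {
        // Index settings that keep a budget of soft-deleted operations around for
        // replication; the test uses a retention of 0 so history can vanish entirely.
        static Settings retainOps(long retainedOps) {
            return Settings.builder()
                .put("index.soft_deletes.enabled", true)
                .put("index.soft_deletes.retention.operations", retainedOps)
                .build();
        }
    }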
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java index e4830413dff3b..86851d98ffed2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/StatsResponsesTests.java @@ -23,6 +23,10 @@ protected FollowStatsAction.StatsResponses createBlankInstance() { @Override protected FollowStatsAction.StatsResponses createTestInstance() { + return createStatsResponse(); + } + + static FollowStatsAction.StatsResponses createStatsResponse() { int numResponses = randomIntBetween(0, 8); List<FollowStatsAction.StatsResponse> responses = new ArrayList<>(numResponses); for (int i = 0; i < numResponses; i++) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java index 3f6b2fcb0e3ef..5ef43fc05c81c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java @@ -56,8 +56,7 @@ public void testInnerDelete() { new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders))) .build(); - Request request = new Request(); - request.setName("name1"); + Request request = new Request("name1"); AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState) .getMetaData() .custom(AutoFollowMetadata.TYPE); @@ -86,8 +85,7 @@ public void testInnerDeleteDoesNotExist() { new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders))) .build(); - Request request = new Request(); - request.setName("name2"); + Request request = new Request("name2"); Exception e = expectThrows(ResourceNotFoundException.class, () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)); assertThat(e.getMessage(), equalTo("auto-follow pattern [name2] is missing")); @@ -98,8 +96,7 @@ public void testInnerDeleteNoAutoFollowMetadata() { .metaData(MetaData.builder()) .build(); - Request request = new Request(); - request.setName("name1"); + Request request = new Request("name1"); Exception e = expectThrows(ResourceNotFoundException.class, () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)); assertThat(e.getMessage(), equalTo("auto-follow pattern [name1] is missing")); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index 4a201e37355a9..9b9d088eea332 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; import
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; @@ -104,4 +105,35 @@ public void testUnfollowRunningShardFollowTasks() { equalTo("cannot convert the follower index [follow_index] to a non-follower, because it has not been paused")); } + public void testUnfollowMissingIndex() { + IndexMetaData.Builder followerIndex = IndexMetaData.builder("follow_index") + .settings(settings(Version.CURRENT).put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)) + .numberOfShards(1) + .numberOfReplicas(0) + .state(IndexMetaData.State.CLOSE) + .putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, new HashMap<>()); + + ClusterState current = ClusterState.builder(new ClusterName("cluster_name")) + .metaData(MetaData.builder() + .put(followerIndex) + .build()) + .build(); + expectThrows(IndexNotFoundException.class, () -> TransportUnfollowAction.unfollow("another_index", current)); + } + + public void testUnfollowNoneFollowIndex() { + IndexMetaData.Builder followerIndex = IndexMetaData.builder("follow_index") + .settings(settings(Version.CURRENT).put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)) + .numberOfShards(1) + .numberOfReplicas(0) + .state(IndexMetaData.State.CLOSE); + + ClusterState current = ClusterState.builder(new ClusterName("cluster_name")) + .metaData(MetaData.builder() + .put(followerIndex) + .build()) + .build(); + expectThrows(IllegalArgumentException.class, () -> TransportUnfollowAction.unfollow("follow_index", current)); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollectorTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollectorTestCase.java deleted file mode 100644 index f98e541a9d907..0000000000000 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollectorTestCase.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.monitoring.collector.ccr; - -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public abstract class AbstractCcrCollectorTestCase extends BaseCollectorTestCase { - - public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { - final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); - final boolean ccrAllowed = randomBoolean(); - final boolean isElectedMaster = randomBoolean(); - whenLocalNodeElectedMaster(isElectedMaster); - - // this controls the blockage - when(licenseState.isMonitoringAllowed()).thenReturn(false); - when(licenseState.isCcrAllowed()).thenReturn(ccrAllowed); - - final AbstractCcrCollector collector = createCollector(settings, clusterService, licenseState, client); - - assertThat(collector.shouldCollect(isElectedMaster), is(false)); - if (isElectedMaster) { - verify(licenseState).isMonitoringAllowed(); - } - } - - public void testShouldCollectReturnsFalseIfNotMaster() { - // regardless of CCR being enabled - final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); - - when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); - when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); - // this controls the blockage - final boolean isElectedMaster = false; - - final AbstractCcrCollector collector = createCollector(settings, clusterService, licenseState, client); - - assertThat(collector.shouldCollect(isElectedMaster), is(false)); - } - - public void testShouldCollectReturnsFalseIfCCRIsDisabled() { - // this is controls the blockage - final Settings settings = ccrDisabledSettings(); - - when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); - when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); - - final boolean isElectedMaster = randomBoolean(); - whenLocalNodeElectedMaster(isElectedMaster); - - final AbstractCcrCollector collector = createCollector(settings, clusterService, licenseState, client); - - assertThat(collector.shouldCollect(isElectedMaster), is(false)); - - if (isElectedMaster) { - verify(licenseState).isMonitoringAllowed(); - } - } - - public void testShouldCollectReturnsFalseIfCCRIsNotAllowed() { - final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); - - when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); - // this is controls the blockage - when(licenseState.isCcrAllowed()).thenReturn(false); - final boolean isElectedMaster = randomBoolean(); - whenLocalNodeElectedMaster(isElectedMaster); - - final AbstractCcrCollector collector = createCollector(settings, clusterService, licenseState, client); - - assertThat(collector.shouldCollect(isElectedMaster), is(false)); - - if (isElectedMaster) { - verify(licenseState).isMonitoringAllowed(); - } - } - - public void testShouldCollectReturnsTrue() { - final Settings settings = ccrEnabledSettings(); - - when(licenseState.isMonitoringAllowed()).thenReturn(true); - when(licenseState.isCcrAllowed()).thenReturn(true); - final boolean isElectedMaster = true; - - final AbstractCcrCollector collector = createCollector(settings, clusterService, 
licenseState, client); - - assertThat(collector.shouldCollect(isElectedMaster), is(true)); - - verify(licenseState).isMonitoringAllowed(); - } - - abstract AbstractCcrCollector createCollector(Settings settings, - ClusterService clusterService, - XPackLicenseState licenseState, - Client client); - - private Settings ccrEnabledSettings() { - // since it's the default, we want to ensure we test both with/without it - return randomBoolean() ? Settings.EMPTY : Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), true).build(); - } - - private Settings ccrDisabledSettings() { - return Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), false).build(); - } - -} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollectorTests.java deleted file mode 100644 index 7a302503d2db4..0000000000000 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollectorTests.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.monitoring.collector.ccr; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.ccr.AutoFollowStats; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Collection; - -import static org.elasticsearch.xpack.monitoring.MonitoringTestUtils.randomMonitoringNode; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class CcrAutoFollowStatsCollectorTests extends AbstractCcrCollectorTestCase { - - @Override - AbstractCcrCollector createCollector(Settings settings, ClusterService clusterService, XPackLicenseState licenseState, Client client) { - return new CcrAutoFollowStatsCollector(settings, clusterService, licenseState, client); - } - - public void testDoCollect() throws Exception { - final String clusterUuid = randomAlphaOfLength(5); - whenClusterStateWithUUID(clusterUuid); - - final MonitoringDoc.Node node = randomMonitoringNode(random()); - final CcrClient client = mock(CcrClient.class); - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - - final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - withCollectionTimeout(CcrAutoFollowStatsCollector.CCR_AUTO_FOLLOW_STATS_TIMEOUT, timeout); - - final 
CcrAutoFollowStatsCollector collector = - new CcrAutoFollowStatsCollector(Settings.EMPTY, clusterService, licenseState, client, threadContext); - assertEquals(timeout, collector.getCollectionTimeout()); - - final AutoFollowStats autoFollowStats = mock(AutoFollowStats.class); - - @SuppressWarnings("unchecked") - final ActionFuture future = (ActionFuture)mock(ActionFuture.class); - final AutoFollowStatsAction.Response response = new AutoFollowStatsAction.Response(autoFollowStats); - - when(client.autoFollowStats(any())).thenReturn(future); - when(future.actionGet(timeout)).thenReturn(response); - - final long interval = randomNonNegativeLong(); - - final Collection documents = collector.doCollect(node, interval, clusterState); - verify(clusterState).metaData(); - verify(metaData).clusterUUID(); - - assertThat(documents, hasSize(1)); - final AutoFollowStatsMonitoringDoc document = (AutoFollowStatsMonitoringDoc) documents.iterator().next(); - - assertThat(document.getCluster(), is(clusterUuid)); - assertThat(document.getTimestamp(), greaterThan(0L)); - assertThat(document.getIntervalMillis(), equalTo(interval)); - assertThat(document.getNode(), equalTo(node)); - assertThat(document.getSystem(), is(MonitoredSystem.ES)); - assertThat(document.getType(), is(AutoFollowStatsMonitoringDoc.TYPE)); - assertThat(document.getId(), nullValue()); - assertThat(document.stats(), is(autoFollowStats)); - } - -} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollectorTests.java deleted file mode 100644 index 904735ffedf25..0000000000000 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollectorTests.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.monitoring.collector.ccr; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; -import org.mockito.ArgumentMatcher; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; - -import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.monitoring.MonitoringTestUtils.randomMonitoringNode; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Matchers.argThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class FollowStatsCollectorTests extends AbstractCcrCollectorTestCase { - - @Override - AbstractCcrCollector createCollector(Settings settings, ClusterService clusterService, XPackLicenseState licenseState, Client client) { - return new FollowStatsCollector(settings, clusterService, licenseState, client); - } - - public void testDoCollect() throws Exception { - final String clusterUuid = randomAlphaOfLength(5); - whenClusterStateWithUUID(clusterUuid); - - final MonitoringDoc.Node node = randomMonitoringNode(random()); - final CcrClient client = mock(CcrClient.class); - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - - final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - withCollectionTimeout(FollowStatsCollector.CCR_STATS_TIMEOUT, timeout); - - final FollowStatsCollector collector = - new FollowStatsCollector(Settings.EMPTY, clusterService, licenseState, client, threadContext); - assertEquals(timeout, collector.getCollectionTimeout()); - - final List statuses = mockStatuses(); - - @SuppressWarnings("unchecked") - final ActionFuture future = - (ActionFuture)mock(ActionFuture.class); - final FollowStatsAction.StatsResponses responses = new FollowStatsAction.StatsResponses(emptyList(), emptyList(), statuses); - - final FollowStatsAction.StatsRequest request = new FollowStatsAction.StatsRequest(); - request.setIndices(Strings.EMPTY_ARRAY); - when(client.stats(statsRequestEq(request))).thenReturn(future); - when(future.actionGet(timeout)).thenReturn(responses); - - final long interval = randomNonNegativeLong(); - - final Collection documents = collector.doCollect(node, interval, clusterState); - verify(clusterState).metaData(); - verify(metaData).clusterUUID(); - - assertThat(documents, hasSize(statuses.size())); - - int index = 0; - for (final Iterator it = documents.iterator(); it.hasNext(); index++) { - final FollowStatsMonitoringDoc document = (FollowStatsMonitoringDoc)it.next(); - final FollowStatsAction.StatsResponse status = 
statuses.get(index); - - assertThat(document.getCluster(), is(clusterUuid)); - assertThat(document.getTimestamp(), greaterThan(0L)); - assertThat(document.getIntervalMillis(), equalTo(interval)); - assertThat(document.getNode(), equalTo(node)); - assertThat(document.getSystem(), is(MonitoredSystem.ES)); - assertThat(document.getType(), is(FollowStatsMonitoringDoc.TYPE)); - assertThat(document.getId(), nullValue()); - assertThat(document.status(), is(status.status())); - } - } - - private List mockStatuses() { - final int count = randomIntBetween(1, 8); - final List statuses = new ArrayList<>(count); - - for (int i = 0; i < count; ++i) { - FollowStatsAction.StatsResponse statsResponse = mock(FollowStatsAction.StatsResponse.class); - ShardFollowNodeTaskStatus status = mock(ShardFollowNodeTaskStatus.class); - when(statsResponse.status()).thenReturn(status); - statuses.add(statsResponse); - } - - return statuses; - } - - private static FollowStatsAction.StatsRequest statsRequestEq(FollowStatsAction.StatsRequest expected) { - return argThat(new FollowStatsRequest(expected)); - } - - private static class FollowStatsRequest extends ArgumentMatcher { - - private final FollowStatsAction.StatsRequest expected; - - private FollowStatsRequest(FollowStatsAction.StatsRequest expected) { - this.expected = expected; - } - - @Override - public boolean matches(Object o) { - FollowStatsAction.StatsRequest actual = (FollowStatsAction.StatsRequest) o; - return Arrays.equals(expected.indices(), actual.indices()); - } - } - -} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java new file mode 100644 index 0000000000000..bb44fd59da5d2 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
 */ +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.AutoFollowStats; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.monitoring.MonitoringTestUtils.randomMonitoringNode; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class StatsCollectorTests extends BaseCollectorTestCase { + + public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + final boolean ccrAllowed = randomBoolean(); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + // this controls the blockage + when(licenseState.isMonitoringAllowed()).thenReturn(false); + when(licenseState.isCcrAllowed()).thenReturn(ccrAllowed); + + final StatsCollector collector = createCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsFalseIfNotMaster() { + // regardless of CCR being enabled + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + // this controls the blockage + final boolean isElectedMaster = false; + + final StatsCollector collector = createCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + } + + public void testShouldCollectReturnsFalseIfCCRIsDisabled() { + // this controls the blockage + final Settings settings = ccrDisabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final StatsCollector collector = createCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if (isElectedMaster) { + 
verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsFalseIfCCRIsNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + // this controls the blockage + when(licenseState.isCcrAllowed()).thenReturn(false); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final StatsCollector collector = createCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsTrue() { + final Settings settings = ccrEnabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(true); + when(licenseState.isCcrAllowed()).thenReturn(true); + final boolean isElectedMaster = true; + + final StatsCollector collector = createCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(true)); + + verify(licenseState).isMonitoringAllowed(); + } + + public void testDoCollect() throws Exception { + final String clusterUuid = randomAlphaOfLength(5); + whenClusterStateWithUUID(clusterUuid); + + final MonitoringDoc.Node node = randomMonitoringNode(random()); + final CcrClient client = mock(CcrClient.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final List<FollowStatsAction.StatsResponse> statuses = mockStatuses(); + + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); + withCollectionTimeout(StatsCollector.CCR_STATS_TIMEOUT, timeout); + + final AutoFollowStats autoFollowStats = mock(AutoFollowStats.class); + final FollowStatsAction.StatsResponses statsResponse = mock(FollowStatsAction.StatsResponses.class); + when(statsResponse.getStatsResponses()).thenReturn(statuses); + + @SuppressWarnings("unchecked") + final ActionFuture<CcrStatsAction.Response> future = (ActionFuture<CcrStatsAction.Response>) mock(ActionFuture.class); + final CcrStatsAction.Response response = new CcrStatsAction.Response(autoFollowStats, statsResponse); + + when(client.stats(any())).thenReturn(future); + when(future.actionGet(timeout)).thenReturn(response); + + final StatsCollector collector = new StatsCollector(settings, clusterService, licenseState, client, threadContext); + assertEquals(timeout, collector.getCollectionTimeout()); + + final long interval = randomNonNegativeLong(); + final List<MonitoringDoc> documents = new ArrayList<>(collector.doCollect(node, interval, clusterState)); + verify(clusterState).metaData(); + verify(metaData).clusterUUID(); + + assertThat(documents, hasSize(statuses.size() + 1)); + + for (int i = 0; i < documents.size() - 1; i++) { + final FollowStatsMonitoringDoc document = (FollowStatsMonitoringDoc) documents.get(i); + final FollowStatsAction.StatsResponse status = statuses.get(i); + + assertThat(document.getCluster(), is(clusterUuid)); + assertThat(document.getTimestamp(), greaterThan(0L)); + assertThat(document.getIntervalMillis(), equalTo(interval)); + assertThat(document.getNode(), equalTo(node)); + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(FollowStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.status(), is(status.status())); + } + + final AutoFollowStatsMonitoringDoc document = (AutoFollowStatsMonitoringDoc) documents.get(documents.size() - 1); + assertThat(document, notNullValue()); + 
assertThat(document.getCluster(), is(clusterUuid)); + assertThat(document.getTimestamp(), greaterThan(0L)); + assertThat(document.getIntervalMillis(), equalTo(interval)); + assertThat(document.getNode(), equalTo(node)); + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(AutoFollowStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.stats(), is(autoFollowStats)); + } + + private List<FollowStatsAction.StatsResponse> mockStatuses() { + final int count = randomIntBetween(1, 8); + final List<FollowStatsAction.StatsResponse> statuses = new ArrayList<>(count); + + for (int i = 0; i < count; ++i) { + FollowStatsAction.StatsResponse statsResponse = mock(FollowStatsAction.StatsResponse.class); + ShardFollowNodeTaskStatus status = mock(ShardFollowNodeTaskStatus.class); + when(status.followerIndex()).thenReturn("follow_index"); + when(statsResponse.status()).thenReturn(status); + statuses.add(statsResponse); + } + + return statuses; + } + + private StatsCollector createCollector(Settings settings, + ClusterService clusterService, + XPackLicenseState licenseState, + Client client) { + return new StatsCollector(settings, clusterService, licenseState, client); + } + + private Settings ccrEnabledSettings() { + // since it's the default, we want to ensure we test both with/without it + return randomBoolean() ? Settings.EMPTY : Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), true).build(); + } + + private Settings ccrDisabledSettings() { + return Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), false).build(); + } + +}
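The testDoCollect test above relies on a standard Mockito idiom: stub the client call to hand back a mock ActionFuture, then stub the future's actionGet(timeout) to return a canned response, so the collector under test never touches a real transport. A minimal, self-contained sketch of that idiom follows; StatsClient and StatsFuture are hypothetical stand-ins for this sketch, not the Elasticsearch interfaces:

--------------------------------------------------
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class FutureStubbingSketch {

    // illustrative stand-ins for a client API that returns a future
    interface StatsFuture { String actionGet(long timeoutMillis); }

    interface StatsClient { StatsFuture stats(String request); }

    public static void main(String[] args) {
        StatsClient client = mock(StatsClient.class);
        StatsFuture future = mock(StatsFuture.class);

        // the stubbed client hands back the mock future instead of hitting the wire
        when(client.stats("req")).thenReturn(future);
        // blocking on the future yields the canned response (timeout value is arbitrary)
        when(future.actionGet(30_000L)).thenReturn("canned-response");

        // prints "canned-response"
        System.out.println(client.stats("req").actionGet(30_000L));
    }
}
--------------------------------------------------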
diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 01e8179fb62ea..647ebd53f1cd8 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -49,6 +49,7 @@ dependencies { testCompile project(path: ':modules:reindex', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') + testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") } ext.expansions = [ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 0619aef6961cc..18ded8d585078 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -79,6 +79,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste public static final long BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS = XPackInfoResponse.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + private final Settings settings; + private final ClusterService clusterService; /** @@ -118,6 +120,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste public LicenseService(Settings settings, ClusterService clusterService, Clock clock, Environment env, ResourceWatcherService resourceWatcherService, XPackLicenseState licenseState) { super(settings); + this.settings = settings; this.clusterService = clusterService; this.clock = clock; this.scheduler = new SchedulerEngine(settings, clock); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java index 66fffbe28d397..3d69c4ad6c4ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java @@ -6,24 +6,88 @@ package org.elasticsearch.license; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.xpack.common.ProtocolUtils; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; -import static org.elasticsearch.license.PostStartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -class PostStartBasicResponse extends AcknowledgedResponse { +public class PostStartBasicResponse extends AcknowledgedResponse { + + private static final ConstructingObjectParser<PostStartBasicResponse, Void> PARSER = new ConstructingObjectParser<>( + "start_basic_response", true, (a, v) -> { + boolean basicWasStarted = (Boolean) a[0]; + String errorMessage = (String) a[1]; + + if (basicWasStarted) { + return new PostStartBasicResponse(Status.GENERATED_BASIC); + } + Status status = Status.fromErrorMessage(errorMessage); + @SuppressWarnings("unchecked") Tuple<String, Map<String, String[]>> acknowledgements = (Tuple<String, Map<String, String[]>>) a[2]; + return new PostStartBasicResponse(status, acknowledgements.v2(), acknowledgements.v1()); + }); + + private static final ParseField BASIC_WAS_STARTED_FIELD = new ParseField("basic_was_started"); + private static final ParseField ERROR_MESSAGE_FIELD = new ParseField("error_message"); + private static final ParseField ACKNOWLEDGE_FIELD = new ParseField("acknowledge"); + private static final ParseField MESSAGE_FIELD = new ParseField("message"); + + static { + PARSER.declareBoolean(constructorArg(), BASIC_WAS_STARTED_FIELD); + PARSER.declareString(optionalConstructorArg(), ERROR_MESSAGE_FIELD); + PARSER.declareObject(optionalConstructorArg(), (parser, v) -> { + Map<String, String[]> acknowledgeMessages = new HashMap<>(); + String message = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (currentFieldName == null) { + throw new XContentParseException(parser.getTokenLocation(), "expected message header or acknowledgement"); + } + if (MESSAGE_FIELD.getPreferredName().equals(currentFieldName)) { + ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser::getTokenLocation); + message = parser.text(); + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new XContentParseException(parser.getTokenLocation(), "unexpected acknowledgement type"); + } + List<String> acknowledgeMessagesList = new ArrayList<>(); + while ((token = parser.nextToken()) 
!= XContentParser.Token.END_ARRAY) { + ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser::getTokenLocation); + acknowledgeMessagesList.add(parser.text()); + } + acknowledgeMessages.put(currentFieldName, acknowledgeMessagesList.toArray(new String[0])); + } + } + } + return new Tuple<>(message, acknowledgeMessages); + }, ACKNOWLEDGE_FIELD); + } private Map acknowledgeMessages; private String acknowledgeMessage; - enum Status { + public enum Status { GENERATED_BASIC(true, null, RestStatus.OK), ALREADY_USING_BASIC(false, "Operation failed: Current license is basic.", RestStatus.FORBIDDEN), NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK); @@ -49,19 +113,29 @@ String getErrorMessage() { RestStatus getRestStatus() { return restStatus; } + + static Status fromErrorMessage(final String errorMessage) { + final Status[] values = Status.values(); + for (Status status : values) { + if (Objects.equals(status.errorMessage, errorMessage)) { + return status; + } + } + throw new IllegalArgumentException("No status for error message ['" + errorMessage + "']"); + } } private Status status; - PostStartBasicResponse() { + public PostStartBasicResponse() { } PostStartBasicResponse(Status status) { this(status, Collections.emptyMap(), null); } - PostStartBasicResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { - super(status != NEED_ACKNOWLEDGEMENT); + public PostStartBasicResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { + super(status != Status.NEED_ACKNOWLEDGEMENT); this.status = status; this.acknowledgeMessages = acknowledgeMessages; this.acknowledgeMessage = acknowledgeMessage; @@ -108,14 +182,14 @@ public void writeTo(StreamOutput out) throws IOException { @Override protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { if (status.isBasicStarted()) { - builder.field("basic_was_started", true); + builder.field(BASIC_WAS_STARTED_FIELD.getPreferredName(), true); } else { - builder.field("basic_was_started", false); - builder.field("error_message", status.getErrorMessage()); + builder.field(BASIC_WAS_STARTED_FIELD.getPreferredName(), false); + builder.field(ERROR_MESSAGE_FIELD.getPreferredName(), status.getErrorMessage()); } if (acknowledgeMessages.isEmpty() == false) { builder.startObject("acknowledge"); - builder.field("message", acknowledgeMessage); + builder.field(MESSAGE_FIELD.getPreferredName(), acknowledgeMessage); for (Map.Entry entry : acknowledgeMessages.entrySet()) { builder.startArray(entry.getKey()); for (String message : entry.getValue()) { @@ -126,4 +200,26 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO builder.endObject(); } } + + public static PostStartBasicResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + PostStartBasicResponse that = (PostStartBasicResponse) o; + + return status == that.status && + ProtocolUtils.equals(acknowledgeMessages, that.acknowledgeMessages) && + Objects.equals(acknowledgeMessage, that.acknowledgeMessage); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), status, ProtocolUtils.hashCode(acknowledgeMessages), acknowledgeMessage); + } + } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java index d1230808bd84c..6e138edb53f00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -27,10 +26,10 @@ public class TransportDeleteLicenseAction extends TransportMasterNodeAction { @Inject - public TransportGetBasicStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetBasicStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetBasicStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetBasicStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, GetBasicStatusRequest::new, indexNameExpressionResolver); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java index de55a66427769..676ae6543e765 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -25,10 +24,10 @@ public class TransportGetLicenseAction extends TransportMasterNodeReadAction { @Inject - public TransportGetTrialStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetTrialStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + public TransportGetTrialStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetTrialStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, GetTrialStatusRequest::new, indexNameExpressionResolver); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java index 0e9316ab2b33a..e03e6201e917c 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -23,10 +22,10 @@ public class TransportPostStartBasicAction extends TransportMasterNodeAction + * Index Lifecycle API is available for all license types except + * {@link OperationMode#MISSING} + * + * @return {@code true} as long as the license is valid. Otherwise + * {@code false}. + */ + public boolean isIndexLifecycleAllowed() { + // status is volatile + Status localStatus = status; + // Should work on all active licenses + return localStatus.active; + } + /** * Determine if SQL support should be enabled. *

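The isIndexLifecycleAllowed() addition above follows the same pattern as the other XPackLicenseState feature checks: the volatile status field is copied into a local once, so the decision is made against a single consistent snapshot even if the license changes concurrently. A stripped-down, compilable model of that pattern (Mode and Status here are simplified stand-ins, not the real license classes):

--------------------------------------------------
// Simplified model of the volatile-snapshot license gate; illustrative only.
public class LicenseGateSketch {

    enum Mode { MISSING, BASIC, TRIAL, GOLD, PLATINUM }

    static final class Status {
        final Mode mode;
        final boolean active;

        Status(Mode mode, boolean active) {
            this.mode = mode;
            this.active = active;
        }
    }

    // volatile: license updates applied by another thread publish safely
    private volatile Status status = new Status(Mode.TRIAL, true);

    public boolean isIndexLifecycleAllowed() {
        // read the field once so the check sees one consistent snapshot;
        // a MISSING license is modeled here as active == false
        Status localStatus = status;
        return localStatus.active;
    }
}
--------------------------------------------------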
      diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index 12eb20617ff0f..b44e192f407ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -241,7 +241,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); } - public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException { return PARSER.apply(parser, null); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java index fa7bccf8b08c8..102bcde9dc77b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java @@ -198,7 +198,7 @@ private SegmentCommitInfo syncSegment(SegmentCommitInfo segmentCommitInfo, LiveD List fieldInfoCopy = new ArrayList<>(fieldInfos.size()); for (FieldInfo fieldInfo : fieldInfos) { fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number, - false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, + false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, 0, fieldInfo.isSoftDeletesField())); } FieldInfos newFieldInfos = new FieldInfos(fieldInfoCopy.toArray(new FieldInfo[0])); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index 0657eb013972a..c0a7a0b90a4a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -40,6 +40,7 @@ public final class ClientHelper { public static final String SECURITY_ORIGIN = "security"; public static final String WATCHER_ORIGIN = "watcher"; public static final String ML_ORIGIN = "ml"; + public static final String INDEX_LIFECYCLE_ORIGIN = "index_lifecycle"; public static final String MONITORING_ORIGIN = "monitoring"; public static final String DEPRECATION_ORIGIN = "deprecation"; public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index 3f27f66b27b77..db8981055d24d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import 
org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.security.client.SecurityClient; @@ -36,6 +37,7 @@ public class XPackClient { private final SecurityClient securityClient; private final WatcherClient watcherClient; private final MachineLearningClient machineLearning; + private final ILMClient ilmClient; public XPackClient(Client client) { this.client = Objects.requireNonNull(client, "client"); @@ -45,6 +47,7 @@ public XPackClient(Client client) { this.securityClient = new SecurityClient(client); this.watcherClient = new WatcherClient(client); this.machineLearning = new MachineLearningClient(client); + this.ilmClient = new ILMClient(client); } public Client es() { @@ -75,6 +78,10 @@ public MachineLearningClient machineLearning() { return machineLearning; } + public ILMClient ilmClient() { + return ilmClient; + } + public XPackClient withHeaders(Map headers) { return new XPackClient(client.filterWithHeader(headers)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 21bd005ac5b7c..1d11f3df1721d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -38,12 +38,30 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; -import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MlMetadata; import 
org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -136,8 +154,8 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExceptExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; -import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges; import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; @@ -317,7 +335,15 @@ public List> getClientActions() { StopRollupJobAction.INSTANCE, DeleteRollupJobAction.INSTANCE, GetRollupJobsAction.INSTANCE, - GetRollupCapsAction.INSTANCE + GetRollupCapsAction.INSTANCE, + // ILM + DeleteLifecycleAction.INSTANCE, + GetLifecycleAction.INSTANCE, + PutLifecycleAction.INSTANCE, + ExplainLifecycleAction.INSTANCE, + RemoveIndexLifecyclePolicyAction.INSTANCE, + MoveToStepAction.INSTANCE, + RetryAction.INSTANCE ); } @@ -371,9 +397,27 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), // ccr + new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(MetaData.Custom.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, AutoFollowMetadata.TYPE, - in -> AutoFollowMetadata.readDiffFrom(MetaData.Custom.class, AutoFollowMetadata.TYPE, in)) + in -> AutoFollowMetadata.readDiffFrom(MetaData.Custom.class, AutoFollowMetadata.TYPE, in)), + // ILM + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, + IndexLifecycleFeatureSetUsage::new), + // ILM - Custom Metadata + new NamedWriteableRegistry.Entry(MetaData.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, IndexLifecycleMetadata.TYPE, + IndexLifecycleMetadata.IndexLifecycleMetadataDiff::new), + // ILM - LifecycleTypes + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + // ILM - Lifecycle Actions + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new) ); } @@ -404,7 +448,7 @@ public List getNamedXContent() { RollupJobStatus::fromXContent), new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent) - ); + ); } @Override diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 70eb047c8edef..0e6888dd80d73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -31,6 +31,8 @@ public final class XPackField { public static final String SQL = "sql"; /** Name constant for the rollup feature. */ public static final String ROLLUP = "rollup"; + /** Name constant for the index lifecycle feature. */ + public static final String INDEX_LIFECYCLE = "ilm"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 6430513d9798d..cee7074132d85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -248,7 +248,7 @@ public Collection createComponents(Client client, ClusterService cluster List components = new ArrayList<>(); // just create the reloader as it will pull all of the loaded ssl configurations and start watching them - new SSLConfigurationReloader(settings, environment, getSslService(), resourceWatcherService); + new SSLConfigurationReloader(environment, getSslService(), resourceWatcherService); setLicenseService(new LicenseService(settings, clusterService, getClock(), environment, resourceWatcherService, getLicenseState())); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 997f04e33bd77..111d8a9a68ca9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.core; import org.elasticsearch.common.settings.Setting; @@ -78,6 +79,12 @@ private XPackSettings() { public static final Setting BEATS_ENABLED = Setting.boolSetting("xpack.beats.enabled", true, Setting.Property.NodeScope); + /** + * Setting for enabling or disabling the index lifecycle extension. Defaults to true. + */ + public static final Setting INDEX_LIFECYCLE_ENABLED = Setting.boolSetting("xpack.ilm.enabled", true, + Setting.Property.NodeScope); + /** Setting for enabling or disabling TLS. Defaults to false. 
*/ public static final Setting TRANSPORT_SSL_ENABLED = Setting.boolSetting("xpack.security.transport.ssl.enabled", false, Property.NodeScope); @@ -186,6 +193,7 @@ public static List> getAllSettings() { settings.add(USER_SETTING); settings.add(ROLLUP_ENABLED); settings.add(PASSWORD_HASHING_ALGORITHM); + settings.add(INDEX_LIFECYCLE_ENABLED); return Collections.unmodifiableList(settings); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java index 24ebf0530deab..541bd4e2fce92 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; import org.elasticsearch.protocol.xpack.XPackInfoRequest; @@ -31,9 +30,9 @@ public class TransportXPackInfoAction extends HandledTransportAction featureSets; @Inject - public TransportXPackInfoAction(Settings settings, TransportService transportService, - ActionFilters actionFilters, LicenseService licenseService, Set featureSets) { - super(settings, XPackInfoAction.NAME, transportService, actionFilters, + public TransportXPackInfoAction(TransportService transportService, ActionFilters actionFilters, LicenseService licenseService, + Set featureSets) { + super(XPackInfoAction.NAME, transportService, actionFilters, XPackInfoRequest::new); this.licenseService = licenseService; this.featureSets = featureSets; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java index 554e8e076660f..337c2281d5579 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -34,10 +33,10 @@ public class TransportXPackUsageAction extends TransportMasterNodeAction featureSets; @Inject - public TransportXPackUsageAction(Settings settings, ThreadPool threadPool, TransportService transportService, + public TransportXPackUsageAction(ThreadPool threadPool, TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Set featureSets) { - super(settings, XPackUsageAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + super(XPackUsageAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, XPackUsageRequest::new); this.featureSets = Collections.unmodifiableList(new 
ArrayList<>(featureSets)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java new file mode 100644 index 0000000000000..1ce08e5c54297 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.action.Action; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ccr.AutoFollowStats; + +import java.io.IOException; +import java.util.Objects; + +public class CcrStatsAction extends Action { + + public static final String NAME = "cluster:monitor/ccr/stats"; + public static final CcrStatsAction INSTANCE = new CcrStatsAction(); + + private CcrStatsAction() { + super(NAME); + } + + @Override + public Response newResponse() { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return Response::new; + } + + public static class Request extends MasterNodeRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final AutoFollowStats autoFollowStats; + private final FollowStatsAction.StatsResponses followStats; + + public Response(AutoFollowStats autoFollowStats, FollowStatsAction.StatsResponses followStats) { + this.autoFollowStats = Objects.requireNonNull(autoFollowStats); + this.followStats = Objects.requireNonNull(followStats); + } + + public Response(StreamInput in) throws IOException { + super(in); + autoFollowStats = new AutoFollowStats(in); + followStats = new FollowStatsAction.StatsResponses(); + followStats.readFrom(in); + } + + public AutoFollowStats getAutoFollowStats() { + return autoFollowStats; + } + + public FollowStatsAction.StatsResponses getFollowStats() { + return followStats; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + autoFollowStats.writeTo(out); + followStats.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("auto_follow_stats", autoFollowStats, params); + builder.field("follow_stats", followStats, params); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != 
o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(autoFollowStats, response.autoFollowStats) && + Objects.equals(followStats, response.followStats); + } + + @Override + public int hashCode() { + return Objects.hash(autoFollowStats, followStats); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java index 7b389ec6cf1d9..5bd767c5c50bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java @@ -33,7 +33,11 @@ public AcknowledgedResponse newResponse() { public static class Request extends AcknowledgedRequest { - private String name; + private final String name; + + public Request(String name) { + this.name = name; + } @Override public ActionRequestValidationException validate() { @@ -48,13 +52,8 @@ public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public Request(StreamInput in) throws IOException { + super(in); name = in.readString(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java index dba5f3b6f1f1f..ff47f6e105b92 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java @@ -32,7 +32,7 @@ public class FollowStatsAction extends Action { - public static final String NAME = "cluster:monitor/ccr/stats"; + public static final String NAME = "cluster:monitor/ccr/follow_stats"; public static final FollowStatsAction INSTANCE = new FollowStatsAction(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index a226118e17b1e..58a909a62ad2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; @@ -31,7 +32,12 @@ private GetAutoFollowPatternAction() { @Override public Response newResponse() { - return new Response(); + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public Writeable.Reader getResponseReader() { + return Response::new; } public static class Request extends MasterNodeReadRequest { @@ -81,21 +87,17 @@ public int hashCode() { public static class Response extends ActionResponse implements 
ToXContentObject { - private Map autoFollowPatterns; + private final Map autoFollowPatterns; public Response(Map autoFollowPatterns) { this.autoFollowPatterns = autoFollowPatterns; } - public Response() { - } - public Map getAutoFollowPatterns() { return autoFollowPatterns; } - @Override - public void readFrom(StreamInput in) throws IOException { + public Response(StreamInput in) throws IOException { super(in); autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::new); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index b2f966bba749a..58ecd17040490 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -110,6 +110,9 @@ public static Request fromXContent(XContentParser parser, String name) throws IO private TimeValue maxRetryDelay; private TimeValue readPollTimeout; + public Request() { + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -252,9 +255,8 @@ public void setReadPollTimeout(TimeValue readPollTimeout) { this.readPollTimeout = readPollTimeout; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public Request(StreamInput in) throws IOException { + super(in); name = in.readString(); remoteCluster = in.readString(); leaderIndexPatterns = in.readList(StreamInput::readString);
implements ToXContentObject { - private boolean followIndexCreated; - private boolean followIndexShardsAcked; - private boolean indexFollowingStarted; - - public Response() { - - } + private final boolean followIndexCreated; + private final boolean followIndexShardsAcked; + private final boolean indexFollowingStarted; public Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { this.followIndexCreated = followIndexCreated; @@ -239,9 +239,8 @@ public boolean isIndexFollowingStarted() { return indexFollowingStarted; } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); + public Response(StreamInput in) throws IOException { + super(in); followIndexCreated = in.readBoolean(); followIndexShardsAcked = in.readBoolean(); indexFollowingStarted = in.readBoolean(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java index 11c46492cc0f0..913b5e6b0a563 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java @@ -259,13 +259,14 @@ public ActionRequestValidationException validate() { return e; } - @Override - public void readFrom(final StreamInput in) throws IOException { - super.readFrom(in); + public Request(StreamInput in) throws IOException { + super(in); followerIndex = in.readString(); maxReadRequestOperationCount = in.readOptionalVInt(); maxOutstandingReadRequests = in.readOptionalVInt(); maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxWriteRequestOperationCount = in.readOptionalVInt(); + maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); maxOutstandingWriteRequests = in.readOptionalVInt(); maxWriteBufferCount = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); @@ -280,6 +281,8 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeOptionalVInt(maxReadRequestOperationCount); out.writeOptionalVInt(maxOutstandingReadRequests); out.writeOptionalWriteable(maxReadRequestSize); + out.writeOptionalVInt(maxWriteRequestOperationCount); + out.writeOptionalWriteable(maxWriteRequestSize); out.writeOptionalVInt(maxOutstandingWriteRequests); out.writeOptionalVInt(maxWriteBufferCount); out.writeOptionalWriteable(maxWriteBufferSize); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java index 3d5be565c1e29..43305b030be83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; @@ -53,26 +53,26 @@ public 
ActionFuture resumeFollow(final ResumeFollowAction. return listener; } - public void stats( + public void followStats( final FollowStatsAction.StatsRequest request, final ActionListener listener) { client.execute(FollowStatsAction.INSTANCE, request, listener); } - public ActionFuture stats(final FollowStatsAction.StatsRequest request) { + public ActionFuture followStats(final FollowStatsAction.StatsRequest request) { final PlainActionFuture listener = PlainActionFuture.newFuture(); client.execute(FollowStatsAction.INSTANCE, request, listener); return listener; } - public void autoFollowStats(final AutoFollowStatsAction.Request request, - final ActionListener listener) { - client.execute(AutoFollowStatsAction.INSTANCE, request, listener); + public void stats(final CcrStatsAction.Request request, + final ActionListener listener) { + client.execute(CcrStatsAction.INSTANCE, request, listener); } - public ActionFuture autoFollowStats(final AutoFollowStatsAction.Request request) { - final PlainActionFuture listener = PlainActionFuture.newFuture(); - autoFollowStats(request, listener); + public ActionFuture stats(final CcrStatsAction.Request request) { + final PlainActionFuture listener = PlainActionFuture.newFuture(); + stats(request, listener); return listener; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java new file mode 100644 index 0000000000000..7843fa7d86e0c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateAction.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
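
The recurring pattern in these CCR changes is the Streamable-to-Writeable migration: newResponse() becomes a dead end and deserialization moves into a StreamInput constructor exposed through getResponseReader(), which in turn lets response fields be final. A minimal sketch of the pattern, using a hypothetical ExampleAction (the action name and the long payload are illustrative, not part of this change):

---------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

public class ExampleAction extends Action<ExampleAction.Response> {

    public static final String NAME = "cluster:monitor/example"; // hypothetical action name
    public static final ExampleAction INSTANCE = new ExampleAction();

    private ExampleAction() {
        super(NAME);
    }

    @Override
    public Response newResponse() {
        // dead code path once the reader below is in place
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public Writeable.Reader<Response> getResponseReader() {
        return Response::new; // the StreamInput constructor replaces readFrom()
    }

    public static class Response extends ActionResponse {

        private final long value; // can be final now that it is set at construction

        public Response(long value) {
            this.value = value;
        }

        public Response(StreamInput in) throws IOException {
            value = in.readVLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(value);
        }
    }
}
---------------------------------------------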
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class AllocateAction implements LifecycleAction { + + public static final String NAME = "allocate"; + public static final ParseField NUMBER_OF_REPLICAS_FIELD = new ParseField("number_of_replicas"); + public static final ParseField INCLUDE_FIELD = new ParseField("include"); + public static final ParseField EXCLUDE_FIELD = new ParseField("exclude"); + public static final ParseField REQUIRE_FIELD = new ParseField("require"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new AllocateAction((Integer) a[0], (Map) a[1], (Map) a[2], (Map) a[3])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_REPLICAS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), INCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), EXCLUDE_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), REQUIRE_FIELD); + } + + private final Integer numberOfReplicas; + private final Map include; + private final Map exclude; + private final Map require; + + public static AllocateAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public AllocateAction(Integer numberOfReplicas, Map include, Map exclude, Map require) { + if (include == null) { + this.include = Collections.emptyMap(); + } else { + this.include = include; + } + if (exclude == null) { + this.exclude = Collections.emptyMap(); + } else { + this.exclude = exclude; + } + if (require == null) { + this.require = Collections.emptyMap(); + } else { + this.require = require; + } + if (this.include.isEmpty() && this.exclude.isEmpty() && this.require.isEmpty() && numberOfReplicas == null) { + throw new IllegalArgumentException( + "At least one of " + INCLUDE_FIELD.getPreferredName() + ", " + EXCLUDE_FIELD.getPreferredName() + " or " + + REQUIRE_FIELD.getPreferredName() + " must contain attributes for action " + NAME); + } + if (numberOfReplicas != null && numberOfReplicas < 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0"); + } + this.numberOfReplicas = numberOfReplicas; + } + + @SuppressWarnings("unchecked") + public AllocateAction(StreamInput in) throws IOException { + this(in.readOptionalVInt(), (Map) in.readGenericValue(), (Map) in.readGenericValue(), + (Map) in.readGenericValue()); + } + + public Integer getNumberOfReplicas() { + return numberOfReplicas; + } + + public Map getInclude() { + return include; + } + + public Map getExclude() { + return exclude; + } + + 
public Map getRequire() { + return require; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(numberOfReplicas); + out.writeGenericValue(include); + out.writeGenericValue(exclude); + out.writeGenericValue(require); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (numberOfReplicas != null) { + builder.field(NUMBER_OF_REPLICAS_FIELD.getPreferredName(), numberOfReplicas); + } + builder.field(INCLUDE_FIELD.getPreferredName(), include); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); + builder.field(REQUIRE_FIELD.getPreferredName(), require); + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List toSteps(Client client, String phase, StepKey nextStepKey) { + StepKey allocateKey = new StepKey(phase, NAME, NAME); + StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); + + Settings.Builder newSettings = Settings.builder(); + if (numberOfReplicas != null) { + newSettings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas); + } + include.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + key, value)); + exclude.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + key, value)); + require.forEach((key, value) -> newSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + key, value)); + UpdateSettingsStep allocateStep = new UpdateSettingsStep(allocateKey, allocationRoutedKey, client, newSettings.build()); + AllocationRoutedStep routedCheckStep = new AllocationRoutedStep(allocationRoutedKey, nextStepKey); + return Arrays.asList(allocateStep, routedCheckStep); + } + + @Override + public List toStepKeys(String phase) { + StepKey allocateKey = new StepKey(phase, NAME, NAME); + StepKey allocationRoutedKey = new StepKey(phase, NAME, AllocationRoutedStep.NAME); + return Arrays.asList(allocateKey, allocationRoutedKey); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfReplicas, include, exclude, require); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + AllocateAction other = (AllocateAction) obj; + return Objects.equals(numberOfReplicas, other.numberOfReplicas) && + Objects.equals(include, other.include) && + Objects.equals(exclude, other.exclude) && + Objects.equals(require, other.require); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java new file mode 100644 index 0000000000000..b97f7168a7f2b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStep.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
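
To make the action-to-settings mapping in toSteps concrete: the UpdateSettingsStep applies index.number_of_replicas plus one index.routing.allocation.{include,exclude,require}.* entry per attribute, and the AllocationRoutedStep then waits for the shards to comply. A sketch, with illustrative attribute values:

---------------------------------------------
import java.util.Collections;

import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction;

public class AllocateActionExample {

    public static AllocateAction warmAllocation() {
        // Expands to an UpdateSettingsStep applying the equivalent of:
        //   index.number_of_replicas: 1
        //   index.routing.allocation.require.box_type: warm   ("box_type" is illustrative)
        // followed by an AllocationRoutedStep that waits for the routing to take effect.
        return new AllocateAction(
            1,                                              // number_of_replicas
            null,                                           // include (defaults to empty map)
            null,                                           // exclude (defaults to empty map)
            Collections.singletonMap("box_type", "warm"));  // require
    }
}
---------------------------------------------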
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenIntMap; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class AllocationRoutedStep extends ClusterStateWaitStep { + public static final String NAME = "check-allocation"; + + private static final Logger logger = LogManager.getLogger(AllocationRoutedStep.class); + + private static final AllocationDeciders ALLOCATION_DECIDERS = new AllocationDeciders(Collections.singletonList( + new FilterAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)))); + + AllocationRoutedStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + IndexMetaData idxMeta = clusterState.metaData().index(index); + if (idxMeta == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + return new Result(false, null); + } + if (ActiveShardCount.ALL.enoughShardsActive(clusterState, index.getName()) == false) { + logger.debug("[{}] lifecycle action for index [{}] cannot make progress because not all shards are active", + getKey().getAction(), index.getName()); + return new Result(false, new Info(idxMeta.getNumberOfReplicas(), -1, false)); + } + // All the allocation attributes are already set so just need to check + // if the allocation has happened + RoutingAllocation allocation = new RoutingAllocation(ALLOCATION_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, + System.nanoTime()); + + int allocationPendingAllShards = 0; + + ImmutableOpenIntMap allShards = clusterState.getRoutingTable().index(index).getShards(); + for (ObjectCursor shardRoutingTable : allShards.values()) { + for (ShardRouting shardRouting : shardRoutingTable.value.shards()) { + String currentNodeId = shardRouting.currentNodeId(); + boolean canRemainOnCurrentNode = ALLOCATION_DECIDERS + .canRemain(shardRouting, clusterState.getRoutingNodes().node(currentNodeId), allocation) + .type() == Decision.Type.YES; + if (canRemainOnCurrentNode == false) { + allocationPendingAllShards++; + } + } + } + + if (allocationPendingAllShards > 0) { + logger.debug("{} 
lifecycle action [{}] waiting for [{}] shards to be allocated to nodes matching the given filters", + index, getKey().getAction(), allocationPendingAllShards); + return new Result(false, new Info(idxMeta.getNumberOfReplicas(), allocationPendingAllShards, true)); + } else { + logger.debug("{} lifecycle action for [{}] complete", index, getKey().getAction()); + return new Result(true, null); + } + } + + @Override + public int hashCode() { + return 611; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + return super.equals(obj); + } + + public static final class Info implements ToXContentObject { + + private final long actualReplicas; + private final long numberShardsLeftToAllocate; + private final boolean allShardsActive; + private final String message; + + static final ParseField ACTUAL_REPLICAS = new ParseField("actual_replicas"); + static final ParseField SHARDS_TO_ALLOCATE = new ParseField("shards_left_to_allocate"); + static final ParseField ALL_SHARDS_ACTIVE = new ParseField("all_shards_active"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("allocation_routed_step_info", + a -> new Info((long) a[0], (long) a[1], (boolean) a[2])); + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), ACTUAL_REPLICAS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_ALLOCATE); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ALL_SHARDS_ACTIVE); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(long actualReplicas, long numberShardsLeftToAllocate, boolean allShardsActive) { + this.actualReplicas = actualReplicas; + this.numberShardsLeftToAllocate = numberShardsLeftToAllocate; + this.allShardsActive = allShardsActive; + if (allShardsActive == false) { + message = "Waiting for all shard copies to be active"; + } else { + message = "Waiting for [" + numberShardsLeftToAllocate + "] shards " + + "to be allocated to nodes matching the given filters"; + } + } + + public long getActualReplicas() { + return actualReplicas; + } + + public long getNumberShardsLeftToAllocate() { + return numberShardsLeftToAllocate; + } + + public boolean allShardsActive() { + return allShardsActive; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MESSAGE.getPreferredName(), message); + builder.field(SHARDS_TO_ALLOCATE.getPreferredName(), numberShardsLeftToAllocate); + builder.field(ALL_SHARDS_ACTIVE.getPreferredName(), allShardsActive); + builder.field(ACTUAL_REPLICAS.getPreferredName(), actualReplicas); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(actualReplicas, numberShardsLeftToAllocate, allShardsActive); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Info other = (Info) obj; + return Objects.equals(actualReplicas, other.actualReplicas) && + Objects.equals(numberShardsLeftToAllocate, other.numberShardsLeftToAllocate) && + Objects.equals(allShardsActive, other.allShardsActive); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java new file mode 100644 index 0000000000000..4e35ef60a09d6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncActionStep.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +public abstract class AsyncActionStep extends Step { + + private Client client; + + public AsyncActionStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey); + this.client = client; + } + + protected Client getClient() { + return client; + } + + public boolean indexSurvives() { + return true; + } + + public abstract void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener); + + public interface Listener { + + void onResponse(boolean complete); + + void onFailure(Exception e); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java new file mode 100644 index 0000000000000..f6c968cfae41a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.xcontent.ToXContentObject; + +public abstract class AsyncWaitStep extends Step { + + private Client client; + + public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey); + this.client = client; + } + + protected Client getClient() { + return client; + } + + public abstract void evaluateCondition(IndexMetaData indexMetaData, Listener listener); + + public interface Listener { + + void onResponse(boolean conditionMet, ToXContentObject informationContext); + + void onFailure(Exception e); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStep.java new file mode 100644 index 0000000000000..bf29a595c192c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStep.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
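
Subclasses of AsyncActionStep report completion through the Listener callback instead of a return value, which keeps the actual work asynchronous on the client. A minimal hypothetical subclass showing the contract:

---------------------------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

public class NoopStep extends AsyncActionStep {

    public NoopStep(StepKey key, StepKey nextStepKey, Client client) {
        super(key, nextStepKey, client);
    }

    @Override
    public void performAction(IndexMetaData indexMetaData, ClusterState currentClusterState, Listener listener) {
        // Nothing to do for this hypothetical step: report success so the index
        // moves on to getNextStepKey(). A real step would issue a client call
        // here and complete the listener from its ActionListener, as DeleteStep
        // below does.
        listener.onResponse(true);
    }
}
---------------------------------------------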
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * This step is used prior to running a shrink step in order to ensure that the index being shrunk + * has a copy of each shard allocated on one particular node (the node used by the require + * parameter) and that the shards are not relocating. + */ +public class CheckShrinkReadyStep extends ClusterStateWaitStep { + public static final String NAME = "check-shrink-allocation"; + + private static final Logger logger = LogManager.getLogger(CheckShrinkReadyStep.class); + + CheckShrinkReadyStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + IndexMetaData idxMeta = clusterState.metaData().index(index); + + if (idxMeta == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", + getKey().getAction(), index.getName()); + return new Result(false, null); + } + + // How many shards the node should have + int expectedShardCount = idxMeta.getNumberOfShards(); + + if (ActiveShardCount.ALL.enoughShardsActive(clusterState, index.getName()) == false) { + logger.debug("[{}] shrink action for [{}] cannot make progress because not all shards are active", + getKey().getAction(), index.getName()); + return new Result(false, new CheckShrinkReadyStep.Info("", expectedShardCount, -1)); + } + + // The id of the node the shards should be on + final String idShardsShouldBeOn = idxMeta.getSettings().get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id"); + if (idShardsShouldBeOn == null) { + throw new IllegalStateException("Cannot check shrink allocation as there are no allocation rules by _id"); + } + + final IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index); + int foundShards = 0; + for (ShardRouting shard : routingTable.shardsWithState(ShardRoutingState.STARTED)) { + final String currentNodeId = shard.currentNodeId(); + if (idShardsShouldBeOn.equals(currentNodeId) && shard.relocating() == false) { + foundShards++; + } + } + + logger.trace("{} checking for shrink readiness on [{}], found {} shards and need {}", + index, idShardsShouldBeOn, foundShards, expectedShardCount); + + if (foundShards == expectedShardCount) { + logger.trace("{} successfully found {} allocated shards for shrink readiness on node [{}] ({})", + index, expectedShardCount, idShardsShouldBeOn, getKey().getAction()); + return new Result(true, null); + } else { + logger.trace("{} failed to find {} allocated shards (found {}) on node [{}] for shrink readiness ({})", + index, expectedShardCount, foundShards, idShardsShouldBeOn, 
getKey().getAction()); + return new Result(false, new CheckShrinkReadyStep.Info(idShardsShouldBeOn, expectedShardCount, + expectedShardCount - foundShards)); + } + } + + @Override + public int hashCode() { + return 612; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + return super.equals(obj); + } + + public static final class Info implements ToXContentObject { + + private final String nodeId; + private final long actualReplicas; + private final long numberShardsLeftToAllocate; + private final String message; + + static final ParseField NODE_ID = new ParseField("node_id"); + static final ParseField EXPECTED_SHARDS = new ParseField("expected_shards"); + static final ParseField SHARDS_TO_ALLOCATE = new ParseField("shards_left_to_allocate"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "check_shrink_ready_step_info", a -> new CheckShrinkReadyStep.Info((String) a[0], (long) a[1], (long) a[2])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NODE_ID); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), EXPECTED_SHARDS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_ALLOCATE); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(String nodeId, long expectedShards, long numberShardsLeftToAllocate) { + this.nodeId = nodeId; + this.actualReplicas = expectedShards; + this.numberShardsLeftToAllocate = numberShardsLeftToAllocate; + if (numberShardsLeftToAllocate < 0) { + this.message = "Waiting for all shards to become active"; + } else { + this.message = String.format(Locale.ROOT, "Waiting for node [%s] to contain [%d] shards, found [%d], remaining [%d]", + nodeId, expectedShards, expectedShards - numberShardsLeftToAllocate, numberShardsLeftToAllocate); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MESSAGE.getPreferredName(), message); + builder.field(NODE_ID.getPreferredName(), nodeId); + builder.field(SHARDS_TO_ALLOCATE.getPreferredName(), numberShardsLeftToAllocate); + builder.field(EXPECTED_SHARDS.getPreferredName(), actualReplicas); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, actualReplicas, numberShardsLeftToAllocate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CheckShrinkReadyStep.Info other = (CheckShrinkReadyStep.Info) obj; + return Objects.equals(actualReplicas, other.actualReplicas) && + Objects.equals(numberShardsLeftToAllocate, other.numberShardsLeftToAllocate) && + Objects.equals(nodeId, other.nodeId); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java new file mode 100644 index 0000000000000..ae64de497886a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateActionStep.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.index.Index; + +public abstract class ClusterStateActionStep extends Step { + + public ClusterStateActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public abstract ClusterState performAction(Index index, ClusterState clusterState); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java new file mode 100644 index 0000000000000..0468f75490d9e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; + +public abstract class ClusterStateWaitStep extends Step { + + public ClusterStateWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public abstract Result isConditionMet(Index index, ClusterState clusterState); + + public static class Result { + private final boolean complete; + private final ToXContentObject informationContext; + + public Result(boolean complete, ToXContentObject informationContext) { + this.complete = complete; + this.informationContext = informationContext; + } + + public boolean isComplete() { + return complete; + } + + public ToXContentObject getInformationContext() { + return informationContext; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java new file mode 100644 index 0000000000000..b8192dd7e43be --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStep.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; + +import java.util.Objects; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +/** + * Copies the execution state data from one index to another, typically after a + * new index has been created. Useful for actions such as shrink. 
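
ClusterStateWaitStep is the synchronous counterpart of AsyncWaitStep: the condition is evaluated directly against a cluster state, and an incomplete Result may carry a ToXContentObject that surfaces as step_info in the explain API (as AllocationRoutedStep.Info does above). A hypothetical minimal implementation:

---------------------------------------------
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

public class IndexGoneStep extends ClusterStateWaitStep {

    public IndexGoneStep(StepKey key, StepKey nextStepKey) {
        super(key, nextStepKey);
    }

    @Override
    public Result isConditionMet(Index index, ClusterState clusterState) {
        // Condition: the index has been deleted. No informational context is
        // needed here; steps like AllocationRoutedStep attach one to explain
        // why they are still waiting.
        boolean gone = clusterState.metaData().index(index) == null;
        return new Result(gone, null);
    }
}
---------------------------------------------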
+ */ +public class CopyExecutionStateStep extends ClusterStateActionStep { + public static final String NAME = "copy_execution_state"; + + private static final Logger logger = LogManager.getLogger(CopyExecutionStateStep.class); + + private String shrunkIndexPrefix; + + + public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) { + super(key, nextStepKey); + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + IndexMetaData indexMetaData = clusterState.metaData().index(index); + if (indexMetaData == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + return clusterState; + } + // get source index + String indexName = indexMetaData.getIndex().getName(); + // get target shrink index + String targetIndexName = shrunkIndexPrefix + indexName; + IndexMetaData targetIndexMetaData = clusterState.metaData().index(targetIndexName); + + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + String phase = lifecycleState.getPhase(); + String action = lifecycleState.getAction(); + long lifecycleDate = lifecycleState.getLifecycleDate(); + + LifecycleExecutionState.Builder relevantTargetCustomData = LifecycleExecutionState.builder(); + relevantTargetCustomData.setIndexCreationDate(lifecycleDate); + relevantTargetCustomData.setPhase(phase); + relevantTargetCustomData.setAction(action); + relevantTargetCustomData.setStep(ShrunkenIndexCheckStep.NAME); + + MetaData.Builder newMetaData = MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(targetIndexMetaData) + .putCustom(ILM_CUSTOM_METADATA_KEY, relevantTargetCustomData.build().asMap())); + + return ClusterState.builder(clusterState).metaData(newMetaData).build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + CopyExecutionStateStep that = (CopyExecutionStateStep) o; + return Objects.equals(shrunkIndexPrefix, that.shrunkIndexPrefix); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shrunkIndexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java new file mode 100644 index 0000000000000..1a0ad4c789ce4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteAction.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
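
The step derives the target index purely from the prefix, so whoever builds it must pass the same prefix the shrink machinery used when naming the new index. A sketch (the prefix value and the step keys here are illustrative assumptions):

---------------------------------------------
import org.elasticsearch.xpack.core.indexlifecycle.CopyExecutionStateStep;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

public class CopyExecutionStateExample {

    public static CopyExecutionStateStep forShrink() {
        StepKey copyKey = new StepKey("warm", "shrink", CopyExecutionStateStep.NAME);
        StepKey nextKey = new StepKey("warm", "shrink", "aliases"); // hypothetical next step name
        // For a source index "logs-000001" this copies phase, action and the
        // lifecycle date into the ILM custom metadata of "shrink-logs-000001"
        // and parks the new index at ShrunkenIndexCheckStep.NAME.
        return new CopyExecutionStateStep(copyKey, nextKey, "shrink-"); // prefix is illustrative
    }
}
---------------------------------------------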
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A {@link LifecycleAction} which deletes the index. + */ +public class DeleteAction implements LifecycleAction { + public static final String NAME = "delete"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + + public static DeleteAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public DeleteAction() { + } + + public DeleteAction(StreamInput in) throws IOException { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME); + return Collections.singletonList(new DeleteStep(deleteStepKey, nextStepKey, client)); + } + + @Override + public List toStepKeys(String phase) { + return Collections.singletonList(new Step.StepKey(phase, NAME, DeleteStep.NAME)); + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java new file mode 100644 index 0000000000000..b5ae441388419 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStep.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
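
Every LifecycleAction expands the same way: toSteps receives the phase name and the key of whatever follows, and chains its own steps in front of that key. DeleteAction is the simplest case, producing only the DeleteStep shown next. A sketch (the terminal key is hypothetical):

---------------------------------------------
import java.util.List;

import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction;
import org.elasticsearch.xpack.core.indexlifecycle.Step;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

public class DeleteActionExample {

    public static List<Step> expand(Client client) {
        StepKey next = new StepKey("delete", "delete", "terminal"); // hypothetical terminal key
        // Yields a single DeleteStep whose nextStepKey is `next`.
        return new DeleteAction().toSteps(client, "delete", next);
    }
}
---------------------------------------------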
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +public class DeleteStep extends AsyncActionStep { + public static final String NAME = "delete"; + + public DeleteStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + getClient().admin().indices() + .delete(new DeleteIndexRequest(indexMetaData.getIndex().getName()), + ActionListener.wrap(response -> listener.onResponse(true) , listener::onFailure)); + } + + @Override + public boolean indexSurvives() { + return false; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java new file mode 100644 index 0000000000000..50ad0155dff29 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStep.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +public class ErrorStep extends Step { + public static final String NAME = "ERROR"; + + public ErrorStep(StepKey key) { + super(key, key); + if (NAME.equals(key.getName()) == false) { + throw new IllegalArgumentException("An error step must have a step key whose step name is " + NAME); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java new file mode 100644 index 0000000000000..037de2d505292 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.info.ClusterInfoRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * The request object used by the Explain Lifecycle API. 
+ * + * Multiple indices may be queried in the same request using the + * {@link #indices(String...)} method + */ +public class ExplainLifecycleRequest extends ClusterInfoRequest { + + public ExplainLifecycleRequest() { + super(); + } + + public ExplainLifecycleRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; + return Objects.deepEquals(indices(), other.indices()) && + Objects.equals(indicesOptions(), other.indicesOptions()); + } + + @Override + public String toString() { + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java new file mode 100644 index 0000000000000..915ca17cb43a4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponse.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The response object returned by the Explain Lifecycle API. + * + * Since the API can be run over multiple indices the response provides a map of + * index to the explanation of the lifecycle status for that index. 
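
Since the request inherits the indices(String...) and indicesOptions() machinery from ClusterInfoRequest, callers only have to name the indices they want explained. A sketch with illustrative index names:

---------------------------------------------
import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest;

public class ExplainRequestExample {

    public static ExplainLifecycleRequest forLogs() {
        ExplainLifecycleRequest request = new ExplainLifecycleRequest();
        request.indices("logs-000001", "logs-000002"); // inherited from ClusterInfoRequest
        return request;
    }
}
---------------------------------------------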
+ */ +public class ExplainLifecycleResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField INDICES_FIELD = new ParseField("indices"); + + private Map indexResponses; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", a -> new ExplainLifecycleResponse(((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + INDICES_FIELD); + } + + public static ExplainLifecycleResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ExplainLifecycleResponse() { + } + + public ExplainLifecycleResponse(Map indexResponses) { + this.indexResponses = indexResponses; + } + + /** + * @return a map of the responses from each requested index. The map's key is + * the index name and the value is the + * {@link IndexLifecycleExplainResponse} describing the current + * lifecycle status of that index + */ + public Map getIndexResponses() { + return indexResponses; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(INDICES_FIELD.getPreferredName()); + for (IndexLifecycleExplainResponse indexResponse : indexResponses.values()) { + builder.field(indexResponse.getIndex(), indexResponse); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + int size = in.readVInt(); + Map indexResponses = new HashMap<>(size); + for (int i = 0; i < size; i++) { + IndexLifecycleExplainResponse indexResponse = new IndexLifecycleExplainResponse(in); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + this.indexResponses = indexResponses; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(indexResponses.size()); + for (IndexLifecycleExplainResponse e : indexResponses.values()) { + e.writeTo(out); + } + } + + @Override + public int hashCode() { + return Objects.hash(indexResponses); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleResponse other = (ExplainLifecycleResponse) obj; + return Objects.equals(indexResponses, other.indexResponses); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java new file mode 100644 index 0000000000000..2c4508a8355f0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeAction.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
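
A sketch of assembling the response for one unmanaged index; run through toXContent above, each per-index explanation nests under "indices", keyed by index name (the JSON in the comment is an approximation):

---------------------------------------------
import java.util.Collections;

import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse;
import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse;

public class ExplainResponseExample {

    public static ExplainLifecycleResponse forUnmanagedIndex() {
        IndexLifecycleExplainResponse entry =
                IndexLifecycleExplainResponse.newUnmanagedIndexResponse("my-index");
        // Roughly renders as:
        //   {"indices": {"my-index": {"index": "my-index", "managed": false}}}
        return new ExplainLifecycleResponse(Collections.singletonMap("my-index", entry));
    }
}
---------------------------------------------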
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A {@link LifecycleAction} which force-merges the index. + */ +public class ForceMergeAction implements LifecycleAction { + public static final String NAME = "forcemerge"; + public static final ParseField MAX_NUM_SEGMENTS_FIELD = new ParseField("max_num_segments"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + false, a -> { + int maxNumSegments = (int) a[0]; + return new ForceMergeAction(maxNumSegments); + }); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_NUM_SEGMENTS_FIELD); + } + + private final int maxNumSegments; + + public static ForceMergeAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ForceMergeAction(int maxNumSegments) { + if (maxNumSegments <= 0) { + throw new IllegalArgumentException("[" + MAX_NUM_SEGMENTS_FIELD.getPreferredName() + + "] must be a positive integer"); + } + this.maxNumSegments = maxNumSegments; + } + + public ForceMergeAction(StreamInput in) throws IOException { + this.maxNumSegments = in.readVInt(); + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(maxNumSegments); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MAX_NUM_SEGMENTS_FIELD.getPreferredName(), maxNumSegments); + builder.endObject(); + return builder; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); + + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); + StepKey countKey = new StepKey(phase, NAME, SegmentCountStep.NAME); + + UpdateSettingsStep readOnlyStep = new UpdateSettingsStep(readOnlyKey, forceMergeKey, client, readOnlySettings); + ForceMergeStep forceMergeStep = new ForceMergeStep(forceMergeKey, countKey, client, maxNumSegments); + SegmentCountStep segmentCountStep = new SegmentCountStep(countKey, nextStepKey, client, maxNumSegments); + return Arrays.asList(readOnlyStep, forceMergeStep, segmentCountStep); + } + + @Override + public List toStepKeys(String phase) { + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey forceMergeKey = new StepKey(phase, NAME, ForceMergeStep.NAME); + StepKey countKey = new StepKey(phase, NAME, SegmentCountStep.NAME); + return Arrays.asList(readOnlyKey, 
forceMergeKey, countKey); + } + + @Override + public int hashCode() { + return Objects.hash(maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ForceMergeAction other = (ForceMergeAction) obj; + return Objects.equals(maxNumSegments, other.maxNumSegments); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java new file mode 100644 index 0000000000000..776043babf0fc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStep.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +import java.util.Objects; + +public class ForceMergeStep extends AsyncActionStep { + public static final String NAME = "forcemerge"; + private final int maxNumSegments; + + public ForceMergeStep(StepKey key, StepKey nextStepKey, Client client, int maxNumSegments) { + super(key, nextStepKey, client); + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + ForceMergeRequest request = new ForceMergeRequest(indexMetaData.getIndex().getName()); + request.maxNumSegments(maxNumSegments); + getClient().admin().indices() + .forceMerge(request, ActionListener.wrap(response -> listener.onResponse(true), + listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ForceMergeStep other = (ForceMergeStep) obj; + return super.equals(obj) && + Objects.equals(maxNumSegments, other.maxNumSegments); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java new file mode 100644 index 0000000000000..7e97170998a81 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleExplainResponse.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
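
The expansion above chains three steps that share one knob: mark the index read-only (index.blocks.write), run the force-merge with max_num_segments, then verify the resulting segment count. Constructing the action is a one-liner; a sketch:

---------------------------------------------
import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction;

public class ForceMergeActionExample {

    public static ForceMergeAction singleSegment() {
        // max_num_segments must be positive; 1 collapses each shard to one segment.
        return new ForceMergeAction(1);
    }
}
---------------------------------------------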
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Objects; + +public class IndexLifecycleExplainResponse implements ToXContentObject, Writeable { + + private static final ParseField INDEX_FIELD = new ParseField("index"); + private static final ParseField MANAGED_BY_ILM_FIELD = new ParseField("managed"); + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField LIFECYCLE_DATE_MILLIS_FIELD = new ParseField("lifecycle_date_millis"); + private static final ParseField LIFECYCLE_DATE_FIELD = new ParseField("lifecycle_date"); + private static final ParseField PHASE_FIELD = new ParseField("phase"); + private static final ParseField ACTION_FIELD = new ParseField("action"); + private static final ParseField STEP_FIELD = new ParseField("step"); + private static final ParseField FAILED_STEP_FIELD = new ParseField("failed_step"); + private static final ParseField PHASE_TIME_MILLIS_FIELD = new ParseField("phase_time_millis"); + private static final ParseField PHASE_TIME_FIELD = new ParseField("phase_time"); + private static final ParseField ACTION_TIME_MILLIS_FIELD = new ParseField("action_time_millis"); + private static final ParseField ACTION_TIME_FIELD = new ParseField("action_time"); + private static final ParseField STEP_TIME_MILLIS_FIELD = new ParseField("step_time_millis"); + private static final ParseField STEP_TIME_FIELD = new ParseField("step_time"); + private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); + private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_lifecycle_explain_response", + a -> new IndexLifecycleExplainResponse( + (String) a[0], + (boolean) a[1], + (String) a[2], + (Long) (a[3]), + (String) a[4], + (String) a[5], + (String) a[6], + (String) a[7], + (Long) (a[8]), + (Long) (a[9]), + (Long) (a[10]), + (BytesReference) a[11], + (PhaseExecutionInfo) a[12])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), POLICY_NAME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LIFECYCLE_DATE_MILLIS_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), STEP_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), 
PHASE_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ACTION_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), STEP_TIME_MILLIS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.copyCurrentStructure(p); + return BytesArray.bytes(builder); + }, STEP_INFO_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), + PHASE_EXECUTION_INFO); + } + + private final String index; + private final String policyName; + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final Long lifecycleDate; + private final Long phaseTime; + private final Long actionTime; + private final Long stepTime; + private final boolean managedByILM; + private final BytesReference stepInfo; + private final PhaseExecutionInfo phaseExecutionInfo; + + public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, Long lifecycleDate, + String phase, String action, String step, String failedStep, Long phaseTime, Long actionTime, Long stepTime, + BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, + actionTime, stepTime, stepInfo, phaseExecutionInfo); + } + + public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) { + return new IndexLifecycleExplainResponse(index, false, null, null, null, null, null, null, null, null, null, null, null); + } + + private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, Long lifecycleDate, + String phase, String action, String step, String failedStep, Long phaseTime, Long actionTime, + Long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + if (managedByILM) { + if (policyName == null) { + throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); + } + } else { + if (policyName != null || lifecycleDate != null || phase != null || action != null || step != null || failedStep != null + || phaseTime != null || actionTime != null || stepTime != null || stepInfo != null || phaseExecutionInfo != null) { + throw new IllegalArgumentException( + "Unmanaged index response must only contain fields: [" + MANAGED_BY_ILM_FIELD + ", " + INDEX_FIELD + "]"); + } + } + this.index = index; + this.policyName = policyName; + this.managedByILM = managedByILM; + this.lifecycleDate = lifecycleDate; + this.phase = phase; + this.action = action; + this.step = step; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseExecutionInfo = phaseExecutionInfo; + } + + public IndexLifecycleExplainResponse(StreamInput in) throws IOException { + index = in.readString(); + managedByILM = in.readBoolean(); + if (managedByILM) { + policyName = in.readString(); + lifecycleDate = in.readOptionalLong(); + phase = in.readOptionalString(); + action = in.readOptionalString(); + step = in.readOptionalString(); + failedStep = in.readOptionalString(); + phaseTime = in.readOptionalLong(); + actionTime = in.readOptionalLong(); + stepTime = in.readOptionalLong(); + stepInfo = 
in.readOptionalBytesReference(); + phaseExecutionInfo = in.readOptionalWriteable(PhaseExecutionInfo::new); + } else { + policyName = null; + lifecycleDate = null; + phase = null; + action = null; + step = null; + failedStep = null; + phaseTime = null; + actionTime = null; + stepTime = null; + stepInfo = null; + phaseExecutionInfo = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeBoolean(managedByILM); + if (managedByILM) { + out.writeString(policyName); + out.writeOptionalLong(lifecycleDate); + out.writeOptionalString(phase); + out.writeOptionalString(action); + out.writeOptionalString(step); + out.writeOptionalString(failedStep); + out.writeOptionalLong(phaseTime); + out.writeOptionalLong(actionTime); + out.writeOptionalLong(stepTime); + out.writeOptionalBytesReference(stepInfo); + out.writeOptionalWriteable(phaseExecutionInfo); + } + } + + public String getIndex() { + return index; + } + + public boolean managedByILM() { + return managedByILM; + } + + public String getPolicyName() { + return policyName; + } + + public Long getLifecycleDate() { + return lifecycleDate; + } + + public String getPhase() { + return phase; + } + + public Long getPhaseTime() { + return phaseTime; + } + + public String getAction() { + return action; + } + + public Long getActionTime() { + return actionTime; + } + + public String getStep() { + return step; + } + + public Long getStepTime() { + return stepTime; + } + + public String getFailedStep() { + return failedStep; + } + + public BytesReference getStepInfo() { + return stepInfo; + } + + public PhaseExecutionInfo getPhaseExecutionInfo() { + return phaseExecutionInfo; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_FIELD.getPreferredName(), index); + builder.field(MANAGED_BY_ILM_FIELD.getPreferredName(), managedByILM); + if (managedByILM) { + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + if (lifecycleDate != null) { + builder.timeField(LIFECYCLE_DATE_MILLIS_FIELD.getPreferredName(), LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + } + builder.field(PHASE_FIELD.getPreferredName(), phase); + if (phaseTime != null) { + builder.timeField(PHASE_TIME_MILLIS_FIELD.getPreferredName(), PHASE_TIME_FIELD.getPreferredName(), phaseTime); + } + builder.field(ACTION_FIELD.getPreferredName(), action); + if (actionTime != null) { + builder.timeField(ACTION_TIME_MILLIS_FIELD.getPreferredName(), ACTION_TIME_FIELD.getPreferredName(), actionTime); + } + builder.field(STEP_FIELD.getPreferredName(), step); + if (stepTime != null) { + builder.timeField(STEP_TIME_MILLIS_FIELD.getPreferredName(), STEP_TIME_FIELD.getPreferredName(), stepTime); + } + if (Strings.hasLength(failedStep)) { + builder.field(FAILED_STEP_FIELD.getPreferredName(), failedStep); + } + if (stepInfo != null && stepInfo.length() > 0) { + builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON); + } + if (phaseExecutionInfo != null) { + builder.field(PHASE_EXECUTION_INFO.getPreferredName(), phaseExecutionInfo); + } + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, actionTime, + stepTime, stepInfo, phaseExecutionInfo); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + 
} + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleExplainResponse other = (IndexLifecycleExplainResponse) obj; + return Objects.equals(index, other.index) && + Objects.equals(managedByILM, other.managedByILM) && + Objects.equals(policyName, other.policyName) && + Objects.equals(lifecycleDate, other.lifecycleDate) && + Objects.equals(phase, other.phase) && + Objects.equals(action, other.action) && + Objects.equals(step, other.step) && + Objects.equals(failedStep, other.failedStep) && + Objects.equals(phaseTime, other.phaseTime) && + Objects.equals(actionTime, other.actionTime) && + Objects.equals(stepTime, other.stepTime) && + Objects.equals(stepInfo, other.stepInfo) && + Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsage.java new file mode 100644 index 0000000000000..f0dd25eabac1f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsage.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class IndexLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { + + private List policyStats; + + public IndexLifecycleFeatureSetUsage(StreamInput input) throws IOException { + super(input); + if (input.readBoolean()) { + policyStats = input.readList(PolicyStats::new); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + boolean hasPolicyStats = policyStats != null; + out.writeBoolean(hasPolicyStats); + if (hasPolicyStats) { + out.writeList(policyStats); + } + } + + public IndexLifecycleFeatureSetUsage(boolean available, boolean enabled) { + this(available, enabled, null); + } + + public IndexLifecycleFeatureSetUsage(boolean available, boolean enabled, List policyStats) { + super(XPackField.INDEX_LIFECYCLE, available, enabled); + this.policyStats = policyStats; + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + if (policyStats != null) { + builder.field("policy_count", policyStats.size()); + builder.field("policy_stats", policyStats); + } + } + + public List getPolicyStats() { + return policyStats; + } + + @Override + public int hashCode() { + return Objects.hash(available, enabled, policyStats); + } + + @Override + public boolean 
equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + IndexLifecycleFeatureSetUsage other = (IndexLifecycleFeatureSetUsage) obj; + return Objects.equals(available, other.available) && + Objects.equals(enabled, other.enabled) && + Objects.equals(policyStats, other.policyStats); + } + + public static final class PolicyStats implements ToXContentObject, Writeable { + + public static final ParseField INDICES_MANAGED_FIELD = new ParseField("indices_managed"); + + private final Map phaseStats; + private final int indicesManaged; + + public PolicyStats(Map phaseStats, int numberIndicesManaged) { + this.phaseStats = phaseStats; + this.indicesManaged = numberIndicesManaged; + } + + public PolicyStats(StreamInput in) throws IOException { + this.phaseStats = in.readMap(StreamInput::readString, PhaseStats::new); + this.indicesManaged = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseStats, StreamOutput::writeString, (o, p) -> p.writeTo(o)); + out.writeVInt(indicesManaged); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(LifecyclePolicy.PHASES_FIELD.getPreferredName(), phaseStats); + builder.field(INDICES_MANAGED_FIELD.getPreferredName(), indicesManaged); + builder.endObject(); + return builder; + } + + public Map getPhaseStats() { + return phaseStats; + } + + public int getIndicesManaged() { + return indicesManaged; + } + + @Override + public int hashCode() { + return Objects.hash(phaseStats, indicesManaged); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PolicyStats other = (PolicyStats) obj; + return Objects.equals(phaseStats, other.phaseStats) && + Objects.equals(indicesManaged, other.indicesManaged); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public static final class PhaseStats implements ToXContentObject, Writeable { + private final String[] actionNames; + private final TimeValue minimumAge; + + public PhaseStats(TimeValue after, String[] actionNames) { + this.actionNames = actionNames; + this.minimumAge = after; + } + + public PhaseStats(StreamInput in) throws IOException { + actionNames = in.readStringArray(); + minimumAge = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(actionNames); + out.writeTimeValue(minimumAge); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Phase.MIN_AGE.getPreferredName(), minimumAge.getMillis()); + builder.field(Phase.ACTIONS_FIELD.getPreferredName(), actionNames); + builder.endObject(); + return builder; + } + + public String[] getActionNames() { + return actionNames; + } + + public TimeValue getAfter() { + return minimumAge; + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(actionNames), minimumAge); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PhaseStats other = (PhaseStats) obj; + return Objects.equals(minimumAge, other.minimumAge) && + Objects.deepEquals(actionNames, other.actionNames); + } + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java new file mode 100644 index 0000000000000..b2322dd326823 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackPlugin.XPackMetaDataCustom; + +import java.io.IOException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; + + +public class IndexLifecycleMetadata implements XPackMetaDataCustom { + public static final String TYPE = "index_lifecycle"; + public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); + public static final ParseField POLICIES_FIELD = new ParseField("policies"); + public static final IndexLifecycleMetadata EMPTY = new IndexLifecycleMetadata(Collections.emptySortedMap(), OperationMode.RUNNING); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, + a -> new IndexLifecycleMetadata( + ((List) a[0]).stream() + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())), + OperationMode.valueOf((String) a[1]))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> LifecyclePolicyMetadata.parse(p, n), + v -> { + throw new IllegalArgumentException("ordered " + POLICIES_FIELD.getPreferredName() + " are not supported"); + }, POLICIES_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), OPERATION_MODE_FIELD); + } + + private final Map policyMetadatas; + private final OperationMode operationMode; + + public IndexLifecycleMetadata(Map policies, OperationMode operationMode) { + this.policyMetadatas = Collections.unmodifiableMap(policies); + this.operationMode = operationMode; + } + + public IndexLifecycleMetadata(StreamInput in) throws IOException { + int size = in.readVInt(); + TreeMap policies = new TreeMap<>(); + for (int i = 0; i < size; i++) { + policies.put(in.readString(), new LifecyclePolicyMetadata(in)); + } + this.policyMetadatas = policies; + this.operationMode = in.readEnum(OperationMode.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(policyMetadatas.size()); + for 
(Map.Entry<String, LifecyclePolicyMetadata> entry : policyMetadatas.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + out.writeEnum(operationMode); + } + + public Map<String, LifecyclePolicyMetadata> getPolicyMetadatas() { + return policyMetadatas; + } + + public OperationMode getOperationMode() { + return operationMode; + } + + public Map<String, LifecyclePolicy> getPolicies() { + return policyMetadatas.values().stream().map(LifecyclePolicyMetadata::getPolicy) + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + } + + @Override + public Diff<Custom> diff(Custom previousState) { + return new IndexLifecycleMetadataDiff((IndexLifecycleMetadata) previousState, this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(POLICIES_FIELD.getPreferredName(), policyMetadatas); + builder.field(OPERATION_MODE_FIELD.getPreferredName(), operationMode); + return builder; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_7_0_0_alpha1; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public EnumSet<MetaData.XContentContext> context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public int hashCode() { + return Objects.hash(policyMetadatas, operationMode); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleMetadata other = (IndexLifecycleMetadata) obj; + return Objects.equals(policyMetadatas, other.policyMetadatas) + && Objects.equals(operationMode, other.operationMode); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static class IndexLifecycleMetadataDiff implements NamedDiff<MetaData.Custom> { + + final Diff<Map<String, LifecyclePolicyMetadata>> policies; + final OperationMode operationMode; + + IndexLifecycleMetadataDiff(IndexLifecycleMetadata before, IndexLifecycleMetadata after) { + this.policies = DiffableUtils.diff(before.policyMetadatas, after.policyMetadatas, DiffableUtils.getStringKeySerializer()); + this.operationMode = after.operationMode; + } + + public IndexLifecycleMetadataDiff(StreamInput in) throws IOException { + this.policies = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), LifecyclePolicyMetadata::new, + IndexLifecycleMetadataDiff::readLifecyclePolicyDiffFrom); + this.operationMode = in.readEnum(OperationMode.class); + } + + @Override + public MetaData.Custom apply(MetaData.Custom part) { + TreeMap<String, LifecyclePolicyMetadata> newPolicies = new TreeMap<>( + policies.apply(((IndexLifecycleMetadata) part).policyMetadatas)); + return new IndexLifecycleMetadata(newPolicies, this.operationMode); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + policies.writeTo(out); + out.writeEnum(operationMode); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + static Diff<LifecyclePolicyMetadata> readLifecyclePolicyDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(LifecyclePolicyMetadata::new, in); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java new file mode 100644 index 0000000000000..c9046cb5eb7ec --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStep.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +public final class InitializePolicyContextStep extends ClusterStateActionStep { + public static final String INITIALIZATION_PHASE = "new"; + public static final StepKey KEY = new StepKey(INITIALIZATION_PHASE, "init", "init"); + private static final Logger logger = LogManager.getLogger(InitializePolicyContextStep.class); + + public InitializePolicyContextStep(Step.StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + IndexMetaData indexMetaData = clusterState.getMetaData().index(index); + if (indexMetaData == null) { + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + // Index must have been since deleted, ignore it + return clusterState; + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState + .fromIndexMetadata(indexMetaData); + if (lifecycleState.getLifecycleDate() != null) { + return clusterState; + } + + ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); + + LifecycleExecutionState.Builder newCustomData = LifecycleExecutionState.builder(lifecycleState); + newCustomData.setIndexCreationDate(indexMetaData.getCreationDate()); + newClusterStateBuilder.metaData(MetaData.builder(clusterState.getMetaData()).put(IndexMetaData + .builder(indexMetaData) + .putCustom(ILM_CUSTOM_METADATA_KEY, newCustomData.build().asMap()))); + return newClusterStateBuilder.build(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java new file mode 100644 index 0000000000000..3e84813274d83 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.List; + +/** + * Executes an action on an index related to its lifecycle. + */ +public interface LifecycleAction extends ToXContentObject, NamedWriteable { + + /** + * converts the {@link LifecycleAction}'s execution plan into a series of + * {@link Step}s that reference each other to preserve order of operations. 
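+ * For example, the {@link ForceMergeAction} above compiles into an {@link UpdateSettingsStep} that marks the index read-only, then a {@link ForceMergeStep}, then a {@link SegmentCountStep}, in that order. + *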
+ * @param client the client that will be used by {@link AsyncActionStep} and {@link AsyncWaitStep} steps + * @param phase the name of the phase this action is being executed within + * @param nextStepKey the next step to execute after this action's steps. If null, then there are no further + * steps to run. It is the responsibility of each {@link LifecycleAction} to implement this + * correctly and not forget to link to this final step so that the policy can continue. + * @return an ordered list of steps that represent the execution plan of the action + */ + List toSteps(Client client, String phase, @Nullable Step.StepKey nextStepKey); + + /** + * + * @param phase + * the name of the phase this action is being executed within + * @return the {@link StepKey}s for the steps which will be executed in this + * action + */ + List toStepKeys(String phase); + + /** + * @return true if this action is considered safe. An action is not safe if + * it will produce unwanted side effects or will get stuck when the + * action configuration is changed while an index is in this action + */ + boolean isSafeAction(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java new file mode 100644 index 0000000000000..b2d42bca7338e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionState.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Contains information about the execution of a lifecycle policy for a single + * index, and serializes/deserializes this information to and from custom + * index metadata. 
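+ * <p> + * A sketch of the serialized form stored under the {@code ilm} custom metadata key (field values are illustrative, not defaults): + * <pre> + * "ilm" : { + * "phase" : "warm", + * "action" : "forcemerge", + * "step" : "forcemerge", + * "creation_date" : "1538475653281" + * } + * </pre>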
+ */ +public class LifecycleExecutionState { + public static final String ILM_CUSTOM_METADATA_KEY = "ilm"; + + private static final String PHASE = "phase"; + private static final String ACTION = "action"; + private static final String STEP = "step"; + private static final String INDEX_CREATION_DATE = "creation_date"; + private static final String PHASE_TIME = "phase_time"; + private static final String ACTION_TIME = "action_time"; + private static final String STEP_TIME = "step_time"; + private static final String FAILED_STEP = "failed_step"; + private static final String STEP_INFO = "step_info"; + private static final String PHASE_DEFINITION = "phase_definition"; + + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final String stepInfo; + private final String phaseDefinition; + private final Long lifecycleDate; + private final Long phaseTime; + private final Long actionTime; + private final Long stepTime; + + private LifecycleExecutionState(String phase, String action, String step, String failedStep, + String stepInfo, String phaseDefinition, Long lifecycleDate, + Long phaseTime, Long actionTime, Long stepTime) { + this.phase = phase; + this.action = action; + this.step = step; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseDefinition = phaseDefinition; + this.lifecycleDate = lifecycleDate; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + } + + /** + * Retrieves the execution state from an {@link IndexMetaData} based on the + * custom metadata. + * @param indexMetaData The metadata of the index to retrieve the execution + * state from. + * @return The execution state of that index. + */ + public static LifecycleExecutionState fromIndexMetadata(IndexMetaData indexMetaData) { + Map customData = indexMetaData.getCustomData(ILM_CUSTOM_METADATA_KEY); + customData = customData == null ? new HashMap<>() : customData; + return fromCustomMetadata(customData); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(LifecycleExecutionState state) { + return new Builder() + .setPhase(state.phase) + .setAction(state.action) + .setStep(state.step) + .setFailedStep(state.failedStep) + .setStepInfo(state.stepInfo) + .setPhaseDefinition(state.phaseDefinition) + .setIndexCreationDate(state.lifecycleDate) + .setPhaseTime(state.phaseTime) + .setActionTime(state.actionTime) + .setStepTime(state.stepTime); + } + + static LifecycleExecutionState fromCustomMetadata(Map customData) { + Builder builder = builder(); + if (customData.containsKey(PHASE)) { + builder.setPhase(customData.get(PHASE)); + } + if (customData.containsKey(ACTION)) { + builder.setAction(customData.get(ACTION)); + } + if (customData.containsKey(STEP)) { + builder.setStep(customData.get(STEP)); + } + if (customData.containsKey(FAILED_STEP)) { + builder.setFailedStep(customData.get(FAILED_STEP)); + } + if (customData.containsKey(STEP_INFO)) { + builder.setStepInfo(customData.get(STEP_INFO)); + } + if (customData.containsKey(PHASE_DEFINITION)) { + builder.setPhaseDefinition(customData.get(PHASE_DEFINITION)); + } + if (customData.containsKey(INDEX_CREATION_DATE)) { + try { + builder.setIndexCreationDate(Long.parseLong(customData.get(INDEX_CREATION_DATE))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. 
Actual value: [{}]", + e, INDEX_CREATION_DATE, customData.get(INDEX_CREATION_DATE)); + } + } + if (customData.containsKey(PHASE_TIME)) { + try { + builder.setPhaseTime(Long.parseLong(customData.get(PHASE_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, PHASE_TIME, customData.get(PHASE_TIME)); + } + } + if (customData.containsKey(ACTION_TIME)) { + try { + builder.setActionTime(Long.parseLong(customData.get(ACTION_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, ACTION_TIME, customData.get(ACTION_TIME)); + } + } + if (customData.containsKey(STEP_TIME)) { + try { + builder.setStepTime(Long.parseLong(customData.get(STEP_TIME))); + } catch (NumberFormatException e) { + throw new ElasticsearchException("Custom metadata field [{}] does not contain a valid long. Actual value: [{}]", + e, STEP_TIME, customData.get(STEP_TIME)); + } + } + return builder.build(); + } + + /** + * Converts this object to an immutable map representation for use with + * {@link IndexMetaData.Builder#putCustom(String, Map)}. + * @return An immutable Map representation of this execution state. + */ + public Map asMap() { + Map result = new HashMap<>(); + if (phase != null) { + result.put(PHASE, phase); + } + if (action != null) { + result.put(ACTION, action); + } + if (step != null) { + result.put(STEP, step); + } + if (failedStep != null) { + result.put(FAILED_STEP, failedStep); + } + if (stepInfo != null) { + result.put(STEP_INFO, stepInfo); + } + if (lifecycleDate != null) { + result.put(INDEX_CREATION_DATE, String.valueOf(lifecycleDate)); + } + if (phaseTime != null) { + result.put(PHASE_TIME, String.valueOf(phaseTime)); + } + if (actionTime != null) { + result.put(ACTION_TIME, String.valueOf(actionTime)); + } + if (stepTime != null) { + result.put(STEP_TIME, String.valueOf(stepTime)); + } + if (phaseDefinition != null) { + result.put(PHASE_DEFINITION, String.valueOf(phaseDefinition)); + } + return Collections.unmodifiableMap(result); + } + + public String getPhase() { + return phase; + } + + public String getAction() { + return action; + } + + public String getStep() { + return step; + } + + public String getFailedStep() { + return failedStep; + } + + public String getStepInfo() { + return stepInfo; + } + + public String getPhaseDefinition() { + return phaseDefinition; + } + + public Long getLifecycleDate() { + return lifecycleDate; + } + + public Long getPhaseTime() { + return phaseTime; + } + + public Long getActionTime() { + return actionTime; + } + + public Long getStepTime() { + return stepTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LifecycleExecutionState that = (LifecycleExecutionState) o; + return getLifecycleDate() == that.getLifecycleDate() && + getPhaseTime() == that.getPhaseTime() && + getActionTime() == that.getActionTime() && + getStepTime() == that.getStepTime() && + Objects.equals(getPhase(), that.getPhase()) && + Objects.equals(getAction(), that.getAction()) && + Objects.equals(getStep(), that.getStep()) && + Objects.equals(getFailedStep(), that.getFailedStep()) && + Objects.equals(getStepInfo(), that.getStepInfo()) && + Objects.equals(getPhaseDefinition(), that.getPhaseDefinition()); + } + + @Override + public int hashCode() { + return Objects.hash(getPhase(), 
getAction(), getStep(), getFailedStep(), getStepInfo(), getPhaseDefinition(), + getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime()); + } + + public static class Builder { + private String phase; + private String action; + private String step; + private String failedStep; + private String stepInfo; + private String phaseDefinition; + private Long indexCreationDate; + private Long phaseTime; + private Long actionTime; + private Long stepTime; + + public Builder setPhase(String phase) { + this.phase = phase; + return this; + } + + public Builder setAction(String action) { + this.action = action; + return this; + } + + public Builder setStep(String step) { + this.step = step; + return this; + } + + public Builder setFailedStep(String failedStep) { + this.failedStep = failedStep; + return this; + } + + public Builder setStepInfo(String stepInfo) { + this.stepInfo = stepInfo; + return this; + } + + public Builder setPhaseDefinition(String phaseDefinition) { + this.phaseDefinition = phaseDefinition; + return this; + } + + public Builder setIndexCreationDate(Long indexCreationDate) { + this.indexCreationDate = indexCreationDate; + return this; + } + + public Builder setPhaseTime(Long phaseTime) { + this.phaseTime = phaseTime; + return this; + } + + public Builder setActionTime(Long actionTime) { + this.actionTime = actionTime; + return this; + } + + public Builder setStepTime(Long stepTime) { + this.stepTime = stepTime; + return this; + } + + public LifecycleExecutionState build() { + return new LifecycleExecutionState(phase, action, step, failedStep, stepInfo, phaseDefinition, indexCreationDate, + phaseTime, actionTime, stepTime); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java new file mode 100644 index 0000000000000..a56818355c3e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicy.java @@ -0,0 +1,266 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. 
A + * {@link LifecyclePolicy} is made up of a set of {@link Phase}s which it will + * move through. Policies are constrained by a {@link LifecycleType} which governs which + * {@link Phase}s and {@link LifecycleAction}s are allowed to be defined and in which order + * they are executed. + */ +public class LifecyclePolicy extends AbstractDiffable + implements ToXContentObject, Diffable { + private static final Logger logger = LogManager.getLogger(LifecyclePolicy.class); + + public static final ParseField PHASES_FIELD = new ParseField("phases"); + + @SuppressWarnings("unchecked") + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("lifecycle_policy", false, + (a, name) -> { + List phases = (List) a[0]; + Map phaseMap = phases.stream().collect(Collectors.toMap(Phase::getName, Function.identity())); + return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, name, phaseMap); + }); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Phase.parse(p, n), v -> { + throw new IllegalArgumentException("ordered " + PHASES_FIELD.getPreferredName() + " are not supported"); + }, PHASES_FIELD); + } + + private final String name; + private final LifecycleType type; + private final Map phases; + + /** + * @param name + * the name of this {@link LifecyclePolicy} + * @param phases + * a {@link Map} of {@link Phase}s which make up this + * {@link LifecyclePolicy}. + */ + public LifecyclePolicy(String name, Map phases) { + this(TimeseriesLifecycleType.INSTANCE, name, phases); + } + + /** + * For Serialization + */ + public LifecyclePolicy(StreamInput in) throws IOException { + type = in.readNamedWriteable(LifecycleType.class); + name = in.readString(); + phases = Collections.unmodifiableMap(in.readMap(StreamInput::readString, Phase::new)); + } + + /** + * @param type + * the {@link LifecycleType} of the policy + * @param name + * the name of this {@link LifecyclePolicy} + * @param phases + * a {@link Map} of {@link Phase}s which make up this + * {@link LifecyclePolicy}. + */ + public LifecyclePolicy(LifecycleType type, String name, Map phases) { + this.name = name; + this.phases = phases; + this.type = type; + this.type.validate(phases.values()); + } + + public static LifecyclePolicy parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(type); + out.writeString(name); + out.writeMap(phases, StreamOutput::writeString, (o, val) -> val.writeTo(o)); + } + + /** + * @return the name of this {@link LifecyclePolicy} + */ + public String getName() { + return name; + } + + /** + * @return the type of this {@link LifecyclePolicy} + */ + public LifecycleType getType() { + return type; + } + + /** + * @return the {@link Phase}s for this {@link LifecyclePolicy} in the order + * in which they will be executed. + */ + public Map getPhases() { + return phases; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(PHASES_FIELD.getPreferredName()); + for (Phase phase : phases.values()) { + builder.field(phase.getName(), phase); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + /** + * This method is used to compile this policy into its execution plan built out + * of {@link Step} instances. 
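+ * <p> + * As a hypothetical example (names and values here are illustrative; {@code client} is any {@link Client} obtained elsewhere), a policy with a single warm phase holding one forcemerge action: + * <pre>{@code + * Map<String, LifecycleAction> actions = new HashMap<>(); + * actions.put(ForceMergeAction.NAME, new ForceMergeAction(1)); + * Phase warm = new Phase("warm", TimeValue.timeValueDays(30), actions); + * LifecyclePolicy policy = new LifecyclePolicy("my_policy", Collections.singletonMap("warm", warm)); + * List<Step> steps = policy.toSteps(client); + * }</pre> + * would compile to the initialize step, a "new" phase-complete step, the read-only, forcemerge and + * segment-count steps, and finally the terminal step. + *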
The order of the {@link Phase}s and {@link LifecycleAction}s is + * determined by the {@link LifecycleType} associated with this policy. + * + * The order of the policy will have this structure: + * + * - initialize policy context step + * - phase-1 phase-after-step + * - ... phase-1 action steps + * - phase-2 phase-after-step + * - ... + * - terminal policy step + * + * We first initialize the policy's context and ensure that the index has proper settings set. + * Then we begin each phase's after-step along with all its actions as steps. Finally, we have + * a terminal step to inform us that this policy's steps are all complete. Each phase's `after` + * step is keyed with the previous phase's name. For example, the warm phase's `after` is + * associated with the hot phase so that it is clear that we haven't stepped into the warm phase + * just yet (until this step is complete). + * + * @param client The Elasticsearch Client to use during execution of {@link AsyncActionStep} + * and {@link AsyncWaitStep} steps. + * @return The list of {@link Step} objects in order of their execution. + */ + public List<Step> toSteps(Client client) { + List<Step> steps = new ArrayList<>(); + List<Phase> orderedPhases = type.getOrderedPhases(phases); + ListIterator<Phase> phaseIterator = orderedPhases.listIterator(orderedPhases.size()); + + // final step so that policy can properly update cluster-state with last action completed + steps.add(TerminalPolicyStep.INSTANCE); + Step.StepKey lastStepKey = TerminalPolicyStep.KEY; + + Phase phase = null; + // add steps for each phase, in reverse + while (phaseIterator.hasPrevious()) { + + Phase previousPhase = phaseIterator.previous(); + + // add `after` step for phase before next + if (phase != null) { + // after step should have the name of the previous phase since the index is still in the + // previous phase until the after condition is reached + Step.StepKey afterStepKey = new Step.StepKey(previousPhase.getName(), PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); + Step phaseAfterStep = new PhaseCompleteStep(afterStepKey, lastStepKey); + steps.add(phaseAfterStep); + lastStepKey = phaseAfterStep.getKey(); + } + + phase = previousPhase; + List<LifecycleAction> orderedActions = type.getOrderedActions(phase); + ListIterator<LifecycleAction> actionIterator = orderedActions.listIterator(orderedActions.size()); + // add steps for each action, in reverse + while (actionIterator.hasPrevious()) { + LifecycleAction action = actionIterator.previous(); + List<Step> actionSteps = action.toSteps(client, phase.getName(), lastStepKey); + ListIterator<Step> actionStepsIterator = actionSteps.listIterator(actionSteps.size()); + while (actionStepsIterator.hasPrevious()) { + Step step = actionStepsIterator.previous(); + steps.add(step); + lastStepKey = step.getKey(); + } + } + } + + if (phase != null) { + // The very first after step is in a phase before the hot phase so call this "new" + Step.StepKey afterStepKey = new Step.StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); + Step phaseAfterStep = new PhaseCompleteStep(afterStepKey, lastStepKey); + steps.add(phaseAfterStep); + lastStepKey = phaseAfterStep.getKey(); + } + + // init step so that the policy is guaranteed to have an initialization step to start from + steps.add(new InitializePolicyContextStep(InitializePolicyContextStep.KEY, lastStepKey)); + + Collections.reverse(steps); + + return steps; + } + + public boolean isActionSafe(StepKey stepKey) { + if ("new".equals(stepKey.getPhase())) { + return true; + } + Phase phase = phases.get(stepKey.getPhase()); + if (phase != null) { + LifecycleAction action =
phase.getActions().get(stepKey.getAction()); + if (action != null) { + return action.isSafeAction(); + } else { + throw new IllegalArgumentException("Action [" + stepKey.getAction() + "] in phase [" + stepKey.getPhase() + + "] does not exist in policy [" + name + "]"); + } + } else { + throw new IllegalArgumentException("Phase [" + stepKey.getPhase() + "] does not exist in policy [" + name + "]"); + } + } + + @Override + public int hashCode() { + return Objects.hash(name, phases); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + LifecyclePolicy other = (LifecyclePolicy) obj; + return Objects.equals(name, other.name) && + Objects.equals(phases, other.phases); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadata.java new file mode 100644 index 0000000000000..bfb2bee1edaac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadata.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Map; +import java.util.Objects; + +public class LifecyclePolicyMetadata extends AbstractDiffable + implements ToXContentObject, Diffable { + + static final ParseField POLICY = new ParseField("policy"); + static final ParseField HEADERS = new ParseField("headers"); + static final ParseField VERSION = new ParseField("version"); + static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + static final ParseField MODIFIED_DATE_STRING = new ParseField("modified_date_string"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("policy_metadata", + a -> { + LifecyclePolicy policy = (LifecyclePolicy) a[0]; + return new LifecyclePolicyMetadata(policy, (Map) a[1], (long) a[2], (long) a[3]); + }); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY); + PARSER.declareField(ConstructingObjectParser.constructorArg(), XContentParser::mapStrings, HEADERS, ValueType.OBJECT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE); + PARSER.declareString(ConstructingObjectParser.constructorArg(), 
MODIFIED_DATE_STRING); + } + + public static LifecyclePolicyMetadata parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final LifecyclePolicy policy; + private final Map headers; + private final long version; + private final long modifiedDate; + + public LifecyclePolicyMetadata(LifecyclePolicy policy, Map headers, long version, long modifiedDate) { + this.policy = policy; + this.headers = headers; + this.version = version; + this.modifiedDate = modifiedDate; + } + + @SuppressWarnings("unchecked") + public LifecyclePolicyMetadata(StreamInput in) throws IOException { + this.policy = new LifecyclePolicy(in); + this.headers = (Map) in.readGenericValue(); + this.version = in.readVLong(); + this.modifiedDate = in.readVLong(); + } + + public Map getHeaders() { + return headers; + } + + public LifecyclePolicy getPolicy() { + return policy; + } + + public String getName() { + return policy.getName(); + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public String getModifiedDateString() { + ZonedDateTime modifiedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC); + return modifiedDateTime.toString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), policy); + builder.field(HEADERS.getPreferredName(), headers); + builder.field(VERSION.getPreferredName(), version); + builder.field(MODIFIED_DATE.getPreferredName(), modifiedDate); + builder.field(MODIFIED_DATE_STRING.getPreferredName(), getModifiedDateString()); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + policy.writeTo(out); + out.writeGenericValue(headers); + out.writeVLong(version); + out.writeVLong(modifiedDate); + } + + @Override + public int hashCode() { + return Objects.hash(policy, headers, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + LifecyclePolicyMetadata other = (LifecyclePolicyMetadata) obj; + return Objects.equals(policy, other.policy) && + Objects.equals(headers, other.headers) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java new file mode 100644 index 0000000000000..4f8eb339db7e8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleSettings.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; + +/** + * Class encapsulating settings related to the Index Lifecycle Management X-Pack plugin + */ +public class LifecycleSettings { + public static final String LIFECYCLE_POLL_INTERVAL = "indices.lifecycle.poll_interval"; + public static final String LIFECYCLE_NAME = "index.lifecycle.name"; + + public static final Setting<TimeValue> LIFECYCLE_POLL_INTERVAL_SETTING = Setting.positiveTimeSetting(LIFECYCLE_POLL_INTERVAL, + TimeValue.timeValueMinutes(10), Setting.Property.Dynamic, Setting.Property.NodeScope); + public static final Setting<String> LIFECYCLE_NAME_SETTING = Setting.simpleString(LIFECYCLE_NAME, + Setting.Property.Dynamic, Setting.Property.IndexScope); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java new file mode 100644 index 0000000000000..69be30fdfbd0e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleType.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.NamedWriteable; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +public interface LifecycleType extends NamedWriteable { + + /** + * @return the {@link Phase}s of the policy in the order in which they should be executed + */ + List<Phase> getOrderedPhases(Map<String, Phase> phases); + + /** + * Returns the next phase that is available after + * currentPhaseName. Note that currentPhaseName + * does not need to exist in phases. + * + * If the current {@link Phase} is the last phase in the {@link LifecyclePolicy} this + * method will return null. + * + * If the phase is not valid for the lifecycle type an + * {@link IllegalArgumentException} will be thrown. + */ + String getNextPhaseName(String currentPhaseName, Map<String, Phase> phases); + + /** + * Returns the previous phase that is available before + * currentPhaseName. Note that currentPhaseName + * does not need to exist in phases. + * + * If the current {@link Phase} is the first phase in the {@link LifecyclePolicy} + * this method will return null. + * + * If the phase is not valid for the lifecycle type an + * {@link IllegalArgumentException} will be thrown. + */ + String getPreviousPhaseName(String currentPhaseName, Map<String, Phase> phases); + + List<LifecycleAction> getOrderedActions(Phase phase); + + /** + * Returns the name of the next action that is available in the phase after + * currentActionName. Note that currentActionName + * does not need to exist in the {@link Phase}. + * + * If the current action is the last action in the phase this method will + * return null. + * + * If the action is not valid for the phase an + * {@link IllegalArgumentException} will be thrown. + */ + String getNextActionName(String currentActionName, Phase phase); + + + /** + * Validates whether the specified phases are valid for this + * policy instance. + * + * @param phases + * the phases to verify validity against + * @throws IllegalArgumentException + * if a specific phase or lack of a specific phase is invalid.
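+ * For example, an implementation such as {@link TimeseriesLifecycleType} would be expected to reject a phase or action that it does not recognize.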
+ */ + void validate(Collection<Phase> phases); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java new file mode 100644 index 0000000000000..defc2e46818bc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/OperationMode.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +/** + * Enum representing the different modes that the Index Lifecycle Service can operate in. + */ +public enum OperationMode { + /** + * This represents a state where no policies are executed + */ + STOPPED { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING; + } + }, + + /** + * This represents a state where only sensitive actions (like {@link ShrinkAction}) will be executed + * until they finish, at which point the operation mode will move to STOPPED. + */ + STOPPING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING || nextMode == STOPPED; + } + }, + + /** + * Normal operation where all policies are executed as normal. + */ + RUNNING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == STOPPING; + } + }; + + public abstract boolean isValidChange(OperationMode nextMode); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java new file mode 100644 index 0000000000000..08b995ade1460 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Phase.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Represents a set of {@link LifecycleAction}s which should be executed at a + * particular point in the lifecycle of an index.
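The three modes above form a small state machine: RUNNING moves to STOPPING, STOPPING to STOPPED (or back to RUNNING if the stop is cancelled), and STOPPED back to RUNNING. A sketch of a guard a caller might build on isValidChange (the transition helper is hypothetical, not part of this change):

```java
public final class OperationModeDemo {
    // Hypothetical guard: only apply a requested mode if the enum allows the move.
    static OperationMode transition(OperationMode current, OperationMode requested) {
        if (current == requested) {
            return current; // no-op, nothing to validate
        }
        if (current.isValidChange(requested) == false) {
            throw new IllegalArgumentException("cannot move from " + current + " to " + requested);
        }
        return requested;
    }

    public static void main(String[] args) {
        OperationMode mode = OperationMode.RUNNING;
        mode = transition(mode, OperationMode.STOPPING); // ok
        mode = transition(mode, OperationMode.STOPPED);  // ok
        transition(mode, OperationMode.STOPPING);        // throws: STOPPED only allows RUNNING
    }
}
```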
+ */ +public class Phase implements ToXContentObject, Writeable { + + public static final ParseField MIN_AGE = new ParseField("min_age"); + public static final ParseField ACTIONS_FIELD = new ParseField("actions"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Phase, String> PARSER = new ConstructingObjectParser<>("phase", false, + (a, name) -> new Phase(name, (TimeValue) a[0], ((List<LifecycleAction>) a[1]).stream() + .collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())))); + static { + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MIN_AGE.getPreferredName()), MIN_AGE, ValueType.VALUE); + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), + (p, c, n) -> p.namedObject(LifecycleAction.class, n, null), v -> { + throw new IllegalArgumentException("ordered " + ACTIONS_FIELD.getPreferredName() + " are not supported"); + }, ACTIONS_FIELD); + } + + public static Phase parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final String name; + private final Map<String, LifecycleAction> actions; + private final TimeValue minimumAge; + + /** + * @param name + * the name of this {@link Phase}. + * @param minimumAge + * the age of the index when the index should move to this + * {@link Phase}. + * @param actions + * a {@link Map} of the {@link LifecycleAction}s to run + * during this {@link Phase}. The keys in this map are the associated + * action names. The order of these actions is defined + * by the {@link LifecycleType} + */ + public Phase(String name, TimeValue minimumAge, Map<String, LifecycleAction> actions) { + this.name = name; + if (minimumAge == null) { + this.minimumAge = TimeValue.ZERO; + } else { + this.minimumAge = minimumAge; + } + this.actions = actions; + } + + /** + * For Serialization + */ + public Phase(StreamInput in) throws IOException { + this.name = in.readString(); + this.minimumAge = in.readTimeValue(); + int size = in.readVInt(); + TreeMap<String, LifecycleAction> actions = new TreeMap<>(); + for (int i = 0; i < size; i++) { + actions.put(in.readString(), in.readNamedWriteable(LifecycleAction.class)); + } + this.actions = actions; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeTimeValue(minimumAge); + out.writeVInt(actions.size()); + for (Map.Entry<String, LifecycleAction> entry : actions.entrySet()) { + out.writeString(entry.getKey()); + out.writeNamedWriteable(entry.getValue()); + } + } + + /** + * @return the age of the index when the index should move to this + * {@link Phase}. + */ + public TimeValue getMinimumAge() { + return minimumAge; + } + + /** + * @return the name of this {@link Phase} + */ + public String getName() { + return name; + } + + /** + * @return a {@link Map} of the {@link LifecycleAction}s to run during + * this {@link Phase}.
+ */ + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN_AGE.getPreferredName(), minimumAge.getStringRep()); + builder.field(ACTIONS_FIELD.getPreferredName(), actions); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(name, minimumAge, actions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Phase other = (Phase) obj; + return Objects.equals(name, other.name) && + Objects.equals(minimumAge, other.minimumAge) && + Objects.equals(actions, other.actions); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStep.java new file mode 100644 index 0000000000000..fa960bb9994b4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStep.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +/** + * This is essentially a marker that a phase has ended, and we need to check + * the age of an index before proceeding to the next phase. + */ +public class PhaseCompleteStep extends Step { + public static final String NAME = "complete"; + + public PhaseCompleteStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java new file mode 100644 index 0000000000000..1ba7390ed2202 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfo.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class contains information about the current phase being executed by Index + * Lifecycle Management on the specific index. 
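Before moving on to PhaseExecutionInfo: a Phase is just a name, a min_age, and a map of actions keyed by writeable name (a null minimum age defaults to TimeValue.ZERO). A sketch of building a hypothetical "warm" phase by hand, assuming the org.elasticsearch.xpack.core.indexlifecycle classes from this change are imported and using the ReadOnlyAction defined later in the diff:

```java
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.common.unit.TimeValue;

public class PhaseDemo {
    public static void main(String[] args) {
        // Keyed by the action's writeable name, just as the parser keys entries
        // via LifecycleAction::getWriteableName.
        Map<String, LifecycleAction> actions =
                Collections.singletonMap(ReadOnlyAction.NAME, new ReadOnlyAction());
        Phase warm = new Phase("warm", TimeValue.timeValueDays(30), actions);
        // Serializes roughly as: {"min_age":"30d","actions":{"readonly":{}}}
        System.out.println(warm);
    }
}
```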
+ */ +public class PhaseExecutionInfo implements ToXContentObject, Writeable { + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField PHASE_DEFINITION_FIELD = new ParseField("phase_definition"); + private static final ParseField VERSION_FIELD = new ParseField("version"); + private static final ParseField MODIFIED_DATE_IN_MILLIS_FIELD = new ParseField("modified_date_in_millis"); + + private static final ConstructingObjectParser<PhaseExecutionInfo, String> PARSER = new ConstructingObjectParser<>( + "phase_execution_info", false, + (a, name) -> new PhaseExecutionInfo((String) a[0], (Phase) a[1], (long) a[2], (long) a[3])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Phase::parse, PHASE_DEFINITION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_IN_MILLIS_FIELD); + } + + public static PhaseExecutionInfo parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final String policyName; + private final Phase phase; + private final long version; + private final long modifiedDate; + + /** + * This class holds information about the current phase that is being executed. + * + * @param policyName the name of the policy being executed; this may not be the policy currently assigned to the index + * @param phase the definition of the phase currently being executed + * @param version the version of the named policy that is being executed + * @param modifiedDate the time the executing version of the phase was modified + */ + public PhaseExecutionInfo(String policyName, @Nullable Phase phase, long version, long modifiedDate) { + this.policyName = policyName; + this.phase = phase; + this.version = version; + this.modifiedDate = modifiedDate; + } + + PhaseExecutionInfo(StreamInput in) throws IOException { + this.policyName = in.readString(); + this.phase = in.readOptionalWriteable(Phase::new); + this.version = in.readVLong(); + this.modifiedDate = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(policyName); + out.writeOptionalWriteable(phase); + out.writeVLong(version); + out.writeVLong(modifiedDate); + } + + public String getPolicyName() { + return policyName; + } + + public Phase getPhase() { + return phase; + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + @Override + public int hashCode() { + return Objects.hash(policyName, phase, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PhaseExecutionInfo other = (PhaseExecutionInfo) obj; + return Objects.equals(policyName, other.policyName) && + Objects.equals(phase, other.phase) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + + @Override + public String toString() { + return Strings.toString(this, false, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + if (phase != null) { + builder.field(PHASE_DEFINITION_FIELD.getPreferredName(), phase); + } + builder.field(VERSION_FIELD.getPreferredName(),
version); + builder.timeField(MODIFIED_DATE_IN_MILLIS_FIELD.getPreferredName(), "modified_date", modifiedDate); + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java new file mode 100644 index 0000000000000..15edd51908bfe --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A {@link LifecycleAction} which sets the index to be read-only. + */ +public class ReadOnlyAction implements LifecycleAction { + public static final String NAME = "readonly"; + public static final ReadOnlyAction INSTANCE = new ReadOnlyAction(); + + private static final ObjectParser<ReadOnlyAction, Void> PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new); + + public static ReadOnlyAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ReadOnlyAction() { + } + + public ReadOnlyAction(StreamInput in) { + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Step.StepKey key = new Step.StepKey(phase, NAME, NAME); + Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); + return Collections.singletonList(new UpdateSettingsStep(key, nextStepKey, client, readOnlySettings)); + } + + @Override + public List<StepKey> toStepKeys(String phase) { + return Collections.singletonList(new Step.StepKey(phase, NAME, NAME)); + } + + @Override + public int hashCode() { + return ReadOnlyAction.class.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java new file mode 100644 index 0000000000000..78dce2db1b8c2 --- /dev/null +++
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverAction.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A {@link LifecycleAction} which rolls over the index. + */ +public class RolloverAction implements LifecycleAction { + public static final String NAME = "rollover"; + public static final ParseField MAX_SIZE_FIELD = new ParseField("max_size"); + public static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs"); + public static final ParseField MAX_AGE_FIELD = new ParseField("max_age"); + public static final String LIFECYCLE_ROLLOVER_ALIAS = "index.lifecycle.rollover_alias"; + public static final Setting<String> LIFECYCLE_ROLLOVER_ALIAS_SETTING = Setting.simpleString(LIFECYCLE_ROLLOVER_ALIAS, + Setting.Property.Dynamic, Setting.Property.IndexScope); + + private static final ConstructingObjectParser<RolloverAction, Void> PARSER = new ConstructingObjectParser<>(NAME, + a -> new RolloverAction((ByteSizeValue) a[0], (TimeValue) a[1], (Long) a[2])); + static { + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SIZE_FIELD.getPreferredName()), MAX_SIZE_FIELD, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_AGE_FIELD.getPreferredName()), MAX_AGE_FIELD, ValueType.VALUE); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD); + } + + private final ByteSizeValue maxSize; + private final Long maxDocs; + private final TimeValue maxAge; + + public static RolloverAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public RolloverAction(ByteSizeValue maxSize, TimeValue maxAge, Long maxDocs) { + if (maxSize == null && maxAge == null && maxDocs == null) { + throw new IllegalArgumentException("At least one rollover condition must be set."); + } + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + + public RolloverAction(StreamInput in) throws IOException { + if (in.readBoolean()) { + maxSize = new ByteSizeValue(in); + } else { + maxSize = null; + } + maxAge = in.readOptionalTimeValue(); + if (in.readBoolean()) { + maxDocs = in.readVLong(); + } else { + maxDocs = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + boolean hasMaxSize = maxSize != null; + out.writeBoolean(hasMaxSize); + if (hasMaxSize) { +
maxSize.writeTo(out); + } + out.writeOptionalTimeValue(maxAge); + boolean hasMaxDocs = maxDocs != null; + out.writeBoolean(hasMaxDocs); + if (hasMaxDocs) { + out.writeVLong(maxDocs); + } + } + + @Override + public String getWriteableName() { + return NAME; + } + + public ByteSizeValue getMaxSize() { + return maxSize; + } + + public TimeValue getMaxAge() { + return maxAge; + } + + public Long getMaxDocs() { + return maxDocs; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (maxSize != null) { + builder.field(MAX_SIZE_FIELD.getPreferredName(), maxSize.getStringRep()); + } + if (maxAge != null) { + builder.field(MAX_AGE_FIELD.getPreferredName(), maxAge.getStringRep()); + } + if (maxDocs != null) { + builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs); + } + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return true; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); + RolloverStep rolloverStep = new RolloverStep(new StepKey(phase, NAME, RolloverStep.NAME), updateDateStepKey, client, + maxSize, maxAge, maxDocs); + UpdateRolloverLifecycleDateStep updateDateStep = new UpdateRolloverLifecycleDateStep(updateDateStepKey, nextStepKey); + return Arrays.asList(rolloverStep, updateDateStep); + } + + @Override + public List toStepKeys(String phase) { + StepKey rolloverStepKey = new StepKey(phase, NAME, RolloverStep.NAME); + StepKey updateDateStepKey = new StepKey(phase, NAME, UpdateRolloverLifecycleDateStep.NAME); + return Arrays.asList(rolloverStepKey, updateDateStepKey); + } + + @Override + public int hashCode() { + return Objects.hash(maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverAction other = (RolloverAction) obj; + return Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + @Override + public String toString() { + return Strings.toString(this); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java new file mode 100644 index 0000000000000..399f90df31dae --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStep.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
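As a usage note for RolloverAction above: the only constructor invariant is that at least one condition is present. A sketch with made-up thresholds, assuming the classes from this change are imported:

```java
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

public class RolloverActionDemo {
    public static void main(String[] args) {
        // Roll over at 50 GB, 30 days, or 100 million documents (whichever is hit first).
        RolloverAction action = new RolloverAction(
                new ByteSizeValue(50, ByteSizeUnit.GB),
                TimeValue.timeValueDays(30),
                100_000_000L);
        System.out.println(action); // {"max_size":"50gb","max_age":"30d","max_docs":100000000}

        // All-null conditions are rejected by the constructor:
        // new RolloverAction(null, null, null) -> IllegalArgumentException
    }
}
```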
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +public class RolloverStep extends AsyncWaitStep { + public static final String NAME = "attempt_rollover"; + + private static final Logger logger = LogManager.getLogger(RolloverStep.class); + + private ByteSizeValue maxSize; + private TimeValue maxAge; + private Long maxDocs; + + public RolloverStep(StepKey key, StepKey nextStepKey, Client client, ByteSizeValue maxSize, TimeValue maxAge, + Long maxDocs) { + super(key, nextStepKey, client); + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + + @Override + public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings()); + + if (Strings.isNullOrEmpty(rolloverAlias)) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, + indexMetaData.getIndex().getName()))); + return; + } + + if (indexMetaData.getAliases().containsKey(rolloverAlias) == false) { + listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, + indexMetaData.getIndex().getName()))); + return; + } + + RolloverRequest rolloverRequest = new RolloverRequest(rolloverAlias, null); + if (maxAge != null) { + rolloverRequest.addMaxIndexAgeCondition(maxAge); + } + if (maxSize != null) { + rolloverRequest.addMaxIndexSizeCondition(maxSize); + } + if (maxDocs != null) { + rolloverRequest.addMaxIndexDocsCondition(maxDocs); + } + getClient().admin().indices().rolloverIndex(rolloverRequest, + ActionListener.wrap(response -> listener.onResponse(response.isRolledOver(), new EmptyInfo()), exception -> { + if (exception instanceof ResourceAlreadyExistsException) { + // This can happen sometimes when this step is executed multiple times + if (logger.isTraceEnabled()) { + logger.debug(() -> new ParameterizedMessage("{} index cannot roll over because the next index already exists, " + + "skipping to next step", indexMetaData.getIndex()), exception); + } else { + logger.debug("{} index cannot roll over because the next index already exists, skipping to next step", + indexMetaData.getIndex()); + } + listener.onResponse(true, new EmptyInfo()); + } else { + listener.onFailure(exception); + } + })); + } + + ByteSizeValue getMaxSize() { + return maxSize; + } + + TimeValue getMaxAge() { + return maxAge; + } + + Long getMaxDocs() { + return maxDocs; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object 
obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RolloverStep other = (RolloverStep) obj; + return super.equals(obj) && + Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + // We currently have no information to provide for this AsyncWaitStep, so this is an empty object + private class EmptyInfo implements ToXContentObject { + private EmptyInfo() {} + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java new file mode 100644 index 0000000000000..0d706dca10445 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.StreamSupport; + +/** + * This {@link Step} evaluates whether force_merge was successful + */ +public class SegmentCountStep extends AsyncWaitStep { + public static final String NAME = "segment-count"; + + private final int maxNumSegments; + + public SegmentCountStep(StepKey key, StepKey nextStepKey, Client client, int maxNumSegments) { + super(key, nextStepKey, client); + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + getClient().admin().indices().segments(new IndicesSegmentsRequest(indexMetaData.getIndex().getName()), + ActionListener.wrap(response -> { + long numberShardsLeftToMerge = + StreamSupport.stream(response.getIndices().get(indexMetaData.getIndex().getName()).spliterator(), false) + .filter(iss -> Arrays.stream(iss.getShards()).anyMatch(p -> p.getSegments().size() > maxNumSegments)).count(); + listener.onResponse(numberShardsLeftToMerge == 0, new Info(numberShardsLeftToMerge)); + }, listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SegmentCountStep other = (SegmentCountStep) obj; + return super.equals(obj) + && Objects.equals(maxNumSegments, other.maxNumSegments); + } + + public static class Info implements ToXContentObject { + + private final long numberShardsLeftToMerge; + + 
static final ParseField SHARDS_TO_MERGE = new ParseField("shards_left_to_merge"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("segment_count_step_info", + a -> new Info((long) a[0])); + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SHARDS_TO_MERGE); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(long numberShardsLeftToMerge) { + this.numberShardsLeftToMerge = numberShardsLeftToMerge; + } + + public long getNumberShardsLeftToMerge() { + return numberShardsLeftToMerge; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MESSAGE.getPreferredName(), + "Waiting for [" + numberShardsLeftToMerge + "] shards " + "to forcemerge"); + builder.field(SHARDS_TO_MERGE.getPreferredName(), numberShardsLeftToMerge); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(numberShardsLeftToMerge); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Info other = (Info) obj; + return Objects.equals(numberShardsLeftToMerge, other.numberShardsLeftToMerge); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java new file mode 100644 index 0000000000000..0caa217841769 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStep.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
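The core of SegmentCountStep.evaluateCondition above is the stream over the segments response; pulled out into a standalone helper it reads as below (a sketch against the IndexShardSegments/ShardSegments types returned by the segments API):

```java
import java.util.Arrays;
import java.util.stream.StreamSupport;

import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;

public final class SegmentCountDemo {
    // A shard still "needs merging" if any of its copies reports more segments
    // than allowed; the step considers the condition met once this count is zero.
    static long shardsLeftToMerge(Iterable<IndexShardSegments> shards, int maxNumSegments) {
        return StreamSupport.stream(shards.spliterator(), false)
                .filter(iss -> Arrays.stream(iss.getShards())
                        .anyMatch(shard -> shard.getSegments().size() > maxNumSegments))
                .count();
    }
}
```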
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +public class SetSingleNodeAllocateStep extends AsyncActionStep { + public static final String NAME = "set-single-node-allocation"; + + private static final AllocationDeciders ALLOCATION_DECIDERS = new AllocationDeciders(Collections.singletonList( + new FilterAllocationDecider(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)))); + + public SetSingleNodeAllocateStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState clusterState, Listener listener) { + RoutingAllocation allocation = new RoutingAllocation(ALLOCATION_DECIDERS, clusterState.getRoutingNodes(), clusterState, null, + System.nanoTime()); + List validNodeIds = new ArrayList<>(); + Optional anyShard = clusterState.getRoutingTable().allShards(indexMetaData.getIndex().getName()).stream().findAny(); + if (anyShard.isPresent()) { + // Iterate through the nodes finding ones that are acceptable for the current allocation rules of the shard + for (RoutingNode node : clusterState.getRoutingNodes()) { + boolean canRemainOnCurrentNode = ALLOCATION_DECIDERS.canRemain(anyShard.get(), node, allocation) + .type() == Decision.Type.YES; + if (canRemainOnCurrentNode) { + DiscoveryNode discoveryNode = node.node(); + validNodeIds.add(discoveryNode.getId()); + } + } + // Shuffle the list of nodes so the one we pick is random + Randomness.shuffle(validNodeIds); + Optional nodeId = validNodeIds.stream().findAny(); + if (nodeId.isPresent()) { + Settings settings = Settings.builder() + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", nodeId.get()).build(); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indexMetaData.getIndex().getName()) + .settings(settings); + getClient().admin().indices().updateSettings(updateSettingsRequest, + ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); + } else { + // No nodes currently match the allocation rules so just wait until there is one that does + listener.onResponse(false); + } + } else { + // There are no shards for the index, the index might be gone + listener.onFailure(new IndexNotFoundException(indexMetaData.getIndex())); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java new file mode 100644 index 0000000000000..a79383c24de8b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkAction.java @@ -0,0 +1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A {@link LifecycleAction} which shrinks the index. + */ +public class ShrinkAction implements LifecycleAction { + public static final String NAME = "shrink"; + public static final String SHRUNKEN_INDEX_PREFIX = "shrink-"; + public static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0])); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD); + } + + private int numberOfShards; + + public static ShrinkAction parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public ShrinkAction(int numberOfShards) { + if (numberOfShards <= 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0"); + } + this.numberOfShards = numberOfShards; + } + + public ShrinkAction(StreamInput in) throws IOException { + this.numberOfShards = in.readVInt(); + } + + int getNumberOfShards() { + return numberOfShards; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(numberOfShards); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards); + builder.endObject(); + return builder; + } + + @Override + public boolean isSafeAction() { + return false; + } + + @Override + public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { + Settings readOnlySettings = Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true).build(); + + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME); + StepKey allocationRoutedKey = new StepKey(phase, NAME, CheckShrinkReadyStep.NAME); + StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME); + StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME); + StepKey copyMetadataKey = 
new StepKey(phase, NAME, CopyExecutionStateStep.NAME); + StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME); + StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME); + + UpdateSettingsStep readOnlyStep = new UpdateSettingsStep(readOnlyKey, setSingleNodeKey, client, readOnlySettings); + SetSingleNodeAllocateStep setSingleNodeStep = new SetSingleNodeAllocateStep(setSingleNodeKey, allocationRoutedKey, client); + CheckShrinkReadyStep checkShrinkReadyStep = new CheckShrinkReadyStep(allocationRoutedKey, shrinkKey); + ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, SHRUNKEN_INDEX_PREFIX); + ShrunkShardsAllocatedStep allocated = new ShrunkShardsAllocatedStep(enoughShardsKey, copyMetadataKey, SHRUNKEN_INDEX_PREFIX); + CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX); + ShrinkSetAliasStep aliasSwapAndDelete = new ShrinkSetAliasStep(aliasKey, isShrunkIndexKey, client, SHRUNKEN_INDEX_PREFIX); + ShrunkenIndexCheckStep waitOnShrinkTakeover = new ShrunkenIndexCheckStep(isShrunkIndexKey, nextStepKey, SHRUNKEN_INDEX_PREFIX); + return Arrays.asList(readOnlyStep, setSingleNodeStep, checkShrinkReadyStep, shrink, allocated, copyMetadata, + aliasSwapAndDelete, waitOnShrinkTakeover); + } + + @Override + public List toStepKeys(String phase) { + StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyAction.NAME); + StepKey setSingleNodeKey = new StepKey(phase, NAME, SetSingleNodeAllocateStep.NAME); + StepKey checkShrinkReadyKey = new StepKey(phase, NAME, CheckShrinkReadyStep.NAME); + StepKey shrinkKey = new StepKey(phase, NAME, ShrinkStep.NAME); + StepKey enoughShardsKey = new StepKey(phase, NAME, ShrunkShardsAllocatedStep.NAME); + StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME); + StepKey aliasKey = new StepKey(phase, NAME, ShrinkSetAliasStep.NAME); + StepKey isShrunkIndexKey = new StepKey(phase, NAME, ShrunkenIndexCheckStep.NAME); + return Arrays.asList(readOnlyKey, setSingleNodeKey, checkShrinkReadyKey, shrinkKey, enoughShardsKey, + copyMetadataKey, aliasKey, isShrunkIndexKey); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShrinkAction that = (ShrinkAction) o; + return Objects.equals(numberOfShards, that.numberOfShards); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfShards); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java new file mode 100644 index 0000000000000..b9e0e00eeb600 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStep.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
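Since ShrinkAction expands into the longest step chain in this change, it helps to see the expansion from the caller's side; toStepKeys yields the chain without needing a Client. A sketch for a hypothetical "warm" phase, assuming the classes from this change are imported:

```java
import java.util.List;

public class ShrinkStepsDemo {
    public static void main(String[] args) {
        // Shrink to a single shard; eight step keys come back, in execution order.
        List<Step.StepKey> keys = new ShrinkAction(1).toStepKeys("warm");
        for (Step.StepKey key : keys) {
            // Every key carries the phase, the owning action ("shrink"), and the step name.
            System.out.println(key.getPhase() + "/" + key.getAction() + "/" + key.getName());
        }
    }
}
```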
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; + +import java.util.Objects; + +public class ShrinkSetAliasStep extends AsyncActionStep { + public static final String NAME = "aliases"; + private String shrunkIndexPrefix; + + public ShrinkSetAliasStep(StepKey key, StepKey nextStepKey, Client client, String shrunkIndexPrefix) { + super(key, nextStepKey, client); + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + // get source index + String index = indexMetaData.getIndex().getName(); + // get target shrink index + String targetIndexName = shrunkIndexPrefix + index; + + IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest() + .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(index)) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndexName).alias(index)); + + getClient().admin().indices().aliases(aliasesRequest, ActionListener.wrap(response -> + listener.onResponse(true), listener::onFailure)); + } + + @Override + public boolean indexSurvives() { + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shrunkIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ShrinkSetAliasStep other = (ShrinkSetAliasStep) obj; + return super.equals(obj) && + Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java new file mode 100644 index 0000000000000..8c7adcb62ff4e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStep.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
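One design note on ShrinkSetAliasStep above: both alias actions travel in a single IndicesAliasesRequest, so removing the source index and re-pointing its name at the shrunken copy is applied as one atomic cluster-state change. A sketch with hypothetical index names:

```java
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

public class AliasSwapDemo {
    public static void main(String[] args) {
        String source = "logs-000001";      // hypothetical source index
        String target = "shrink-" + source; // SHRUNKEN_INDEX_PREFIX + source
        // Delete the source index and alias its name to the shrunken index in one request,
        // so readers of the old name transparently hit the new index.
        IndicesAliasesRequest swap = new IndicesAliasesRequest()
                .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(source))
                .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(target).alias(source));
    }
}
```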
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; + +import java.util.Objects; + +public class ShrinkStep extends AsyncActionStep { + public static final String NAME = "shrink"; + + private int numberOfShards; + private String shrunkIndexPrefix; + + public ShrinkStep(StepKey key, StepKey nextStepKey, Client client, int numberOfShards, String shrunkIndexPrefix) { + super(key, nextStepKey, client); + this.numberOfShards = numberOfShards; + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + public int getNumberOfShards() { + return numberOfShards; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + if (lifecycleState.getLifecycleDate() == null) { + throw new IllegalStateException("source index [" + indexMetaData.getIndex().getName() + + "] is missing lifecycle date"); + } + + String lifecycle = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetaData.getSettings()); + + Settings relevantTargetSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, indexMetaData.getNumberOfReplicas()) + .put(LifecycleSettings.LIFECYCLE_NAME, lifecycle) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null) // need to remove the single shard + // allocation so replicas can be allocated + .build(); + + String shrunkenIndexName = shrunkIndexPrefix + indexMetaData.getIndex().getName(); + ResizeRequest resizeRequest = new ResizeRequest(shrunkenIndexName, indexMetaData.getIndex().getName()); + indexMetaData.getAliases().values().spliterator().forEachRemaining(aliasMetaDataObjectCursor -> { + resizeRequest.getTargetIndexRequest().alias(new Alias(aliasMetaDataObjectCursor.value.alias())); + }); + resizeRequest.getTargetIndexRequest().settings(relevantTargetSettings); + + getClient().admin().indices().resizeIndex(resizeRequest, ActionListener.wrap(response -> { + // TODO(talevy): when is this not acknowledged? 
+ listener.onResponse(response.isAcknowledged()); + }, listener::onFailure)); + + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), numberOfShards, shrunkIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ShrinkStep other = (ShrinkStep) obj; + return super.equals(obj) && + Objects.equals(numberOfShards, other.numberOfShards) && + Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStep.java new file mode 100644 index 0000000000000..b64ebf5e46b5a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStep.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Objects; + +public class ShrunkShardsAllocatedStep extends ClusterStateWaitStep { + public static final String NAME = "shrunk-shards-allocated"; + private String shrunkIndexPrefix; + + public ShrunkShardsAllocatedStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) { + super(key, nextStepKey); + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + // We only want to make progress if all shards of the shrunk index are + // active + boolean indexExists = clusterState.metaData().index(shrunkIndexPrefix + index.getName()) != null; + if (indexExists == false) { + return new Result(false, new Info(false, -1, false)); + } + boolean allShardsActive = ActiveShardCount.ALL.enoughShardsActive(clusterState, shrunkIndexPrefix + index.getName()); + int numShrunkIndexShards = clusterState.metaData().index(shrunkIndexPrefix + index.getName()).getNumberOfShards(); + if (allShardsActive) { + return new Result(true, null); + } else { + return new Result(false, new Info(true, numShrunkIndexShards, allShardsActive)); + } + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shrunkIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ShrunkShardsAllocatedStep other = (ShrunkShardsAllocatedStep) obj; + return super.equals(obj) && Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix); + } + + public static final class Info implements ToXContentObject { + + private final int actualShards; + private final boolean shrunkIndexExists; + private final boolean allShardsActive; + private 
final String message; + + static final ParseField ACTUAL_SHARDS = new ParseField("actual_shards"); + static final ParseField SHRUNK_INDEX_EXISTS = new ParseField("shrunk_index_exists"); + static final ParseField ALL_SHARDS_ACTIVE = new ParseField("all_shards_active"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("shrunk_shards_allocated_step_info", + a -> new Info((boolean) a[0], (int) a[1], (boolean) a[2])); + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SHRUNK_INDEX_EXISTS); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), ACTUAL_SHARDS); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ALL_SHARDS_ACTIVE); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(boolean shrunkIndexExists, int actualShards, boolean allShardsActive) { + this.actualShards = actualShards; + this.shrunkIndexExists = shrunkIndexExists; + this.allShardsActive = allShardsActive; + if (shrunkIndexExists == false) { + message = "Waiting for shrunk index to be created"; + } else if (allShardsActive == false) { + message = "Waiting for all shard copies to be active"; + } else { + message = ""; + } + } + + public int getActualShards() { + return actualShards; + } + + public boolean shrunkIndexExists() { + return shrunkIndexExists; + } + + public boolean allShardsActive() { + return allShardsActive; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MESSAGE.getPreferredName(), message); + builder.field(SHRUNK_INDEX_EXISTS.getPreferredName(), shrunkIndexExists); + builder.field(ACTUAL_SHARDS.getPreferredName(), actualShards); + builder.field(ALL_SHARDS_ACTIVE.getPreferredName(), allShardsActive); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(shrunkIndexExists, actualShards, allShardsActive); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Info other = (Info) obj; + return Objects.equals(shrunkIndexExists, other.shrunkIndexExists) && + Objects.equals(actualShards, other.actualShards) && + Objects.equals(allShardsActive, other.allShardsActive); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStep.java new file mode 100644 index 0000000000000..28e219cd4e542 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStep.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Objects; + +public class ShrunkenIndexCheckStep extends ClusterStateWaitStep { + public static final String NAME = "is-shrunken-index"; + private static final Logger logger = LogManager.getLogger(ShrunkenIndexCheckStep.class); + private String shrunkIndexPrefix; + + public ShrunkenIndexCheckStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) { + super(key, nextStepKey); + this.shrunkIndexPrefix = shrunkIndexPrefix; + } + + String getShrunkIndexPrefix() { + return shrunkIndexPrefix; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + IndexMetaData idxMeta = clusterState.getMetaData().index(index); + if (idxMeta == null) { + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + // Index must have been since deleted, ignore it + return new Result(false, null); + } + String shrunkenIndexSource = IndexMetaData.INDEX_RESIZE_SOURCE_NAME.get( + clusterState.metaData().index(index).getSettings()); + if (Strings.isNullOrEmpty(shrunkenIndexSource)) { + throw new IllegalStateException("step[" + NAME + "] is checking an un-shrunken index[" + index.getName() + "]"); + } + boolean isConditionMet = index.getName().equals(shrunkIndexPrefix + shrunkenIndexSource) && + clusterState.metaData().index(shrunkenIndexSource) == null; + if (isConditionMet) { + return new Result(true, null); + } else { + return new Result(false, new Info(shrunkenIndexSource)); + } + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shrunkIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ShrunkenIndexCheckStep other = (ShrunkenIndexCheckStep) obj; + return super.equals(obj) && + Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix); + } + + public static final class Info implements ToXContentObject { + + private final String originalIndexName; + private final String message; + + static final ParseField ORIGINAL_INDEX_NAME = new ParseField("original_index_name"); + static final ParseField MESSAGE = new ParseField("message"); + static final ConstructingObjectParser<Info, Void> PARSER = new ConstructingObjectParser<>("shrunken_index_check_step_info", + a -> new Info((String) a[0])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ORIGINAL_INDEX_NAME); + PARSER.declareString((i, s) -> {}, MESSAGE); + } + + public Info(String originalIndexName) { + this.originalIndexName = originalIndexName; + this.message = "Waiting for original index [" + originalIndexName + "] to be deleted"; + } + + public String getOriginalIndexName() { + return originalIndexName; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); +
builder.field(MESSAGE.getPreferredName(), message); + builder.field(ORIGINAL_INDEX_NAME.getPreferredName(), originalIndexName); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(originalIndexName); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Info other = (Info) obj; + return Objects.equals(originalIndexName, other.originalIndexName); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java new file mode 100644 index 0000000000000..de38a5e092ae2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class StartILMRequest extends AcknowledgedRequest { + + public StartILMRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 64; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Step.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Step.java new file mode 100644 index 0000000000000..7152b093e62ef --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/Step.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * A single step in the execution of an index lifecycle policy.
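+ * Each step is identified by a {@link StepKey}, made up of a phase, an action and + * a step name, and carries the key of the step that follows it, so the steps of + * a policy form a chain. The next step key may be null when no step follows.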
+ */ +public abstract class Step { + private final StepKey key; + private final StepKey nextStepKey; + + public Step(StepKey key, StepKey nextStepKey) { + this.key = key; + this.nextStepKey = nextStepKey; + } + + public final StepKey getKey() { + return key; + } + + public final StepKey getNextStepKey() { + return nextStepKey; + } + + @Override + public int hashCode() { + return Objects.hash(key, nextStepKey); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Step other = (Step) obj; + return Objects.equals(key, other.key) && + Objects.equals(nextStepKey, other.nextStepKey); + } + + @Override + public String toString() { + return key + " => " + nextStepKey; + } + + public static final class StepKey implements Writeable, ToXContentObject { + private final String phase; + private final String action; + private final String name; + + public static final ParseField PHASE_FIELD = new ParseField("phase"); + public static final ParseField ACTION_FIELD = new ParseField("action"); + public static final ParseField NAME_FIELD = new ParseField("name"); + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("stepkey", a -> new StepKey((String) a[0], (String) a[1], (String) a[2])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); + } + + public StepKey(String phase, String action, String name) { + this.phase = phase; + this.action = action; + this.name = name; + } + + public StepKey(StreamInput in) throws IOException { + this.phase = in.readString(); + this.action = in.readString(); + this.name = in.readString(); + } + + public static StepKey parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(phase); + out.writeString(action); + out.writeString(name); + } + + public String getPhase() { + return phase; + } + + public String getAction() { + return action; + } + + public String getName() { + return name; + } + + @Override + public int hashCode() { + return Objects.hash(phase, action, name); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + StepKey other = (StepKey) obj; + return Objects.equals(phase, other.phase) && + Objects.equals(action, other.action) && + Objects.equals(name, other.name); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PHASE_FIELD.getPreferredName(), phase); + builder.field(ACTION_FIELD.getPreferredName(), action); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java new file mode 100644 index 0000000000000..3a2d458406b30 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; + +public class StopILMRequest extends AcknowledgedRequest { + + public StopILMRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 75; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java new file mode 100644 index 0000000000000..4ba1b4fd83c60 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStep.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +public class TerminalPolicyStep extends Step { + public static final String COMPLETED_PHASE = "completed"; + public static final StepKey KEY = new StepKey(COMPLETED_PHASE, "completed", "completed"); + public static final TerminalPolicyStep INSTANCE = new TerminalPolicyStep(KEY, null); + + TerminalPolicyStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java new file mode 100644 index 0000000000000..17c9eaf17c083 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. A + * {@link TimeseriesLifecycleType} is made up of a set of {@link Phase}s which it will + * move through. 
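Indices need not use every phase: when finding the next or previous phase, + any phase that is not configured in the policy is simply skipped.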
This lifecycle type allows only particular {@link Phase}s to be defined, + dictates the order in which the {@link Phase}s are executed and defines + which {@link LifecycleAction}s are allowed in each phase. + */ +public class TimeseriesLifecycleType implements LifecycleType { + public static final TimeseriesLifecycleType INSTANCE = new TimeseriesLifecycleType(); + + public static final String TYPE = "timeseries"; + static final List VALID_PHASES = Arrays.asList("hot", "warm", "cold", "delete"); + static final List ORDERED_VALID_HOT_ACTIONS = Collections.singletonList(RolloverAction.NAME); + static final List ORDERED_VALID_WARM_ACTIONS = Arrays.asList(ReadOnlyAction.NAME, AllocateAction.NAME, + ShrinkAction.NAME, ForceMergeAction.NAME); + static final List ORDERED_VALID_COLD_ACTIONS = Arrays.asList(AllocateAction.NAME); + static final List ORDERED_VALID_DELETE_ACTIONS = Arrays.asList(DeleteAction.NAME); + static final Set VALID_HOT_ACTIONS = Sets.newHashSet(ORDERED_VALID_HOT_ACTIONS); + static final Set VALID_WARM_ACTIONS = Sets.newHashSet(ORDERED_VALID_WARM_ACTIONS); + static final Set VALID_COLD_ACTIONS = Sets.newHashSet(ORDERED_VALID_COLD_ACTIONS); + static final Set VALID_DELETE_ACTIONS = Sets.newHashSet(ORDERED_VALID_DELETE_ACTIONS); + private static final Phase EMPTY_WARM_PHASE = new Phase("warm", TimeValue.ZERO, + Collections.singletonMap("readonly", ReadOnlyAction.INSTANCE)); + private static Map> ALLOWED_ACTIONS = new HashMap<>(); + + static { + ALLOWED_ACTIONS.put("hot", VALID_HOT_ACTIONS); + ALLOWED_ACTIONS.put("warm", VALID_WARM_ACTIONS); + ALLOWED_ACTIONS.put("cold", VALID_COLD_ACTIONS); + ALLOWED_ACTIONS.put("delete", VALID_DELETE_ACTIONS); + } + + private TimeseriesLifecycleType() { + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public List getOrderedPhases(Map phases) { + List orderedPhases = new ArrayList<>(VALID_PHASES.size()); + for (String phaseName : VALID_PHASES) { + Phase phase = phases.get(phaseName); + if (phase != null) { + orderedPhases.add(phase); + } + } + return orderedPhases; + } + + @Override + public String getNextPhaseName(String currentPhaseName, Map phases) { + int index = VALID_PHASES.indexOf(currentPhaseName); + if (index < 0 && "new".equals(currentPhaseName) == false) { + throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]"); + } else { + // Find the next phase after `index` that exists in `phases` and return it + while (++index < VALID_PHASES.size()) { + String phaseName = VALID_PHASES.get(index); + if (phases.containsKey(phaseName)) { + return phaseName; + } + } + // if we have exhausted VALID_PHASES and haven't found a matching + // phase in `phases` return null indicating there is no next phase + // available + return null; + } + } + + public String getPreviousPhaseName(String currentPhaseName, Map phases) { + if ("new".equals(currentPhaseName)) { + return null; + } + int index = VALID_PHASES.indexOf(currentPhaseName); + if (index < 0) { + throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]"); + } else { + // Find the previous phase before `index` that exists in `phases` and return it + while (--index >= 0) { + String phaseName = VALID_PHASES.get(index); + if (phases.containsKey(phaseName)) { + return phaseName; + } + } + // if we have 
exhausted VALID_PHASES and haven't found a matching + // phase in `phases` return null indicating there is no previous phase + // available + return null; + } + } + + public List getOrderedActions(Phase phase) { + Map actions = phase.getActions(); + switch (phase.getName()) { + case "hot": + return ORDERED_VALID_HOT_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case "warm": + return ORDERED_VALID_WARM_ACTIONS.stream() .map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case "cold": + return ORDERED_VALID_COLD_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + case "delete": + return ORDERED_VALID_DELETE_ACTIONS.stream().map(a -> actions.getOrDefault(a, null)) + .filter(Objects::nonNull).collect(Collectors.toList()); + default: + throw new IllegalArgumentException("lifecycle type[" + TYPE + "] does not support phase[" + phase.getName() + "]"); + } + } + + @Override + public String getNextActionName(String currentActionName, Phase phase) { + List orderedActionNames; + switch (phase.getName()) { + case "hot": + orderedActionNames = ORDERED_VALID_HOT_ACTIONS; + break; + case "warm": + orderedActionNames = ORDERED_VALID_WARM_ACTIONS; + break; + case "cold": + orderedActionNames = ORDERED_VALID_COLD_ACTIONS; + break; + case "delete": + orderedActionNames = ORDERED_VALID_DELETE_ACTIONS; + break; + default: + throw new IllegalArgumentException("lifecycle type[" + TYPE + "] does not support phase[" + phase.getName() + "]"); + } + + int index = orderedActionNames.indexOf(currentActionName); + if (index < 0) { + throw new IllegalArgumentException("[" + currentActionName + "] is not a valid action for phase [" + phase.getName() + + "] in lifecycle type [" + TYPE + "]"); + } else { + // Find the next action after `index` that exists in the phase and return it + while (++index < orderedActionNames.size()) { + String actionName = orderedActionNames.get(index); + if (phase.getActions().containsKey(actionName)) { + return actionName; + } + } + // if we have exhausted `validActions` and haven't found a matching + // action in the Phase return null indicating there is no next + // action available + return null; + } + } + + @Override + public void validate(Collection phases) { + phases.forEach(phase -> { + if (ALLOWED_ACTIONS.containsKey(phase.getName()) == false) { + throw new IllegalArgumentException("Timeseries lifecycle does not support phase [" + phase.getName() + "]"); + } + phase.getActions().forEach((actionName, action) -> { + if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) { + throw new IllegalArgumentException("invalid action [" + actionName + "] " + + "defined in phase [" + phase.getName() +"]"); + } + }); + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java new file mode 100644 index 0000000000000..704d122f571a6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStep.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.Index; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +public class UpdateRolloverLifecycleDateStep extends ClusterStateActionStep { + public static final String NAME = "update-rollover-lifecycle-date"; + + public UpdateRolloverLifecycleDateStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + @Override + public ClusterState performAction(Index index, ClusterState currentState) { + IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); + // find the newly created index from the rollover and fetch its index.creation_date + String rolloverAlias = RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.get(indexMetaData.getSettings()); + if (Strings.isNullOrEmpty(rolloverAlias)) { + throw new IllegalStateException("setting [" + RolloverAction.LIFECYCLE_ROLLOVER_ALIAS + + "] is not set on index [" + indexMetaData.getIndex().getName() + "]"); + } + RolloverInfo rolloverInfo = indexMetaData.getRolloverInfos().get(rolloverAlias); + if (rolloverInfo == null) { + throw new IllegalStateException("no rollover info found for [" + indexMetaData.getIndex().getName() + "], either the index " + + "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management"); + } + + LifecycleExecutionState.Builder newLifecycleState = LifecycleExecutionState + .builder(LifecycleExecutionState.fromIndexMetadata(indexMetaData)); + newLifecycleState.setIndexCreationDate(rolloverInfo.getTime()); + + IndexMetaData.Builder newIndexMetadata = IndexMetaData.builder(indexMetaData); + newIndexMetadata.putCustom(ILM_CUSTOM_METADATA_KEY, newLifecycleState.build().asMap()); + return ClusterState.builder(currentState).metaData(MetaData.builder(currentState.metaData()) + .put(newIndexMetadata)).build(); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj != null && getClass() == obj.getClass() && super.equals(obj); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java new file mode 100644 index 0000000000000..5602f6aa3f499 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStep.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; + +import java.util.Objects; + +public class UpdateSettingsStep extends AsyncActionStep { + public static final String NAME = "update-settings"; + + private final Settings settings; + + public UpdateSettingsStep(StepKey key, StepKey nextStepKey, Client client, Settings settings) { + super(key, nextStepKey, client); + this.settings = settings; + } + + @Override + public void performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indexMetaData.getIndex().getName()).settings(settings); + getClient().admin().indices().updateSettings(updateSettingsRequest, + ActionListener.wrap(response -> listener.onResponse(true), listener::onFailure)); + } + + public Settings getSettings() { + return settings; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), settings); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + UpdateSettingsStep other = (UpdateSettingsStep) obj; + return super.equals(obj) && + Objects.equals(settings, other.settings); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java new file mode 100644 index 0000000000000..ba962d635ee6e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleAction.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteLifecycleAction extends Action { + public static final DeleteLifecycleAction INSTANCE = new DeleteLifecycleAction(); + public static final String NAME = "cluster:admin/ilm/delete"; + + protected DeleteLifecycleAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response() { + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + } + + public static class Request extends AcknowledgedRequest { + + public static final ParseField POLICY_FIELD = new ParseField("policy"); + + private String policyName; + + public Request(String policyName) { + this.policyName = policyName; + } + + public Request() { + } + + public String getPolicyName() { + return policyName; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + policyName = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(policyName); + } + + @Override + public int hashCode() { + return Objects.hash(policyName); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(policyName, other.policyName); + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java new file mode 100644 index 0000000000000..5acbbcb4967f3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/ExplainLifecycleAction.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; + +public class ExplainLifecycleAction extends Action { + public static final ExplainLifecycleAction INSTANCE = new ExplainLifecycleAction(); + public static final String NAME = "indices:admin/ilm/explain"; + + protected ExplainLifecycleAction() { + super(NAME); + } + + @Override + public ExplainLifecycleResponse newResponse() { + return new ExplainLifecycleResponse(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java new file mode 100644 index 0000000000000..aaa295354a850 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleAction.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class GetLifecycleAction extends Action { + public static final GetLifecycleAction INSTANCE = new GetLifecycleAction(); + public static final String NAME = "cluster:admin/ilm/get"; + + protected GetLifecycleAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private List policies; + + public Response() { + } + + public Response(List policies) { + this.policies = policies; + } + + public List getPolicies() { + return policies; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (LifecyclePolicyResponseItem item : policies) { + builder.startObject(item.getLifecyclePolicy().getName()); + builder.field("version", item.getVersion()); + builder.field("modified_date", item.getModifiedDate()); + builder.field("policy", item.getLifecyclePolicy()); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.policies = in.readList(LifecyclePolicyResponseItem::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(policies); + } + + @Override + public int hashCode() { + return Objects.hash(policies); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if 
(obj.getClass() != getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(policies, other.policies); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + } + + public static class Request extends AcknowledgedRequest { + private String[] policyNames; + + public Request(String... policyNames) { + if (policyNames == null) { + throw new IllegalArgumentException("ids cannot be null"); + } + this.policyNames = policyNames; + } + + public Request() { + policyNames = Strings.EMPTY_ARRAY; + } + + public String[] getPolicyNames() { + return policyNames; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + policyNames = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(policyNames); + } + + @Override + public int hashCode() { + return Arrays.hashCode(policyNames); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Arrays.equals(policyNames, other.policyNames); + } + + } + + public static class LifecyclePolicyResponseItem implements Writeable { + private final LifecyclePolicy lifecyclePolicy; + private final long version; + private final String modifiedDate; + + public LifecyclePolicyResponseItem(LifecyclePolicy lifecyclePolicy, long version, String modifiedDate) { + this.lifecyclePolicy = lifecyclePolicy; + this.version = version; + this.modifiedDate = modifiedDate; + } + + LifecyclePolicyResponseItem(StreamInput in) throws IOException { + this.lifecyclePolicy = new LifecyclePolicy(in); + this.version = in.readVLong(); + this.modifiedDate = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + lifecyclePolicy.writeTo(out); + out.writeVLong(version); + out.writeString(modifiedDate); + } + + public LifecyclePolicy getLifecyclePolicy() { + return lifecyclePolicy; + } + + public long getVersion() { + return version; + } + + public String getModifiedDate() { + return modifiedDate; + } + + @Override + public int hashCode() { + return Objects.hash(lifecyclePolicy, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + LifecyclePolicyResponseItem other = (LifecyclePolicyResponseItem) obj; + return Objects.equals(lifecyclePolicy, other.lifecyclePolicy) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/AutoFollowStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java similarity index 53% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/AutoFollowStatsAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java index 12cb1b4bf5935..40765f0aa6650 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/AutoFollowStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetStatusAction.java @@ -4,27 
+4,27 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.ccr.action; +package org.elasticsearch.xpack.core.indexlifecycle.action; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ccr.AutoFollowStats; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import java.io.IOException; import java.util.Objects; -public class AutoFollowStatsAction extends Action { - - public static final String NAME = "cluster:monitor/ccr/auto_follow_stats"; - public static final AutoFollowStatsAction INSTANCE = new AutoFollowStatsAction(); +public class GetStatusAction extends Action { + public static final GetStatusAction INSTANCE = new GetStatusAction(); + public static final String NAME = "cluster:admin/ilm/operation_mode/get"; - private AutoFollowStatsAction() { + protected GetStatusAction() { super(NAME); } @@ -33,71 +33,81 @@ public Response newResponse() { return new Response(); } - public static class Request extends MasterNodeRequest { + public static class Response extends ActionResponse implements ToXContentObject { + + private OperationMode mode; - public Request(StreamInput in) throws IOException { - super(in); + public Response() { } - public Request() { + public Response(OperationMode mode) { + this.mode = mode; } - @Override - public ActionRequestValidationException validate() { - return null; + public OperationMode getMode() { + return mode; } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("operation_mode", mode); + builder.endObject(); + return builder; } - } - - public static class Response extends ActionResponse implements ToXContentObject { - - private AutoFollowStats stats; - public Response(AutoFollowStats stats) { - this.stats = stats; + @Override + public void readFrom(StreamInput in) throws IOException { + mode = in.readEnum(OperationMode.class); } - public Response() { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(mode); } - public AutoFollowStats getStats() { - return stats; + @Override + public int hashCode() { + return Objects.hash(mode); } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - stats = new AutoFollowStats(in); + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(mode, other.mode); } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - stats.writeTo(out); + public String toString() { + return Strings.toString(this, true, true); + } + + } + + public static class Request extends AcknowledgedRequest { + + public Request() { } @Override - public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { - stats.toXContent(builder, params); - return builder; + public ActionRequestValidationException validate() { + return null; } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Response response = (Response) o; - return Objects.equals(stats, response.stats); + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); } @Override - public int hashCode() { - return Objects.hash(stats); + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); } } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java new file mode 100644 index 0000000000000..e92502d4ef526 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepAction.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.Objects; + +public class MoveToStepAction extends Action { + public static final MoveToStepAction INSTANCE = new MoveToStepAction(); + public static final String NAME = "cluster:admin/ilm/_move/post"; + + protected MoveToStepAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response() { + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + static final ParseField CURRENT_KEY_FIELD = new ParseField("current_step"); + static final ParseField NEXT_KEY_FIELD = new ParseField("next_step"); + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("move_to_step_request", false, + (a, index) -> { + StepKey currentStepKey = (StepKey) a[0]; + StepKey nextStepKey = (StepKey) a[1]; + return new Request(index, currentStepKey, nextStepKey); + }); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> StepKey.parse(p), CURRENT_KEY_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, name) -> StepKey.parse(p), NEXT_KEY_FIELD); + } + + private String index; + private StepKey 
currentStepKey; + private StepKey nextStepKey; + + public Request(String index, StepKey currentStepKey, StepKey nextStepKey) { + this.index = index; + this.currentStepKey = currentStepKey; + this.nextStepKey = nextStepKey; + } + + public Request() { + } + + public String getIndex() { + return index; + } + + public StepKey getCurrentStepKey() { + return currentStepKey; + } + + public StepKey getNextStepKey() { + return nextStepKey; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public static Request parseRequest(String name, XContentParser parser) { + return PARSER.apply(parser, name); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.index = in.readString(); + this.currentStepKey = new StepKey(in); + this.nextStepKey = new StepKey(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + currentStepKey.writeTo(out); + nextStepKey.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(index, currentStepKey, nextStepKey); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(index, other.index) && Objects.equals(currentStepKey, other.currentStepKey) + && Objects.equals(nextStepKey, other.nextStepKey); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(CURRENT_KEY_FIELD.getPreferredName(), currentStepKey) + .field(NEXT_KEY_FIELD.getPreferredName(), nextStepKey) + .endObject(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java new file mode 100644 index 0000000000000..09e4d978b6094 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleAction.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; + +import java.io.IOException; +import java.util.Objects; + +public class PutLifecycleAction extends Action { + public static final PutLifecycleAction INSTANCE = new PutLifecycleAction(); + public static final String NAME = "cluster:admin/ilm/put"; + + protected PutLifecycleAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response() { + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static final ParseField POLICY_FIELD = new ParseField("policy"); + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("put_lifecycle_request", a -> new Request((LifecyclePolicy) a[0])); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY_FIELD); + } + + private LifecyclePolicy policy; + + public Request(LifecyclePolicy policy) { + this.policy = policy; + } + + public Request() { + } + + public LifecyclePolicy getPolicy() { + return policy; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public static Request parseRequest(String name, XContentParser parser) { + return PARSER.apply(parser, name); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY_FIELD.getPreferredName(), policy); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + policy = new LifecyclePolicy(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + policy.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(policy); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(policy, other.policy); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..239e748e58d8a --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyAction extends Action { + public static final RemoveIndexLifecyclePolicyAction INSTANCE = new RemoveIndexLifecyclePolicyAction(); + public static final String NAME = "indices:admin/ilm/remove_policy"; + + protected RemoveIndexLifecyclePolicyAction() { + super(NAME); + } + + @Override + public RemoveIndexLifecyclePolicyAction.Response newResponse() { + return new Response(); + } + + public static class Response extends ActionResponse implements ToXContentObject { + + public static final ParseField HAS_FAILURES_FIELD = new ParseField("has_failures"); + public static final ParseField FAILED_INDEXES_FIELD = new ParseField("failed_indexes"); + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "change_policy_for_index_response", a -> new Response((List) a[0])); + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FAILED_INDEXES_FIELD); + // Needs to be declared but not used in constructing the response object + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), HAS_FAILURES_FIELD); + } + + private List failedIndexes; + + public Response() { + } + + public Response(List failedIndexes) { + if (failedIndexes == null) { + throw new IllegalArgumentException(FAILED_INDEXES_FIELD.getPreferredName() + " cannot be null"); + } + this.failedIndexes = failedIndexes; + } + + public List getFailedIndexes() { + return failedIndexes; + } + + public boolean hasFailures() { + return failedIndexes.isEmpty() == false; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(HAS_FAILURES_FIELD.getPreferredName(), hasFailures()); + builder.field(FAILED_INDEXES_FIELD.getPreferredName(), failedIndexes); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + failedIndexes = in.readList(StreamInput::readString); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringList(failedIndexes); + } + + @Override + public int hashCode() { + return Objects.hash(failedIndexes); + } + + @Override + public boolean 
equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(failedIndexes, other.failedIndexes); + } + + } + + public static class Request extends AcknowledgedRequest implements IndicesRequest.Replaceable { + + private String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public Request() { + } + + public Request(String... indices) { + if (indices == null) { + throw new IllegalArgumentException("indices cannot be null"); + } + this.indices = indices; + } + + @Override + public Request indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public String[] indices() { + return indices; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.deepEquals(indices, other.indices) && + Objects.equals(indicesOptions, other.indicesOptions); + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java new file mode 100644 index 0000000000000..6af7f2de6f0b7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryAction.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class RetryAction extends Action { + public static final RetryAction INSTANCE = new RetryAction(); + public static final String NAME = "indices:admin/ilm/retry"; + + protected RetryAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends AcknowledgedResponse implements ToXContentObject { + + public Response() { + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + } + + public static class Request extends AcknowledgedRequest implements IndicesRequest.Replaceable { + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public Request(String... indices) { + this.indices = indices; + } + + public Request() { + } + + @Override + public Request indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public Request indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.deepEquals(indices, other.indices) + && Objects.equals(indicesOptions, other.indicesOptions); + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java new file mode 100644 index 0000000000000..5d2f066d60b15 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StartILMAction.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.AcknowledgedResponse; + +public class StartILMAction extends Action { + public static final StartILMAction INSTANCE = new StartILMAction(); + public static final String NAME = "cluster:admin/ilm/start"; + + protected StartILMAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java new file mode 100644 index 0000000000000..63adeabb30f90 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/action/StopILMAction.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.support.master.AcknowledgedResponse; + +public class StopILMAction extends Action { + public static final StopILMAction INSTANCE = new StopILMAction(); + public static final String NAME = "cluster:admin/ilm/stop"; + + protected StopILMAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java new file mode 100644 index 0000000000000..5e81d2e2aa7e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/client/ILMClient.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; + +/** + * A wrapper around an Elasticsearch client that exposes all ILM-related APIs + */ +public class ILMClient { + + private ElasticsearchClient client; + + public ILMClient(ElasticsearchClient client) { + this.client = client; + } + + /** + * Create or modify a lifecycle policy definition + */ + public void putLifecyclePolicy(PutLifecycleAction.Request request, ActionListener listener) { + client.execute(PutLifecycleAction.INSTANCE, request, listener); + } + + /** + * Create or modify a lifecycle policy definition + */ + public ActionFuture putLifecyclePolicy(PutLifecycleAction.Request request) { + return client.execute(PutLifecycleAction.INSTANCE, request); + } + + /** + * Get a lifecycle policy definition + */ + public void getLifecyclePolicy(GetLifecycleAction.Request request, ActionListener listener) { + client.execute(GetLifecycleAction.INSTANCE, request, listener); + } + + /** + * Get a lifecycle policy definition + */ + public ActionFuture getLifecyclePolicy(GetLifecycleAction.Request request) { + return client.execute(GetLifecycleAction.INSTANCE, request); + } + + /** + * Delete a lifecycle policy definition + */ + public void deleteLifecyclePolicy(DeleteLifecycleAction.Request request, ActionListener listener) { + client.execute(DeleteLifecycleAction.INSTANCE, request, listener); + } + + /** + * Delete a lifecycle policy definition + */ + public ActionFuture deleteLifecyclePolicy(DeleteLifecycleAction.Request request) { + return client.execute(DeleteLifecycleAction.INSTANCE, request); + } + + /** + * Explain the current lifecycle state for an index + */ + public void explainLifecycle(ExplainLifecycleRequest request, ActionListener listener) { + client.execute(ExplainLifecycleAction.INSTANCE, request, listener); + } + + /** + * Explain the current lifecycle state for an index + */ + public ActionFuture explainLifecycle(ExplainLifecycleRequest request) { + return client.execute(ExplainLifecycleAction.INSTANCE, request); + } + + /** + * Returns the current status of the ILM plugin + */ + public void getStatus(GetStatusAction.Request request, ActionListener listener) { + client.execute(GetStatusAction.INSTANCE, request, listener); + } + + /** + * Returns the current status of the ILM plugin + */ + public 
ActionFuture getStatus(GetStatusAction.Request request) { + return client.execute(GetStatusAction.INSTANCE, request); + } + + /** + * Removes index lifecycle management from an index + */ + public void removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyAction.Request request, + ActionListener listener) { + client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, request, listener); + } + + /** + * Removes index lifecycle management from an index + */ + public ActionFuture removeIndexLifecyclePolicy( + RemoveIndexLifecyclePolicyAction.Request request) { + return client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, request); + } + + /** + * Retries the policy for an index which is currently in ERROR + */ + public void retryPolicy(RetryAction.Request request, ActionListener listener) { + client.execute(RetryAction.INSTANCE, request, listener); + } + + /** + * Retries the policy for an index which is currently in ERROR + */ + public ActionFuture retryPolicy(RetryAction.Request request) { + return client.execute(RetryAction.INSTANCE, request); + } + + /** + * Starts the ILM plugin + */ + public void startILM(StartILMRequest request, ActionListener listener) { + client.execute(StartILMAction.INSTANCE, request, listener); + } + + /** + * Starts the ILM plugin + */ + public ActionFuture startILM(StartILMRequest request) { + return client.execute(StartILMAction.INSTANCE, request); + } + + /** + * Stops the ILM plugin + */ + public void stopILM(StopILMRequest request, ActionListener listener) { + client.execute(StopILMAction.INSTANCE, request, listener); + } + + /** + * Stops the ILM plugin + */ + public ActionFuture stopILM(StopILMRequest request) { + return client.execute(StopILMAction.INSTANCE, request); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 9cad992327e25..b5aac1e62256e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -422,9 +422,9 @@ private TimeValue defaultFrequencyTarget(TimeValue bucketSpan) { public static class Builder { + public static final int DEFAULT_AGGREGATION_CHUNKING_BUCKETS = 1000; private static final TimeValue MIN_DEFAULT_QUERY_DELAY = TimeValue.timeValueMinutes(1); private static final TimeValue MAX_DEFAULT_QUERY_DELAY = TimeValue.timeValueMinutes(2); - private static final int DEFAULT_AGGREGATION_CHUNKING_BUCKETS = 1000; private String id; private String jobId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java index 6d9312654facb..f0ba07ad15c68 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ml.datafeed.extractor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -34,7 +34,7 @@ */ public final class ExtractorUtils { - private static final Logger LOGGER = Loggers.getLogger(ExtractorUtils.class); + private static final Logger LOGGER = LogManager.getLogger(ExtractorUtils.class); private static final String EPOCH_MILLIS = "epoch_millis"; private ExtractorUtils() {} @@ -139,7 +139,7 @@ private static long validateAndGetDateHistogramInterval(DateHistogramAggregation } } - static long validateAndGetCalendarInterval(String calendarInterval) { + public static long validateAndGetCalendarInterval(String calendarInterval) { TimeValue interval; DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); if (dateTimeUnit != null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java index f381d5296a48b..12c42f3df4d58 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java @@ -32,11 +32,11 @@ public class FileStructure implements ToXContentObject, Writeable { public enum Format { - JSON, XML, DELIMITED, SEMI_STRUCTURED_TEXT; + NDJSON, XML, DELIMITED, SEMI_STRUCTURED_TEXT; public boolean supportsNesting() { switch (this) { - case JSON: + case NDJSON: case XML: return true; case DELIMITED: @@ -49,7 +49,7 @@ public boolean supportsNesting() { public boolean isStructured() { switch (this) { - case JSON: + case NDJSON: case XML: case DELIMITED: return true; @@ -62,7 +62,7 @@ public boolean isStructured() { public boolean isSemiStructured() { switch (this) { - case JSON: + case NDJSON: case XML: case DELIMITED: return false; @@ -645,7 +645,7 @@ public FileStructure build() { } switch (format) { - case JSON: + case NDJSON: if (shouldTrimFields != null) { throw new IllegalArgumentException("Should trim fields may not be specified for [" + format + "] structures."); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java index 3980282321cc8..595df0f8c31ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java @@ -25,7 +25,11 @@ public SearchResponse newResponse() { return new SearchResponse(); } - static class RequestBuilder extends ActionRequestBuilder { + public static class RequestBuilder extends ActionRequestBuilder { + public RequestBuilder(ElasticsearchClient client, SearchRequest searchRequest) { + super(client, INSTANCE, searchRequest); + } + RequestBuilder(ElasticsearchClient client) { super(client, INSTANCE, new SearchRequest()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java index f422d073cfeb5..82d31aa8a2993 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java @@ -7,12 +7,10 @@ import org.apache.lucene.util.SPIClassIterator; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.Realm; -import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; @@ -42,16 +40,6 @@ default Map getRealms(ResourceWatcherService resourceWatc return Collections.emptyMap(); } - /** - * Returns the set of {@link Setting settings} that may be configured for the each type of realm. - * - * Each setting key must be unqualified and is in the same format as will be provided via {@link RealmConfig#settings()}. - * If a given realm-type is not present in the returned map, then it will be treated as if it supported all possible settings. - * - * The life-cycle of an extension dictates that this method will be called before {@link #getRealms(ResourceWatcherService)} - */ - default Map>> getRealmSettings() { return Collections.emptyMap(); } - /** * Returns a handler for authentication failures, or null to use the default handler. * diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java index 0570a2bdad23f..707912fb20268 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java @@ -13,26 +13,25 @@ import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import java.util.Collections; -import java.util.HashMap; -import java.util.Map; +import java.util.HashSet; import java.util.Set; public final class InternalRealmsSettings { - private InternalRealmsSettings() {} + private InternalRealmsSettings() { + } /** * Provides the {@link Setting setting configuration} for each internal realm type. * This excludes the ReservedRealm, as it cannot be configured dynamically. - * @return A map from realm-type to a collection of Setting objects. 
*/ - public static Map>> getSettings() { - Map>> map = new HashMap<>(); - map.put(FileRealmSettings.TYPE, FileRealmSettings.getSettings()); - map.put(NativeRealmSettings.TYPE, NativeRealmSettings.getSettings()); - map.put(LdapRealmSettings.AD_TYPE, LdapRealmSettings.getSettings(LdapRealmSettings.AD_TYPE)); - map.put(LdapRealmSettings.LDAP_TYPE, LdapRealmSettings.getSettings(LdapRealmSettings.LDAP_TYPE)); - map.put(PkiRealmSettings.TYPE, PkiRealmSettings.getSettings()); - map.put(SamlRealmSettings.TYPE, SamlRealmSettings.getSettings()); - return Collections.unmodifiableMap(map); + public static Set> getSettings() { + Set> set = new HashSet<>(); + set.addAll(FileRealmSettings.getSettings()); + set.addAll(NativeRealmSettings.getSettings()); + set.addAll(LdapRealmSettings.getSettings(LdapRealmSettings.AD_TYPE)); + set.addAll(LdapRealmSettings.getSettings(LdapRealmSettings.LDAP_TYPE)); + set.addAll(PkiRealmSettings.getSettings()); + set.addAll(SamlRealmSettings.getSettings()); + return Collections.unmodifiableSet(set); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java index 5ccd8bec1a53d..adc79279a70ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -27,16 +27,10 @@ public abstract class Realm implements Comparable { protected final Logger logger = LogManager.getLogger(getClass()); - protected final String type; - - public String getType() { - return type; - } protected RealmConfig config; - public Realm(String type, RealmConfig config) { - this.type = type; + public Realm(RealmConfig config) { this.config = config; } @@ -44,14 +38,14 @@ public Realm(String type, RealmConfig config) { * @return The type of this realm */ public String type() { - return type; + return config.type(); } /** * @return The name of this realm. 
*/ public String name() { - return config.name; + return config.name(); } /** @@ -78,7 +72,7 @@ public int compareTo(Realm other) { int result = Integer.compare(config.order, other.config.order); if (result == 0) { // If same order, compare based on the realm name - result = config.name.compareTo(other.config.name); + result = config.name().compareTo(other.config.name()); } return result; } @@ -145,7 +139,7 @@ public void usageStats(ActionListener> listener) { @Override public String toString() { - return type + "/" + config.name; + return config.type() + "/" + config.name(); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java index 759f938491e30..f6f370addba83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java @@ -5,52 +5,58 @@ */ package org.elasticsearch.xpack.core.security.authc; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import java.util.Objects; +import java.util.function.Function; +import java.util.function.Supplier; + public class RealmConfig { - final String name; + final RealmIdentifier identifier; final boolean enabled; final int order; - private final String type; - final Settings settings; - private final Environment env; private final Settings globalSettings; private final ThreadContext threadContext; - public RealmConfig(String name, Settings settings, Settings globalSettings, Environment env, + @Deprecated + public RealmConfig(RealmIdentifier identifier, Settings settings, Settings globalSettings, Environment env, + ThreadContext threadContext) { + this(identifier, globalSettings, env, threadContext); + } + + public RealmConfig(RealmIdentifier identifier, Settings globalSettings, Environment env, ThreadContext threadContext) { - this.name = name; - this.settings = settings; + this.identifier = identifier; this.globalSettings = globalSettings; this.env = env; - enabled = RealmSettings.ENABLED_SETTING.get(settings); - order = RealmSettings.ORDER_SETTING.get(settings); - type = RealmSettings.TYPE_SETTING.get(settings); + enabled = getSetting(RealmSettings.ENABLED_SETTING); + order = getSetting(RealmSettings.ORDER_SETTING); this.threadContext = threadContext; } - + + public RealmIdentifier identifier() { + return identifier; + } + public String name() { - return name; + return identifier.name; } public boolean enabled() { return enabled; } - + public int order() { return order; } public String type() { - return type; - } - - public Settings settings() { - return settings; + return identifier.type; } public Settings globalSettings() { @@ -64,4 +70,148 @@ public Environment env() { public ThreadContext threadContext() { return threadContext; } + + /** + * Return the {@link Setting.AffixSetting#getConcreteSettingForNamespace concrete setting} + * that is produced by applying this realm's name as the namespace. + * Realm configuration is defined using affix settings in the form {@code xpack.security.authc.realms.type.(name).key}, + * where + *
+ * <ul>
+ * <li>{@code type} is a fixed string (known at compile time) that identifies the type of the realm being configured.</li>
+ * <li>{@code (name)} is a variable string (known only at runtime) that uniquely names the realm.</li>
+ * <li>{@code key} is a fixed string (known at compile time) that identifies a specific setting within the realm.</li>
+ * </ul>
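+ * For example, the concrete setting for the {@code order} of a file realm named "file1" has the key
+ * {@code xpack.security.authc.realms.file.file1.order}.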
+ * In order to extract an individual value from the runtime {@link Settings} object, it is necessary to convert an + * {@link Setting.AffixSetting} object into a concrete {@link Setting} object that has a fixed key, for a specific name. + */ + public Setting getConcreteSetting(Setting.AffixSetting setting) { + return setting.getConcreteSettingForNamespace(name()); + } + + /** + * Return the {@link Setting.AffixSetting#getConcreteSettingForNamespace concrete setting} that is produced by applying this realm's + * type as a parameter to the provided function, and the realm's name (as the namespace) to the resulting {@link Setting.AffixSetting}. + * Because some settings (e.g. {@link RealmSettings#ORDER_SETTING "order"}) are defined for multiple "types", but the Settings + * infrastructure treats the type as a fixed part of the setting key, it is common to define such multi-realm settings using a + * {@link Function} of this form. + * @see #getConcreteSetting(Setting.AffixSetting) + */ + public Setting getConcreteSetting(Function> settingFactory) { + return getConcreteSetting(settingFactory.apply(type())); + } + + /** + * Obtain the value of the provided {@code setting} from the node's {@link #globalSettings global settings}. + * The {@link Setting.AffixSetting} is made concrete through {@link #getConcreteSetting(Setting.AffixSetting)}, which is then + * used to {@link Setting#get(Settings) retrieve} the setting value. + */ + public T getSetting(Setting.AffixSetting setting) { + return getConcreteSetting(setting).get(globalSettings); + } + + /** + * Obtain the value of the provided {@code setting} from the node's {@link #globalSettings global settings}. + * {@link #getConcreteSetting(Function)} is used to obtain a concrete setting from the provided + * {@link Function}/{@link Setting.AffixSetting}, and this concrete setting is then used to + * {@link Setting#get(Settings) retrieve} the setting value. + */ + public T getSetting(Function> settingFactory) { + return getSetting(settingFactory.apply(type())); + } + + /** + * Obtain the value of the provided {@code setting} from the node's {@link #globalSettings global settings}. + * {@link #getConcreteSetting(Function)} is used to obtain a concrete setting from the provided + * {@link Function}/{@link Setting.AffixSetting}. + * If this concrete setting {@link Setting#exists(Settings) exists} in the global settings, then its value is returned, + * otherwise the {@code orElse} {@link Supplier} is executed and its result returned. + */ + public T getSetting(Function> settingFactory, Supplier orElse) { + return getSetting(settingFactory.apply(type()), orElse); + } + + /** + * Obtain the value of the provided {@code setting} from the node's {@link #globalSettings global settings}. + * {@link #getConcreteSetting(Setting.AffixSetting)} is used to obtain a concrete setting from the provided + * {@link Setting.AffixSetting}. + * If this concrete setting {@link Setting#exists(Settings) exists} in the global settings, then its value is returned, + * otherwise the {@code orElse} {@link Supplier} is executed and its result returned. + */ + public T getSetting(Setting.AffixSetting setting, Supplier orElse) { + final Setting concrete = setting.getConcreteSettingForNamespace(name()); + if (concrete.exists(globalSettings)) { + return concrete.get(globalSettings); + } else { + return orElse.get(); + } + } + + /** + * Determines whether the provided {@code setting} has an explicit value in the node's {@link #globalSettings global settings}. 
+ * {@link #getConcreteSetting(Function)} is used to obtain a concrete setting from the provided + * {@link Function}/{@link Setting.AffixSetting}, and this concrete setting is then used to + * {@link Setting#exists(Settings) check} for a value. + */ + public boolean hasSetting(Function> settingFactory) { + return getConcreteSetting(settingFactory).exists(globalSettings); + } + + /** + * Determines whether the provided {@code setting} has an explicit value in the node's {@link #globalSettings global settings}. + * {@link #getConcreteSetting(Setting.AffixSetting)} is used to obtain a concrete setting from the provided + * {@link Setting.AffixSetting}, and this concrete setting is then used to {@link Setting#exists(Settings) check} for a value. + */ + public boolean hasSetting(Setting.AffixSetting setting) { + return getConcreteSetting(setting).exists(globalSettings); + } + + /** + * A realm identifier consists of a realm's {@link RealmConfig#type() type} and {@link RealmConfig#name() name}. + * Because realms are configured using a key that contains both of these parts + * (e.g. {@code xpack.security.authc.realms.native.native_realm.order}), it is often necessary to be able to + * pass this pair of variables as a single type (e.g. in method parameters, or return values). + */ + public static class RealmIdentifier { + private final String type; + private final String name; + + public RealmIdentifier(String type, String name) { + this.type = Objects.requireNonNull(type, "Realm type cannot be null"); + this.name = Objects.requireNonNull(name, "Realm name cannot be null"); + } + + public String getType() { + return type; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + final RealmIdentifier other = (RealmIdentifier) o; + return Objects.equals(this.type, other.type) && + Objects.equals(this.name, other.name); + } + + @Override + public int hashCode() { + return Objects.hash(type, name); + } + + @Override + public String toString() { + return type + '/' + name; + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java index daf1775a80a52..913fcba3d33c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java @@ -5,193 +5,106 @@ */ package org.elasticsearch.xpack.core.security.authc; -import org.elasticsearch.common.settings.AbstractScopedSettings; -import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.security.SecurityExtension; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; +import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; -import static org.elasticsearch.common.Strings.isNullOrEmpty; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; - /** - * Configures the {@link 
Setting#groupSetting(String, Consumer, Setting.Property...) group setting} for security - * {@link Realm realms}, with validation according to the realm type. - * <p> - * The allowable settings for a given realm are dependent on the {@link Realm#type() realm type}, so it is not possible - * to simply provide a list of {@link Setting} objects and rely on the global setting validation (e.g. A custom realm-type might - * define a setting with the same logical key as an internal realm-type, but a different data type). - * <p> - * Instead, realm configuration relies on the validator parameter to - * {@link Setting#groupSetting(String, Consumer, Setting.Property...)} in order to validate each realm in a way that respects the - * declared type. - * Internally, this validation delegates to {@link AbstractScopedSettings#validate(Settings, boolean)} so that validation is reasonably - * aligned - * with the way we validate settings globally. - * <p>
- * The allowable settings for each realm-type are determined by calls to {@link InternalRealmsSettings#getSettings()} and - * {@link org.elasticsearch.xpack.core.security.SecurityExtension#getRealmSettings()} + * Provides a number of utility methods for interacting with {@link Settings} and {@link Setting} inside a {@link Realm}. + * Settings for realms use an {@link Setting#affixKeySetting(String, String, Function, Setting.AffixSetting[]) affix} style, + * where the type of the realm is part of the prefix, and the name of the realm is the variable portion + * (that is, to set the order in a file realm named "file1", the full setting key would be + * {@code xpack.security.authc.realms.file.file1.order}). + * This class provides some convenience methods for defining and retrieving such settings. */ public class RealmSettings { - public static final String PREFIX = setting("authc.realms."); - - static final Setting TYPE_SETTING = Setting.simpleString("type", Setting.Property.NodeScope); - static final Setting ENABLED_SETTING = Setting.boolSetting("enabled", true, Setting.Property.NodeScope); - static final Setting ORDER_SETTING = Setting.intSetting("order", Integer.MAX_VALUE, Setting.Property.NodeScope); + public static final String PREFIX = "xpack.security.authc.realms."; - /** - * Add the {@link Setting} configuration for all realms to the provided list. - */ - public static void addSettings(List> settingsList, List extensions) { - settingsList.add(getGroupSetting(extensions)); - } + public static final Function> ENABLED_SETTING = affixSetting("enabled", + key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); + public static final Function> ORDER_SETTING = affixSetting("order", + key -> Setting.intSetting(key, Integer.MAX_VALUE, Setting.Property.NodeScope)); - public static Collection getSettingsFilter(List extensions) { - return getSettingsByRealm(extensions).values().stream() - .flatMap(Collection::stream) - .filter(Setting::isFiltered) - .map(setting -> PREFIX + "*." + setting.getKey()) - .collect(Collectors.toSet()); + public static String realmSettingPrefix(String type) { + return PREFIX + type + "."; } - /** - * Extract the child {@link Settings} for the {@link #PREFIX realms prefix}. - * The top level names in the returned Settings will be the names of the configured realms. - */ - public static Settings get(Settings settings) { - return settings.getByPrefix(RealmSettings.PREFIX); + public static String realmSettingPrefix(RealmConfig.RealmIdentifier identifier) { + return realmSettingPrefix(identifier.getType()) + identifier.getName() + "."; } - /** - * Extracts the realm settings from a global settings object. - * Returns a Map of realm-name to realm-settings. - */ - public static Map getRealmSettings(Settings globalSettings) { - Settings realmsSettings = RealmSettings.get(globalSettings); - return realmsSettings.names().stream() - .collect(Collectors.toMap(Function.identity(), realmsSettings::getAsSettings)); + public static String realmSslPrefix(RealmConfig.RealmIdentifier identifier) { + return realmSettingPrefix(identifier) + "ssl."; } /** - * Convert the child {@link Setting} for the provided realm into a fully scoped key for use in an error message. - * @see #PREFIX + * Create a {@link Setting#simpleString(String, Setting.Property...) simple string} {@link Setting} object for a realm of + * the provided type and setting suffix. 
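+ * For example, {@code simpleString("ldap", "user_search.base_dn", Setting.Property.NodeScope)} produces an affix setting
+ * whose concrete keys take the form {@code xpack.security.authc.realms.ldap.(name).user_search.base_dn}.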
+ * @param realmType The type of the realm, used within the setting prefix + * @param suffix The suffix of the setting (everything following the realm name in the affix setting) + * @param properties Any properties to apply to the setting */ - public static String getFullSettingKey(RealmConfig realm, Setting setting) { - return getFullSettingKey(realm.name(), setting); + public static Setting.AffixSetting simpleString(String realmType, String suffix, Setting.Property... properties) { + return Setting.affixKeySetting(realmSettingPrefix(realmType), suffix, key -> Setting.simpleString(key, properties)); } /** - * @see #getFullSettingKey(RealmConfig, Setting) + * Create a {@link Function} that acts as a factory for an {@link org.elasticsearch.common.settings.Setting.AffixSetting}. + * The {@code Function} takes the realm-type as an argument. + * @param suffix The suffix of the setting (everything following the realm name in the affix setting) + * @param delegateFactory A factory to produce the concrete setting. + * See {@link Setting#affixKeySetting(Setting.AffixKey, Function, Setting.AffixSetting[])} */ - public static String getFullSettingKey(RealmConfig realm, String subKey) { - return getFullSettingKey(realm.name(), subKey); - } - - private static String getFullSettingKey(String name, Setting setting) { - return getFullSettingKey(name, setting.getKey()); - } - - private static String getFullSettingKey(String name, String subKey) { - return PREFIX + name + "." + subKey; - } - - private static Setting getGroupSetting(List extensions) { - return Setting.groupSetting(PREFIX, getSettingsValidator(extensions), Setting.Property.NodeScope); - } - - private static Consumer getSettingsValidator(List extensions) { - final Map>> childSettings = getSettingsByRealm(extensions); - childSettings.forEach(RealmSettings::verify); - return validator(childSettings); + public static Function> affixSetting(String suffix, Function> delegateFactory) { + return realmType -> Setting.affixKeySetting(realmSettingPrefix(realmType), suffix, delegateFactory); } /** - * @return A map from realm-type to a collection of Setting objects. - * @see InternalRealmsSettings#getSettings() + * Extracts the realm settings from a global settings object. + * Returns a Map of realm-id to realm-settings. 
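+ * For example, a key {@code xpack.security.authc.realms.file.file1.order} contributes an entry mapping the realm-id
+ * {@code file/file1} to the settings found under that realm's prefix.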
*/ - private static Map>> getSettingsByRealm(List extensions) { - final Map>> settingsByRealm = new HashMap<>(InternalRealmsSettings.getSettings()); - if (extensions != null) { - extensions.forEach(ext -> { - final Map>> extSettings = ext.getRealmSettings(); - extSettings.keySet().stream().filter(settingsByRealm::containsKey).forEach(type -> { - throw new IllegalArgumentException("duplicate realm type " + type); - }); - settingsByRealm.putAll(extSettings); - }); - } - return settingsByRealm; + public static Map getRealmSettings(Settings globalSettings) { + Settings settingsByType = globalSettings.getByPrefix(RealmSettings.PREFIX); + return settingsByType.names().stream() + .flatMap(type -> { + final Settings settingsByName = settingsByType.getAsSettings(type); + return settingsByName.names().stream().map(name -> { + final RealmConfig.RealmIdentifier id = new RealmConfig.RealmIdentifier(type, name); + final Settings realmSettings = settingsByName.getAsSettings(name); + return new Tuple<>(id, realmSettings); + }); + }) + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); } - private static void verify(String type, Set> settings) { - Set keys = new HashSet<>(); - settings.forEach(setting -> { - final String key = setting.getKey(); - if (keys.contains(key)) { - throw new IllegalArgumentException("duplicate setting for key " + key + " in realm type " + type); - } - keys.add(key); - if (setting.getProperties().contains(Setting.Property.NodeScope) == false) { - throw new IllegalArgumentException("setting " + key + " in realm type " + type + " does not have NodeScope"); - } - }); + public static String getFullSettingKey(String realmName, Setting.AffixSetting setting) { + return setting.getConcreteSettingForNamespace(realmName).getKey(); } - private static Consumer validator(Map>> validSettings) { - return (settings) -> settings.names().forEach(n -> validateRealm(n, settings.getAsSettings(n), validSettings)); + public static String getFullSettingKey(RealmConfig realm, Setting.AffixSetting setting) { + return setting.getConcreteSettingForNamespace(realm.name()).getKey(); } - private static void validateRealm(String name, Settings settings, Map>> validSettings) { - final String type = getRealmType(settings); - if (isNullOrEmpty(type)) { - throw new IllegalArgumentException("missing realm type [" + getFullSettingKey(name, TYPE_SETTING) + "] for realm"); - } - validateRealm(name, type, settings, validSettings.get(type)); + public static String getFullSettingKey(RealmConfig.RealmIdentifier realmId, Function> setting) { + return getFullSettingKey(realmId.getName(), setting.apply(realmId.getType())); } - public static String getRealmType(Settings settings) { - return TYPE_SETTING.get(settings); + public static String getFullSettingKey(RealmConfig realm, Function> setting) { + return getFullSettingKey(realm.identifier, setting); } - private static void validateRealm(String name, String type, Settings settings, Set> validSettings) { - if (validSettings == null) { - // For backwards compatibility, we assume that is we don't know the valid settings for a realm.type then everything - // is valid. 
Ideally we would reject these, but XPackExtension doesn't enforce that realm-factories and realm-settings are - // perfectly aligned - return; - } - - // Don't validate secure settings because they might have been cleared already - settings = Settings.builder().put(settings, false).build(); - validSettings.removeIf(s -> s instanceof SecureSetting); - - Set> settingSet = new HashSet<>(validSettings); - settingSet.add(TYPE_SETTING); - settingSet.add(ENABLED_SETTING); - settingSet.add(ORDER_SETTING); - final AbstractScopedSettings validator = - new AbstractScopedSettings(settings, settingSet, Collections.emptySet(), Setting.Property.NodeScope) { }; - try { - validator.validate(settings, false); - } catch (RuntimeException e) { - throw new IllegalArgumentException("incorrect configuration for realm [" + getFullSettingKey(name, "") - + "] of type " + type, e); - } + public static List> getStandardSettings(String realmType) { + return Arrays.asList(ENABLED_SETTING.apply(realmType), ORDER_SETTING.apply(realmType)); } private RealmSettings() { } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java index eebcb6db7af87..16c08413fb2a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java @@ -6,8 +6,10 @@ package org.elasticsearch.xpack.core.security.authc.esnative; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import java.util.HashSet; import java.util.Set; public final class NativeRealmSettings { @@ -18,7 +20,9 @@ private NativeRealmSettings() {} /** * @return The {@link Setting setting configuration} for this realm type */ - public static Set> getSettings() { - return CachingUsernamePasswordRealmSettings.getSettings(); + public static Set> getSettings() { + final Set> set = new HashSet<>(CachingUsernamePasswordRealmSettings.getSettings(TYPE)); + set.addAll(RealmSettings.getStandardSettings(TYPE)); + return set; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java index ed81d07d4ccc9..1777e8bb6ecfd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java @@ -6,8 +6,10 @@ package org.elasticsearch.xpack.core.security.authc.file; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import java.util.HashSet; import java.util.Set; public final class FileRealmSettings { @@ -18,7 +20,9 @@ private FileRealmSettings() {} /** * @return The {@link Setting setting configuration} for this realm type */ - public static Set> getSettings() { - return CachingUsernamePasswordRealmSettings.getSettings(); + public static Set> getSettings() { + final Set> set = new 
HashSet<>(CachingUsernamePasswordRealmSettings.getSettings(TYPE)); + set.addAll(RealmSettings.getStandardSettings(TYPE)); + return set; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java index 656632a2ec631..1b1d44d2d114d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/kerberos/KerberosRealmSettings.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import java.util.Set; @@ -24,19 +25,23 @@ public final class KerberosRealmSettings { * Kerberos key tab for Elasticsearch service
      * Uses single key tab for multiple service accounts. */ - public static final Setting HTTP_SERVICE_KEYTAB_PATH = - Setting.simpleString("keytab.path", Property.NodeScope); - public static final Setting SETTING_KRB_DEBUG_ENABLE = - Setting.boolSetting("krb.debug", Boolean.FALSE, Property.NodeScope); - public static final Setting SETTING_REMOVE_REALM_NAME = - Setting.boolSetting("remove_realm_name", Boolean.FALSE, Property.NodeScope); + public static final Setting.AffixSetting HTTP_SERVICE_KEYTAB_PATH = RealmSettings.simpleString(TYPE, + "keytab.path", Property.NodeScope); + + public static final Setting.AffixSetting SETTING_KRB_DEBUG_ENABLE = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "krb.debug", key -> Setting.boolSetting(key, Boolean.FALSE, Property.NodeScope)); + + public static final Setting.AffixSetting SETTING_REMOVE_REALM_NAME = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "remove_realm_name", key -> Setting.boolSetting(key, Boolean.FALSE, Property.NodeScope)); // Cache private static final TimeValue DEFAULT_TTL = TimeValue.timeValueMinutes(20); private static final int DEFAULT_MAX_USERS = 100_000; // 100k users - public static final Setting CACHE_TTL_SETTING = Setting.timeSetting("cache.ttl", DEFAULT_TTL, Setting.Property.NodeScope); - public static final Setting CACHE_MAX_USERS_SETTING = - Setting.intSetting("cache.max_users", DEFAULT_MAX_USERS, Property.NodeScope); + public static final Setting.AffixSetting CACHE_TTL_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "cache.ttl", key -> Setting.timeSetting(key, DEFAULT_TTL, Setting.Property.NodeScope)); + + public static final Setting.AffixSetting CACHE_MAX_USERS_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "cache.max_users", key -> Setting.intSetting(key, DEFAULT_MAX_USERS, Property.NodeScope)); private KerberosRealmSettings() { } @@ -44,10 +49,10 @@ private KerberosRealmSettings() { /** * @return the valid set of {@link Setting}s for a {@value #TYPE} realm */ - public static Set> getSettings() { - final Set> settings = Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, + public static Set> getSettings() { + final Set> settings = Sets.newHashSet(HTTP_SERVICE_KEYTAB_PATH, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, SETTING_KRB_DEBUG_ENABLE, SETTING_REMOVE_REALM_NAME); - settings.addAll(DelegatedAuthorizationSettings.getSettings()); + settings.addAll(DelegatedAuthorizationSettings.getSettings(TYPE)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java index 691b43f24635e..3bd54316736c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java @@ -6,46 +6,85 @@ package org.elasticsearch.xpack.core.security.authc.ldap; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import java.util.HashSet; import java.util.Set; +import static 
org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings.AD_TYPE; + public final class ActiveDirectorySessionFactorySettings { - public static final String AD_DOMAIN_NAME_SETTING = "domain_name"; + private static final String AD_DOMAIN_NAME_SETTING_KEY = "domain_name"; + public static final Setting.AffixSetting AD_DOMAIN_NAME_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_DOMAIN_NAME_SETTING_KEY, Setting.Property.NodeScope); + public static final String AD_GROUP_SEARCH_BASEDN_SETTING = "group_search.base_dn"; public static final String AD_GROUP_SEARCH_SCOPE_SETTING = "group_search.scope"; - public static final String AD_USER_SEARCH_BASEDN_SETTING = "user_search.base_dn"; - public static final String AD_USER_SEARCH_FILTER_SETTING = "user_search.filter"; - public static final String AD_UPN_USER_SEARCH_FILTER_SETTING = "user_search.upn_filter"; - public static final String AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING = "user_search.down_level_filter"; - public static final String AD_USER_SEARCH_SCOPE_SETTING = "user_search.scope"; - public static final Setting AD_LDAP_PORT_SETTING = Setting.intSetting("port.ldap", 389, Setting.Property.NodeScope); - public static final Setting AD_LDAPS_PORT_SETTING = Setting.intSetting("port.ldaps", 636, Setting.Property.NodeScope); - public static final Setting AD_GC_LDAP_PORT_SETTING = Setting.intSetting("port.gc_ldap", 3268, Setting.Property.NodeScope); - public static final Setting AD_GC_LDAPS_PORT_SETTING = Setting.intSetting("port.gc_ldaps", 3269, Setting.Property.NodeScope); - public static final Setting POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled", - settings -> Boolean.toString(PoolingSessionFactorySettings.BIND_DN.exists(settings)), Setting.Property.NodeScope); - - private ActiveDirectorySessionFactorySettings() {} - - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.addAll(SessionFactorySettings.getSettings()); - settings.add(Setting.simpleString(AD_DOMAIN_NAME_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_GROUP_SEARCH_BASEDN_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_GROUP_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_USER_SEARCH_BASEDN_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_UPN_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(AD_USER_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope)); + + private static final String AD_USER_SEARCH_BASEDN_SETTING_KEY = "user_search.base_dn"; + public static final Setting.AffixSetting AD_USER_SEARCH_BASEDN_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_USER_SEARCH_BASEDN_SETTING_KEY, Setting.Property.NodeScope); + + private static final String AD_USER_SEARCH_FILTER_SETTING_KEY = "user_search.filter"; + public static final Setting.AffixSetting AD_USER_SEARCH_FILTER_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_USER_SEARCH_FILTER_SETTING_KEY, Setting.Property.NodeScope); + + private static final String AD_UPN_USER_SEARCH_FILTER_SETTING_KEY = "user_search.upn_filter"; + public static final Setting.AffixSetting AD_UPN_USER_SEARCH_FILTER_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_UPN_USER_SEARCH_FILTER_SETTING_KEY, Setting.Property.NodeScope); + + 
private static final String AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING_KEY = "user_search.down_level_filter"; + public static final Setting.AffixSetting AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING_KEY, Setting.Property.NodeScope); + + private static final String AD_USER_SEARCH_SCOPE_SETTING_KEY = "user_search.scope"; + public static final Setting.AffixSetting AD_USER_SEARCH_SCOPE_SETTING + = RealmSettings.simpleString(AD_TYPE, AD_USER_SEARCH_SCOPE_SETTING_KEY, Setting.Property.NodeScope); + + public static final Setting.AffixSetting AD_LDAP_PORT_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(AD_TYPE), "port.ldap", key -> Setting.intSetting(key, 389, Setting.Property.NodeScope)); + public static final Setting.AffixSetting AD_LDAPS_PORT_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(AD_TYPE), "port.ldaps", key -> Setting.intSetting(key, 636, Setting.Property.NodeScope)); + public static final Setting.AffixSetting AD_GC_LDAP_PORT_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(AD_TYPE), "port.gc_ldap", key -> Setting.intSetting(key, 3268, Setting.Property.NodeScope)); + public static final Setting.AffixSetting AD_GC_LDAPS_PORT_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(AD_TYPE), "port.gc_ldaps", key -> Setting.intSetting(key, 3269, Setting.Property.NodeScope)); + + public static final String POOL_ENABLED_SUFFIX = "user_search.pool.enabled"; + public static final Setting.AffixSetting POOL_ENABLED = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(AD_TYPE), POOL_ENABLED_SUFFIX, + key -> { + if (key.endsWith(POOL_ENABLED_SUFFIX)) { + final String bindDnKey = key.substring(0, key.length() - POOL_ENABLED_SUFFIX.length()) + + PoolingSessionFactorySettings.BIND_DN_SUFFIX; + return Setting.boolSetting(key, settings -> Boolean.toString(settings.keySet().contains(bindDnKey)), + Setting.Property.NodeScope); + } else { + return Setting.boolSetting(key, false, Setting.Property.NodeScope); + } + }); + + private ActiveDirectorySessionFactorySettings() { + } + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings(AD_TYPE)); + settings.add(AD_DOMAIN_NAME_SETTING); + settings.add(RealmSettings.simpleString(AD_TYPE, AD_GROUP_SEARCH_BASEDN_SETTING, Setting.Property.NodeScope)); + settings.add(RealmSettings.simpleString(AD_TYPE, AD_GROUP_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope)); + settings.add(AD_USER_SEARCH_BASEDN_SETTING); + settings.add(AD_USER_SEARCH_FILTER_SETTING); + settings.add(AD_UPN_USER_SEARCH_FILTER_SETTING); + settings.add(AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING); + settings.add(AD_USER_SEARCH_SCOPE_SETTING); settings.add(AD_LDAP_PORT_SETTING); settings.add(AD_LDAPS_PORT_SETTING); settings.add(AD_GC_LDAP_PORT_SETTING); settings.add(AD_GC_LDAPS_PORT_SETTING); settings.add(POOL_ENABLED); - settings.addAll(PoolingSessionFactorySettings.getSettings()); + settings.addAll(PoolingSessionFactorySettings.getSettings(AD_TYPE)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java index 272b4115b285e..4b746a7901613 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; @@ -14,33 +15,39 @@ import java.util.HashSet; import java.util.Set; +import java.util.function.Function; public final class LdapRealmSettings { public static final String LDAP_TYPE = "ldap"; public static final String AD_TYPE = "active_directory"; - public static final Setting EXECUTION_TIMEOUT = - Setting.timeSetting("timeout.execution", TimeValue.timeValueSeconds(30L), Setting.Property.NodeScope); - private LdapRealmSettings() {} + public static final String TIMEOUT_EXECUTION_SUFFIX = "timeout.execution"; + public static final Function> EXECUTION_TIMEOUT = type -> + Setting.affixKeySetting(RealmSettings.realmSettingPrefix(type), TIMEOUT_EXECUTION_SUFFIX, + key -> Setting.timeSetting(key, TimeValue.timeValueSeconds(30L), Setting.Property.NodeScope)); + + private LdapRealmSettings() { + } /** * @param type Either {@link #AD_TYPE} or {@link #LDAP_TYPE} * @return The {@link Setting setting configuration} for this realm type */ - public static Set> getSettings(String type) { - Set> settings = new HashSet<>(); - settings.addAll(CachingUsernamePasswordRealmSettings.getSettings()); - settings.addAll(CompositeRoleMapperSettings.getSettings()); - settings.add(LdapRealmSettings.EXECUTION_TIMEOUT); + public static Set> getSettings(String type) { + Set> settings = new HashSet<>(); + settings.addAll(CachingUsernamePasswordRealmSettings.getSettings(type)); + settings.addAll(CompositeRoleMapperSettings.getSettings(type)); + settings.add(LdapRealmSettings.EXECUTION_TIMEOUT.apply(type)); if (AD_TYPE.equals(type)) { settings.addAll(ActiveDirectorySessionFactorySettings.getSettings()); } else { assert LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. 
expected one of [" + AD_TYPE + ", " + LDAP_TYPE + "]"; settings.addAll(LdapSessionFactorySettings.getSettings()); settings.addAll(LdapUserSearchSessionFactorySettings.getSettings()); - settings.addAll(DelegatedAuthorizationSettings.getSettings()); + settings.addAll(DelegatedAuthorizationSettings.getSettings(type)); } settings.addAll(LdapMetaDataResolverSettings.getSettings()); + settings.addAll(RealmSettings.getStandardSettings(type)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java index 4fb7e9a1d9346..0643bf9daa9d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authc.ldap; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import java.util.Collections; @@ -14,14 +15,17 @@ import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings.LDAP_TYPE; + public final class LdapSessionFactorySettings { - public static final Setting> USER_DN_TEMPLATES_SETTING = Setting.listSetting("user_dn_templates", - Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + public static final Setting.AffixSetting> USER_DN_TEMPLATES_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LDAP_TYPE), "user_dn_templates", + key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Setting.Property.NodeScope)); - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.addAll(SessionFactorySettings.getSettings()); + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings(LDAP_TYPE)); settings.add(USER_DN_TEMPLATES_SETTING); return settings; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java index 86f635e7427ff..89eadc962928a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authc.ldap; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; @@ -13,30 +14,43 @@ import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings.LDAP_TYPE; + public final class LdapUserSearchSessionFactorySettings { - public static final Setting SEARCH_ATTRIBUTE = new Setting<>("user_search.attribute", - 
LdapUserSearchSessionFactorySettings.DEFAULT_USERNAME_ATTRIBUTE, - Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated); - public static final Setting SEARCH_BASE_DN = Setting.simpleString("user_search.base_dn", Setting.Property.NodeScope); - public static final Setting SEARCH_FILTER = Setting.simpleString("user_search.filter", Setting.Property.NodeScope); - public static final Setting SEARCH_SCOPE = new Setting<>("user_search.scope", (String) null, - s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope); - public static final Setting POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled", true, Setting.Property.NodeScope); + public static final Setting.AffixSetting SEARCH_ATTRIBUTE = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LDAP_TYPE), "user_search.attribute", + key -> new Setting<>(key, LdapUserSearchSessionFactorySettings.DEFAULT_USERNAME_ATTRIBUTE, Function.identity(), + Setting.Property.NodeScope, Setting.Property.Deprecated)); + + public static final Setting.AffixSetting SEARCH_BASE_DN + = RealmSettings.simpleString(LDAP_TYPE, "user_search.base_dn", Setting.Property.NodeScope); + + public static final Setting.AffixSetting SEARCH_FILTER + = RealmSettings.simpleString(LDAP_TYPE, "user_search.filter", Setting.Property.NodeScope); + + public static final Setting.AffixSetting SEARCH_SCOPE = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LDAP_TYPE), "user_search.scope", + key -> new Setting<>(key, (String) null, (String s) -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), + Setting.Property.NodeScope)); + public static final Setting.AffixSetting POOL_ENABLED = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LDAP_TYPE), "user_search.pool.enabled", + key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); private static final String DEFAULT_USERNAME_ATTRIBUTE = "uid"; - private LdapUserSearchSessionFactorySettings() {} + private LdapUserSearchSessionFactorySettings() { + } - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.addAll(SessionFactorySettings.getSettings()); - settings.addAll(PoolingSessionFactorySettings.getSettings()); + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings(LDAP_TYPE)); + settings.addAll(PoolingSessionFactorySettings.getSettings(LDAP_TYPE)); settings.add(SEARCH_BASE_DN); settings.add(SEARCH_SCOPE); settings.add(SEARCH_ATTRIBUTE); settings.add(POOL_ENABLED); settings.add(SEARCH_FILTER); - settings.addAll(SearchGroupsResolverSettings.getSettings()); + settings.addAll(SearchGroupsResolverSettings.getSettings(LDAP_TYPE)); settings.addAll(UserAttributeGroupsResolverSettings.getSettings()); return settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java index 88ff5485a5474..b7b0d529d33e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java @@ -8,37 +8,62 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; -import 
org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.Optional; import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.settings.SecureSetting.secureString; public final class PoolingSessionFactorySettings { public static final TimeValue DEFAULT_HEALTH_CHECK_INTERVAL = TimeValue.timeValueSeconds(60L); - public static final Setting BIND_DN = Setting.simpleString("bind_dn", Setting.Property.NodeScope, Setting.Property.Filtered); - public static final Setting LEGACY_BIND_PASSWORD = new Setting<>("bind_password", "", SecureString::new, - Setting.Property.NodeScope, Setting.Property.Filtered, Setting.Property.Deprecated); - public static final Setting SECURE_BIND_PASSWORD = secureString("secure_bind_password", LEGACY_BIND_PASSWORD); + + public static final String BIND_DN_SUFFIX = "bind_dn"; + public static final Function> BIND_DN = RealmSettings.affixSetting(BIND_DN_SUFFIX, + key -> Setting.simpleString(key, Setting.Property.NodeScope, Setting.Property.Filtered)); + + public static final Function> LEGACY_BIND_PASSWORD = RealmSettings.affixSetting( + "bind_password", key -> new Setting<>(key, "", SecureString::new, + Setting.Property.NodeScope, Setting.Property.Filtered, Setting.Property.Deprecated)); + + public static final Function> SECURE_BIND_PASSWORD = realmType -> + Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(realmType), "secure_bind_password", + key -> secureString(key, null) + ); public static final int DEFAULT_CONNECTION_POOL_INITIAL_SIZE = 0; - public static final Setting POOL_INITIAL_SIZE = Setting.intSetting("user_search.pool.initial_size", - DEFAULT_CONNECTION_POOL_INITIAL_SIZE, 0, Setting.Property.NodeScope); + public static final Function> POOL_INITIAL_SIZE = RealmSettings.affixSetting( + "user_search.pool.initial_size", + key -> Setting.intSetting(key, DEFAULT_CONNECTION_POOL_INITIAL_SIZE, 0, Setting.Property.NodeScope)); + public static final int DEFAULT_CONNECTION_POOL_SIZE = 20; - public static final Setting POOL_SIZE = Setting.intSetting("user_search.pool.size", - DEFAULT_CONNECTION_POOL_SIZE, 1, Setting.Property.NodeScope); - public static final Setting HEALTH_CHECK_INTERVAL = Setting.timeSetting("user_search.pool.health_check.interval", - DEFAULT_HEALTH_CHECK_INTERVAL, Setting.Property.NodeScope); - public static final Setting HEALTH_CHECK_ENABLED = Setting.boolSetting("user_search.pool.health_check.enabled", - true, Setting.Property.NodeScope); - public static final Setting> HEALTH_CHECK_DN = new Setting<>("user_search.pool.health_check.dn", (String) null, - Optional::ofNullable, Setting.Property.NodeScope); - - private PoolingSessionFactorySettings() {} - - public static Set> getSettings() { - return Sets.newHashSet(POOL_INITIAL_SIZE, POOL_SIZE, HEALTH_CHECK_ENABLED, HEALTH_CHECK_INTERVAL, HEALTH_CHECK_DN, BIND_DN, - SECURE_BIND_PASSWORD, LEGACY_BIND_PASSWORD); + public static final Function> POOL_SIZE = RealmSettings.affixSetting("user_search.pool.size", + key -> Setting.intSetting(key, DEFAULT_CONNECTION_POOL_SIZE, 1, Setting.Property.NodeScope)); + + public static final Function> HEALTH_CHECK_INTERVAL = RealmSettings.affixSetting( + "user_search.pool.health_check.interval", + key -> Setting.timeSetting(key, DEFAULT_HEALTH_CHECK_INTERVAL, Setting.Property.NodeScope)); + + public static final Function> HEALTH_CHECK_ENABLED = RealmSettings.affixSetting( + 
"user_search.pool.health_check.enabled", + key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); + + public static final Function>> HEALTH_CHECK_DN = RealmSettings.affixSetting( + "user_search.pool.health_check.dn", + key -> new Setting<>(key, (String) null, + Optional::ofNullable, Setting.Property.NodeScope)); + + private PoolingSessionFactorySettings() { + } + + public static Set> getSettings(String realmType) { + return Stream.of( + POOL_INITIAL_SIZE, POOL_SIZE, HEALTH_CHECK_ENABLED, HEALTH_CHECK_INTERVAL, HEALTH_CHECK_DN, BIND_DN, + LEGACY_BIND_PASSWORD, SECURE_BIND_PASSWORD + ).map(f -> f.apply(realmType)).collect(Collectors.toSet()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java index 67ae3bcf24b2d..f9a8e697a2399 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java @@ -6,34 +6,46 @@ package org.elasticsearch.xpack.core.security.authc.ldap; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import java.util.HashSet; import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings.LDAP_TYPE; + public final class SearchGroupsResolverSettings { - public static final Setting BASE_DN = Setting.simpleString("group_search.base_dn", + + public static final Function> BASE_DN = RealmSettings.affixSetting( + "group_search.base_dn", key -> Setting.simpleString(key, new Setting.Property[]{Setting.Property.NodeScope})); + + public static final Function> SCOPE = RealmSettings.affixSetting( + "group_search.scope", key -> new Setting<>(key, (String) null, + s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope)); + + public static final Setting.AffixSetting USER_ATTRIBUTE = RealmSettings.simpleString(LDAP_TYPE, "group_search.user_attribute", Setting.Property.NodeScope); - public static final Setting SCOPE = new Setting<>("group_search.scope", (String) null, - s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope); - public static final Setting USER_ATTRIBUTE = Setting.simpleString( - "group_search.user_attribute", Setting.Property.NodeScope); + private static final String GROUP_SEARCH_DEFAULT_FILTER = "(&" + "(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)" + "(objectclass=group)(objectclass=posixGroup))" + "(|(uniqueMember={0})(member={0})(memberUid={0})))"; - public static final Setting FILTER = new Setting<>("group_search.filter", - GROUP_SEARCH_DEFAULT_FILTER, Function.identity(), Setting.Property.NodeScope); + public static final Setting.AffixSetting FILTER = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LDAP_TYPE), "group_search.filter", + key -> new Setting<>(key, GROUP_SEARCH_DEFAULT_FILTER, Function.identity(), Setting.Property.NodeScope)); - private SearchGroupsResolverSettings() {} + private SearchGroupsResolverSettings() { + } - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.add(BASE_DN); - settings.add(FILTER); - 
settings.add(USER_ATTRIBUTE); - settings.add(SCOPE); + public static Set> getSettings(String realmType) { + Set> settings = new HashSet<>(); + settings.add(BASE_DN.apply(realmType)); + settings.add(SCOPE.apply(realmType)); + if (realmType.equals(LDAP_TYPE)) { + settings.add(FILTER); + settings.add(USER_ATTRIBUTE); + } return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java index 88538a810a5dc..c6f36c3c3cd03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java @@ -6,18 +6,21 @@ package org.elasticsearch.xpack.core.security.authc.ldap; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.Collections; import java.util.Set; import java.util.function.Function; public final class UserAttributeGroupsResolverSettings { - public static final Setting ATTRIBUTE = new Setting<>("user_group_attribute", "memberOf", - Function.identity(), Setting.Property.NodeScope); + public static final Setting.AffixSetting ATTRIBUTE = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LdapRealmSettings.LDAP_TYPE), "user_group_attribute", + key -> new Setting<>(key, "memberOf", Function.identity(), Setting.Property.NodeScope)); - private UserAttributeGroupsResolverSettings() {} + private UserAttributeGroupsResolverSettings() { + } - public static Set> getSettings() { + public static Set> getSettings() { return Collections.singleton(ATTRIBUTE); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java index 4d7aff0939754..325862a2c3c93 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java @@ -6,21 +6,29 @@ package org.elasticsearch.xpack.core.security.authc.ldap.support; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.HashSet; import java.util.Set; +import java.util.function.Function; public final class LdapLoadBalancingSettings { - public static final String LOAD_BALANCE_SETTINGS = "load_balance"; - public static final String LOAD_BALANCE_TYPE_SETTING = "type"; - public static final String CACHE_TTL_SETTING = "cache_ttl"; - private LdapLoadBalancingSettings() {} + public static final Function> LOAD_BALANCE_TYPE_SETTING = RealmSettings.affixSetting( + "load_balance.type", key -> Setting.simpleString(key, Setting.Property.NodeScope)); - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.add(Setting.simpleString(LOAD_BALANCE_SETTINGS + "." + LOAD_BALANCE_TYPE_SETTING, Setting.Property.NodeScope)); - settings.add(Setting.simpleString(LOAD_BALANCE_SETTINGS + "." 
+ CACHE_TTL_SETTING, Setting.Property.NodeScope)); + private static final TimeValue CACHE_TTL_DEFAULT = TimeValue.timeValueHours(1L); + public static final Function> CACHE_TTL_SETTING = RealmSettings.affixSetting( + "load_balance.cache_ttl", key -> Setting.timeSetting(key, CACHE_TTL_DEFAULT, Setting.Property.NodeScope)); + + private LdapLoadBalancingSettings() { + } + + public static Set> getSettings(String realmType) { + Set> settings = new HashSet<>(); + settings.add(LOAD_BALANCE_TYPE_SETTING.apply(realmType)); + settings.add(CACHE_TTL_SETTING.apply(realmType)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java index e284de9c03c3d..878325f983786 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java @@ -6,18 +6,21 @@ package org.elasticsearch.xpack.core.security.authc.ldap.support; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import java.util.Collections; import java.util.List; import java.util.function.Function; public final class LdapMetaDataResolverSettings { - public static final Setting> ADDITIONAL_META_DATA_SETTING = Setting.listSetting( - "metadata", Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + public static final Setting.AffixSetting> ADDITIONAL_META_DATA_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(LdapRealmSettings.LDAP_TYPE), "metadata", + key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Setting.Property.NodeScope)); private LdapMetaDataResolverSettings() {} - public static List> getSettings() { + public static List> getSettings() { return Collections.singletonList(ADDITIONAL_META_DATA_SETTING); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java index 3c3d6bc8ab818..d951a7dbc65dd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authc.ldap.support; import com.unboundid.ldap.sdk.SearchScope; +import org.elasticsearch.common.Strings; import java.util.Locale; @@ -26,7 +27,7 @@ public SearchScope scope() { } public static LdapSearchScope resolve(String scope, LdapSearchScope defaultScope) { - if (scope == null) { + if (Strings.isNullOrEmpty(scope)) { return defaultScope; } switch (scope.toLowerCase(Locale.ENGLISH)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java index 42fc70f25176d..378cf5bd0e2a0 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java @@ -7,38 +7,53 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.function.Function; public final class SessionFactorySettings { - public static final String URLS_SETTING = "url"; - public static final String TIMEOUT_TCP_CONNECTION_SETTING = "timeout.tcp_connect"; - public static final String TIMEOUT_TCP_READ_SETTING = "timeout.tcp_read"; - public static final String TIMEOUT_LDAP_SETTING = "timeout.ldap_search"; - public static final String HOSTNAME_VERIFICATION_SETTING = "hostname_verification"; - public static final String FOLLOW_REFERRALS_SETTING = "follow_referrals"; - public static final Setting IGNORE_REFERRAL_ERRORS_SETTING = Setting.boolSetting( - "ignore_referral_errors", true, Setting.Property.NodeScope); + + public static final Function>> URLS_SETTING = RealmSettings.affixSetting( + "url", key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Setting.Property.NodeScope)); + public static final TimeValue TIMEOUT_DEFAULT = TimeValue.timeValueSeconds(5); + public static final Function> TIMEOUT_TCP_CONNECTION_SETTING = RealmSettings.affixSetting( + "timeout.tcp_connect", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + + public static final Function> TIMEOUT_TCP_READ_SETTING = RealmSettings.affixSetting( + "timeout.tcp_read", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + + public static final Function> TIMEOUT_LDAP_SETTING = RealmSettings.affixSetting( + "timeout.ldap_search", key -> Setting.timeSetting(key, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + + public static final Function> HOSTNAME_VERIFICATION_SETTING = RealmSettings.affixSetting( + "hostname_verification", key -> Setting.boolSetting(key, true, Setting.Property.NodeScope, Setting.Property.Filtered)); + + public static final Function> FOLLOW_REFERRALS_SETTING = RealmSettings.affixSetting( + "follow_referrals", key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); + + public static final Function> IGNORE_REFERRAL_ERRORS_SETTING = RealmSettings.affixSetting( + "ignore_referral_errors", key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); + + private SessionFactorySettings() { + } - private SessionFactorySettings() {} - - public static Set> getSettings() { - Set> settings = new HashSet<>(); - settings.addAll(LdapLoadBalancingSettings.getSettings()); - settings.add(Setting.listSetting(URLS_SETTING, Collections.emptyList(), Function.identity(), - Setting.Property.NodeScope)); - settings.add(Setting.timeSetting(TIMEOUT_TCP_CONNECTION_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); - settings.add(Setting.timeSetting(TIMEOUT_TCP_READ_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); - settings.add(Setting.timeSetting(TIMEOUT_LDAP_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); - settings.add(Setting.boolSetting(HOSTNAME_VERIFICATION_SETTING, true, Setting.Property.NodeScope, Setting.Property.Filtered)); - 
settings.add(Setting.boolSetting(FOLLOW_REFERRALS_SETTING, true, Setting.Property.NodeScope)); - settings.add(IGNORE_REFERRAL_ERRORS_SETTING); - settings.addAll(SSLConfigurationSettings.withPrefix("ssl.").getAllSettings()); + public static Set> getSettings(String realmType) { + Set> settings = new HashSet<>(); + settings.addAll(LdapLoadBalancingSettings.getSettings(realmType)); + settings.add(URLS_SETTING.apply(realmType)); + settings.add(TIMEOUT_TCP_CONNECTION_SETTING.apply(realmType)); + settings.add(TIMEOUT_TCP_READ_SETTING.apply(realmType)); + settings.add(TIMEOUT_LDAP_SETTING.apply(realmType)); + settings.add(HOSTNAME_VERIFICATION_SETTING.apply(realmType)); + settings.add(FOLLOW_REFERRALS_SETTING.apply(realmType)); + settings.add(IGNORE_REFERRAL_ERRORS_SETTING.apply(realmType)); + settings.addAll(SSLConfigurationSettings.getRealmSettings(realmType)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java index 53af4938a8ff4..cd153c9009ed6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java @@ -5,48 +5,84 @@ */ package org.elasticsearch.xpack.core.security.authc.pki; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import java.util.HashSet; +import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.regex.Pattern; public final class PkiRealmSettings { public static final String TYPE = "pki"; public static final String DEFAULT_USERNAME_PATTERN = "CN=(.*?)(?:,|$)"; - public static final Setting USERNAME_PATTERN_SETTING = new Setting<>("username_pattern", DEFAULT_USERNAME_PATTERN, - s -> Pattern.compile(s, Pattern.CASE_INSENSITIVE), Setting.Property.NodeScope); + public static final Setting.AffixSetting USERNAME_PATTERN_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "username_pattern", + key -> new Setting<>(key, DEFAULT_USERNAME_PATTERN, s -> Pattern.compile(s, Pattern.CASE_INSENSITIVE), + Setting.Property.NodeScope)); + private static final TimeValue DEFAULT_TTL = TimeValue.timeValueMinutes(20); - public static final Setting CACHE_TTL_SETTING = Setting.timeSetting("cache.ttl", DEFAULT_TTL, Setting.Property.NodeScope); + public static final Setting.AffixSetting CACHE_TTL_SETTING = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "cache.ttl", + key -> Setting.timeSetting(key, DEFAULT_TTL, Setting.Property.NodeScope)); + private static final int DEFAULT_MAX_USERS = 100_000; //100k users - public static final Setting CACHE_MAX_USERS_SETTING = Setting.intSetting("cache.max_users", DEFAULT_MAX_USERS, - Setting.Property.NodeScope); - public static final SSLConfigurationSettings SSL_SETTINGS = SSLConfigurationSettings.withoutPrefix(); + public static final Setting.AffixSetting CACHE_MAX_USERS_SETTING = Setting.affixKeySetting( + 
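Reading a concrete value out of node settings follows the same two-step resolution. A sketch, assuming the usual org.elasticsearch.common.settings imports and a `Settings` instance built by the caller (realm name "ldap1" and the URL are illustrative):

---------------------------------------------------------------------------
Settings nodeSettings = Settings.builder()
        .putList("xpack.security.authc.realms.ldap.ldap1.url", "ldaps://ldap.example.com:636")
        .build();
// Apply the realm type, pick the namespace (realm name), then read the value.
List<String> urls = SessionFactorySettings.URLS_SETTING.apply(LdapRealmSettings.LDAP_TYPE)
        .getConcreteSettingForNamespace("ldap1")
        .get(nodeSettings);
---------------------------------------------------------------------------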
RealmSettings.realmSettingPrefix(TYPE), "cache.max_users", + key -> Setting.intSetting(key, DEFAULT_MAX_USERS, Setting.Property.NodeScope)); + + public static final Setting.AffixSetting> TRUST_STORE_PATH; + public static final Setting.AffixSetting> TRUST_STORE_TYPE; + public static final Setting.AffixSetting TRUST_STORE_PASSWORD; + public static final Setting.AffixSetting LEGACY_TRUST_STORE_PASSWORD; + public static final Setting.AffixSetting TRUST_STORE_ALGORITHM; + public static final Setting.AffixSetting> CAPATH_SETTING; - private PkiRealmSettings() {} + static { + final String prefix = "xpack.security.authc.realms." + TYPE + "."; + final SSLConfigurationSettings ssl = SSLConfigurationSettings.withoutPrefix(); + TRUST_STORE_PATH = Setting.affixKeySetting(prefix, ssl.truststorePath.getKey(), + SSLConfigurationSettings.TRUST_STORE_PATH_TEMPLATE); + TRUST_STORE_TYPE = Setting.affixKeySetting(prefix, ssl.truststoreType.getKey(), + SSLConfigurationSettings.TRUST_STORE_TYPE_TEMPLATE); + TRUST_STORE_PASSWORD = Setting.affixKeySetting(prefix, ssl.truststorePassword.getKey(), + SSLConfigurationSettings.TRUSTSTORE_PASSWORD_TEMPLATE); + LEGACY_TRUST_STORE_PASSWORD = Setting.affixKeySetting(prefix, ssl.legacyTruststorePassword.getKey(), + SSLConfigurationSettings.LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); + TRUST_STORE_ALGORITHM = Setting.affixKeySetting(prefix, ssl.truststoreAlgorithm.getKey(), + SSLConfigurationSettings.TRUST_STORE_ALGORITHM_TEMPLATE); + CAPATH_SETTING = Setting.affixKeySetting(prefix, ssl.caPaths.getKey(), + SSLConfigurationSettings.CAPATH_SETTING_TEMPLATE); + } + + private PkiRealmSettings() { + } /** * @return The {@link Setting setting configuration} for this realm type */ - public static Set> getSettings() { - Set> settings = new HashSet<>(); + public static Set> getSettings() { + Set> settings = new HashSet<>(); settings.add(USERNAME_PATTERN_SETTING); settings.add(CACHE_TTL_SETTING); settings.add(CACHE_MAX_USERS_SETTING); - settings.add(SSL_SETTINGS.truststorePath); - settings.add(SSL_SETTINGS.truststorePassword); - settings.add(SSL_SETTINGS.legacyTruststorePassword); - settings.add(SSL_SETTINGS.truststoreAlgorithm); - settings.add(SSL_SETTINGS.caPaths); + settings.add(TRUST_STORE_PATH); + settings.add(TRUST_STORE_PASSWORD); + settings.add(LEGACY_TRUST_STORE_PASSWORD); + settings.add(TRUST_STORE_ALGORITHM); + settings.add(CAPATH_SETTING); - settings.addAll(DelegatedAuthorizationSettings.getSettings()); - settings.addAll(CompositeRoleMapperSettings.getSettings()); + settings.addAll(DelegatedAuthorizationSettings.getSettings(TYPE)); + settings.addAll(CompositeRoleMapperSettings.getSettings(TYPE)); + settings.addAll(RealmSettings.getStandardSettings(TYPE)); return settings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java index e254cee124300..437dca0c60e76 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java @@ -8,6 +8,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import 
org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; @@ -27,26 +29,43 @@ public class SamlRealmSettings { // these settings will be used under the prefix xpack.security.authc.realms.REALM_NAME. private static final String IDP_METADATA_SETTING_PREFIX = "idp.metadata."; - public static final Setting IDP_ENTITY_ID = Setting.simpleString("idp.entity_id", Setting.Property.NodeScope); - public static final Setting IDP_METADATA_PATH - = Setting.simpleString(IDP_METADATA_SETTING_PREFIX + "path", Setting.Property.NodeScope); - public static final Setting IDP_METADATA_HTTP_REFRESH - = Setting.timeSetting(IDP_METADATA_SETTING_PREFIX + "http.refresh", TimeValue.timeValueHours(1), Setting.Property.NodeScope); - public static final Setting IDP_SINGLE_LOGOUT = Setting.boolSetting("idp.use_single_logout", true, Setting.Property.NodeScope); + public static final Setting.AffixSetting IDP_ENTITY_ID + = RealmSettings.simpleString(TYPE, "idp.entity_id", Setting.Property.NodeScope); - public static final Setting SP_ENTITY_ID = Setting.simpleString("sp.entity_id", Setting.Property.NodeScope); - public static final Setting SP_ACS = Setting.simpleString("sp.acs", Setting.Property.NodeScope); - public static final Setting SP_LOGOUT = Setting.simpleString("sp.logout", Setting.Property.NodeScope); + public static final Setting.AffixSetting IDP_METADATA_PATH + = RealmSettings.simpleString(TYPE, IDP_METADATA_SETTING_PREFIX + "path", Setting.Property.NodeScope); - public static final Setting NAMEID_FORMAT = new Setting<>("nameid_format", s -> TRANSIENT_NAMEID_FORMAT, Function.identity(), - Setting.Property.NodeScope); - public static final Setting NAMEID_ALLOW_CREATE = Setting.boolSetting("nameid.allow_create", false, - Setting.Property.NodeScope); - public static final Setting NAMEID_SP_QUALIFIER = Setting.simpleString("nameid.sp_qualifier", Setting.Property.NodeScope); + public static final Setting.AffixSetting IDP_METADATA_HTTP_REFRESH = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), IDP_METADATA_SETTING_PREFIX + "http.refresh", + key -> Setting.timeSetting(key, TimeValue.timeValueHours(1), Setting.Property.NodeScope)); - public static final Setting FORCE_AUTHN = Setting.boolSetting("force_authn", false, Setting.Property.NodeScope); - public static final Setting POPULATE_USER_METADATA = Setting.boolSetting("populate_user_metadata", true, - Setting.Property.NodeScope); + public static final Setting.AffixSetting IDP_SINGLE_LOGOUT = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "idp.use_single_logout", + key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); + + public static final Setting.AffixSetting SP_ENTITY_ID + = RealmSettings.simpleString(TYPE, "sp.entity_id", Setting.Property.NodeScope); + + public static final Setting.AffixSetting SP_ACS = RealmSettings.simpleString(TYPE, "sp.acs", Setting.Property.NodeScope); + public static final Setting.AffixSetting SP_LOGOUT = RealmSettings.simpleString(TYPE, "sp.logout", Setting.Property.NodeScope); + + public static final Setting.AffixSetting NAMEID_FORMAT = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "nameid_format", + key -> new Setting<>(key, s -> TRANSIENT_NAMEID_FORMAT, Function.identity(), Setting.Property.NodeScope)); + + public static final Setting.AffixSetting NAMEID_ALLOW_CREATE = Setting.affixKeySetting( + 
RealmSettings.realmSettingPrefix(TYPE), "nameid.allow_create", + key -> Setting.boolSetting(key, false, Setting.Property.NodeScope)); + public static final Setting.AffixSetting NAMEID_SP_QUALIFIER + = RealmSettings.simpleString(TYPE, "nameid.sp_qualifier", Setting.Property.NodeScope); + + public static final Setting.AffixSetting FORCE_AUTHN = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "force_authn", + key -> Setting.boolSetting(key, false, Setting.Property.NodeScope)); + + public static final Setting.AffixSetting POPULATE_USER_METADATA = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "populate_user_metadata", + key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); public static final AttributeSetting PRINCIPAL_ATTRIBUTE = new AttributeSetting("principal"); public static final AttributeSetting GROUPS_ATTRIBUTE = new AttributeSetting("groups"); @@ -54,19 +73,25 @@ public class SamlRealmSettings { public static final AttributeSetting NAME_ATTRIBUTE = new AttributeSetting("name"); public static final AttributeSetting MAIL_ATTRIBUTE = new AttributeSetting("mail"); - public static final X509KeyPairSettings ENCRYPTION_SETTINGS = new X509KeyPairSettings("encryption.", false); - public static final Setting ENCRYPTION_KEY_ALIAS = - Setting.simpleString("encryption.keystore.alias", Setting.Property.NodeScope); + public static final String ENCRYPTION_SETTING_KEY = "encryption."; + public static final Setting.AffixSetting ENCRYPTION_KEY_ALIAS = RealmSettings.simpleString( + TYPE, ENCRYPTION_SETTING_KEY + "keystore.alias", Setting.Property.NodeScope); + + public static final String SIGNING_SETTING_KEY = "signing."; + public static final Setting.AffixSetting SIGNING_KEY_ALIAS = RealmSettings.simpleString( + TYPE, SIGNING_SETTING_KEY + "keystore.alias", Setting.Property.NodeScope); + + public static final Setting.AffixSetting> SIGNING_MESSAGE_TYPES = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "signing.saml_messages", + key -> Setting.listSetting(key, Collections.singletonList("*"), Function.identity(), Setting.Property.NodeScope)); - public static final X509KeyPairSettings SIGNING_SETTINGS = new X509KeyPairSettings("signing.", false); - public static final Setting SIGNING_KEY_ALIAS = - Setting.simpleString("signing.keystore.alias", Setting.Property.NodeScope); - public static final Setting> SIGNING_MESSAGE_TYPES = Setting.listSetting("signing.saml_messages", - Collections.singletonList("*"), Function.identity(), Setting.Property.NodeScope); - public static final Setting> REQUESTED_AUTHN_CONTEXT_CLASS_REF = Setting.listSetting("req_authn_context_class_ref", - Collections.emptyList(), Function.identity(),Setting.Property.NodeScope); - public static final Setting CLOCK_SKEW = Setting.positiveTimeSetting("allowed_clock_skew", TimeValue.timeValueMinutes(3), - Setting.Property.NodeScope); + public static final Setting.AffixSetting> REQUESTED_AUTHN_CONTEXT_CLASS_REF = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "req_authn_context_class_ref", + key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(),Setting.Property.NodeScope)); + + public static final Setting.AffixSetting CLOCK_SKEW = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), "allowed_clock_skew", + key -> Setting.positiveTimeSetting(key, TimeValue.timeValueMinutes(3), Setting.Property.NodeScope)); public static final String SSL_PREFIX = "ssl."; @@ -76,21 +101,24 @@ private SamlRealmSettings() { /** * @return The 
{@link Setting setting configuration} for this realm type */ - public static Set> getSettings() { - final Set> set = Sets.newHashSet(IDP_ENTITY_ID, IDP_METADATA_PATH, IDP_SINGLE_LOGOUT, + public static Set> getSettings() { + final Set> set = Sets.newHashSet( + IDP_ENTITY_ID, IDP_METADATA_PATH, IDP_SINGLE_LOGOUT, SP_ENTITY_ID, SP_ACS, SP_LOGOUT, NAMEID_FORMAT, NAMEID_ALLOW_CREATE, NAMEID_SP_QUALIFIER, FORCE_AUTHN, POPULATE_USER_METADATA, CLOCK_SKEW, - ENCRYPTION_KEY_ALIAS, SIGNING_KEY_ALIAS, SIGNING_MESSAGE_TYPES, REQUESTED_AUTHN_CONTEXT_CLASS_REF); - set.addAll(ENCRYPTION_SETTINGS.getAllSettings()); - set.addAll(SIGNING_SETTINGS.getAllSettings()); - set.addAll(SSLConfigurationSettings.withPrefix(SSL_PREFIX).getAllSettings()); + ENCRYPTION_KEY_ALIAS, SIGNING_KEY_ALIAS, SIGNING_MESSAGE_TYPES, REQUESTED_AUTHN_CONTEXT_CLASS_REF); + set.addAll(X509KeyPairSettings.affix(RealmSettings.realmSettingPrefix(TYPE), ENCRYPTION_SETTING_KEY, false)); + set.addAll(X509KeyPairSettings.affix(RealmSettings.realmSettingPrefix(TYPE), SIGNING_SETTING_KEY, false)); + set.addAll(SSLConfigurationSettings.getRealmSettings(TYPE)); set.addAll(PRINCIPAL_ATTRIBUTE.settings()); set.addAll(GROUPS_ATTRIBUTE.settings()); set.addAll(DN_ATTRIBUTE.settings()); set.addAll(NAME_ATTRIBUTE.settings()); set.addAll(MAIL_ATTRIBUTE.settings()); - set.addAll(DelegatedAuthorizationSettings.getSettings()); + + set.addAll(DelegatedAuthorizationSettings.getSettings(TYPE)); + set.addAll(RealmSettings.getStandardSettings(TYPE)); return set; } @@ -109,27 +137,27 @@ public static final class AttributeSetting { public static final String ATTRIBUTES_PREFIX = "attributes."; public static final String ATTRIBUTE_PATTERNS_PREFIX = "attribute_patterns."; - private final Setting attribute; - private final Setting pattern; + private final Setting.AffixSetting attribute; + private final Setting.AffixSetting pattern; public AttributeSetting(String name) { - attribute = Setting.simpleString(ATTRIBUTES_PREFIX + name, Setting.Property.NodeScope); - pattern = Setting.simpleString(ATTRIBUTE_PATTERNS_PREFIX + name, Setting.Property.NodeScope); + attribute = RealmSettings.simpleString(TYPE, ATTRIBUTES_PREFIX + name, Setting.Property.NodeScope); + pattern = RealmSettings.simpleString(TYPE, ATTRIBUTE_PATTERNS_PREFIX + name, Setting.Property.NodeScope); } - public Collection> settings() { + public Collection> settings() { return Arrays.asList(getAttribute(), getPattern()); } - public String name() { - return getAttribute().getKey(); + public String name(RealmConfig config) { + return getAttribute().getConcreteSettingForNamespace(config.name()).getKey(); } - public Setting getAttribute() { + public Setting.AffixSetting getAttribute() { return attribute; } - public Setting getPattern() { + public Setting.AffixSetting getPattern() { return pattern; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java index 6b7867e421180..157a8edf5640d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java @@ -7,29 +7,44 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; +import 
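Because `AttributeSetting.name(RealmConfig)` now needs the realm's name to build a concrete key, the SAML attribute settings resolve per realm as well. A sketch of the key shape, assuming a SAML realm named "saml1" (hypothetical):

---------------------------------------------------------------------------
// The principal attribute for realm "saml1" lives under a realm-scoped key.
String key = SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()
        .getConcreteSettingForNamespace("saml1")
        .getKey();
// key is "xpack.security.authc.realms.saml.saml1.attributes.principal"
---------------------------------------------------------------------------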
org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.function.Function; public final class CachingUsernamePasswordRealmSettings { - public static final Setting<String> CACHE_HASH_ALGO_SETTING = Setting.simpleString("cache.hash_algo", "ssha256", - Setting.Property.NodeScope); + + private static final String CACHE_HASH_ALGO_SUFFIX = "cache.hash_algo"; + public static final Function<String, Setting.AffixSetting<String>> CACHE_HASH_ALGO_SETTING = RealmSettings.affixSetting( + CACHE_HASH_ALGO_SUFFIX, key -> Setting.simpleString(key, "ssha256", Setting.Property.NodeScope)); + private static final TimeValue DEFAULT_TTL = TimeValue.timeValueMinutes(20); - public static final Setting<TimeValue> CACHE_TTL_SETTING = Setting.timeSetting("cache.ttl", DEFAULT_TTL, Setting.Property.NodeScope); + private static final String CACHE_TTL_SUFFIX = "cache.ttl"; + public static final Function<String, Setting.AffixSetting<TimeValue>> CACHE_TTL_SETTING = RealmSettings.affixSetting( + CACHE_TTL_SUFFIX, key -> Setting.timeSetting(key, DEFAULT_TTL, Setting.Property.NodeScope)); + private static final int DEFAULT_MAX_USERS = 100_000; //100k users - public static final Setting<Integer> CACHE_MAX_USERS_SETTING = Setting.intSetting("cache.max_users", DEFAULT_MAX_USERS, - Setting.Property.NodeScope); + private static final String CACHE_MAX_USERS_SUFFIX = "cache.max_users"; + public static final Function<String, Setting.AffixSetting<Integer>> CACHE_MAX_USERS_SETTING = RealmSettings.affixSetting( + CACHE_MAX_USERS_SUFFIX, key -> Setting.intSetting(key, DEFAULT_MAX_USERS, Setting.Property.NodeScope)); - public static final Setting<Boolean> AUTHC_ENABLED_SETTING = Setting.boolSetting("authentication.enabled", true, - Setting.Property.NodeScope); + public static final Function<String, Setting.AffixSetting<Boolean>> AUTHC_ENABLED_SETTING = RealmSettings.affixSetting( + "authentication.enabled", key -> Setting.boolSetting(key, true, Setting.Property.NodeScope)); - private CachingUsernamePasswordRealmSettings() {} + private CachingUsernamePasswordRealmSettings() { + } /** * Returns the {@link Setting setting configuration} that is common for all caching realms */ - public static Set<Setting<?>> getSettings() { - return new HashSet<>(Arrays.asList(CACHE_HASH_ALGO_SETTING, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING, AUTHC_ENABLED_SETTING)); + public static Set<Setting.AffixSetting<?>> getSettings(String type) { + return new HashSet<>(Arrays.asList( + CACHE_HASH_ALGO_SETTING.apply(type), + CACHE_TTL_SETTING.apply(type), + CACHE_MAX_USERS_SETTING.apply(type), + AUTHC_ENABLED_SETTING.apply(type) + )); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java index b8384a76b41ad..969f6f0eb0d51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DelegatedAuthorizationSettings.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authc.support; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.Collection; import java.util.Collections; @@ -18,10 +19,11 @@ */ public class DelegatedAuthorizationSettings { - public static final Setting<List<String>> AUTHZ_REALMS = Setting.listSetting("authorization_realms", - Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + public static final String AUTHZ_REALMS_SUFFIX = "authorization_realms"; + public static final Function<String, Setting.AffixSetting<List<String>>> AUTHZ_REALMS = RealmSettings.affixSetting( + AUTHZ_REALMS_SUFFIX, key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Setting.Property.NodeScope)); - public static Collection<Setting<?>> getSettings() { - return Collections.singleton(AUTHZ_REALMS); + public static Collection<Setting.AffixSetting<?>> getSettings(String realmType) { + return Collections.singleton(AUTHZ_REALMS.apply(realmType)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java index 034f7a18dbee4..d2e22a7049110 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java @@ -6,20 +6,26 @@ package org.elasticsearch.xpack.core.security.authc.support; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import java.util.Arrays; -import java.util.List; +import java.util.Collection; import java.util.function.Function; public final class DnRoleMapperSettings { private static final String DEFAULT_FILE_NAME = "role_mapping.yml"; - public static final Setting<String> ROLE_MAPPING_FILE_SETTING = new Setting<>("files.role_mapping", DEFAULT_FILE_NAME, - Function.identity(), Setting.Property.NodeScope); - public static final Setting<Boolean> USE_UNMAPPED_GROUPS_AS_ROLES_SETTING = - Setting.boolSetting("unmapped_groups_as_roles", false, Setting.Property.NodeScope); + public static final String FILES_ROLE_MAPPING_SUFFIX = "files.role_mapping"; + public static final Function<String, Setting.AffixSetting<String>> ROLE_MAPPING_FILE_SETTING = type -> + Setting.affixKeySetting(RealmSettings.realmSettingPrefix(type), FILES_ROLE_MAPPING_SUFFIX, + key -> new Setting<>(key, DEFAULT_FILE_NAME, Function.identity(), Setting.Property.NodeScope)); - public static List<Setting<?>> getSettings() { - return Arrays.asList(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING, ROLE_MAPPING_FILE_SETTING); + public static final String UNMAPPED_GROUPS_AS_ROLES_SUFFIX = "unmapped_groups_as_roles"; + public static final Function<String, Setting.AffixSetting<Boolean>> USE_UNMAPPED_GROUPS_AS_ROLES_SETTING = type -> + Setting.affixKeySetting(RealmSettings.realmSettingPrefix(type), UNMAPPED_GROUPS_AS_ROLES_SUFFIX, + key -> Setting.boolSetting(key, false, Setting.Property.NodeScope)); + + public static Collection<Setting.AffixSetting<?>> getSettings(String realmType) { + return Arrays.asList(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.apply(realmType), ROLE_MAPPING_FILE_SETTING.apply(realmType)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java index 54fa0c2ffbcd9..ee719580893f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java @@ -13,7 +13,7 @@ public final class CompositeRoleMapperSettings { private CompositeRoleMapperSettings() {} - public static Collection<Setting<?>> getSettings() { - return DnRoleMapperSettings.getSettings(); + public static Collection<Setting.AffixSetting<?>> getSettings(String type) { + return
DnRoleMapperSettings.getSettings(type); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index ea7a37b205431..f0e2f2b7e6217 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authz.permission; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ElasticsearchSecurityException; @@ -14,7 +15,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -67,7 +67,7 @@ static Predicate indexMatcher(List indices) { try { return Automatons.predicate(indices); } catch (TooComplexToDeterminizeException e) { - Loggers.getLogger(IndicesPermission.class).debug("Index pattern automaton [{}] is too complex", indices); + LogManager.getLogger(IndicesPermission.class).debug("Index pattern automaton [{}] is too complex", indices); String description = Strings.collectionToCommaDelimitedString(indices); if (description.length() > 80) { description = Strings.cleanTruncate(description, 80) + "..."; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java index 2217513c03fe7..2a786f12e59bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.core.ssl; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.env.Environment; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; @@ -28,15 +28,16 @@ * Ensures that the files backing an {@link SSLConfiguration} are monitored for changes and the underlying key/trust material is reloaded * and the {@link SSLContext} has existing sessions invalidated to force the use of the new key/trust material */ -public class SSLConfigurationReloader extends AbstractComponent { +public class SSLConfigurationReloader { + + private static final Logger logger = LogManager.getLogger(SSLConfigurationReloader.class); private final ConcurrentHashMap pathToChangeListenerMap = new ConcurrentHashMap<>(); private final Environment environment; private final ResourceWatcherService resourceWatcherService; private final SSLService sslService; - public SSLConfigurationReloader(Settings settings, Environment env, SSLService sslService, ResourceWatcherService resourceWatcher) { - super(settings); + 
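The `AbstractComponent` removals here (IndicesPermission, SSLConfigurationReloader) all follow one replacement pattern: a static Log4j logger instead of an inherited, Settings-carrying base class. A minimal sketch with a hypothetical class name:

---------------------------------------------------------------------------
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ExampleComponent { // previously: extends AbstractComponent
    private static final Logger logger = LogManager.getLogger(ExampleComponent.class);

    public ExampleComponent() {
        // No Settings are threaded through a super(settings) call any more.
        logger.debug("created");
    }
}
---------------------------------------------------------------------------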
public SSLConfigurationReloader(Environment env, SSLService sslService, ResourceWatcherService resourceWatcher) { this.environment = env; this.resourceWatcherService = resourceWatcher; this.sslService = sslService; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index 0cc01270ced53..c16035f1cabe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.util.CollectionUtils; import javax.net.ssl.TrustManagerFactory; - import java.security.KeyStore; import java.util.Arrays; import java.util.Collection; @@ -58,107 +57,169 @@ public class SSLConfigurationSettings { private static final Function>> CIPHERS_SETTING_TEMPLATE = key -> Setting.listSetting(key, Collections .emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); - public static final Setting> CIPHERS_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + public static final Setting> CIPHERS_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.cipher_suites", CIPHERS_SETTING_TEMPLATE); + public static final Function>> CIPHERS_SETTING_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.cipher_suites", CIPHERS_SETTING_TEMPLATE); - private static final Function>> SUPPORTED_PROTOCOLS_TEMPLATE = key -> Setting.listSetting(key, + private static final Function>> SUPPORTED_PROTOCOLS_TEMPLATE = key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); public static final Setting> SUPPORTED_PROTOCOLS_PROFILES = Setting.affixKeySetting("transport.profiles.", - "xpack.security.ssl.supported_protocols", SUPPORTED_PROTOCOLS_TEMPLATE) ; + "xpack.security.ssl.supported_protocols", SUPPORTED_PROTOCOLS_TEMPLATE); + public static final Function>> SUPPORTED_PROTOCOLS_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.supported_protocols", + SUPPORTED_PROTOCOLS_TEMPLATE); - public static final Setting> KEYSTORE_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", + static final Setting> KEYSTORE_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.path", X509KeyPairSettings.KEYSTORE_PATH_TEMPLATE); + static final Function>> KEYSTORE_PATH_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.path", + X509KeyPairSettings.KEYSTORE_PATH_TEMPLATE); public static final Setting LEGACY_KEYSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.password", X509KeyPairSettings.LEGACY_KEYSTORE_PASSWORD_TEMPLATE); + public static final Function> LEGACY_KEYSTORE_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." 
+ realmType + ".", "ssl.keystore.password", + X509KeyPairSettings.LEGACY_KEYSTORE_PASSWORD_TEMPLATE); public static final Setting KEYSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.secure_password", X509KeyPairSettings.KEYSTORE_PASSWORD_TEMPLATE); + public static final Function> KEYSTORE_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.secure_password", + X509KeyPairSettings.KEYSTORE_PASSWORD_TEMPLATE); public static final Setting LEGACY_KEYSTORE_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.key_password", X509KeyPairSettings.LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE); + public static final Function> LEGACY_KEYSTORE_KEY_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.key_password", + X509KeyPairSettings.LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE); public static final Setting KEYSTORE_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.secure_key_password", X509KeyPairSettings.KEYSTORE_KEY_PASSWORD_TEMPLATE); + public static final Function> KEYSTORE_KEY_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.secure_key_password", + X509KeyPairSettings.KEYSTORE_KEY_PASSWORD_TEMPLATE); - private static final Function>> TRUST_STORE_PATH_TEMPLATE = key -> new Setting<>(key, s -> null, + public static final Function>> TRUST_STORE_PATH_TEMPLATE = key -> new Setting<>(key, s -> null, Optional::ofNullable, Property.NodeScope, Property.Filtered); public static final Setting> TRUST_STORE_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.path", TRUST_STORE_PATH_TEMPLATE); + public static final Function>> TRUST_STORE_PATH_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.path", TRUST_STORE_PATH_TEMPLATE); public static final Setting> KEY_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.key", X509KeyPairSettings.KEY_PATH_TEMPLATE); + public static final Function>> KEY_PATH_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.key", X509KeyPairSettings.KEY_PATH_TEMPLATE); - private static final Function> LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE = key -> + public static final Function> LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE = key -> new Setting<>(key, "", SecureString::new, Property.Deprecated, Property.Filtered, Property.NodeScope); public static final Setting LEGACY_TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); + public static final Function> LEGACY_TRUST_STORE_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." 
+ realmType + ".", "truststore.password", + LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); - private static final Function> TRUSTSTORE_PASSWORD_TEMPLATE = key -> + public static final Function> TRUSTSTORE_PASSWORD_TEMPLATE = key -> SecureSetting.secureString(key, LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE.apply(key.replace("truststore.secure_password", "truststore.password"))); public static final Setting TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.secure_password", TRUSTSTORE_PASSWORD_TEMPLATE); + public static final Function> TRUST_STORE_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.secure_password", + TRUSTSTORE_PASSWORD_TEMPLATE); public static final Setting KEY_STORE_ALGORITHM_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.algorithm", X509KeyPairSettings.KEY_STORE_ALGORITHM_TEMPLATE); + public static final Function> KEY_STORE_ALGORITHM_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.algorithm", + X509KeyPairSettings.KEY_STORE_ALGORITHM_TEMPLATE); - private static final Function> TRUST_STORE_ALGORITHM_TEMPLATE = key -> + public static final Function> TRUST_STORE_ALGORITHM_TEMPLATE = key -> new Setting<>(key, s -> TrustManagerFactory.getDefaultAlgorithm(), Function.identity(), Property.NodeScope, Property.Filtered); public static final Setting TRUST_STORE_ALGORITHM_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.algorithm", TRUST_STORE_ALGORITHM_TEMPLATE); + public static final Function> TRUST_STORE_ALGORITHM_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.algorithm", + TRUST_STORE_ALGORITHM_TEMPLATE); public static final Setting> KEY_STORE_TYPE_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.keystore.type", X509KeyPairSettings.KEY_STORE_TYPE_TEMPLATE); + public static final Function>> KEY_STORE_TYPE_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.keystore.type", + X509KeyPairSettings.KEY_STORE_TYPE_TEMPLATE); - private static final Function>> TRUST_STORE_TYPE_TEMPLATE = + public static final Function>> TRUST_STORE_TYPE_TEMPLATE = X509KeyPairSettings.KEY_STORE_TYPE_TEMPLATE; public static final Setting> TRUST_STORE_TYPE_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.truststore.type", TRUST_STORE_TYPE_TEMPLATE); + public static final Function>> TRUST_STORE_TYPE_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.truststore.type", TRUST_STORE_TYPE_TEMPLATE); private static final Function>> TRUST_RESTRICTIONS_TEMPLATE = key -> new Setting<>(key, s -> null, Optional::ofNullable, Property.NodeScope, Property.Filtered); public static final Setting> TRUST_RESTRICTIONS_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.trust_restrictions", TRUST_RESTRICTIONS_TEMPLATE); + public static final Function>> TRUST_RESTRICTIONS_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." 
+ realmType + ".", "ssl.trust_restrictions", + TRUST_RESTRICTIONS_TEMPLATE); public static final Setting LEGACY_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.key_passphrase", X509KeyPairSettings.LEGACY_KEY_PASSWORD_TEMPLATE); + public static final Function> LEGACY_KEY_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.key_passphrase", + X509KeyPairSettings.LEGACY_KEY_PASSWORD_TEMPLATE); public static final Setting KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.secure_key_passphrase", X509KeyPairSettings.KEY_PASSWORD_TEMPLATE); + public static final Function> KEY_PASSWORD_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.secure_key_passphrase", + X509KeyPairSettings.KEY_PASSWORD_TEMPLATE); public static final Setting> CERT_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.certificate", X509KeyPairSettings.CERT_TEMPLATE); + public static final Function>> CERT_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.certificate", + X509KeyPairSettings.CERT_TEMPLATE); - private static final Function>> CAPATH_SETTING_TEMPLATE = key -> Setting.listSetting(key, Collections + public static final Function>> CAPATH_SETTING_TEMPLATE = key -> Setting.listSetting(key, Collections .emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); - public static final Setting> CAPATH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + public static final Setting> CAPATH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.certificate_authorities", CAPATH_SETTING_TEMPLATE); + public static final Function>> CAPATH_SETTING_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.certificate_authorities", + CAPATH_SETTING_TEMPLATE); private static final Function>> CLIENT_AUTH_SETTING_TEMPLATE = key -> new Setting<>(key, (String) null, s -> s == null ? Optional.empty() : Optional.of(SSLClientAuth.parse(s)), Property.NodeScope, Property.Filtered); - public static final Setting> CLIENT_AUTH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + public static final Setting> CLIENT_AUTH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", "xpack.security.ssl.client_authentication", CLIENT_AUTH_SETTING_TEMPLATE); + public static final Function>> CLIENT_AUTH_SETTING_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.client_authentication", + CLIENT_AUTH_SETTING_TEMPLATE); private static final Function>> VERIFICATION_MODE_SETTING_TEMPLATE = key -> new Setting<>(key, (String) null, s -> s == null ? Optional.empty() : Optional.of(VerificationMode.parse(s)), Property.NodeScope, Property.Filtered); public static final Setting> VERIFICATION_MODE_SETTING_PROFILES = Setting.affixKeySetting( "transport.profiles.", "xpack.security.ssl.verification_mode", VERIFICATION_MODE_SETTING_TEMPLATE); + public static final Function>> VERIFICATION_MODE_SETTING_REALM = realmType -> + Setting.affixKeySetting("xpack.security.authc.realms." + realmType + ".", "ssl.verification_mode", + VERIFICATION_MODE_SETTING_TEMPLATE); /** - * @see #withoutPrefix - * @see #withPrefix * @param prefix The prefix under which each setting should be defined. 
Must be either the empty string ("") or a string * ending in "." + * @see #withoutPrefix + * @see #withPrefix */ private SSLConfigurationSettings(String prefix) { assert prefix != null : "Prefix cannot be null (but can be blank)"; - x509KeyPair = new X509KeyPairSettings(prefix, true); - ciphers = CIPHERS_SETTING_TEMPLATE.apply(prefix + "cipher_suites"); + x509KeyPair = X509KeyPairSettings.withPrefix(prefix, true); + ciphers = CIPHERS_SETTING_TEMPLATE.apply(prefix + "cipher_suites"); supportedProtocols = SUPPORTED_PROTOCOLS_TEMPLATE.apply(prefix + "supported_protocols"); truststorePath = TRUST_STORE_PATH_TEMPLATE.apply(prefix + "truststore.path"); legacyTruststorePassword = LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE.apply(prefix + "truststore.password"); @@ -205,6 +266,7 @@ public static SSLConfigurationSettings withoutPrefix() { /** * Construct settings that have a prefix. That is, they can be used to read from a {@link Settings} object where the configuration * keys are prefixed children of the Settings. + * * @param prefix A string that must end in "ssl." */ public static SSLConfigurationSettings withPrefix(String prefix) { @@ -212,17 +274,27 @@ public static SSLConfigurationSettings withPrefix(String prefix) { return new SSLConfigurationSettings(prefix); } - public static Collection> getProfileSettings() { return Arrays.asList(CIPHERS_SETTING_PROFILES, SUPPORTED_PROTOCOLS_PROFILES, KEYSTORE_PATH_PROFILES, LEGACY_KEYSTORE_PASSWORD_PROFILES, KEYSTORE_PASSWORD_PROFILES, LEGACY_KEYSTORE_KEY_PASSWORD_PROFILES, KEYSTORE_KEY_PASSWORD_PROFILES, TRUST_STORE_PATH_PROFILES, LEGACY_TRUSTSTORE_PASSWORD_PROFILES, TRUSTSTORE_PASSWORD_PROFILES, KEY_STORE_ALGORITHM_PROFILES, TRUST_STORE_ALGORITHM_PROFILES, KEY_STORE_TYPE_PROFILES, TRUST_STORE_TYPE_PROFILES, TRUST_RESTRICTIONS_PROFILES, - KEY_PATH_PROFILES, LEGACY_KEY_PASSWORD_PROFILES, KEY_PASSWORD_PROFILES,CERT_PROFILES,CAPATH_SETTING_PROFILES, + KEY_PATH_PROFILES, LEGACY_KEY_PASSWORD_PROFILES, KEY_PASSWORD_PROFILES, CERT_PROFILES, CAPATH_SETTING_PROFILES, CLIENT_AUTH_SETTING_PROFILES, VERIFICATION_MODE_SETTING_PROFILES); } + public static Collection> getRealmSettings(String realmType) { return Stream.of(CIPHERS_SETTING_REALM, SUPPORTED_PROTOCOLS_REALM, KEYSTORE_PATH_REALM, + LEGACY_KEYSTORE_PASSWORD_REALM, KEYSTORE_PASSWORD_REALM, LEGACY_KEYSTORE_KEY_PASSWORD_REALM, + KEYSTORE_KEY_PASSWORD_REALM, TRUST_STORE_PATH_REALM, LEGACY_TRUST_STORE_PASSWORD_REALM, + TRUST_STORE_PASSWORD_REALM, KEY_STORE_ALGORITHM_REALM, TRUST_STORE_ALGORITHM_REALM, + KEY_STORE_TYPE_REALM, TRUST_STORE_TYPE_REALM, TRUST_RESTRICTIONS_REALM, + KEY_PATH_REALM, LEGACY_KEY_PASSWORD_REALM, KEY_PASSWORD_REALM, CERT_REALM, CAPATH_SETTING_REALM, + CLIENT_AUTH_SETTING_REALM, VERIFICATION_MODE_SETTING_REALM) + .map(f -> f.apply(realmType)).collect(Collectors.toList()); } + public List> getSecureSettingsInUse(Settings settings) { return Stream.of(this.truststorePassword, this.x509KeyPair.keystorePassword, this.x509KeyPair.keystoreKeyPassword, this.x509KeyPair.keyPassword) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 08513ce7412a4..428daf56059ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -7,16 +7,16 @@ import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
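The *_REALM constants added in SSLConfigurationSettings above are realm-type-scoped affix settings: each maps a realm type to a setting pattern that matches any realm name under `xpack.security.authc.realms.<type>.`. A minimal sketch of how such a pattern resolves against concrete settings; the realm type `ldap`, the realm name `ldap1`, and the plain string template are illustrative assumptions, not the production templates:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class RealmAffixSettingSketch {
    public static void main(String[] args) {
        // Affix pattern: prefix + <namespace> + "." + suffix, here with a plain string template.
        Setting.AffixSetting<String> trustStorePath = Setting.affixKeySetting(
            "xpack.security.authc.realms.ldap.", "ssl.truststore.path",
            key -> Setting.simpleString(key, Property.NodeScope));

        Settings settings = Settings.builder()
            .put("xpack.security.authc.realms.ldap.ldap1.ssl.truststore.path", "/etc/certs/ldap.jks")
            .build();

        // "ldap1" is a hypothetical realm name; it is the namespace captured by the affix pattern.
        String path = trustStorePath.getConcreteSettingForNamespace("ldap1").get(settings);
        System.out.println(path); // prints /etc/certs/ldap.jks
    }
}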
+import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; -import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; import javax.net.ssl.HostnameVerifier; @@ -58,7 +58,11 @@ * Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. All * configurations loaded by this service must be configured on construction. */ -public class SSLService extends AbstractComponent { +public class SSLService { + + private static final Logger logger = LogManager.getLogger(SSLService.class); + + private final Settings settings; /** * This is a mapping from "context name" (in general use, the name of a setting key) @@ -86,7 +90,7 @@ public class SSLService extends AbstractComponent { * for use later */ public SSLService(Settings settings, Environment environment) { - super(settings); + this.settings = settings; this.env = environment; this.globalSSLConfiguration = new SSLConfiguration(settings.getByPrefix(XPackSettings.GLOBAL_SSL_PREFIX)); this.sslConfigurations = new HashMap<>(); @@ -95,7 +99,7 @@ public SSLService(Settings settings, Environment environment) { private SSLService(Settings settings, Environment environment, SSLConfiguration globalSSLConfiguration, Map sslConfigurations, Map sslContexts) { - super(settings); + this.settings = settings; this.env = environment; this.globalSSLConfiguration = globalSSLConfiguration; this.sslConfigurations = sslConfigurations; @@ -181,9 +185,9 @@ SSLParameters sslParameters(SSLContext sslContext) { * not expose any of the parameters that you give it. * * @param sslContext SSL Context used to handle SSL / TCP requests - * @param protocols Supported protocols - * @param ciphers Supported ciphers - * @param verifier Hostname verifier + * @param protocols Supported protocols + * @param ciphers Supported ciphers + * @param verifier Hostname verifier * @return Never {@code null}. */ SSLIOSessionStrategy sslIOSessionStrategy(SSLContext sslContext, String[] protocols, String[] ciphers, HostnameVerifier verifier) { @@ -206,10 +210,11 @@ public SSLSocketFactory sslSocketFactory(SSLConfiguration configuration) { * Creates an {@link SSLEngine} based on the provided configuration. This SSLEngine can be used for a connection that requires * hostname verification assuming the provided * host and port are correct. The SSLEngine created by this method is most useful for clients with hostname verification enabled + * * @param configuration the ssl configuration - * @param host the host of the remote endpoint. If using hostname verification, this should match what is in the remote endpoint's - * certificate - * @param port the port of the remote endpoint + * @param host the host of the remote endpoint. 
If using hostname verification, this should match what is in the remote + endpoint's certificate + * @param port the port of the remote endpoint + * @return {@link SSLEngine} * @see #getSSLConfiguration(String) */ @@ -238,6 +243,7 @@ public SSLEngine createSSLEngine(SSLConfiguration configuration, String host, in /** * Returns whether the provided settings result in a valid configuration that can be used for server connections + * * @param sslConfiguration the configuration to check */ public boolean isConfigurationValidForServerUsage(SSLConfiguration sslConfiguration) { @@ -269,6 +275,7 @@ SSLContext sslContext(SSLConfiguration configuration) { /** * Returns the existing {@link SSLContextHolder} for the configuration + * * @throws IllegalArgumentException if not found */ SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) { @@ -282,6 +289,7 @@ SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) { /** * Returns the existing {@link SSLConfiguration} for the given settings + * * @param settings the settings for the ssl configuration * @return the ssl configuration for the provided settings. If the settings are empty, the global configuration is returned */ @@ -309,6 +317,7 @@ Collection getLoadedSSLConfigurations() { /** * Returns the intersection of the supported ciphers with the requested ciphers. This method will also optionally log if unsupported * ciphers were requested. + * * @throws IllegalArgumentException if no supported ciphers are in the requested ciphers */ String[] supportedCiphers(String[] supportedCiphers, List requestedCiphers, boolean log) { @@ -346,6 +355,7 @@ String[] supportedCiphers(String[] supportedCiphers, List requestedCiphe /** * Creates an {@link SSLContext} based on the provided configuration + * * @param sslConfiguration the configuration to use for context creation * @return the created SSLContext */ @@ -360,9 +370,10 @@ private SSLContextHolder createSslContext(SSLConfiguration sslConfiguration) { /** * Creates an {@link SSLContext} based on the provided configuration and trust/key managers + * * @param sslConfiguration the configuration to use for context creation - * @param keyManager the key manager to use - * @param trustManager the trust manager to use + * @param keyManager the key manager to use + * @param trustManager the trust manager to use * @return the created SSLContext */ private SSLContextHolder createSslContext(X509ExtendedKeyManager keyManager, X509ExtendedTrustManager trustManager, @@ -370,7 +381,7 @@ private SSLContextHolder createSslContext(X509ExtendedKeyManager keyManager, X50 // Initialize sslContext try { SSLContext sslContext = SSLContext.getInstance(sslContextAlgorithm(sslConfiguration.supportedProtocols())); - sslContext.init(new X509ExtendedKeyManager[] { keyManager }, new X509ExtendedTrustManager[] { trustManager }, null); + sslContext.init(new X509ExtendedKeyManager[]{keyManager}, new X509ExtendedTrustManager[]{trustManager}, null); // check the supported ciphers and log them here to prevent spamming logs on every call supportedCiphers(sslContext.getSupportedSSLParameters().getCipherSuites(), sslConfiguration.cipherSuites(), true); @@ -434,6 +445,7 @@ private void storeSslConfiguration(String key, SSLConfiguration configuration) { * certificates that are provided by the JRE. * Due to the nature of KeyStores, this may include certificates that are available, but never used, * such as a CA certificate that is no longer in use, or a server certificate for an unrelated host.
+ * * @see TrustConfig#certificates(Environment) */ public Set getLoadedCertificates() throws GeneralSecurityException, IOException { @@ -494,14 +506,14 @@ public Socket createSocket(String host, int port) throws IOException { @Override public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException { - SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port, localHost, localPort)); + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port, localHost, localPort)); configureSSLSocket(sslSocket); return sslSocket; } @Override public Socket createSocket(InetAddress host, int port) throws IOException { - SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port)); + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port)); configureSSLSocket(sslSocket); return sslSocket; } @@ -604,14 +616,16 @@ static void invalidateSessions(SSLSessionContext sslSessionContext) { * @return A map of Settings prefix to Settings object */ private static Map getRealmsSSLSettings(Settings settings) { - Map sslSettings = new HashMap<>(); - final String prefix = SecurityField.setting("authc.realms."); - Settings realmsSettings = settings.getByPrefix(prefix); - for (String name : realmsSettings.names()) { - Settings realmSSLSettings = realmsSettings.getAsSettings(name).getByPrefix("ssl."); - // Put this even if empty, so that the name will be mapped to the global SSL configuration - sslSettings.put(prefix + name + ".ssl", realmSSLSettings); - } + final Map sslSettings = new HashMap<>(); + final String prefix = "xpack.security.authc.realms."; + final Map settingsByRealmType = settings.getGroups(prefix); + settingsByRealmType.forEach((realmType, typeSettings) -> + typeSettings.getAsGroups().forEach((realmName, realmSettings) -> { + Settings realmSSLSettings = realmSettings.getByPrefix("ssl."); + // Put this even if empty, so that the name will be mapped to the global SSL configuration + sslSettings.put(prefix + realmType + "." + realmName + ".ssl", realmSSLSettings); + }) + ); return sslSettings; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java index 33b78b2da8609..bf2209f213cd4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.util.CollectionUtils; import javax.net.ssl.KeyManagerFactory; - import java.security.KeyStore; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.function.Function; +import java.util.stream.Collectors; /** * An encapsulation of the configuration options for X.509 Key Pair support in X-Pack security. 
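The reworked getRealmsSSLSettings above walks two levels of setting groups, realm type and then realm name, instead of the single level used before. A minimal, self-contained sketch of that grouping, using hypothetical realm names (ldap1, saml1):

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.settings.Settings;

public class RealmsSslGroupingSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("xpack.security.authc.realms.ldap.ldap1.ssl.verification_mode", "certificate")
            .put("xpack.security.authc.realms.saml.saml1.order", "2")
            .build();

        // First level: group by realm type ("ldap", "saml"); second level: by realm name.
        Map<String, Settings> byRealmType = settings.getGroups("xpack.security.authc.realms.");
        Map<String, Settings> sslByRealm = new HashMap<>();
        byRealmType.forEach((type, typeSettings) ->
            typeSettings.getAsGroups().forEach((name, realmSettings) ->
                // Kept even when empty, so every realm maps to the global SSL configuration.
                sslByRealm.put("xpack.security.authc.realms." + type + "." + name + ".ssl",
                    realmSettings.getByPrefix("ssl."))));

        // Prints one entry per realm; saml1 maps to an empty Settings object because it
        // configured no ssl.* keys.
        sslByRealm.forEach((key, value) -> System.out.println(key + " -> " + value));
    }
}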
@@ -61,8 +61,6 @@ public class X509KeyPairSettings { "key_passphrase"))); - private final String prefix; - // Specify private cert/key pair via keystore final Setting> keystorePath; final Setting keystorePassword; @@ -83,21 +81,24 @@ public class X509KeyPairSettings { private final List> allSettings; - public X509KeyPairSettings(String prefix, boolean acceptNonSecurePasswords) { - keystorePath = KEYSTORE_PATH_TEMPLATE.apply(prefix + "keystore.path"); - keystorePassword = KEYSTORE_PASSWORD_TEMPLATE.apply(prefix + "keystore.secure_password"); - keystoreAlgorithm = KEY_STORE_ALGORITHM_TEMPLATE.apply(prefix + "keystore.algorithm"); - keystoreType = KEY_STORE_TYPE_TEMPLATE.apply(prefix + "keystore.type"); - keystoreKeyPassword = KEYSTORE_KEY_PASSWORD_TEMPLATE.apply(prefix + "keystore.secure_key_password"); + private interface SettingFactory { + Setting apply(String keyPart, Function> template); + } + + private X509KeyPairSettings(boolean acceptNonSecurePasswords, SettingFactory factory) { + keystorePath = factory.apply("keystore.path", KEYSTORE_PATH_TEMPLATE); + keystorePassword = factory.apply("keystore.secure_password", KEYSTORE_PASSWORD_TEMPLATE); + keystoreAlgorithm = factory.apply("keystore.algorithm", KEY_STORE_ALGORITHM_TEMPLATE); + keystoreType = factory.apply("keystore.type", KEY_STORE_TYPE_TEMPLATE); + keystoreKeyPassword = factory.apply("keystore.secure_key_password", KEYSTORE_KEY_PASSWORD_TEMPLATE); - keyPath = KEY_PATH_TEMPLATE.apply(prefix + "key"); - keyPassword = KEY_PASSWORD_TEMPLATE.apply(prefix + "secure_key_passphrase"); - certificatePath = CERT_TEMPLATE.apply(prefix + "certificate"); + keyPath = factory.apply("key", KEY_PATH_TEMPLATE); + keyPassword = factory.apply("secure_key_passphrase", KEY_PASSWORD_TEMPLATE); + certificatePath = factory.apply("certificate", CERT_TEMPLATE); - legacyKeystorePassword = LEGACY_KEYSTORE_PASSWORD_TEMPLATE.apply(prefix + "keystore.password"); - legacyKeystoreKeyPassword = LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE.apply(prefix + "keystore.key_password"); - legacyKeyPassword = LEGACY_KEY_PASSWORD_TEMPLATE.apply(prefix + "key_passphrase"); - this.prefix = prefix; + legacyKeystorePassword = factory.apply("keystore.password", LEGACY_KEYSTORE_PASSWORD_TEMPLATE); + legacyKeystoreKeyPassword = factory.apply("keystore.key_password", LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE); + legacyKeyPassword = factory.apply("key_passphrase", LEGACY_KEY_PASSWORD_TEMPLATE); final List> settings = CollectionUtils.arrayAsArrayList( keystorePath, keystorePassword, keystoreAlgorithm, keystoreType, keystoreKeyPassword, @@ -110,12 +111,26 @@ public X509KeyPairSettings(String prefix, boolean acceptNonSecurePasswords) { allSettings = Collections.unmodifiableList(settings); } + public static X509KeyPairSettings withPrefix(String prefix, boolean acceptNonSecurePasswords) { + return new X509KeyPairSettings(acceptNonSecurePasswords, new SettingFactory() { + @Override + public Setting apply(String key, Function> template) { + return template.apply(prefix + key); + } + }); + } - public Collection> getAllSettings() { - return allSettings; + public static Collection> affix(String prefix, String suffixPart, boolean acceptNonSecurePasswords) { + final X509KeyPairSettings settings = new X509KeyPairSettings(acceptNonSecurePasswords, new SettingFactory() { + @Override + public Setting apply(String keyPart, Function> template) { + return Setting.affixKeySetting(prefix, suffixPart + keyPart, template); + } + }); + return settings.getAllSettings().stream().map(s -> (Setting.AffixSetting) 
s).collect(Collectors.toList()); } - public String getPrefix() { - return prefix; + public Collection> getAllSettings() { + return allSettings; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java index 9337f7f6b0c22..2f81d75e7d224 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -25,9 +24,8 @@ public class TransportGetCertificateInfoAction extends HandledTransportAction ENCRYPTION_KEY_ALGO_SETTING = new Setting<>(SecurityField.setting("encryption_key.algorithm"), DEFAULT_KEY_ALGORITH, s -> s, Property.NodeScope); + private static final Logger logger = LogManager.getLogger(CryptoService.class); private final SecureRandom secureRandom = new SecureRandom(); private final String encryptionAlgorithm; @@ -68,7 +70,6 @@ public class CryptoService extends AbstractComponent { private final SecretKey encryptionKey; public CryptoService(Settings settings) throws IOException { - super(settings); this.encryptionAlgorithm = ENCRYPTION_ALGO_SETTING.get(settings); final int keyLength = ENCRYPTION_KEY_LENGTH_SETTING.get(settings); this.ivLength = keyLength / 8; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java index 059718973201c..e0724795c297c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java @@ -12,11 +12,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; +import org.elasticsearch.common.xcontent.XContentUtils; import java.io.IOException; import java.io.InputStream; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java new file mode 100644 index 0000000000000..d81fd5aed79ba --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcStreamableXContentTestCase.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol; + +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public abstract class AbstractHlrcStreamableXContentTestCase + extends AbstractStreamableXContentTestCase { + + /** + * Generic test that creates a new HLRC request/response instance from the test instance and checks + * that the two are equal. + */ + public final void testHlrcFromXContent() throws IOException { + xContentTester(this::createParser, this::createTestInstance, getToXContentParams(), + p -> convertHlrcToInternal(doHlrcParseInstance(p))) + .numberOfTestRuns(NUMBER_OF_TEST_RUNS) + .supportsUnknownFields(supportsUnknownFields()) + .shuffleFieldsExceptions(getShuffleFieldsExceptions()) + .randomFieldsExcludeFilter(getRandomFieldsExcludeFilter()) + .assertEqualsConsumer(this::assertEqualInstances) + .assertToXContentEquivalence(true) + .test(); + } + + /** + * Parses to a new HLRC instance using the provided {@link XContentParser} + */ + public abstract H doHlrcParseInstance(XContentParser parser) throws IOException; + + /** + * Converts an HLRC instance to an XPack instance + */ + public abstract T convertHlrcToInternal(H instance); + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java new file mode 100644 index 0000000000000..d6d8f9afe3659 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/AbstractHlrcXContentTestCase.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public abstract class AbstractHlrcXContentTestCase extends AbstractXContentTestCase { + + /** + * Generic test that creates a new HLRC request/response instance from the test instance and checks + * that the two are equal.
+ */ + public final void testHlrcFromXContent() throws IOException { + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, this::createTestInstance, supportsUnknownFields(), + getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, + p -> convertHlrcToInternal(doHlrcParseInstance(p)), + this::assertEqualInstances, true, getToXContentParams()); + } + + /** + * Parses to a new HLRC instance using the provided {@link XContentParser} + */ + public abstract H doHlrcParseInstance(XContentParser parser) throws IOException; + + /** + * Converts an HLRC instance to an XPack instance + */ + public abstract T convertHlrcToInternal(H instance); +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java index fac99959c536a..1e77d6a83f2ed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoResponseTests.java @@ -7,12 +7,12 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.util.HashMap; import java.util.HashSet; @@ -21,8 +21,11 @@ import java.util.function.Function; import java.util.function.Predicate; import java.io.IOException; +import java.util.stream.Collectors; + +public class XPackInfoResponseTests extends + AbstractHlrcStreamableXContentTestCase { -public class XPackInfoResponseTests extends AbstractStreamableXContentTestCase { @Override protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { return XPackInfoResponse.fromXContent(parser); @@ -33,6 +36,38 @@ protected XPackInfoResponse createBlankInstance() { return new XPackInfoResponse(); } + @Override + public org.elasticsearch.client.xpack.XPackInfoResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.xpack.XPackInfoResponse.fromXContent(parser); + } + + @Override + public XPackInfoResponse convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse instance) { + return new XPackInfoResponse(convertHlrcToInternal(instance.getBuildInfo()), + convertHlrcToInternal(instance.getLicenseInfo()), convertHlrcToInternal(instance.getFeatureSetsInfo())); + } + + private BuildInfo convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo buildInfo) { + return buildInfo != null ? new BuildInfo(buildInfo.getHash(), buildInfo.getTimestamp()) : null; + } + + private LicenseInfo convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo licenseInfo) { + return licenseInfo != null + ? new LicenseInfo(licenseInfo.getUid(), licenseInfo.getType(), licenseInfo.getMode(), + licenseInfo.getStatus() != null ?
LicenseStatus.valueOf(licenseInfo.getStatus().name()) : null, + licenseInfo.getExpiryDate()) + : null; + } + + private FeatureSetsInfo convertHlrcToInternal(org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo featureSetsInfo) { + return featureSetsInfo != null + ? new FeatureSetsInfo(featureSetsInfo.getFeatureSets().values().stream() + .map(fs -> new FeatureSet(fs.name(), fs.description(), fs.available(), fs.enabled(), + fs.nativeCodeInfo())) + .collect(Collectors.toSet())) + : null; + } + @Override protected Predicate getRandomFieldsExcludeFilter() { return path -> path.equals("features") diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java index 72f84e5815c97..3235f62b3e481 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponseTests.java @@ -10,24 +10,74 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Comparator; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.Map; +import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; -public class GraphExploreResponseTests extends AbstractXContentTestCase< GraphExploreResponse> { +public class GraphExploreResponseTests extends + AbstractHlrcXContentTestCase { + + static final Function VERTEX_ID_FUNCTION = + vId -> new Vertex.VertexId(vId.getField(), vId.getTerm()); + static final Function VERTEX_FUNCTION = + v -> new Vertex(v.getField(), v.getTerm(), v.getWeight(), v.getHopDepth(), v.getBg(), v.getFg()); + + @Override + public org.elasticsearch.client.graph.GraphExploreResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.graph.GraphExploreResponse.fromXContent(parser); + } + + @Override + public GraphExploreResponse convertHlrcToInternal(org.elasticsearch.client.graph.GraphExploreResponse instance) { + return new GraphExploreResponse(instance.getTookInMillis(), instance.isTimedOut(), + instance.getShardFailures(), convertVertices(instance), convertConnections(instance), instance.isReturnDetailedInfo()); + } + + public Map convertVertices(org.elasticsearch.client.graph.GraphExploreResponse instance) { + final Collection vertexIds = instance.getVertexIds(); + final Map vertexMap = new LinkedHashMap<>(vertexIds.size()); + + for (org.elasticsearch.client.graph.Vertex.VertexId vertexId : vertexIds) { + final org.elasticsearch.client.graph.Vertex vertex = instance.getVertex(vertexId); + + vertexMap.put(VERTEX_ID_FUNCTION.apply(vertexId), VERTEX_FUNCTION.apply(vertex)); + } + return vertexMap; + } + + public Map convertConnections(org.elasticsearch.client.graph.GraphExploreResponse instance) { + final Collection connectionIds = instance.getConnectionIds(); + final Map connectionMap = new LinkedHashMap<>(connectionIds.size()); + for (org.elasticsearch.client.graph.Connection.ConnectionId connectionId : 
connectionIds) { + final org.elasticsearch.client.graph.Connection connection = instance.getConnection(connectionId); + final Connection.ConnectionId connectionId1 = + new Connection.ConnectionId(VERTEX_ID_FUNCTION.apply(connectionId.getSource()), + VERTEX_ID_FUNCTION.apply(connectionId.getTarget())); + final Connection connection1 = new Connection(VERTEX_FUNCTION.apply(connection.getFrom()), + VERTEX_FUNCTION.apply(connection.getTo()), + connection.getWeight(), connection.getDocCount()); + connectionMap.put(connectionId1, connection1); + } + return connectionMap; + } @Override protected GraphExploreResponse createTestInstance() { return createInstance(0); } + private static GraphExploreResponse createInstance(int numFailures) { int numItems = randomIntBetween(4, 128); boolean timedOut = randomBoolean(); @@ -62,13 +112,13 @@ private static GraphExploreResponse createInstance(int numFailures) { } - private static GraphExploreResponse createTestInstanceWithFailures() { + private static GraphExploreResponse createTestInstanceWithFailures() { return createInstance(randomIntBetween(1, 128)); } @Override protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException { - return GraphExploreResponse.fromXContext(parser); + return GraphExploreResponse.fromXContent(parser); } @Override @@ -79,7 +129,7 @@ protected boolean supportsUnknownFields() { @Override protected boolean assertToXContentEquivalence() { return false; - } + } @Override protected String[] getShuffleFieldsExceptions() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java index 7149477d00765..0b73850c5e637 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/LicenseStatusTests.java @@ -9,9 +9,25 @@ import org.elasticsearch.test.ESTestCase; +import static org.hamcrest.CoreMatchers.equalTo; + public class LicenseStatusTests extends ESTestCase { public void testSerialization() throws IOException { LicenseStatus status = randomFrom(LicenseStatus.values()); assertSame(status, copyWriteable(status, writableRegistry(), LicenseStatus::readFrom)); } + + public void testCompatibility() { + final LicenseStatus[] values = LicenseStatus.values(); + final org.elasticsearch.client.license.LicenseStatus[] hlrcValues = + org.elasticsearch.client.license.LicenseStatus.values(); + + assertThat(values.length, equalTo(hlrcValues.length)); + + for (LicenseStatus value : values) { + final org.elasticsearch.client.license.LicenseStatus licenseStatus = + org.elasticsearch.client.license.LicenseStatus.fromString(value.label()); + assertThat(licenseStatus.label(), equalTo(value.label())); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java index a09fd6fb99b45..87ba4324ec1e3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponseTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.protocol.xpack.license; import org.elasticsearch.common.xcontent.XContentParser; -import 
org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; import java.io.IOException; import java.util.Collections; @@ -15,7 +15,19 @@ import java.util.function.Function; import java.util.function.Predicate; -public class PutLicenseResponseTests extends AbstractStreamableXContentTestCase { +public class PutLicenseResponseTests extends + AbstractHlrcStreamableXContentTestCase { + + @Override + public org.elasticsearch.client.license.PutLicenseResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.license.PutLicenseResponse.fromXContent(parser); + } + + @Override + public PutLicenseResponse convertHlrcToInternal(org.elasticsearch.client.license.PutLicenseResponse instance) { + return new PutLicenseResponse(instance.isAcknowledged(), LicensesStatus.valueOf(instance.status().name()), + instance.acknowledgeHeader(), instance.acknowledgeMessages()); + } @Override protected boolean supportsUnknownFields() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java new file mode 100644 index 0000000000000..78e1c75483aa6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/license/StartBasicResponseTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.protocol.xpack.license; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.PostStartBasicResponse; +import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class StartBasicResponseTests extends + AbstractHlrcStreamableXContentTestCase { + + @Override + public org.elasticsearch.client.license.StartBasicResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.license.StartBasicResponse.fromXContent(parser); + } + + @Override + public PostStartBasicResponse convertHlrcToInternal(org.elasticsearch.client.license.StartBasicResponse instance) { + return new PostStartBasicResponse(PostStartBasicResponse.Status.valueOf(instance.getStatus().name()), + instance.getAcknowledgeMessages(), instance.getAcknowledgeMessage()); + } + + @Override + protected PostStartBasicResponse doParseInstance(XContentParser parser) throws IOException { + return PostStartBasicResponse.fromXContent(parser); + } + + @Override + protected PostStartBasicResponse createBlankInstance() { + return new PostStartBasicResponse(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // The structure of the response is such that unknown fields inside acknowledge cannot be supported since they + // are treated as messages from new services + return p -> p.startsWith("acknowledge"); + } + + @Override + protected PostStartBasicResponse createTestInstance() { + PostStartBasicResponse.Status status = 
randomFrom(PostStartBasicResponse.Status.values()); + String acknowledgeMessage = null; + Map ackMessages = Collections.emptyMap(); + if (status != PostStartBasicResponse.Status.GENERATED_BASIC) { + acknowledgeMessage = randomAlphaOfLength(10); + ackMessages = randomAckMessages(); + } + final PostStartBasicResponse postStartBasicResponse = new PostStartBasicResponse(status, ackMessages, acknowledgeMessage); + logger.info("{}", Strings.toString(postStartBasicResponse)); + return postStartBasicResponse; + } + + private static Map randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java index 57f01a4454e02..76f00ebb24309 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java @@ -6,18 +6,36 @@ package org.elasticsearch.protocol.xpack.migration; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; +import java.io.IOException; +import java.util.AbstractMap; import java.util.HashMap; import java.util.Iterator; import java.util.Map; +import java.util.stream.Collectors; + +public class IndexUpgradeInfoResponseTests extends + AbstractHlrcStreamableXContentTestCase { -public class IndexUpgradeInfoResponseTests extends AbstractStreamableXContentTestCase { @Override protected IndexUpgradeInfoResponse doParseInstance(XContentParser parser) { return IndexUpgradeInfoResponse.fromXContent(parser); } + @Override + public org.elasticsearch.client.migration.IndexUpgradeInfoResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.migration.IndexUpgradeInfoResponse.fromXContent(parser); + } + + @Override + public IndexUpgradeInfoResponse convertHlrcToInternal(org.elasticsearch.client.migration.IndexUpgradeInfoResponse instance) { + final Map actions = instance.getActions(); + return new IndexUpgradeInfoResponse(actions.entrySet().stream().map( + e -> new AbstractMap.SimpleEntry<>(e.getKey(), UpgradeActionRequired.valueOf(e.getValue().name())) + ).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); + } + @Override protected IndexUpgradeInfoResponse createBlankInstance() { return new IndexUpgradeInfoResponse(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java index 209bc790a8c54..7486252f53859 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchResponseTests.java @@ -6,11 
+6,12 @@ package org.elasticsearch.protocol.xpack.watcher; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; import java.io.IOException; -public class DeleteWatchResponseTests extends AbstractXContentTestCase { +public class DeleteWatchResponseTests extends + AbstractHlrcXContentTestCase { @Override protected DeleteWatchResponse createTestInstance() { @@ -25,6 +26,16 @@ protected DeleteWatchResponse doParseInstance(XContentParser parser) throws IOEx return DeleteWatchResponse.fromXContent(parser); } + @Override + public org.elasticsearch.client.watcher.DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.DeleteWatchResponse.fromXContent(parser); + } + + @Override + public DeleteWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.DeleteWatchResponse instance) { + return new DeleteWatchResponse(instance.getId(), instance.getVersion(), instance.isFound()); + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java index 1fc2f61b684c7..8ea4a84daed95 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/PutWatchResponseTests.java @@ -6,11 +6,12 @@ package org.elasticsearch.protocol.xpack.watcher; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.protocol.AbstractHlrcXContentTestCase; import java.io.IOException; -public class PutWatchResponseTests extends AbstractXContentTestCase { +public class PutWatchResponseTests extends + AbstractHlrcXContentTestCase { @Override protected PutWatchResponse createTestInstance() { @@ -25,6 +26,16 @@ protected PutWatchResponse doParseInstance(XContentParser parser) throws IOExcep return PutWatchResponse.fromXContent(parser); } + @Override + public org.elasticsearch.client.watcher.PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser); + } + + @Override + public PutWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.PutWatchResponse instance) { + return new PutWatchResponse(instance.getId(), instance.getVersion(), instance.isCreated()); + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java index 60050bd93114a..f6c5c4ce232df 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java @@ -57,8 +57,8 @@ public void testDoExecute() throws Exception { TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); - TransportXPackInfoAction action = new 
TransportXPackInfoAction(Settings.EMPTY, transportService, - mock(ActionFilters.class), licenseService, featureSets); + TransportXPackInfoAction action = new TransportXPackInfoAction(transportService, mock(ActionFilters.class), + licenseService, featureSets); License license = mock(License.class); long expiryDate = randomLong(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java index 160641be46986..5267e5dc2ff42 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java @@ -80,7 +80,7 @@ public void testFrom() throws IOException { List nodeStats = Collections.singletonList(new NodeStats(discoveryNode, 0L, null, null, null, null, null, null, null, null, null, null, null, null, null)); - IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY); + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); boolean clusterIssueFound = randomBoolean(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java new file mode 100644 index 0000000000000..bed04a7cf5425 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractActionTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.List; +import java.util.stream.Collectors; + +public abstract class AbstractActionTestCase extends AbstractSerializingTestCase { + + public abstract void testToSteps(); + + protected boolean isSafeAction() { + return true; + } + + public final void testIsSafeAction() { + LifecycleAction action = createTestInstance(); + assertEquals(isSafeAction(), action.isSafeAction()); + } + + public void testToStepKeys() { + T action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + List stepKeys = action.toStepKeys(phase); + assertNotNull(stepKeys); + List expectedStepKeys = steps.stream().map(Step::getKey).collect(Collectors.toList()); + assertEquals(expectedStepKeys, stepKeys); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java new file mode 100644 index 0000000000000..2757a0499aae9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AbstractStepTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public abstract class AbstractStepTestCase extends ESTestCase { + + protected static final int NUMBER_OF_TEST_RUNS = 20; + + protected abstract T createRandomInstance(); + protected abstract T mutateInstance(T instance); + protected abstract T copyInstance(T instance); + + public void testHashcodeAndEquals() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createRandomInstance(), this::copyInstance, this::mutateInstance); + } + } + + public static StepKey randomStepKey() { + String randomPhase = randomAlphaOfLength(10); + String randomAction = randomAlphaOfLength(10); + String randomStepName = randomAlphaOfLength(10); + return new StepKey(randomPhase, randomAction, randomStepName); + } + + public void testStepNameNotError() { + T instance = createRandomInstance(); + StepKey stepKey = instance.getKey(); + assertFalse(ErrorStep.NAME.equals(stepKey.getName())); + StepKey nextStepKey = instance.getKey(); + assertFalse(ErrorStep.NAME.equals(nextStepKey.getName())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java new file mode 100644 index 0000000000000..dfe2afc5d19ef --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocateActionTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class AllocateActionTests extends AbstractActionTestCase { + + @Override + protected AllocateAction doParseInstance(XContentParser parser) { + return AllocateAction.parse(parser); + } + + @Override + protected AllocateAction createTestInstance() { + return randomInstance(); + } + + static AllocateAction randomInstance() { + boolean hasAtLeastOneMap = false; + Map includes; + if (randomBoolean()) { + includes = randomMap(1, 100); + hasAtLeastOneMap = true; + } else { + includes = randomBoolean() ? null : Collections.emptyMap(); + } + Map excludes; + if (randomBoolean()) { + hasAtLeastOneMap = true; + excludes = randomMap(1, 100); + } else { + excludes = randomBoolean() ? null : Collections.emptyMap(); + } + Map requires; + if (hasAtLeastOneMap == false || randomBoolean()) { + requires = randomMap(1, 100); + } else { + requires = randomBoolean() ? null : Collections.emptyMap(); + } + Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); + return new AllocateAction(numberOfReplicas, includes, excludes, requires); + } + + + @Override + protected Reader instanceReader() { + return AllocateAction::new; + } + + @Override + protected AllocateAction mutateInstance(AllocateAction instance) { + Map include = instance.getInclude(); + Map exclude = instance.getExclude(); + Map require = instance.getRequire(); + Integer numberOfReplicas = instance.getNumberOfReplicas(); + switch (randomIntBetween(0, 3)) { + case 0: + include = new HashMap<>(include); + include.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 1: + exclude = new HashMap<>(exclude); + exclude.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 2: + require = new HashMap<>(require); + require.put(randomAlphaOfLengthBetween(11, 15), randomAlphaOfLengthBetween(1, 20)); + break; + case 3: + numberOfReplicas = randomIntBetween(11, 20); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new AllocateAction(numberOfReplicas, include, exclude, require); + } + + public void testAllMapsNullOrEmpty() { + Map include = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? 
null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(null, include, exclude, require)); + assertEquals("At least one of " + AllocateAction.INCLUDE_FIELD.getPreferredName() + ", " + + AllocateAction.EXCLUDE_FIELD.getPreferredName() + " or " + AllocateAction.REQUIRE_FIELD.getPreferredName() + + "must contain attributes for action " + AllocateAction.NAME, exception.getMessage()); + } + + public void testInvalidNumberOfReplicas() { + Map include = randomMap(1, 5); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(randomIntBetween(-1000, -1), include, exclude, require)); + assertEquals("[" + AllocateAction.NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0", exception.getMessage()); + } + + public static Map randomMap(int minEntries, int maxEntries) { + Map map = new HashMap<>(); + int numIncludes = randomIntBetween(minEntries, maxEntries); + for (int i = 0; i < numIncludes; i++) { + map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + return map; + } + + public void testToSteps() { + AllocateAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(2, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, AllocateAction.NAME, AllocateAction.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, AllocateAction.NAME, AllocationRoutedStep.NAME); + UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, firstStep.getNextStepKey()); + Settings.Builder expectedSettings = Settings.builder(); + if (action.getNumberOfReplicas() != null) { + expectedSettings.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, action.getNumberOfReplicas()); + } + action.getInclude().forEach( + (key, value) -> expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + key, value)); + action.getExclude().forEach( + (key, value) -> expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + key, value)); + action.getRequire().forEach( + (key, value) -> expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + key, value)); + assertThat(firstStep.getSettings(), equalTo(expectedSettings.build())); + AllocationRoutedStep secondStep = (AllocationRoutedStep) steps.get(1); + assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(nextStepKey, secondStep.getNextStepKey()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java new file mode 100644 index 0000000000000..80eb89c45b952 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepInfoTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.indexlifecycle.AllocationRoutedStep.Info; + +import java.io.IOException; + +public class AllocationRoutedStepInfoTests extends AbstractXContentTestCase { + + @Override + protected Info createTestInstance() { + return new Info(randomNonNegativeLong(), randomNonNegativeLong(), randomBoolean()); + } + + @Override + protected Info doParseInstance(XContentParser parser) throws IOException { + return Info.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public final void testEqualsAndHashcode() { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance); + } + } + + protected final Info copyInstance(Info instance) throws IOException { + return new Info(instance.getActualReplicas(), instance.getNumberShardsLeftToAllocate(), instance.allShardsActive()); + } + + protected Info mutateInstance(Info instance) throws IOException { + long actualReplicas = instance.getActualReplicas(); + long shardsToAllocate = instance.getNumberShardsLeftToAllocate(); + boolean allShardsActive = instance.allShardsActive(); + switch (between(0, 2)) { + case 0: + shardsToAllocate += between(1, 20); + break; + case 1: + allShardsActive = allShardsActive == false; + break; + case 2: + actualReplicas += between(1, 20); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new Info(actualReplicas, shardsToAllocate, allShardsActive); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java new file mode 100644 index 0000000000000..03320516d9442 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.Reason; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Collections; +import java.util.Map; + +public class AllocationRoutedStepTests extends AbstractStepTestCase { + + @Override + public AllocationRoutedStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + return new AllocationRoutedStep(stepKey, nextStepKey); + } + + @Override + public AllocationRoutedStep mutateInstance(AllocationRoutedStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new AllocationRoutedStep(key, nextKey); + } + + @Override + public AllocationRoutedStep copyInstance(AllocationRoutedStep instance) { + return new AllocationRoutedStep(instance.getKey(), instance.getNextStepKey()); + } + + public void testConditionMet() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + 
expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + } + + public void testConditionMetOnlyOneCopyAllocated() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey()); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, 1, true))); + } + + public void testExecuteAllocateNotComplete() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + 
k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), "node2", true, ShardRoutingState.STARTED)); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, 1, true))); + } + + public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey()); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, 1, true))); + } + + public void testExecuteAllocateUnassigned() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map includes = AllocateActionTests.randomMap(1, 5); + Map excludes = AllocateActionTests.randomMap(1, 5); + Map 
requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + includes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + excludes.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); + }); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), null, null, true, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(randomFrom(Reason.values()), "the shard is intentionally unassigned"))); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(0, -1, false))); + } + + /** + * this tests the scenario where + * + * PUT index + * { + * "settings": { + * "number_of_replicas": 0, + * "number_of_shards": 1 + * } + * } + * + * PUT index/_settings + * { + * "number_of_replicas": 1, + * "index.routing.allocation.include._name": "{node-name}" + * } + */ + public void testExecuteReplicasNotAllocatedOnSingleNode() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = Collections.singletonMap("_name", "node1"); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), null, null, false, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(Reason.REPLICA_ADDED, "no attempt"))); + + AllocationRoutedStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 1, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new AllocationRoutedStep.Info(1, -1, false))); + } + + public void testExecuteIndexMissing() throws Exception { + Index index = new 
Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).build();
+
+        AllocationRoutedStep step = createRandomInstance();
+
+        Result actualResult = step.isConditionMet(index, clusterState);
+        assertFalse(actualResult.isComplete());
+        assertNull(actualResult.getInfomationContext());
+    }
+
+    private void assertAllocateStatus(Index index, int shards, int replicas, AllocationRoutedStep step,
+            Settings.Builder existingSettings, Settings.Builder node1Settings, Settings.Builder node2Settings,
+            IndexRoutingTable.Builder indexRoutingTable, ClusterStateWaitStep.Result expectedResult) {
+        IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()).settings(existingSettings).numberOfShards(shards)
+                .numberOfReplicas(replicas).build();
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.<String, IndexMetaData>builder()
+                .fPut(index.getName(), indexMetadata);
+
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build()))
+                .nodes(DiscoveryNodes.builder()
+                        .add(DiscoveryNode.createLocal(node1Settings.build(), new TransportAddress(TransportAddress.META_ADDRESS, 9200),
+                                "node1"))
+                        .add(DiscoveryNode.createLocal(node2Settings.build(), new TransportAddress(TransportAddress.META_ADDRESS, 9201),
+                                "node2")))
+                .routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
+        Result actualResult = step.isConditionMet(index, clusterState);
+        assertEquals(expectedResult.isComplete(), actualResult.isComplete());
+        assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext());
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java
new file mode 100644
index 0000000000000..f7a6687dd987c
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java
@@ -0,0 +1,323 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
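The forEach blocks in the tests above all build setting keys the same way: a routing-allocation group prefix plus a node attribute name. A small runnable sketch of that key composition; the attribute names and values here are made up.

import org.elasticsearch.common.settings.Settings;

public class RoutingKeyExample {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                // full key = group prefix + attribute name
                .put("index.routing.allocation.include.box_type", "hot")
                .put("index.routing.allocation.require._id", "node1")
                .build();
        // Prints "hot": lookups use the same concatenated key.
        System.out.println(settings.get("index.routing.allocation.include.box_type"));
    }
}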
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; + +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; + +public class CheckShrinkReadyStepTests extends AbstractStepTestCase { + + @Override + public CheckShrinkReadyStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + Step.StepKey nextStepKey = randomStepKey(); + + return new CheckShrinkReadyStep(stepKey, nextStepKey); + } + + @Override + public CheckShrinkReadyStep mutateInstance(CheckShrinkReadyStep instance) { + Step.StepKey key = instance.getKey(); + Step.StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new CheckShrinkReadyStep(key, nextKey); + } + + @Override + public CheckShrinkReadyStep copyInstance(CheckShrinkReadyStep instance) { + return new CheckShrinkReadyStep(instance.getKey(), instance.getNextStepKey()); + } + + public void testNoSetting() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = createRandomInstance(); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + }); + assertThat(e.getMessage(), containsString("Cannot check shrink 
allocation as there are no allocation rules by _id")); + } + + public void testConditionMet() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + } + + public void testConditionMetOnlyOneCopyAllocated() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = new CheckShrinkReadyStep(randomStepKey(), randomStepKey()); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(true, null)); + } + + public void testConditionNotMetDueToRelocation() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + 
requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + boolean primaryOnNode1 = randomBoolean(); + ShardRouting shardOnNode1 = TestShardRouting.newShardRouting(new ShardId(index, 0), + "node1", primaryOnNode1, ShardRoutingState.STARTED); + shardOnNode1 = shardOnNode1.relocate("node3", 230); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(shardOnNode1) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = new CheckShrinkReadyStep(randomStepKey(), randomStepKey()); + assertAllocateStatus(index, 1, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new CheckShrinkReadyStep.Info("node1", 1, 1))); + } + + public void testExecuteAllocateNotComplete() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), "node2", true, ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new CheckShrinkReadyStep.Info("node1", 2, 1))); + } + + public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + boolean primaryOnNode1 = randomBoolean(); + IndexRoutingTable.Builder 
indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, + ShardRoutingState.STARTED)); + + CheckShrinkReadyStep step = new CheckShrinkReadyStep(randomStepKey(), randomStepKey()); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new CheckShrinkReadyStep.Info("node1", 2, 1))); + } + + public void testExecuteAllocateUnassigned() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = AllocateActionTests.randomMap(1, 5); + Settings.Builder existingSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1") + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + existingSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), null, null, true, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "the shard is intentionally unassigned"))); + + CheckShrinkReadyStep step = createRandomInstance(); + assertAllocateStatus(index, 2, 0, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new CheckShrinkReadyStep.Info("", 2, -1))); + } + + /** + * this tests the scenario where + * + * PUT index + * { + * "settings": { + * "number_of_replicas": 0, + * "number_of_shards": 1 + * } + * } + * + * PUT index/_settings + * { + * "number_of_replicas": 1, + * "index.routing.allocation.include._id": "{node-name}" + * } + */ + public void testExecuteReplicasNotAllocatedOnSingleNode() { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Map requires = Collections.singletonMap("_id", "node1"); + Settings.Builder existingSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + Settings.Builder expectedSettings = Settings.builder(); + Settings.Builder node1Settings = Settings.builder(); + Settings.Builder node2Settings = Settings.builder(); + requires.forEach((k, v) -> { + expectedSettings.put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v); + }); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED)) + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), null, null, false, ShardRoutingState.UNASSIGNED, + new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, "no 
attempt"))); + + CheckShrinkReadyStep step = createRandomInstance(); + assertAllocateStatus(index, 1, 1, step, existingSettings, node1Settings, node2Settings, indexRoutingTable, + new ClusterStateWaitStep.Result(false, new CheckShrinkReadyStep.Info("", 1, -1))); + } + + public void testExecuteIndexMissing() throws Exception { + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).build(); + + CheckShrinkReadyStep step = createRandomInstance(); + + ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); + assertFalse(actualResult.isComplete()); + assertNull(actualResult.getInfomationContext()); + } + + private void assertAllocateStatus(Index index, int shards, int replicas, CheckShrinkReadyStep step, Settings.Builder existingSettings, + Settings.Builder node1Settings, Settings.Builder node2Settings, + IndexRoutingTable.Builder indexRoutingTable, ClusterStateWaitStep.Result expectedResult) { + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()).settings(existingSettings).numberOfShards(shards) + .numberOfReplicas(replicas).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder().fPut(index.getName(), + indexMetadata); + + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) + .nodes(DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(Settings.builder().put(node1Settings.build()) + .put(Node.NODE_NAME_SETTING.getKey(), "node1").build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9200), + "node1")) + .add(DiscoveryNode.createLocal(Settings.builder().put(node2Settings.build()) + .put(Node.NODE_NAME_SETTING.getKey(), "node2").build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9201), + "node2"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); + assertEquals(expectedResult.isComplete(), actualResult.isComplete()); + assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java new file mode 100644 index 0000000000000..40dd022c05de6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CopyExecutionStateStepTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Map; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionStateTests.createCustomMetadata; + +public class CopyExecutionStateStepTests extends AbstractStepTestCase { + @Override + protected CopyExecutionStateStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String shrunkIndexPrefix = randomAlphaOfLength(10); + return new CopyExecutionStateStep(stepKey, nextStepKey, shrunkIndexPrefix); + } + + @Override + protected CopyExecutionStateStep mutateInstance(CopyExecutionStateStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String shrunkIndexPrefix = instance.getShrunkIndexPrefix(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + shrunkIndexPrefix += randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new CopyExecutionStateStep(key, nextKey, shrunkIndexPrefix); + } + + @Override + protected CopyExecutionStateStep copyInstance(CopyExecutionStateStep instance) { + return new CopyExecutionStateStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix()); + } + + public void testPerformAction() { + CopyExecutionStateStep step = createRandomInstance(); + String indexName = randomAlphaOfLengthBetween(5, 20); + Map customMetadata = createCustomMetadata(); + + IndexMetaData originalIndexMetaData = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1,5)) + .numberOfReplicas(randomIntBetween(1,5)) + .putCustom(ILM_CUSTOM_METADATA_KEY, customMetadata) + .build(); + IndexMetaData shrunkIndexMetaData = IndexMetaData.builder(step.getShrunkIndexPrefix() + indexName) + .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1,5)) + .numberOfReplicas(randomIntBetween(1,5)) + .build(); + ClusterState originalClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder() + .put(originalIndexMetaData, false) + .put(shrunkIndexMetaData, false)) + .build(); + + ClusterState newClusterState = step.performAction(originalIndexMetaData.getIndex(), originalClusterState); + + LifecycleExecutionState oldIndexData = LifecycleExecutionState.fromIndexMetadata(originalIndexMetaData); + LifecycleExecutionState newIndexData = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(step.getShrunkIndexPrefix() + indexName)); + + assertEquals(oldIndexData.getLifecycleDate(), newIndexData.getLifecycleDate()); + assertEquals(oldIndexData.getPhase(), newIndexData.getPhase()); + assertEquals(oldIndexData.getAction(), newIndexData.getAction()); + assertEquals(ShrunkenIndexCheckStep.NAME, newIndexData.getStep()); + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteActionTests.java
new file mode 100644
index 0000000000000..3286ce0225a39
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteActionTests.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import java.io.IOException;
+import java.util.List;
+
+public class DeleteActionTests extends AbstractActionTestCase<DeleteAction> {
+
+    @Override
+    protected DeleteAction doParseInstance(XContentParser parser) throws IOException {
+        return DeleteAction.parse(parser);
+    }
+
+    @Override
+    protected DeleteAction createTestInstance() {
+        return new DeleteAction();
+    }
+
+    @Override
+    protected Reader<DeleteAction> instanceReader() {
+        return DeleteAction::new;
+    }
+
+    public void testToSteps() {
+        DeleteAction action = createTestInstance();
+        String phase = randomAlphaOfLengthBetween(1, 10);
+        StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+                randomAlphaOfLengthBetween(1, 10));
+        List<Step> steps = action.toSteps(null, phase, nextStepKey);
+        assertNotNull(steps);
+        assertEquals(1, steps.size());
+        StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME);
+        DeleteStep firstStep = (DeleteStep) steps.get(0);
+        assertEquals(expectedFirstStepKey, firstStep.getKey());
+        assertEquals(nextStepKey, firstStep.getNextStepKey());
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java
new file mode 100644
index 0000000000000..c85df6de659e8
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/DeleteStepTests.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
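Every toSteps() test in this diff re-asserts the same chaining property by hand: consecutive steps point at each other, and the final step defers to the caller-supplied key. A sketch of that invariant as a helper; it uses the Step accessors exercised above but is illustrative, not part of the change.

import java.util.List;

import org.elasticsearch.xpack.core.indexlifecycle.Step;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;

final class StepChainAssertions {
    // Asserts that steps form a chain ending at nextStepKey.
    static void assertChained(List<Step> steps, StepKey nextStepKey) {
        for (int i = 0; i < steps.size() - 1; i++) {
            // each step must point at its successor
            org.junit.Assert.assertEquals(steps.get(i + 1).getKey(), steps.get(i).getNextStepKey());
        }
        org.junit.Assert.assertEquals(nextStepKey, steps.get(steps.size() - 1).getNextStepKey());
    }
}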
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.hamcrest.Matchers.equalTo; + +public class DeleteStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public DeleteStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + return new DeleteStep(stepKey, nextStepKey, client); + } + + @Override + public DeleteStep mutateInstance(DeleteStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new DeleteStep(key, nextKey, instance.getClient()); + } + + @Override + public DeleteStep copyInstance(DeleteStep instance) { + return new DeleteStep(instance.getKey(), instance.getNextStepKey(), instance.getClient()); + } + + public void testIndexSurvives() { + assertFalse(createRandomInstance().indexSurvives()); + } + + public void testDeleted() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(invocation -> { + DeleteIndexRequest request = (DeleteIndexRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(indexMetaData.getIndex().getName(), request.indices()[0]); + listener.onResponse(null); + return null; + }).when(indicesClient).delete(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + + DeleteStep step = createRandomInstance(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }); + + assertThat(actionCompleted.get(), equalTo(true)); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).delete(Mockito.any(), Mockito.any()); 
+ } + + public void testExceptionThrown() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + DeleteIndexRequest request = (DeleteIndexRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(indexMetaData.getIndex().getName(), request.indices()[0]); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).delete(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + DeleteStep step = createRandomInstance(); + step.performAction(indexMetaData, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertEquals(exception, e); + exceptionThrown.set(true); + } + }); + + assertThat(exceptionThrown.get(), equalTo(true)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java new file mode 100644 index 0000000000000..5bd0cdf230da5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ErrorStepTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
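The stubbing pattern in testDeleted and testExceptionThrown above is worth isolating: capture the ActionListener from the invocation arguments and complete it inline, so the asynchronous client call resolves synchronously inside the test. A sketch against a hypothetical SomeClient interface standing in for IndicesAdminClient.

import org.elasticsearch.action.ActionListener;
import org.mockito.Mockito;

final class AsyncMockSketch {
    // Hypothetical async API; the real tests stub IndicesAdminClient#delete.
    interface SomeClient {
        void execute(String request, ActionListener<Void> listener);
    }

    static SomeClient mockThatSucceeds() {
        SomeClient client = Mockito.mock(SomeClient.class);
        Mockito.doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            ActionListener<Void> listener = (ActionListener<Void>) invocation.getArguments()[1];
            listener.onResponse(null); // or listener.onFailure(...) for the error path
            return null;
        }).when(client).execute(Mockito.any(), Mockito.any());
        return client;
    }
}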
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class ErrorStepTests extends AbstractStepTestCase { + + @Override + public ErrorStep createRandomInstance() { + StepKey stepKey = new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), ErrorStep.NAME); + return new ErrorStep(stepKey); + } + + @Override + public ErrorStep mutateInstance(ErrorStep instance) { + StepKey key = instance.getKey(); + assertSame(instance.getNextStepKey(), instance.getKey()); + + key = new StepKey(key.getPhase(), key.getAction() + randomAlphaOfLength(5), key.getName()); + + return new ErrorStep(key); + } + + @Override + public ErrorStep copyInstance(ErrorStep instance) { + assertSame(instance.getNextStepKey(), instance.getKey()); + return new ErrorStep(instance.getKey()); + } + + public void testInvalidStepKey() { + StepKey invalidKey = randomStepKey(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new ErrorStep(invalidKey)); + assertEquals("An error step must have a step key whose step name is " + ErrorStep.NAME, exception.getMessage()); + } + + @Override + public void testStepNameNotError() { + // Need to override this test because this is the one special step that + // is allowed to have ERROR as the step name + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java new file mode 100644 index 0000000000000..e520e0d1f19fb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
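testInvalidStepKey above checks a fixed-name constraint; reduced to its essence, the guard looks like this sketch (the class and message wording are illustrative):

final class FixedNameKeyCheck {
    static final String NAME = "ERROR";

    // A step type whose name component is fixed rejects any other key up front.
    static void validate(String stepName) {
        if (NAME.equals(stepName) == false) {
            throw new IllegalArgumentException("An error step must have a step key whose step name is " + NAME);
        }
    }
}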
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.Arrays; + +public class ExplainLifecycleRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected ExplainLifecycleRequest createTestInstance() { + ExplainLifecycleRequest request = new ExplainLifecycleRequest(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false, true)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + @Override + protected ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) throws IOException { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + @Override + protected Reader instanceReader() { + return ExplainLifecycleRequest::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java new file mode 100644 index 0000000000000..8b64e1128c020 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleResponseTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
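The instanceReader() override above feeds the wire-serialization harness, which in outline performs a round trip like the following sketch and then compares via equals/hashCode. The StreamInput constructor is the one implied by the ExplainLifecycleRequest::new method reference.

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest;

final class WireRoundTripSketch {
    // Serialize the request to a buffer, then read it back.
    static ExplainLifecycleRequest roundTrip(ExplainLifecycleRequest original) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return new ExplainLifecycleRequest(in);
            }
        }
    }
}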
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ExplainLifecycleResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected ExplainLifecycleResponse createTestInstance() { + Map indexResponses = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 2); i++) { + IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse(); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + return new ExplainLifecycleResponse(indexResponses); + } + + @Override + protected ExplainLifecycleResponse createBlankInstance() { + return new ExplainLifecycleResponse(); + } + + @Override + protected ExplainLifecycleResponse mutateInstance(ExplainLifecycleResponse response) { + Map indexResponses = new HashMap<>(response.getIndexResponses()); + IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse(); + indexResponses.put(indexResponse.getIndex(), indexResponse); + return new ExplainLifecycleResponse(indexResponses); + } + + @Override + protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException { + return ExplainLifecycleResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Arrays + .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeActionTests.java new file mode 100644 index 0000000000000..3772124385c46 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeActionTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
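The two registry overrides above pair up as follows: the writeable entry keys on the name written to the wire, while the xcontent entry keys on the field name parsed from JSON. A sketch reusing the same MockAction entries; it assumes placement in the same package as LifecycleAction and MockAction.

import java.util.Arrays;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

final class RegistrySketch {
    // Wire-format lookup: name string -> stream constructor.
    static NamedWriteableRegistry writeables() {
        return new NamedWriteableRegistry(Arrays.asList(
                new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)));
    }

    // JSON lookup: field name -> parser.
    static NamedXContentRegistry xContent() {
        return new NamedXContentRegistry(Arrays.asList(
                new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)));
    }
}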
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ForceMergeActionTests extends AbstractActionTestCase { + + @Override + protected ForceMergeAction doParseInstance(XContentParser parser) { + return ForceMergeAction.parse(parser); + } + + @Override + protected ForceMergeAction createTestInstance() { + return randomInstance(); + } + + static ForceMergeAction randomInstance() { + return new ForceMergeAction(randomIntBetween(1, 100)); + } + + @Override + protected ForceMergeAction mutateInstance(ForceMergeAction instance) { + int maxNumSegments = instance.getMaxNumSegments(); + maxNumSegments = maxNumSegments + randomIntBetween(1, 10); + return new ForceMergeAction(maxNumSegments); + } + + @Override + protected Reader instanceReader() { + return ForceMergeAction::new; + } + + public void testMissingMaxNumSegments() throws IOException { + BytesReference emptyObject = BytesReference.bytes(JsonXContent.contentBuilder().startObject().endObject()); + XContentParser parser = XContentHelper.createParser(null, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + emptyObject, XContentType.JSON); + Exception e = expectThrows(IllegalArgumentException.class, () -> ForceMergeAction.parse(parser)); + assertThat(e.getMessage(), equalTo("Required [max_num_segments]")); + } + + public void testInvalidNegativeSegmentNumber() { + Exception r = expectThrows(IllegalArgumentException.class, () -> new ForceMergeAction(randomIntBetween(-10, 0))); + assertThat(r.getMessage(), equalTo("[max_num_segments] must be a positive integer")); + } + + public void testToSteps() { + ForceMergeAction instance = createTestInstance(); + String phase = randomAlphaOfLength(5); + StepKey nextStepKey = new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10)); + List steps = instance.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(3, steps.size()); + UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0); + ForceMergeStep secondStep = (ForceMergeStep) steps.get(1); + SegmentCountStep thirdStep = (SegmentCountStep) steps.get(2); + assertThat(firstStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ReadOnlyAction.NAME))); + assertThat(firstStep.getNextStepKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ForceMergeStep.NAME))); + assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(firstStep.getSettings())); + assertThat(secondStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, ForceMergeStep.NAME))); + assertThat(secondStep.getNextStepKey(), equalTo(thirdStep.getKey())); + assertThat(thirdStep.getKey(), equalTo(new StepKey(phase, ForceMergeAction.NAME, SegmentCountStep.NAME))); + assertThat(thirdStep.getNextStepKey(), equalTo(nextStepKey)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java new file mode 100644 index 0000000000000..9a38ddf3a2677 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ForceMergeStepTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.mockito.Mockito; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ForceMergeStepTests extends AbstractStepTestCase { + + @Override + public ForceMergeStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + + return new ForceMergeStep(stepKey, nextStepKey, null, maxNumSegments); + } + + @Override + public ForceMergeStep mutateInstance(ForceMergeStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + int maxNumSegments = instance.getMaxNumSegments(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxNumSegments += 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new ForceMergeStep(key, nextKey, instance.getClient(), maxNumSegments); + } + + @Override + public ForceMergeStep copyInstance(ForceMergeStep instance) { + return new ForceMergeStep(instance.getKey(), instance.getNextStepKey(), + instance.getClient(), instance.getMaxNumSegments()); + } + + public void testPerformActionComplete() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + Client client = mock(Client.class); + AdminClient adminClient = mock(AdminClient.class); + IndicesAdminClient indicesClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesClient); + ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.doAnswer(invocationOnMock -> { + ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; + 
assertThat(request.maxNumSegments(), equalTo(maxNumSegments));
+            @SuppressWarnings("unchecked")
+            ActionListener<ForceMergeResponse> listener = (ActionListener<ForceMergeResponse>) invocationOnMock.getArguments()[1];
+            listener.onResponse(forceMergeResponse);
+            return null;
+        }).when(indicesClient).forceMerge(any(), any());
+
+        ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments);
+        SetOnce<Boolean> completed = new SetOnce<>();
+        step.performAction(indexMetaData, null, new AsyncActionStep.Listener() {
+            @Override
+            public void onResponse(boolean complete) {
+                completed.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("unexpected method call", e);
+            }
+        });
+        assertThat(completed.get(), equalTo(true));
+    }
+
+    public void testPerformActionThrowsException() {
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        Exception exception = new RuntimeException("error");
+        Step.StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        int maxNumSegments = randomIntBetween(1, 10);
+        Client client = mock(Client.class);
+        AdminClient adminClient = mock(AdminClient.class);
+        IndicesAdminClient indicesClient = mock(IndicesAdminClient.class);
+        when(client.admin()).thenReturn(adminClient);
+        when(adminClient.indices()).thenReturn(indicesClient);
+        ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class);
+        Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK);
+        Mockito.doAnswer(invocationOnMock -> {
+            ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0];
+            assertThat(request.indices().length, equalTo(1));
+            assertThat(request.indices()[0], equalTo(indexMetaData.getIndex().getName()));
+            assertThat(request.maxNumSegments(), equalTo(maxNumSegments));
+            @SuppressWarnings("unchecked")
+            ActionListener<ForceMergeResponse> listener = (ActionListener<ForceMergeResponse>) invocationOnMock.getArguments()[1];
+            listener.onFailure(exception);
+            return null;
+        }).when(indicesClient).forceMerge(any(), any());
+
+        ForceMergeStep step = new ForceMergeStep(stepKey, nextStepKey, client, maxNumSegments);
+        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
+        step.performAction(indexMetaData, null, new AsyncActionStep.Listener() {
+            @Override
+            public void onResponse(boolean complete) {
+                throw new AssertionError("unexpected method call");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertEquals(exception, e);
+                exceptionThrown.set(true);
+            }
+        });
+        assertThat(exceptionThrown.get(), equalTo(true));
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java
new file mode 100644
index 0000000000000..90915d66e5d47
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexExplainResponseTests.java
@@ -0,0 +1,204 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class IndexExplainResponseTests extends AbstractSerializingTestCase { + + static IndexLifecycleExplainResponse randomIndexExplainResponse() { + if (frequently()) { + return randomManagedIndexExplainResponse(); + } else { + return randomUnmanagedIndexExplainResponse(); + } + } + + private static IndexLifecycleExplainResponse randomUnmanagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(randomAlphaOfLength(10)); + } + + private static IndexLifecycleExplainResponse randomManagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newManagedIndexResponse(randomAlphaOfLength(10), randomAlphaOfLength(10), + randomNonNegativeLong(), randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), + randomBoolean() ? 
null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + } + + @Override + protected IndexLifecycleExplainResponse createTestInstance() { + return randomIndexExplainResponse(); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleExplainResponse::new; + } + + @Override + protected IndexLifecycleExplainResponse doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleExplainResponse.PARSER.apply(parser, null); + } + + @Override + protected IndexLifecycleExplainResponse mutateInstance(IndexLifecycleExplainResponse instance) throws IOException { + String index = instance.getIndex(); + String policy = instance.getPolicyName(); + String phase = instance.getPhase(); + String action = instance.getAction(); + String step = instance.getStep(); + String failedStep = instance.getFailedStep(); + Long policyTime = instance.getLifecycleDate(); + Long phaseTime = instance.getPhaseTime(); + Long actionTime = instance.getActionTime(); + Long stepTime = instance.getStepTime(); + boolean managed = instance.managedByILM(); + BytesReference stepInfo = instance.getStepInfo(); + PhaseExecutionInfo phaseExecutionInfo = instance.getPhaseExecutionInfo(); + if (managed) { + switch (between(0, 12)) { + case 0: + index = index + randomAlphaOfLengthBetween(1, 5); + break; + case 1: + policy = policy + randomAlphaOfLengthBetween(1, 5); + break; + case 2: + phase = phase + randomAlphaOfLengthBetween(1, 5); + break; + case 3: + action = action + randomAlphaOfLengthBetween(1, 5); + break; + case 4: + step = step + randomAlphaOfLengthBetween(1, 5); + break; + case 5: + if (Strings.hasLength(failedStep) == false) { + failedStep = randomAlphaOfLength(10); + } else if (randomBoolean()) { + failedStep = failedStep + randomAlphaOfLengthBetween(1, 5); + } else { + failedStep = null; + } + break; + case 6: + policyTime += randomLongBetween(0, 100000); + break; + case 7: + phaseTime += randomLongBetween(0, 100000); + break; + case 8: + actionTime += randomLongBetween(0, 100000); + break; + case 9: + stepTime += randomLongBetween(0, 100000); + break; + case 10: + if (Strings.hasLength(stepInfo) == false) { + stepInfo = new BytesArray(randomByteArrayOfLength(100)); + } else if (randomBoolean()) { + stepInfo = randomValueOtherThan(stepInfo, + () -> new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString())); + } else { + stepInfo = null; + } + break; + case 11: + phaseExecutionInfo = randomValueOtherThan(phaseExecutionInfo, () -> PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); + break; + case 12: + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); + default: + throw new AssertionError("Illegal randomisation branch"); + } + return IndexLifecycleExplainResponse.newManagedIndexResponse(index, policy, policyTime, phase, action, step, failedStep, + phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo); + } else { + switch (between(0, 1)) { + case 0: + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index + randomAlphaOfLengthBetween(1, 5)); + case 1: + return randomManagedIndexExplainResponse(); + default: + throw new AssertionError("Illegal randomisation branch"); + } + } + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Arrays + .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new 
ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)); + return new NamedXContentRegistry(entries); + } + + private static class RandomStepInfo implements ToXContentObject { + + private final String key; + private final String value; + + RandomStepInfo(Supplier randomStringSupplier) { + this.key = randomStringSupplier.get(); + this.value = randomStringSupplier.get(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(key, value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(key, value); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RandomStepInfo other = (RandomStepInfo) obj; + return Objects.equals(key, other.key) && Objects.equals(value, other.value); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java new file mode 100644 index 0000000000000..1035eb7a7462c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class IndexLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected IndexLifecycleFeatureSetUsage createTestInstance() { + boolean enabled = randomBoolean(); + boolean available = randomBoolean(); + List policyStats = null; + if (enabled) { + int size = randomIntBetween(0, 10); + policyStats = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + policyStats.add(PolicyStatsTests.createRandomInstance()); + } + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected IndexLifecycleFeatureSetUsage mutateInstance(IndexLifecycleFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + List policyStats = instance.getPolicyStats(); + switch (between(0, 2)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + case 2: + if (policyStats == null) { + policyStats = new ArrayList<>(); + policyStats.add(PolicyStatsTests.createRandomInstance()); + } else if (randomBoolean()) { + policyStats = null; + } else { + policyStats = new ArrayList<>(policyStats); + policyStats.add(PolicyStatsTests.createRandomInstance()); + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java new file mode 100644 index 0000000000000..5dbeee07fe75f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/InitializePolicyContextStepTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; + +public class InitializePolicyContextStepTests extends AbstractStepTestCase { + + @Override + public InitializePolicyContextStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + return new InitializePolicyContextStep(stepKey, nextStepKey); + } + + @Override + public InitializePolicyContextStep mutateInstance(InitializePolicyContextStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new InitializePolicyContextStep(key, nextKey); + } + + @Override + public InitializePolicyContextStep copyInstance(InitializePolicyContextStep instance) { + return new InitializePolicyContextStep(instance.getKey(), instance.getNextStepKey()); + } + + public void testAddCreationDate() { + long creationDate = randomNonNegativeLong(); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT)) + .creationDate(creationDate) + .numberOfShards(1).numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + Index index = indexMetadata.getIndex(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + InitializePolicyContextStep step = new InitializePolicyContextStep(null, null); + ClusterState newState = step.performAction(index, clusterState); + assertThat(getIndexLifecycleDate(index, newState), equalTo(creationDate)); + } + + public void testDoNothing() { + long creationDate = randomNonNegativeLong(); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setIndexCreationDate(creationDate); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .creationDate(creationDate) + .numberOfShards(1).numberOfReplicas(0).build(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + Index index = indexMetadata.getIndex(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + InitializePolicyContextStep step = new InitializePolicyContextStep(null, null); + ClusterState newState = step.performAction(index, clusterState); + assertTrue(newState == clusterState); + } + + private long getIndexLifecycleDate(Index index, ClusterState clusterState) { + return 
LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index)).getLifecycleDate();
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java
new file mode 100644
index 0000000000000..e8276ad06ead2
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecycleExecutionStateTests.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class LifecycleExecutionStateTests extends ESTestCase {
+
+    public void testConversion() {
+        Map<String, String> customMetadata = createCustomMetadata();
+        LifecycleExecutionState parsed = LifecycleExecutionState.fromCustomMetadata(customMetadata);
+        assertEquals(customMetadata, parsed.asMap());
+    }
+
+    public void testEmptyValuesAreNotSerialized() {
+        LifecycleExecutionState empty = LifecycleExecutionState.builder().build();
+        assertEquals(new HashMap<String, String>().entrySet(), empty.asMap().entrySet());
+
+        Map<String, String> originalMap = createCustomMetadata();
+        LifecycleExecutionState originalState = LifecycleExecutionState.fromCustomMetadata(originalMap);
+        LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(originalState);
+        newState.setPhase(null);
+        assertFalse(newState.build().asMap().containsKey("phase"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setAction(null);
+        assertFalse(newState.build().asMap().containsKey("action"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setStep(null);
+        assertFalse(newState.build().asMap().containsKey("step"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setFailedStep(null);
+        assertFalse(newState.build().asMap().containsKey("failed_step"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setPhaseDefinition(null);
+        assertFalse(newState.build().asMap().containsKey("phase_definition"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setStepInfo(null);
+        assertFalse(newState.build().asMap().containsKey("step_info"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setPhaseTime(null);
+        assertFalse(newState.build().asMap().containsKey("phase_time"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setActionTime(null);
+        assertFalse(newState.build().asMap().containsKey("action_time"));
+
+        newState = LifecycleExecutionState.builder(originalState);
+        newState.setIndexCreationDate(null);
+        assertFalse(newState.build().asMap().containsKey("creation_date"));
+    }
+
+    public void testEqualsAndHashcode() {
+        LifecycleExecutionState original = LifecycleExecutionState.fromCustomMetadata(createCustomMetadata());
+        EqualsHashCodeTestUtils.checkEqualsAndHashCode(
+            original,
+            toCopy -> LifecycleExecutionState.builder(toCopy).build(),
+            LifecycleExecutionStateTests::mutate);
+    }
+
+    private static LifecycleExecutionState mutate(LifecycleExecutionState toMutate) {
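+        // Randomly flip a subset of fields; if nothing changed, return an empty state so the mutated copy is never equal to the original.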
LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(toMutate); + boolean changed = false; + if (randomBoolean()) { + newState.setPhase(randomValueOtherThan(toMutate.getPhase(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setAction(randomValueOtherThan(toMutate.getAction(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setStep(randomValueOtherThan(toMutate.getStep(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setPhaseDefinition(randomValueOtherThan(toMutate.getPhaseDefinition(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setFailedStep(randomValueOtherThan(toMutate.getFailedStep(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setStepInfo(randomValueOtherThan(toMutate.getStepInfo(), () -> randomAlphaOfLengthBetween(5, 20))); + changed = true; + } + if (randomBoolean()) { + newState.setPhaseTime(randomValueOtherThan(toMutate.getPhaseTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + newState.setActionTime(randomValueOtherThan(toMutate.getActionTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + newState.setStepTime(randomValueOtherThan(toMutate.getStepTime(), ESTestCase::randomLong)); + changed = true; + } + if (randomBoolean()) { + newState.setIndexCreationDate(randomValueOtherThan(toMutate.getLifecycleDate(), ESTestCase::randomLong)); + changed = true; + } + + if (changed == false) { + return LifecycleExecutionState.builder().build(); + } + + return newState.build(); + } + + static Map createCustomMetadata() { + String phase = randomAlphaOfLengthBetween(5,20); + String action = randomAlphaOfLengthBetween(5,20); + String step = randomAlphaOfLengthBetween(5,20); + String failedStep = randomAlphaOfLengthBetween(5,20); + String stepInfo = randomAlphaOfLengthBetween(15,50); + String phaseDefinition = randomAlphaOfLengthBetween(15,50); + long indexCreationDate = randomLong(); + long phaseTime = randomLong(); + long actionTime = randomLong(); + long stepTime = randomLong(); + + Map customMetadata = new HashMap<>(); + customMetadata.put("phase", phase); + customMetadata.put("action", action); + customMetadata.put("step", step); + customMetadata.put("failed_step", failedStep); + customMetadata.put("step_info", stepInfo); + customMetadata.put("phase_definition", phaseDefinition); + customMetadata.put("creation_date", String.valueOf(indexCreationDate)); + customMetadata.put("phase_time", String.valueOf(phaseTime)); + customMetadata.put("action_time", String.valueOf(actionTime)); + customMetadata.put("step_time", String.valueOf(stepTime)); + return customMetadata; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadataTests.java new file mode 100644 index 0000000000000..5cb75e132ce92 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyMetadataTests.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase { + + private String lifecycleName; + + @Before + public void setup() { + lifecycleName = randomAlphaOfLength(20); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new) + )); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + @Override + protected LifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException { + return LifecyclePolicyMetadata.parse(parser, lifecycleName); + } + + @Override + protected LifecyclePolicyMetadata createTestInstance() { + Map headers = new HashMap<>(); + int numberHeaders = between(0, 10); + for (int i = 0; i < numberHeaders; i++) { + headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + return new LifecyclePolicyMetadata(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName), 
headers, + randomNonNegativeLong(), randomNonNegativeLong()); + } + + @Override + protected Reader instanceReader() { + return LifecyclePolicyMetadata::new; + } + + @Override + protected LifecyclePolicyMetadata mutateInstance(LifecyclePolicyMetadata instance) throws IOException { + LifecyclePolicy policy = instance.getPolicy(); + Map headers = instance.getHeaders(); + long version = instance.getVersion(); + long creationDate = instance.getModifiedDate(); + switch (between(0, 3)) { + case 0: + policy = new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, policy.getName() + randomAlphaOfLengthBetween(1, 5), + policy.getPhases()); + break; + case 1: + headers = new HashMap<>(headers); + headers.put(randomAlphaOfLength(11), randomAlphaOfLength(11)); + break; + case 2: + version++; + break; + case 3: + creationDate++; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new LifecyclePolicyMetadata(policy, headers, version, creationDate); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTests.java new file mode 100644 index 0000000000000..9d90cc025b0e3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTests.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class LifecyclePolicyTests extends AbstractSerializingTestCase { + + private String lifecycleName; + + @Override + protected LifecyclePolicy doParseInstance(XContentParser parser) { + return LifecyclePolicy.parse(parser, lifecycleName); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, 
ForceMergeAction::new),
+                new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new),
+                new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new),
+                new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new)
+            ));
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
+        entries.addAll(Arrays.asList(
+            new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE),
+                (p) -> TimeseriesLifecycleType.INSTANCE),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse)
+        ));
+        return new NamedXContentRegistry(entries);
+    }
+
+    @Override
+    protected LifecyclePolicy createTestInstance() {
+        lifecycleName = randomAlphaOfLength(5);
+        return randomTimeseriesLifecyclePolicy(lifecycleName);
+    }
+
+    /**
+     * The same as {@link #randomTimeseriesLifecyclePolicy(String)} but ensures
+     * that the resulting policy has all valid phases and all valid actions.
+     */
+    public static LifecyclePolicy randomTimeseriesLifecyclePolicyWithAllPhases(@Nullable String lifecycleName) {
+        List<String> phaseNames = TimeseriesLifecycleType.VALID_PHASES;
+        Map<String, Phase> phases = new HashMap<>(phaseNames.size());
+        Function<String, Set<String>> validActions = (phase) -> {
+            switch (phase) {
+            case "hot":
+                return TimeseriesLifecycleType.VALID_HOT_ACTIONS;
+            case "warm":
+                return TimeseriesLifecycleType.VALID_WARM_ACTIONS;
+            case "cold":
+                return TimeseriesLifecycleType.VALID_COLD_ACTIONS;
+            case "delete":
+                return TimeseriesLifecycleType.VALID_DELETE_ACTIONS;
+            default:
+                throw new IllegalArgumentException("invalid phase [" + phase + "]");
+            }};
+        Function<String, LifecycleAction> randomAction = (action) -> {
+            switch (action) {
+            case AllocateAction.NAME:
+                return AllocateActionTests.randomInstance();
+            case DeleteAction.NAME:
+                return new DeleteAction();
+            case ForceMergeAction.NAME:
+                return ForceMergeActionTests.randomInstance();
+            case ReadOnlyAction.NAME:
+                return new ReadOnlyAction();
+            case RolloverAction.NAME:
+                return RolloverActionTests.randomInstance();
+            case ShrinkAction.NAME:
+                return ShrinkActionTests.randomInstance();
+            default:
+                throw new IllegalArgumentException("invalid action [" + action + "]");
+            }};
+        for (String phase : phaseNames) {
+            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+            Map<String, LifecycleAction> actions = new HashMap<>();
+            Set<String> actionNames = validActions.apply(phase);
+            for (String action : actionNames) {
+                actions.put(action, randomAction.apply(action));
+            }
+            phases.put(phase, new Phase(phase, after, actions));
+        }
+        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, lifecycleName, phases);
+    }
+
+    public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String lifecycleName) {
+        List<String> phaseNames = randomSubsetOf(TimeseriesLifecycleType.VALID_PHASES);
+        Map<String, Phase> phases = new HashMap<>(phaseNames.size());
+        Function<String, Set<String>> validActions = (phase) -> {
+            switch (phase) {
+            case "hot":
+                return TimeseriesLifecycleType.VALID_HOT_ACTIONS;
+            case "warm":
+                return TimeseriesLifecycleType.VALID_WARM_ACTIONS;
+            case "cold":
+                return TimeseriesLifecycleType.VALID_COLD_ACTIONS;
+            case "delete":
+                return TimeseriesLifecycleType.VALID_DELETE_ACTIONS;
+            default:
+                throw new IllegalArgumentException("invalid phase [" + phase + "]");
+            }};
+        Function<String, LifecycleAction> randomAction = (action) -> {
+            switch (action) {
+            case AllocateAction.NAME:
+                return AllocateActionTests.randomInstance();
+            case DeleteAction.NAME:
+                return new DeleteAction();
+            case ForceMergeAction.NAME:
+                return ForceMergeActionTests.randomInstance();
+            case ReadOnlyAction.NAME:
+                return new ReadOnlyAction();
+            case RolloverAction.NAME:
+                return RolloverActionTests.randomInstance();
+            case ShrinkAction.NAME:
+                return ShrinkActionTests.randomInstance();
+            default:
+                throw new IllegalArgumentException("invalid action [" + action + "]");
+            }};
+        for (String phase : phaseNames) {
+            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+            Map<String, LifecycleAction> actions = new HashMap<>();
+            List<String> actionNames = randomSubsetOf(validActions.apply(phase));
+            for (String action : actionNames) {
+                actions.put(action, randomAction.apply(action));
+            }
+            phases.put(phase, new Phase(phase, after, actions));
+        }
+        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, lifecycleName, phases);
+    }
+
+    public static LifecyclePolicy randomTestLifecyclePolicy(@Nullable String lifecycleName) {
+        int numberPhases = randomInt(5);
+        Map<String, Phase> phases = new HashMap<>(numberPhases);
+        for (int i = 0; i < numberPhases; i++) {
+            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+            Map<String, LifecycleAction> actions = new HashMap<>();
+            if (randomBoolean()) {
+                MockAction action = new MockAction();
+                actions.put(action.getWriteableName(), action);
+            }
+            String phaseName = randomAlphaOfLength(10);
+            phases.put(phaseName, new Phase(phaseName, after, actions));
+        }
+        return new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);
+    }
+
+    @Override
+    protected LifecyclePolicy mutateInstance(LifecyclePolicy instance) throws IOException {
+        String name = instance.getName();
+        Map<String, Phase> phases = instance.getPhases();
+        switch (between(0, 1)) {
+        case 0:
+            name = name + randomAlphaOfLengthBetween(1, 5);
+            break;
+        case 1:
+            String phaseName = randomValueOtherThanMany(phases::containsKey, () -> randomFrom(TimeseriesLifecycleType.VALID_PHASES));
+            phases = new LinkedHashMap<>(phases);
+            phases.put(phaseName, new Phase(phaseName, TimeValue.timeValueSeconds(randomIntBetween(1, 1000)), Collections.emptyMap()));
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new LifecyclePolicy(TimeseriesLifecycleType.INSTANCE, name, phases);
+    }
+
+    @Override
+    protected Reader<LifecyclePolicy> instanceReader() {
+        return LifecyclePolicy::new;
+    }
+
+    public void testFirstAndLastSteps() {
+        Client client = mock(Client.class);
+        lifecycleName = randomAlphaOfLengthBetween(1, 20);
+        Map<String, Phase> phases = new LinkedHashMap<>();
+        LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases);
+        List<Step> steps = policy.toSteps(client);
+        assertThat(steps.size(), equalTo(2));
+        assertThat(steps.get(0), instanceOf(InitializePolicyContextStep.class));
+        assertThat(steps.get(0).getKey(), equalTo(new
StepKey("new", "init", "init"))); + assertThat(steps.get(0).getNextStepKey(), equalTo(TerminalPolicyStep.KEY)); + assertSame(steps.get(1), TerminalPolicyStep.INSTANCE); + } + + public void testToStepsWithOneStep() { + Client client = mock(Client.class); + MockStep mockStep = new MockStep( + new Step.StepKey("test", "test", "test"), TerminalPolicyStep.KEY); + + lifecycleName = randomAlphaOfLengthBetween(1, 20); + Map phases = new LinkedHashMap<>(); + LifecycleAction firstAction = new MockAction(Arrays.asList(mockStep)); + Map actions = Collections.singletonMap(MockAction.NAME, firstAction); + Phase firstPhase = new Phase("test", TimeValue.ZERO, actions); + phases.put(firstPhase.getName(), firstPhase); + LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases); + StepKey firstStepKey = InitializePolicyContextStep.KEY; + StepKey secondStepKey = new StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); + List steps = policy.toSteps(client); + assertThat(steps.size(), equalTo(4)); + assertSame(steps.get(0).getKey(), firstStepKey); + assertThat(steps.get(0).getNextStepKey(), equalTo(secondStepKey)); + assertThat(steps.get(1).getKey(), equalTo(secondStepKey)); + assertThat(steps.get(1).getNextStepKey(), equalTo(mockStep.getKey())); + assertThat(steps.get(2).getKey(), equalTo(mockStep.getKey())); + assertThat(steps.get(2).getNextStepKey(), equalTo(TerminalPolicyStep.KEY)); + assertSame(steps.get(3), TerminalPolicyStep.INSTANCE); + } + + public void testToStepsWithTwoPhases() { + Client client = mock(Client.class); + MockStep secondActionStep = new MockStep(new StepKey("second_phase", "test2", "test"), TerminalPolicyStep.KEY); + MockStep secondAfter = new MockStep(new StepKey("first_phase", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME), + secondActionStep.getKey()); + MockStep firstActionAnotherStep = new MockStep(new StepKey("first_phase", "test", "bar"), secondAfter.getKey()); + MockStep firstActionStep = new MockStep(new StepKey("first_phase", "test", "foo"), firstActionAnotherStep.getKey()); + MockStep firstAfter = new MockStep(new StepKey("new", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME), firstActionStep.getKey()); + MockStep init = new MockStep(InitializePolicyContextStep.KEY, firstAfter.getKey()); + + lifecycleName = randomAlphaOfLengthBetween(1, 20); + Map phases = new LinkedHashMap<>(); + LifecycleAction firstAction = new MockAction(Arrays.asList(firstActionStep, firstActionAnotherStep)); + LifecycleAction secondAction = new MockAction(Arrays.asList(secondActionStep)); + Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); + Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); + Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); + phases.put(firstPhase.getName(), firstPhase); + phases.put(secondPhase.getName(), secondPhase); + LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases); + + List steps = policy.toSteps(client); + assertThat(steps.size(), equalTo(7)); + assertThat(steps.get(0).getClass(), equalTo(InitializePolicyContextStep.class)); + assertThat(steps.get(0).getKey(), equalTo(init.getKey())); + assertThat(steps.get(0).getNextStepKey(), equalTo(init.getNextStepKey())); + assertThat(steps.get(1).getClass(), equalTo(PhaseCompleteStep.class)); + assertThat(steps.get(1).getKey(), equalTo(firstAfter.getKey())); + 
assertThat(steps.get(1).getNextStepKey(), equalTo(firstAfter.getNextStepKey())); + assertThat(steps.get(2), equalTo(firstActionStep)); + assertThat(steps.get(3), equalTo(firstActionAnotherStep)); + assertThat(steps.get(4).getClass(), equalTo(PhaseCompleteStep.class)); + assertThat(steps.get(4).getKey(), equalTo(secondAfter.getKey())); + assertThat(steps.get(4).getNextStepKey(), equalTo(secondAfter.getNextStepKey())); + assertThat(steps.get(5), equalTo(secondActionStep)); + assertSame(steps.get(6), TerminalPolicyStep.INSTANCE); + } + + public void testIsActionSafe() { + Map phases = new LinkedHashMap<>(); + LifecycleAction firstAction = new MockAction(Collections.emptyList(), true); + LifecycleAction secondAction = new MockAction(Collections.emptyList(), false); + Map firstActions = Collections.singletonMap(MockAction.NAME, firstAction); + Map secondActions = Collections.singletonMap(MockAction.NAME, secondAction); + Phase firstPhase = new Phase("first_phase", TimeValue.ZERO, firstActions); + Phase secondPhase = new Phase("second_phase", TimeValue.ZERO, secondActions); + phases.put(firstPhase.getName(), firstPhase); + phases.put(secondPhase.getName(), secondPhase); + LifecyclePolicy policy = new LifecyclePolicy(TestLifecycleType.INSTANCE, lifecycleName, phases); + + assertTrue(policy.isActionSafe(new StepKey("first_phase", MockAction.NAME, randomAlphaOfLength(10)))); + + assertFalse(policy.isActionSafe(new StepKey("second_phase", MockAction.NAME, randomAlphaOfLength(10)))); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> policy.isActionSafe(new StepKey("non_existant_phase", MockAction.NAME, randomAlphaOfLength(10)))); + assertEquals("Phase [non_existant_phase] does not exist in policy [" + policy.getName() + "]", exception.getMessage()); + + exception = expectThrows(IllegalArgumentException.class, + () -> policy.isActionSafe(new StepKey("first_phase", "non_existant_action", randomAlphaOfLength(10)))); + assertEquals("Action [non_existant_action] in phase [first_phase] does not exist in policy [" + policy.getName() + "]", + exception.getMessage()); + + assertTrue(policy.isActionSafe(new StepKey("new", randomAlphaOfLength(10), randomAlphaOfLength(10)))); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java new file mode 100644 index 0000000000000..30eabac562606 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockAction.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+public class MockAction implements LifecycleAction {
+    public static final String NAME = "TEST_ACTION";
+    private List<Step> steps;
+
+    private static final ObjectParser<MockAction, Void> PARSER = new ObjectParser<>(NAME, MockAction::new);
+    private final boolean safe;
+
+    public static MockAction parse(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    public MockAction() {
+        this(Collections.emptyList());
+    }
+
+    public MockAction(List<Step> steps) {
+        this(steps, true);
+    }
+
+    public MockAction(List<Step> steps, boolean safe) {
+        this.steps = steps;
+        this.safe = safe;
+    }
+
+    public MockAction(StreamInput in) throws IOException {
+        this.steps = new ArrayList<>(in.readList(MockStep::new));
+        this.safe = in.readBoolean();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    public List<Step> getSteps() {
+        return steps;
+    }
+
+    @Override
+    public boolean isSafeAction() {
+        return safe;
+    }
+
+    @Override
+    public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
+        return new ArrayList<>(steps);
+    }
+
+    @Override
+    public List<StepKey> toStepKeys(String phase) {
+        return steps.stream().map(Step::getKey).collect(Collectors.toList());
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeList(steps.stream().map(MockStep::new).collect(Collectors.toList()));
+        out.writeBoolean(safe);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(steps, safe);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (obj.getClass() != getClass()) {
+            return false;
+        }
+        MockAction other = (MockAction) obj;
+        return Objects.equals(steps, other.steps) &&
+                Objects.equals(safe, other.safe);
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockActionTests.java
new file mode 100644
index 0000000000000..1f10aa051cbe4
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockActionTests.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class MockActionTests extends AbstractActionTestCase { + + @Override + protected MockAction createTestInstance() { + return new MockAction(); + } + + @Override + protected MockAction doParseInstance(XContentParser parser) throws IOException { + return MockAction.parse(parser); + } + + @Override + protected Reader instanceReader() { + return MockAction::new; + } + + @Override + protected MockAction mutateInstance(MockAction instance) throws IOException { + List steps = instance.getSteps(); + boolean safe = instance.isSafeAction(); + if (randomBoolean()) { + steps = new ArrayList<>(steps); + if (steps.size() > 0) { + Step lastStep = steps.remove(steps.size() - 1); + if (randomBoolean()) { + Step.StepKey additionalStepKey = randomStepKey(); + steps.add(new MockStep(lastStep.getKey(), additionalStepKey)); + steps.add(new MockStep(additionalStepKey, null)); + } + } else { + steps.add(new MockStep(randomStepKey(), null)); + } + } else { + safe = safe == false; + } + return new MockAction(steps, safe); + } + + private static Step.StepKey randomStepKey() { + return new Step.StepKey(randomAlphaOfLength(5), + randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + + @Override + public void testToSteps() { + int numSteps = randomIntBetween(1, 10); + List steps = new ArrayList<>(numSteps); + for (int i = 0; i < numSteps; i++) { + steps.add(new MockStep(randomStepKey(), randomStepKey())); + } + MockAction action = new MockAction(steps); + assertEquals(action.getSteps(), action.toSteps(null, null, null)); + } +} + diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockStep.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockStep.java new file mode 100644 index 0000000000000..7de2bd14c5f71 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/MockStep.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +public class MockStep extends Step implements Writeable { + public static final String NAME = "TEST_STEP"; + + public MockStep(StepKey stepKey, Step.StepKey nextStepKey) { + super(stepKey, nextStepKey); + } + + public MockStep(Step other) { + super(other.getKey(), other.getNextStepKey()); + } + + public MockStep(StreamInput in) throws IOException { + super(new StepKey(in.readString(), in.readString(), in.readString()), readOptionalNextStepKey(in)); + } + + private static StepKey readOptionalNextStepKey(StreamInput in) throws IOException { + if (in.readBoolean()) { + return new StepKey(in.readString(), in.readString(), in.readString()); + } + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getKey().getPhase()); + out.writeString(getKey().getAction()); + out.writeString(getKey().getName()); + boolean hasNextStep = getNextStepKey() != null; + out.writeBoolean(hasNextStep); + if (hasNextStep) { + out.writeString(getNextStepKey().getPhase()); + out.writeString(getNextStepKey().getAction()); + out.writeString(getNextStepKey().getName()); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OperationModeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OperationModeTests.java new file mode 100644 index 0000000000000..d99868fe178a7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/OperationModeTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class OperationModeTests extends ESTestCase { + + public void testIsValidChange() { + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.RUNNING)); + assertTrue(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPING.isValidChange(OperationMode.STOPPING)); + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPED)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java new file mode 100644 index 0000000000000..eea46baadf65f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseCompleteStepTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +public class PhaseCompleteStepTests extends AbstractStepTestCase { + + @Override + public PhaseCompleteStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new PhaseCompleteStep(stepKey, nextStepKey); + } + + @Override + public PhaseCompleteStep mutateInstance(PhaseCompleteStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + if (randomBoolean()) { + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } else { + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } + + return new PhaseCompleteStep(key, nextKey); + } + + @Override + public PhaseCompleteStep copyInstance(PhaseCompleteStep instance) { + return new PhaseCompleteStep(instance.getKey(), instance.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java new file mode 100644 index 0000000000000..9198282a0717c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseExecutionInfoTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class PhaseExecutionInfoTests extends AbstractSerializingTestCase { + + static PhaseExecutionInfo randomPhaseExecutionInfo(String phaseName) { + return new PhaseExecutionInfo(randomAlphaOfLength(5), PhaseTests.randomTestPhase(phaseName), + randomNonNegativeLong(), randomNonNegativeLong()); + } + + String phaseName; + + @Before + public void setupPhaseName() { + phaseName = randomAlphaOfLength(7); + } + + @Override + protected PhaseExecutionInfo createTestInstance() { + return randomPhaseExecutionInfo(phaseName); + } + + @Override + protected Reader instanceReader() { + return PhaseExecutionInfo::new; + } + + @Override + protected PhaseExecutionInfo doParseInstance(XContentParser parser) throws IOException { + return PhaseExecutionInfo.parse(parser, phaseName); + } + + @Override + protected PhaseExecutionInfo mutateInstance(PhaseExecutionInfo instance) throws IOException { + String policyName = instance.getPolicyName(); + Phase phase = instance.getPhase(); + long version = instance.getVersion(); + long modifiedDate = instance.getModifiedDate(); + switch (between(0, 3)) { + case 0: + policyName = policyName + randomAlphaOfLengthBetween(1, 5); + break; + case 1: + phase = randomValueOtherThan(phase, () -> PhaseTests.randomTestPhase(randomAlphaOfLength(6))); + break; + case 2: + version++; + break; + case 3: + modifiedDate++; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new PhaseExecutionInfo(policyName, phase, version, modifiedDate); + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Arrays + .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new))); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java new file mode 100644 index 0000000000000..ae4325abe5d87 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java
new file mode 100644
index 0000000000000..ae4325abe5d87
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseStatsTests.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public class PhaseStatsTests extends AbstractWireSerializingTestCase<PhaseStats> {
+
+    @Override
+    protected PhaseStats createTestInstance() {
+        return createRandomInstance();
+    }
+
+    public static PhaseStats createRandomInstance() {
+        TimeValue after = TimeValue.parseTimeValue(randomTimeValue(), "phase_stats_tests");
+        String[] actionNames = randomArray(0, 20, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 20));
+        return new PhaseStats(after, actionNames);
+    }
+
+    @Override
+    protected PhaseStats mutateInstance(PhaseStats instance) throws IOException {
+        TimeValue after = instance.getAfter();
+        String[] actionNames = instance.getActionNames();
+        switch (between(0, 1)) {
+        case 0:
+            after = randomValueOtherThan(after, () -> TimeValue.parseTimeValue(randomPositiveTimeValue(), "phase_stats_tests"));
+            break;
+        case 1:
+            actionNames = randomValueOtherThanMany(a -> Arrays.equals(a, instance.getActionNames()),
+                () -> randomArray(0, 20, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 20)));
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new PhaseStats(after, actionNames);
+    }
+
+    @Override
+    protected Reader<PhaseStats> instanceReader() {
+        return PhaseStats::new;
+    }
+
+}
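The mutate method above leans on randomValueOtherThan and randomValueOtherThanMany so that the "mutated" copy can never collide with the original; a collision would make the harness's not-equals assertion flaky. A condensed sketch of the idiom as it appears inside mutateInstance:

    // Keep re-sampling until the value differs from the current one, so the
    // mutated instance cannot accidentally equal the original.
    TimeValue current = instance.getAfter();
    TimeValue mutated = randomValueOtherThan(current,
        () -> TimeValue.parseTimeValue(randomPositiveTimeValue(), "phase_stats_tests"));
    assert mutated.equals(current) == false;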
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseTests.java
new file mode 100644
index 0000000000000..0c3530216f26c
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PhaseTests.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class PhaseTests extends AbstractSerializingTestCase<Phase> {
+    private String phaseName;
+
+    @Before
+    public void setup() {
+        phaseName = randomAlphaOfLength(20);
+    }
+
+    @Override
+    protected Phase createTestInstance() {
+        return randomTestPhase(phaseName);
+    }
+
+    static Phase randomTestPhase(String phaseName) {
+        TimeValue after = null;
+        if (randomBoolean()) {
+            after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+        }
+        Map<String, LifecycleAction> actions = Collections.emptyMap();
+        if (randomBoolean()) {
+            actions = Collections.singletonMap(MockAction.NAME, new MockAction());
+        }
+        return new Phase(phaseName, after, actions);
+    }
+
+    @Override
+    protected Phase doParseInstance(XContentParser parser) throws IOException {
+        return Phase.parse(parser, phaseName);
+    }
+
+    @Override
+    protected Reader<Phase> instanceReader() {
+        return Phase::new;
+    }
+
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Arrays
+            .asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new)));
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
+        entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(MockAction.NAME), MockAction::parse));
+        return new NamedXContentRegistry(entries);
+    }
+
+    @Override
+    protected Phase mutateInstance(Phase instance) throws IOException {
+        String name = instance.getName();
+        TimeValue after = instance.getMinimumAge();
+        Map<String, LifecycleAction> actions = instance.getActions();
+        switch (between(0, 2)) {
+        case 0:
+            name = name + randomAlphaOfLengthBetween(1, 5);
+            break;
+        case 1:
+            after = TimeValue.timeValueSeconds(after.getSeconds() + randomIntBetween(1, 1000));
+            break;
+        case 2:
+            actions = new HashMap<>(actions);
+            actions.put(MockAction.NAME + "another", new MockAction(Collections.emptyList()));
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new Phase(name, after, actions);
+    }
+
+    public void testDefaultAfter() {
+        Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap());
+        assertEquals(TimeValue.ZERO, phase.getMinimumAge());
+    }
+}
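testDefaultAfter pins down a defaulting rule worth calling out: a Phase built with a null minimum age is normalised to TimeValue.ZERO, so a phase with no explicit age gate becomes eligible as soon as the policy reaches it. In usage terms:

    // A warm phase with no "min_age" applies immediately.
    Phase warm = new Phase("warm", null, Collections.emptyMap());
    assertEquals(TimeValue.ZERO, warm.getMinimumAge());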
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java
new file mode 100644
index 0000000000000..29a6912a7370f
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/PolicyStatsTests.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class PolicyStatsTests extends AbstractWireSerializingTestCase<PolicyStats> {
+
+    @Override
+    protected PolicyStats createTestInstance() {
+        return createRandomInstance();
+    }
+
+    public static PolicyStats createRandomInstance() {
+        int size = randomIntBetween(0, 10);
+        Map<String, PhaseStats> phaseStats = new HashMap<>(size);
+        for (int i = 0; i < size; i++) {
+            phaseStats.put(randomAlphaOfLengthBetween(1, 20), PhaseStatsTests.createRandomInstance());
+        }
+        return new PolicyStats(phaseStats, randomIntBetween(0, 100));
+    }
+
+    @Override
+    protected PolicyStats mutateInstance(PolicyStats instance) throws IOException {
+        Map<String, PhaseStats> phaseStats = instance.getPhaseStats();
+        int indicesManaged = instance.getIndicesManaged();
+        switch (between(0, 1)) {
+        case 0:
+            phaseStats = new HashMap<>(instance.getPhaseStats());
+            phaseStats.put(randomAlphaOfLengthBetween(21, 25), PhaseStatsTests.createRandomInstance());
+            break;
+        case 1:
+            indicesManaged += randomIntBetween(1, 10);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new PolicyStats(phaseStats, indicesManaged);
+    }
+
+    @Override
+    protected Reader<PolicyStats> instanceReader() {
+        return PolicyStats::new;
+    }
+
+}
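Note the map handling in case 0 of mutateInstance above: the phase-stats map is copied into a fresh HashMap before the extra entry goes in, so the original instance, which the harness still holds for its equality checks, is never mutated in place. The added key is drawn from lengths 21 to 25 while the original keys use 1 to 20, which guarantees the put adds an entry rather than overwriting one. Condensed:

    // Copy before mutating: 'instance' must stay untouched for the equality checks,
    // and the longer key can never clash with an existing one.
    Map<String, PhaseStats> mutatedStats = new HashMap<>(instance.getPhaseStats());
    mutatedStats.put(randomAlphaOfLengthBetween(21, 25), PhaseStatsTests.createRandomInstance());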
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java
new file mode 100644
index 0000000000000..1d28985fac1db
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyActionTests.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ReadOnlyActionTests extends AbstractActionTestCase<ReadOnlyAction> {
+
+    @Override
+    protected ReadOnlyAction doParseInstance(XContentParser parser) {
+        return ReadOnlyAction.parse(parser);
+    }
+
+    @Override
+    protected ReadOnlyAction createTestInstance() {
+        return new ReadOnlyAction();
+    }
+
+    @Override
+    protected Reader<ReadOnlyAction> instanceReader() {
+        return ReadOnlyAction::new;
+    }
+
+    public void testToSteps() {
+        ReadOnlyAction action = createTestInstance();
+        String phase = randomAlphaOfLengthBetween(1, 10);
+        StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+            randomAlphaOfLengthBetween(1, 10));
+        List<Step> steps = action.toSteps(null, phase, nextStepKey);
+        assertNotNull(steps);
+        assertEquals(1, steps.size());
+        StepKey expectedFirstStepKey = new StepKey(phase, ReadOnlyAction.NAME, ReadOnlyAction.NAME);
+        UpdateSettingsStep firstStep = (UpdateSettingsStep) steps.get(0);
+        assertThat(firstStep.getKey(), equalTo(expectedFirstStepKey));
+        assertThat(firstStep.getNextStepKey(), equalTo(nextStepKey));
+        assertThat(firstStep.getSettings().size(), equalTo(1));
+        assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(firstStep.getSettings()));
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java
new file mode 100644
index 0000000000000..f13a09ac7476e
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverActionTests.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import java.io.IOException;
+import java.util.List;
+
+public class RolloverActionTests extends AbstractActionTestCase<RolloverAction> {
+
+    @Override
+    protected RolloverAction doParseInstance(XContentParser parser) throws IOException {
+        return RolloverAction.parse(parser);
+    }
+
+    @Override
+    protected RolloverAction createTestInstance() {
+        return randomInstance();
+    }
+
+    static RolloverAction randomInstance() {
+        ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
+        ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
+        Long maxDocs = randomBoolean() ? null : randomNonNegativeLong();
+        TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean())
+            ?
TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + @Override + protected Reader instanceReader() { + return RolloverAction::new; + } + + @Override + protected RolloverAction mutateInstance(RolloverAction instance) throws IOException { + ByteSizeValue maxSize = instance.getMaxSize(); + TimeValue maxAge = instance.getMaxAge(); + Long maxDocs = instance.getMaxDocs(); + switch (between(0, 2)) { + case 0: + maxSize = randomValueOtherThan(maxSize, () -> { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + }); + break; + case 1: + maxAge = randomValueOtherThan(maxAge, + () -> TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test")); + break; + case 2: + maxDocs = maxDocs == null ? randomNonNegativeLong() : maxDocs + 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + public void testNoConditions() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new RolloverAction(null, null, null)); + assertEquals("At least one rollover condition must be set.", exception.getMessage()); + } + + public void testToSteps() { + RolloverAction action = createTestInstance(); + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), + randomAlphaOfLengthBetween(1, 10)); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(2, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, RolloverAction.NAME, RolloverStep.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, RolloverAction.NAME, UpdateRolloverLifecycleDateStep.NAME); + RolloverStep firstStep = (RolloverStep) steps.get(0); + UpdateRolloverLifecycleDateStep secondStep = (UpdateRolloverLifecycleDateStep) steps.get(1); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(secondStep.getKey(), firstStep.getNextStepKey()); + assertEquals(action.getMaxSize(), firstStep.getMaxSize()); + assertEquals(action.getMaxAge(), firstStep.getMaxAge()); + assertEquals(action.getMaxDocs(), firstStep.getMaxDocs()); + assertEquals(nextStepKey, secondStep.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java new file mode 100644 index 0000000000000..596099e6e275b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/RolloverStepTests.java @@ -0,0 +1,341 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.rollover.Condition; +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class RolloverStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public RolloverStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? 
TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverStep(stepKey, nextStepKey, client, maxSize, maxAge, maxDocs); + } + + @Override + public RolloverStep mutateInstance(RolloverStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + ByteSizeValue maxSize = instance.getMaxSize(); + TimeValue maxAge = instance.getMaxAge(); + Long maxDocs = instance.getMaxDocs(); + + switch (between(0, 4)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxSize = randomValueOtherThan(maxSize, () -> { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + }); + break; + case 3: + maxAge = TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test"); + break; + case 4: + maxDocs = randomNonNegativeLong(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new RolloverStep(key, nextKey, instance.getClient(), maxSize, maxAge, maxDocs); + } + + @Override + public RolloverStep copyInstance(RolloverStep instance) { + return new RolloverStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), + instance.getMaxSize(), instance.getMaxAge(), instance.getMaxDocs()); + } + + private static void assertRolloverIndexRequest(RolloverRequest request, String alias, Set> expectedConditions) { + assertNotNull(request); + assertEquals(1, request.indices().length); + assertEquals(alias, request.indices()[0]); + assertEquals(alias, request.getAlias()); + assertEquals(expectedConditions.size(), request.getConditions().size()); + Set expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet()); + Set actualConditionValues = request.getConditions().values().stream() + .map(Condition::value).collect(Collectors.toSet()); + assertEquals(expectedConditionValues, actualConditionValues); + } + + public void testPerformAction() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + 
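+                // At this point expectedConditions holds exactly the conditions implied by
+                // whichever of the step's max_age/max_size/max_docs settings are non-null, so
+                // the assertion below verifies the step mapped its settings onto the
+                // RolloverRequest one-to-one before the stub replies with a rolled-over response.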
assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), true, true, true)); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionNotComplete() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onResponse(new RolloverResponse(null, null, Collections.emptyMap(), request.isDryRun(), false, true, true)); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce actionCompleted = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(false, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionFailure() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putAlias(AliasMetaData.builder(alias)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new 
RuntimeException(); + RolloverStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + RolloverRequest request = (RolloverRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + Set> expectedConditions = new HashSet<>(); + if (step.getMaxAge() != null) { + expectedConditions.add(new MaxAgeCondition(step.getMaxAge())); + } + if (step.getMaxSize() != null) { + expectedConditions.add(new MaxSizeCondition(step.getMaxSize())); + } + if (step.getMaxDocs() != null) { + expectedConditions.add(new MaxDocsCondition(step.getMaxDocs())); + } + assertRolloverIndexRequest(request, alias, expectedConditions); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any()); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any()); + } + + public void testPerformActionInvalidNullOrEmptyAlias() { + String alias = randomBoolean() ? 
"" : null; + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, + indexMetaData.getIndex().getName()))); + } + + public void testPerformActionAliasDoesNotPointToIndex() { + String alias = randomAlphaOfLength(5); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + RolloverStep step = createRandomInstance(); + + SetOnce exceptionThrown = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean complete, ToXContentObject obj) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + exceptionThrown.set(e); + } + }); + assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class)); + assertThat(exceptionThrown.get().getMessage(), equalTo(String.format(Locale.ROOT, + "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias, + indexMetaData.getIndex().getName()))); + + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java new file mode 100644 index 0000000000000..a5c1813de2153 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepInfoTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+import org.elasticsearch.xpack.core.indexlifecycle.SegmentCountStep.Info;
+
+import java.io.IOException;
+
+public class SegmentCountStepInfoTests extends AbstractXContentTestCase<Info> {
+
+    @Override
+    protected Info createTestInstance() {
+        return new Info(randomNonNegativeLong());
+    }
+
+    @Override
+    protected Info doParseInstance(XContentParser parser) throws IOException {
+        return Info.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    public final void testEqualsAndHashcode() {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance);
+        }
+    }
+
+    protected final Info copyInstance(Info instance) throws IOException {
+        return new Info(instance.getNumberShardsLeftToMerge());
+    }
+
+    protected Info mutateInstance(Info instance) throws IOException {
+        return createTestInstance();
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java
new file mode 100644
index 0000000000000..ae0551020fbd1
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java
@@ -0,0 +1,236 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Spliterator; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; + +public class SegmentCountStepTests extends AbstractStepTestCase { + + @Override + public SegmentCountStep createRandomInstance() { + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(1, 10); + + return new SegmentCountStep(stepKey, nextStepKey, null, maxNumSegments); + } + + private IndexMetaData makeMeta(Index index) { + return IndexMetaData.builder(index.getName()) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .build(); + } + + @Override + public SegmentCountStep mutateInstance(SegmentCountStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + int maxNumSegments = instance.getMaxNumSegments(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + maxNumSegments += 1; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new SegmentCountStep(key, nextKey, null, maxNumSegments); + } + + @Override + public SegmentCountStep copyInstance(SegmentCountStep instance) { + return new SegmentCountStep(instance.getKey(), instance.getNextStepKey(), null, instance.getMaxNumSegments()); + } + + public void testIsConditionMet() { + int maxNumSegments = randomIntBetween(3, 10); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + IndicesSegmentResponse indicesSegmentResponse = Mockito.mock(IndicesSegmentResponse.class); + IndexSegments indexSegments = Mockito.mock(IndexSegments.class); + IndexShardSegments indexShardSegments = Mockito.mock(IndexShardSegments.class); + Map indexShards = Collections.singletonMap(0, indexShardSegments); + ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); + ShardSegments[] 
shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; + Spliterator iss = indexShards.values().spliterator(); + List segments = new ArrayList<>(); + for (int i = 0; i < maxNumSegments - randomIntBetween(0, 3); i++) { + segments.add(null); + } + Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indexSegments.spliterator()).thenReturn(iss); + Mockito.when(indexShardSegments.getShards()).thenReturn(shardSegmentsArray); + Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(indicesSegmentResponse); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce conditionMetResult = new SetOnce<>(); + SetOnce conditionInfo = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + conditionMetResult.set(conditionMet); + conditionInfo.set(info); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("unexpected method call"); + } + }); + + assertTrue(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(0L), conditionInfo.get()); + } + + public void testIsConditionFails() { + int maxNumSegments = randomIntBetween(3, 10); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + IndicesSegmentResponse indicesSegmentResponse = Mockito.mock(IndicesSegmentResponse.class); + IndexSegments indexSegments = Mockito.mock(IndexSegments.class); + IndexShardSegments indexShardSegments = Mockito.mock(IndexShardSegments.class); + Map indexShards = Collections.singletonMap(0, indexShardSegments); + ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); + ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; + Spliterator iss = indexShards.values().spliterator(); + List segments = new ArrayList<>(); + for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { + segments.add(null); + } + Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), indexSegments)); + Mockito.when(indexSegments.spliterator()).thenReturn(iss); + Mockito.when(indexShardSegments.getShards()).thenReturn(shardSegmentsArray); + Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) 
invocationOnMock.getArguments()[1]; + listener.onResponse(indicesSegmentResponse); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce conditionMetResult = new SetOnce<>(); + SetOnce conditionInfo = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + conditionMetResult.set(conditionMet); + conditionInfo.set(info); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("unexpected method call"); + } + }); + + assertFalse(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(1L), conditionInfo.get()); + } + + public void testThrowsException() { + Exception exception = new RuntimeException("error"); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + int maxNumSegments = randomIntBetween(3, 10); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onFailure(exception); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce exceptionThrown = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + throw new AssertionError("unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, equalTo(exception)); + exceptionThrown.set(true); + } + }); + + assertTrue(exceptionThrown.get()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java new file mode 100644 index 0000000000000..b42ada6956f87 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; + +public class SetSingleNodeAllocateStepTests extends AbstractStepTestCase { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + protected SetSingleNodeAllocateStep createRandomInstance() { + return new SetSingleNodeAllocateStep(randomStepKey(), randomStepKey(), client); + } + + @Override + protected SetSingleNodeAllocateStep mutateInstance(SetSingleNodeAllocateStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new SetSingleNodeAllocateStep(key, nextKey, instance.getClient()); + } + + @Override + protected SetSingleNodeAllocateStep copyInstance(SetSingleNodeAllocateStep instance) { + return new SetSingleNodeAllocateStep(instance.getKey(), instance.getNextStepKey(), client); + } + + public static void assertSettingsRequestContainsValueFrom(UpdateSettingsRequest request, String settingsKey, + Set acceptableValues, boolean assertOnlyKeyInSettings, + String... 
expectedIndices) { + assertNotNull(request); + assertArrayEquals(expectedIndices, request.indices()); + assertThat(request.settings().get(settingsKey), anyOf(acceptableValues.stream().map(e -> equalTo(e)).collect(Collectors.toList()))); + if (assertOnlyKeyInSettings) { + assertEquals(1, request.settings().size()); + } + } + + public void testPerformActionNoAttrs() throws IOException { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(); + nodes.add( + DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsAllNodesValid() throws IOException { + int numAttrs = randomIntBetween(1, 10); + String[][] validAttrs = new String[numAttrs][2]; + for (int i = 0; i < numAttrs; i++) { + validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) }; + } + Settings.Builder indexSettings = settings(Version.CURRENT); + for (String[] attr : validAttrs) { + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); + } + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + String[] nodeAttr = randomFrom(validAttrs); + Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); + nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); + validNodeIds.add(nodeId); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsSomeNodesValid() throws IOException { + String[] validAttr = new String[] { "box_type", "valid" }; + String[] invalidAttr = new String[] { "box_type", "not_valid" }; + Settings.Builder indexSettings = settings(Version.CURRENT); + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + validAttr[0], validAttr[1]); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = 
Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + validAttr[0], validAttr[1]).build(); + Settings invalidNodeSettings = Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + invalidAttr[0], invalidAttr[1]).build(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + Builder nodeSettingsBuilder = Settings.builder(); + // randomise whether the node had valid attributes or not but make sure at least one node is valid + if (randomBoolean() || (i == numNodes - 1 && validNodeIds.isEmpty())) { + nodeSettingsBuilder.put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + validNodeIds.add(nodeId); + } else { + nodeSettingsBuilder.put(invalidNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + } + nodes.add(DiscoveryNode.createLocal(nodeSettingsBuilder.build(), new TransportAddress(TransportAddress.META_ADDRESS, nodePort), + nodeId)); + } + + assertNodeSelected(indexMetaData, index, validNodeIds, nodes); + } + + public void testPerformActionAttrsNoNodesValid() { + String[] validAttr = new String[] { "box_type", "valid" }; + String[] invalidAttr = new String[] { "box_type", "not_valid" }; + Settings.Builder indexSettings = settings(Version.CURRENT); + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + validAttr[0], validAttr[1]); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Settings invalidNodeSettings = Settings.builder().put(Node.NODE_ATTRIBUTES.getKey() + invalidAttr[0], invalidAttr[1]).build(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + Builder nodeSettingsBuilder = Settings.builder().put(invalidNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName); + nodes.add(DiscoveryNode.createLocal(nodeSettingsBuilder.build(), new TransportAddress(TransportAddress.META_ADDRESS, nodePort), + nodeId)); + } + + assertNoValidNode(indexMetaData, index, nodes); + } + + public void testPerformActionAttrsRequestFails() { + int numAttrs = randomIntBetween(1, 10); + String[][] validAttrs = new String[numAttrs][2]; + for (int i = 0; i < numAttrs; i++) { + validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) }; + } + Settings.Builder indexSettings = settings(Version.CURRENT); + for (String[] attr : validAttrs) { + indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); + } + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Index index = indexMetaData.getIndex(); + Set validNodeIds = new HashSet<>(); + Settings validNodeSettings = Settings.EMPTY; + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); + int numNodes = randomIntBetween(1, 20); + for (int i = 0; i < numNodes; i++) { + String nodeId = "node_id_" + i; + String nodeName = "node_" + i; + int nodePort = 9300 + i; + String[] nodeAttr = randomFrom(validAttrs); + Settings nodeSettings = 
Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName)
+                    .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build();
+            nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId));
+            validNodeIds.add(nodeId);
+        }
+
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.<String, IndexMetaData> builder().fPut(index.getName(),
+                indexMetaData);
+        IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
+                .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED));
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build()))
+                .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
+
+        SetSingleNodeAllocateStep step = createRandomInstance();
+        Exception exception = new RuntimeException();
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0];
+                @SuppressWarnings("unchecked")
+                ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1];
+                assertSettingsRequestContainsValueFrom(request,
+                        IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true,
+                        indexMetaData.getIndex().getName());
+                listener.onFailure(exception);
+                return null;
+            }
+
+        }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
+        step.performAction(indexMetaData, clusterState, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                throw new AssertionError("Unexpected method call");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertSame(exception, e);
+                exceptionThrown.set(true);
+            }
+        });
+
+        assertEquals(true, exceptionThrown.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any());
+    }
+
+    public void testPerformActionAttrsNoShard() {
+        int numAttrs = randomIntBetween(1, 10);
+        String[][] validAttrs = new String[numAttrs][2];
+        for (int i = 0; i < numAttrs; i++) {
+            validAttrs[i] = new String[] { randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20) };
+        }
+        Settings.Builder indexSettings = settings(Version.CURRENT);
+        for (String[] attr : validAttrs) {
+            indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]);
+        }
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings)
+                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        Index index = indexMetaData.getIndex();
+        Set<String> validNodeIds = new HashSet<>();
+        Settings validNodeSettings = Settings.EMPTY;
+        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+        int numNodes = randomIntBetween(1, 20);
+        for (int i = 0; i < numNodes; i++) {
+            String nodeId = "node_id_" + i;
+            String nodeName = "node_" + i;
+            int nodePort = 9300 + i;
+            String[] nodeAttr = randomFrom(validAttrs);
+            Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName)
+                    .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build();
+            nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId));
+            validNodeIds.add(nodeId);
+        }
+
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.<String, IndexMetaData> builder().fPut(index.getName(),
+                indexMetaData);
+        IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index);
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build()))
+                .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
+
+        SetSingleNodeAllocateStep step = createRandomInstance();
+
+        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
+        step.performAction(indexMetaData, clusterState, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                throw new AssertionError("Unexpected method call");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertThat(e, Matchers.instanceOf(IndexNotFoundException.class));
+                assertEquals(indexMetaData.getIndex(), ((IndexNotFoundException) e).getIndex());
+                exceptionThrown.set(true);
+            }
+        });
+
+        assertEquals(true, exceptionThrown.get());
+
+        Mockito.verifyZeroInteractions(client);
+    }
+
+    private void assertNodeSelected(IndexMetaData indexMetaData, Index index,
+                                    Set<String> validNodeIds, DiscoveryNodes.Builder nodes) throws IOException {
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.<String, IndexMetaData> builder().fPut(index.getName(),
+                indexMetaData);
+        IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
+                .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED));
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build()))
+                .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
+
+        SetSingleNodeAllocateStep step = createRandomInstance();
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0];
+                @SuppressWarnings("unchecked")
+                ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1];
+                assertSettingsRequestContainsValueFrom(request,
+                        IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true,
+                        indexMetaData.getIndex().getName());
+                listener.onResponse(new AcknowledgedResponse(true));
+                return null;
+            }
+
+        }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> actionCompleted = new SetOnce<>();
+
+        step.performAction(indexMetaData, clusterState, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                actionCompleted.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Unexpected method call", e);
+            }
+        });
+
+        assertEquals(true, actionCompleted.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any());
+    }
+
+    private void assertNoValidNode(IndexMetaData indexMetaData, Index index, DiscoveryNodes.Builder nodes) {
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.<String, IndexMetaData> builder().fPut(index.getName(),
+                indexMetaData);
+        IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
+                .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED));
+        ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build()))
+                .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build();
+
+        SetSingleNodeAllocateStep step = createRandomInstance();
+
+        SetOnce<Boolean> actionCompleted = new SetOnce<>();
+
+        step.performAction(indexMetaData, clusterState, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                actionCompleted.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Unexpected method call", e);
+            }
+        });
+
+        assertEquals(false, actionCompleted.get());
+
+        Mockito.verifyZeroInteractions(client);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java
new file mode 100644
index 0000000000000..658f8bef6d47b
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkActionTests.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShrinkActionTests extends AbstractActionTestCase<ShrinkAction> {
+
+    @Override
+    protected ShrinkAction doParseInstance(XContentParser parser) throws IOException {
+        return ShrinkAction.parse(parser);
+    }
+
+    @Override
+    protected ShrinkAction createTestInstance() {
+        return randomInstance();
+    }
+
+    static ShrinkAction randomInstance() {
+        return new ShrinkAction(randomIntBetween(1, 100));
+    }
+
+    @Override
+    protected ShrinkAction mutateInstance(ShrinkAction action) {
+        return new ShrinkAction(action.getNumberOfShards() + randomIntBetween(1, 2));
+    }
+
+    @Override
+    protected Reader<ShrinkAction> instanceReader() {
+        return ShrinkAction::new;
+    }
+
+    public void testNonPositiveShardNumber() {
+        Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0)));
+        assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0"));
+    }
+
+    public void testToSteps() {
+        ShrinkAction action = createTestInstance();
+        String phase = randomAlphaOfLengthBetween(1, 10);
+        StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
+                randomAlphaOfLengthBetween(1, 10));
+        List<Step> steps = action.toSteps(null, phase, nextStepKey);
+        assertThat(steps.size(), equalTo(8));
+        StepKey expectedFirstKey = new StepKey(phase, ShrinkAction.NAME, ReadOnlyAction.NAME);
+        StepKey expectedSecondKey = new StepKey(phase, ShrinkAction.NAME, SetSingleNodeAllocateStep.NAME);
+        StepKey expectedThirdKey = new StepKey(phase, ShrinkAction.NAME, CheckShrinkReadyStep.NAME);
+        StepKey expectedFourthKey = new StepKey(phase, ShrinkAction.NAME, ShrinkStep.NAME);
+        StepKey expectedFifthKey = new StepKey(phase, ShrinkAction.NAME, ShrunkShardsAllocatedStep.NAME);
+        StepKey expectedSixthKey = new StepKey(phase, ShrinkAction.NAME, CopyExecutionStateStep.NAME);
+        StepKey expectedSeventhKey = new StepKey(phase, ShrinkAction.NAME, ShrinkSetAliasStep.NAME);
+        StepKey expectedEighthKey = new StepKey(phase, ShrinkAction.NAME, ShrunkenIndexCheckStep.NAME);
+
+        assertTrue(steps.get(0) instanceof UpdateSettingsStep);
+        assertThat(steps.get(0).getKey(), equalTo(expectedFirstKey));
+        assertThat(steps.get(0).getNextStepKey(), equalTo(expectedSecondKey));
+        assertTrue(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(((UpdateSettingsStep)steps.get(0)).getSettings()));
+
+        assertTrue(steps.get(1) instanceof SetSingleNodeAllocateStep);
+        assertThat(steps.get(1).getKey(), equalTo(expectedSecondKey));
+        assertThat(steps.get(1).getNextStepKey(), equalTo(expectedThirdKey));
+
+        assertTrue(steps.get(2) instanceof CheckShrinkReadyStep);
+        assertThat(steps.get(2).getKey(), equalTo(expectedThirdKey));
+        assertThat(steps.get(2).getNextStepKey(), equalTo(expectedFourthKey));
+
+        assertTrue(steps.get(3) instanceof ShrinkStep);
+        assertThat(steps.get(3).getKey(), equalTo(expectedFourthKey));
+        assertThat(steps.get(3).getNextStepKey(), equalTo(expectedFifthKey));
+        assertThat(((ShrinkStep) steps.get(3)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
+
+        assertTrue(steps.get(4) instanceof ShrunkShardsAllocatedStep);
+        assertThat(steps.get(4).getKey(), equalTo(expectedFifthKey));
+        assertThat(steps.get(4).getNextStepKey(), equalTo(expectedSixthKey));
+        assertThat(((ShrunkShardsAllocatedStep) steps.get(4)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
+
+        assertTrue(steps.get(5) instanceof CopyExecutionStateStep);
+        assertThat(steps.get(5).getKey(), equalTo(expectedSixthKey));
+        assertThat(steps.get(5).getNextStepKey(), equalTo(expectedSeventhKey));
+        assertThat(((CopyExecutionStateStep) steps.get(5)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
+
+        assertTrue(steps.get(6) instanceof ShrinkSetAliasStep);
+        assertThat(steps.get(6).getKey(), equalTo(expectedSeventhKey));
+        assertThat(steps.get(6).getNextStepKey(), equalTo(expectedEighthKey));
+        assertThat(((ShrinkSetAliasStep) steps.get(6)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
+
+        assertTrue(steps.get(7) instanceof ShrunkenIndexCheckStep);
+        assertThat(steps.get(7).getKey(), equalTo(expectedEighthKey));
+        assertThat(steps.get(7).getNextStepKey(), equalTo(nextStepKey));
+        assertThat(((ShrunkenIndexCheckStep) steps.get(7)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
+    }
+
+    @Override
+    protected boolean isSafeAction() {
+        return false;
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java
new file mode 100644
index 0000000000000..5fcfcdeea38f0
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkSetAliasStepTests.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+import org.junit.Before;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShrinkSetAliasStepTests extends AbstractStepTestCase<ShrinkSetAliasStep> {
+
+    private Client client;
+
+    @Before
+    public void setup() {
+        client = Mockito.mock(Client.class);
+    }
+
+    @Override
+    public ShrinkSetAliasStep createRandomInstance() {
+        StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        String shrunkIndexPrefix = randomAlphaOfLength(10);
+        return new ShrinkSetAliasStep(stepKey, nextStepKey, client, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrinkSetAliasStep mutateInstance(ShrinkSetAliasStep instance) {
+        StepKey key = instance.getKey();
+        StepKey nextKey = instance.getNextStepKey();
+        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
+        switch (between(0, 2)) {
+        case 0:
+            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 1:
+            nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 2:
+            shrunkIndexPrefix += randomAlphaOfLength(5);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new ShrinkSetAliasStep(key, nextKey, instance.getClient(), shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrinkSetAliasStep copyInstance(ShrinkSetAliasStep instance) {
+        return new ShrinkSetAliasStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getShrunkIndexPrefix());
+    }
+
+    public void testPerformAction() {
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
+                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        ShrinkSetAliasStep step = createRandomInstance();
+
+        String sourceIndex = indexMetaData.getIndex().getName();
+        String shrunkenIndex = step.getShrunkIndexPrefix() + sourceIndex;
+        List<AliasActions> expectedAliasActions = Arrays.asList(
+                IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndex),
+                IndicesAliasesRequest.AliasActions.add().index(shrunkenIndex).alias(sourceIndex));
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                IndicesAliasesRequest request = (IndicesAliasesRequest) invocation.getArguments()[0];
+                assertThat(request.getAliasActions(), equalTo(expectedAliasActions));
+                @SuppressWarnings("unchecked")
+                ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1];
+                listener.onResponse(new AcknowledgedResponse(true));
+                return null;
+            }
+
+        }).when(indicesClient).aliases(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> actionCompleted = new SetOnce<>();
+        step.performAction(indexMetaData, null, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                actionCompleted.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Unexpected method call", e);
+            }
+        });
+
+        assertTrue(actionCompleted.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).aliases(Mockito.any(), Mockito.any());
+    }
+
+    public void testPerformActionFailure() {
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
+                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        Exception exception = new RuntimeException();
+        ShrinkSetAliasStep step = createRandomInstance();
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                @SuppressWarnings("unchecked")
+                ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1];
+                listener.onFailure(exception);
+                return null;
+            }
+
+        }).when(indicesClient).aliases(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
+        step.performAction(indexMetaData, null, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                throw new AssertionError("Unexpected method call");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertSame(exception, e);
+                exceptionThrown.set(true);
+            }
+        });
+
+        assertEquals(true, exceptionThrown.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).aliases(Mockito.any(), Mockito.any());
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java
new file mode 100644
index 0000000000000..472c22025e195
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrinkStepTests.java
@@ -0,0 +1,245 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
+import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
+import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+import org.junit.Before;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Collections;
+
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {
+
+    private Client client;
+
+    @Before
+    public void setup() {
+        client = Mockito.mock(Client.class);
+    }
+
+    @Override
+    public ShrinkStep createRandomInstance() {
+        StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        int numberOfShards = randomIntBetween(1, 20);
+        String shrunkIndexPrefix = randomAlphaOfLength(10);
+        return new ShrinkStep(stepKey, nextStepKey, client, numberOfShards, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrinkStep mutateInstance(ShrinkStep instance) {
+        StepKey key = instance.getKey();
+        StepKey nextKey = instance.getNextStepKey();
+        int numberOfShards = instance.getNumberOfShards();
+        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
+
+        switch (between(0, 3)) {
+        case 0:
+            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 1:
+            nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 2:
+            numberOfShards = numberOfShards + 1;
+            break;
+        case 3:
+            shrunkIndexPrefix += randomAlphaOfLength(5);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+
+        return new ShrinkStep(key, nextKey, instance.getClient(), numberOfShards, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrinkStep copyInstance(ShrinkStep instance) {
+        return new ShrinkStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getNumberOfShards(),
+                instance.getShrunkIndexPrefix());
+    }
+
+    public void testPerformAction() throws Exception {
+        String lifecycleName = randomAlphaOfLength(5);
+        ShrinkStep step = createRandomInstance();
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setPhase(step.getKey().getPhase());
+        lifecycleState.setAction(step.getKey().getAction());
+        lifecycleState.setStep(step.getKey().getName());
+        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
+        IndexMetaData sourceIndexMetaData = IndexMetaData.builder(randomAlphaOfLength(10))
+            .settings(settings(Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, lifecycleName)
+            )
+            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5))
+            .putAlias(AliasMetaData.builder("my_alias"))
+            .build();
+
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                ResizeRequest request = (ResizeRequest) invocation.getArguments()[0];
+                @SuppressWarnings("unchecked")
+                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
+                assertThat(request.getSourceIndex(), equalTo(sourceIndexMetaData.getIndex().getName()));
+                assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.singleton(new Alias("my_alias"))));
+                assertThat(request.getTargetIndexRequest().settings(), equalTo(Settings.builder()
+                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, step.getNumberOfShards())
+                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetaData.getNumberOfReplicas())
+                    .put(LifecycleSettings.LIFECYCLE_NAME, lifecycleName)
+                    .put(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null)
+                    .build()));
+                assertThat(request.getTargetIndexRequest().settings()
+                    .getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), equalTo(step.getNumberOfShards()));
+                listener.onResponse(new ResizeResponse(true, true, sourceIndexMetaData.getIndex().getName()));
+                return null;
+            }
+
+        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> actionCompleted = new SetOnce<>();
+        step.performAction(sourceIndexMetaData, null, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                actionCompleted.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Unexpected method call", e);
+            }
+        });
+
+        assertEquals(true, actionCompleted.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
+    }
+
+    public void testPerformActionNotComplete() throws Exception {
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
+            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        ShrinkStep step = createRandomInstance();
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                @SuppressWarnings("unchecked")
+                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
+                listener.onResponse(new ResizeResponse(false, false, indexMetaData.getIndex().getName()));
+                return null;
+            }
+
+        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> actionCompleted = new SetOnce<>();
+        step.performAction(indexMetaData, null, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                actionCompleted.set(complete);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throw new AssertionError("Unexpected method call", e);
+            }
+        });
+
+        assertEquals(false, actionCompleted.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
+    }
+
+    public void testPerformActionFailure() throws Exception {
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setIndexCreationDate(randomNonNegativeLong());
+        IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
+            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        Exception exception = new RuntimeException();
+        ShrinkStep step = createRandomInstance();
+
+        AdminClient adminClient = Mockito.mock(AdminClient.class);
+        IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class);
+
+        Mockito.when(client.admin()).thenReturn(adminClient);
+        Mockito.when(adminClient.indices()).thenReturn(indicesClient);
+        Mockito.doAnswer(new Answer<Void>() {
+
+            @Override
+            public Void answer(InvocationOnMock invocation) throws Throwable {
+                @SuppressWarnings("unchecked")
+                ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
+                listener.onFailure(exception);
+                return null;
+            }
+
+        }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());
+
+        SetOnce<Boolean> exceptionThrown = new SetOnce<>();
+        step.performAction(indexMetaData, null, new Listener() {
+
+            @Override
+            public void onResponse(boolean complete) {
+                throw new AssertionError("Unexpected method call");
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                assertSame(exception, e);
+                exceptionThrown.set(true);
+            }
+        });
+
+        assertEquals(true, exceptionThrown.get());
+
+        Mockito.verify(client, Mockito.only()).admin();
+        Mockito.verify(adminClient, Mockito.only()).indices();
+        Mockito.verify(indicesClient, Mockito.only()).resizeIndex(Mockito.any(), Mockito.any());
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepInfoTests.java
new file mode 100644
index 0000000000000..1ff0e04531302
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepInfoTests.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+import org.elasticsearch.xpack.core.indexlifecycle.ShrunkShardsAllocatedStep.Info;
+
+import java.io.IOException;
+
+public class ShrunkShardsAllocatedStepInfoTests extends AbstractXContentTestCase<Info> {
+
+    @Override
+    protected Info createTestInstance() {
+        return new Info(randomBoolean(), randomIntBetween(0, 10000), randomBoolean());
+    }
+
+    @Override
+    protected Info doParseInstance(XContentParser parser) throws IOException {
+        return Info.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    public final void testEqualsAndHashcode() {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance);
+        }
+    }
+
+    protected final Info copyInstance(Info instance) throws IOException {
+        return new Info(instance.shrunkIndexExists(), instance.getActualShards(), instance.allShardsActive());
+    }
+
+    protected Info mutateInstance(Info instance) throws IOException {
+        boolean shrunkIndexExists = instance.shrunkIndexExists();
+        int actualShards = instance.getActualShards();
+        boolean allShardsActive = instance.allShardsActive();
+        switch (between(0, 2)) {
+        case 0:
+            shrunkIndexExists = shrunkIndexExists == false;
+            break;
+        case 1:
+            actualShards += between(1, 20);
+            break;
+        case 2:
+            allShardsActive = allShardsActive == false;
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new Info(shrunkIndexExists, actualShards, allShardsActive);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java
new file mode 100644
index 0000000000000..272b50499d7fd
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java
@@ -0,0 +1,171 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+public class ShrunkShardsAllocatedStepTests extends AbstractStepTestCase<ShrunkShardsAllocatedStep> {
+
+    @Override
+    public ShrunkShardsAllocatedStep createRandomInstance() {
+        StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        String shrunkIndexPrefix = randomAlphaOfLength(10);
+        return new ShrunkShardsAllocatedStep(stepKey, nextStepKey, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrunkShardsAllocatedStep mutateInstance(ShrunkShardsAllocatedStep instance) {
+        StepKey key = instance.getKey();
+        StepKey nextKey = instance.getNextStepKey();
+        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
+
+        switch (between(0, 2)) {
+        case 0:
+            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 1:
+            nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 2:
+            shrunkIndexPrefix += randomAlphaOfLength(5);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+
+        return new ShrunkShardsAllocatedStep(key, nextKey, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrunkShardsAllocatedStep copyInstance(ShrunkShardsAllocatedStep instance) {
+        return new ShrunkShardsAllocatedStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix());
+    }
+
+    public void testConditionMet() {
+        ShrunkShardsAllocatedStep step = createRandomInstance();
+        int shrinkNumberOfShards = randomIntBetween(1, 5);
+        int originalNumberOfShards = randomIntBetween(1, 5);
+        String originalIndexName = randomAlphaOfLength(5);
+        IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(originalNumberOfShards)
+            .numberOfReplicas(0).build();
+        IndexMetaData shrunkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + originalIndexName)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(shrinkNumberOfShards)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(originalIndexMetadata))
+            .put(IndexMetaData.builder(shrunkIndexMetadata))
+            .build();
+        Index shrinkIndex = shrunkIndexMetadata.getIndex();
+
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+
+        IndexRoutingTable.Builder builder = IndexRoutingTable.builder(shrinkIndex);
+        for (int i = 0; i < shrinkNumberOfShards; i++) {
+            builder.addShard(TestShardRouting.newShardRouting(new ShardId(shrinkIndex, i),
+                nodeId, true, ShardRoutingState.STARTED));
+        }
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .routingTable(RoutingTable.builder().add(builder.build()).build()).build();
+
+        Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState);
+        assertTrue(result.isComplete());
+        assertNull(result.getInfomationContext());
+    }
+
+    public void testConditionNotMetBecauseOfActive() {
+        ShrunkShardsAllocatedStep step = createRandomInstance();
+        int shrinkNumberOfShards = randomIntBetween(1, 5);
+        int originalNumberOfShards = randomIntBetween(1, 5);
+        String originalIndexName = randomAlphaOfLength(5);
+        IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(originalNumberOfShards)
+            .numberOfReplicas(0).build();
+        IndexMetaData shrunkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + originalIndexName)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(shrinkNumberOfShards)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(originalIndexMetadata))
+            .put(IndexMetaData.builder(shrunkIndexMetadata))
+            .build();
+        Index shrinkIndex = shrunkIndexMetadata.getIndex();
+
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+
+        IndexRoutingTable.Builder builder = IndexRoutingTable.builder(shrinkIndex);
+        for (int i = 0; i < shrinkNumberOfShards; i++) {
+            builder.addShard(TestShardRouting.newShardRouting(new ShardId(shrinkIndex, i),
+                nodeId, true, ShardRoutingState.INITIALIZING));
+        }
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .routingTable(RoutingTable.builder().add(builder.build()).build()).build();
+
+        Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState);
+        assertFalse(result.isComplete());
+        assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false),
+            result.getInfomationContext());
+    }
+
+    public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() {
+        ShrunkShardsAllocatedStep step = createRandomInstance();
+        int originalNumberOfShards = randomIntBetween(1, 5);
+        String originalIndexName = randomAlphaOfLength(5);
+        IndexMetaData originalIndexMetadata = IndexMetaData.builder(originalIndexName)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(originalNumberOfShards)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(originalIndexMetadata))
+            .build();
+
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .build();
+
+        Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState);
+        assertFalse(result.isComplete());
+        assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInfomationContext());
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java
new file mode 100644
index 0000000000000..b4a9021af214e
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepInfoTests.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+import org.elasticsearch.xpack.core.indexlifecycle.ShrunkenIndexCheckStep.Info;
+
+import java.io.IOException;
+
+public class ShrunkenIndexCheckStepInfoTests extends AbstractXContentTestCase<Info> {
+
+    @Override
+    protected Info createTestInstance() {
+        return new Info(randomAlphaOfLengthBetween(10, 20));
+    }
+
+    @Override
+    protected Info doParseInstance(XContentParser parser) throws IOException {
+        return Info.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    public final void testEqualsAndHashcode() {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copyInstance, this::mutateInstance);
+        }
+    }
+
+    protected final Info copyInstance(Info instance) throws IOException {
+        return new Info(instance.getOriginalIndexName());
+    }
+
+    protected Info mutateInstance(Info instance) throws IOException {
+        return new Info(randomValueOtherThan(instance.getOriginalIndexName(), () -> randomAlphaOfLengthBetween(10, 20)));
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java
new file mode 100644
index 0000000000000..64d2dd09f9659
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep.Result;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShrunkenIndexCheckStepTests extends AbstractStepTestCase<ShrunkenIndexCheckStep> {
+
+    @Override
+    public ShrunkenIndexCheckStep createRandomInstance() {
+        StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        String shrunkIndexPrefix = randomAlphaOfLength(10);
+        return new ShrunkenIndexCheckStep(stepKey, nextStepKey, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrunkenIndexCheckStep mutateInstance(ShrunkenIndexCheckStep instance) {
+        StepKey key = instance.getKey();
+        StepKey nextKey = instance.getNextStepKey();
+        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
+        switch (between(0, 2)) {
+        case 0:
+            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 1:
+            nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+            break;
+        case 2:
+            shrunkIndexPrefix += randomAlphaOfLength(5);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new ShrunkenIndexCheckStep(key, nextKey, shrunkIndexPrefix);
+    }
+
+    @Override
+    public ShrunkenIndexCheckStep copyInstance(ShrunkenIndexCheckStep instance) {
+        return new ShrunkenIndexCheckStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix());
+    }
+
+    public void testConditionMet() {
+        ShrunkenIndexCheckStep step = createRandomInstance();
+        String sourceIndex = randomAlphaOfLengthBetween(1, 10);
+        IndexMetaData indexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + sourceIndex)
+            .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, sourceIndex))
+            .numberOfShards(1)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(indexMetadata))
+            .build();
+
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+        Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState);
+        assertTrue(result.isComplete());
+        assertNull(result.getInfomationContext());
+    }
+
+    public void testConditionNotMetBecauseNotSameShrunkenIndex() {
+        ShrunkenIndexCheckStep step = createRandomInstance();
+        String sourceIndex = randomAlphaOfLengthBetween(1, 10);
+        IndexMetaData shrinkIndexMetadata = IndexMetaData.builder(sourceIndex + "hello")
+            .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, sourceIndex))
+            .numberOfShards(1)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(shrinkIndexMetadata))
+            .build();
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+        Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState);
+        assertFalse(result.isComplete());
+        assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext());
+    }
+
+    public void testConditionNotMetBecauseSourceIndexExists() {
+        ShrunkenIndexCheckStep step = createRandomInstance();
+        String sourceIndex = randomAlphaOfLengthBetween(1, 10);
+        IndexMetaData originalIndexMetadata = IndexMetaData.builder(sourceIndex)
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(100)
+            .numberOfReplicas(0).build();
+        IndexMetaData shrinkIndexMetadata = IndexMetaData.builder(step.getShrunkIndexPrefix() + sourceIndex)
+            .settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, sourceIndex))
+            .numberOfShards(1)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(originalIndexMetadata))
+            .put(IndexMetaData.builder(shrinkIndexMetadata))
+            .build();
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+        Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState);
+        assertFalse(result.isComplete());
+        assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext());
+    }
+
+    public void testIllegalState() {
+        ShrunkenIndexCheckStep step = createRandomInstance();
+        IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5))
+            .settings(settings(Version.CURRENT))
+            .numberOfShards(1)
+            .numberOfReplicas(0).build();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(indexMetadata))
+            .build();
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+        IllegalStateException exception = expectThrows(IllegalStateException.class,
+            () -> step.isConditionMet(indexMetadata.getIndex(), clusterState));
+        assertThat(exception.getMessage(),
+            equalTo("step[is-shrunken-index] is checking an un-shrunken index[" + indexMetadata.getIndex().getName() + "]"));
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java
new file mode 100644
index 0000000000000..4c61f3016a13e
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StartILMRequestTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.test.AbstractStreamableTestCase;
+
+public class StartILMRequestTests extends AbstractStreamableTestCase<StartILMRequest> {
+
+    @Override
+    protected StartILMRequest createBlankInstance() {
+        return new StartILMRequest();
+    }
+
+    @Override
+    protected StartILMRequest createTestInstance() {
+        return new StartILMRequest();
+    }
+
+    public void testValidate() {
+        StartILMRequest request = createTestInstance();
+        assertNull(request.validate());
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java
new file mode 100644
index 0000000000000..ae90a150b7c4c
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StepKeyTests.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+public class StepKeyTests extends AbstractSerializingTestCase<StepKey> {
+
+    @Override
+    public StepKey createTestInstance() {
+        return new StepKey(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10));
+    }
+
+    @Override
+    protected Writeable.Reader<StepKey> instanceReader() {
+        return StepKey::new;
+    }
+
+    @Override
+    protected StepKey doParseInstance(XContentParser parser) {
+        return StepKey.parse(parser);
+    }
+
+    @Override
+    public StepKey mutateInstance(StepKey instance) {
+        String phase = instance.getPhase();
+        String action = instance.getAction();
+        String step = instance.getName();
+
+        switch (between(0, 2)) {
+        case 0:
+            phase += randomAlphaOfLength(5);
+            break;
+        case 1:
+            action += randomAlphaOfLength(5);
+            break;
+        case 2:
+            step += randomAlphaOfLength(5);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+
+        return new StepKey(phase, action, step);
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java
new file mode 100644
index 0000000000000..be603ee33acc1
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/StopILMRequestTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.test.AbstractStreamableTestCase;
+
+public class StopILMRequestTests extends AbstractStreamableTestCase<StopILMRequest> {
+
+    @Override
+    protected StopILMRequest createBlankInstance() {
+        return new StopILMRequest();
+    }
+
+    @Override
+    protected StopILMRequest createTestInstance() {
+        return new StopILMRequest();
+    }
+
+    public void testValidate() {
+        StopILMRequest request = createTestInstance();
+        assertNull(request.validate());
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java
new file mode 100644
index 0000000000000..1db1523b5cd82
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TerminalPolicyStepTests.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+
+public class TerminalPolicyStepTests extends AbstractStepTestCase<TerminalPolicyStep> {
+
+    @Override
+    public TerminalPolicyStep createRandomInstance() {
+        StepKey stepKey = randomStepKey();
+        StepKey nextStepKey = randomStepKey();
+        return new TerminalPolicyStep(stepKey, nextStepKey);
+    }
+
+    @Override
+    public TerminalPolicyStep mutateInstance(TerminalPolicyStep instance) {
+        StepKey key = instance.getKey();
+        StepKey nextKey = instance.getNextStepKey();
+
+        if (randomBoolean()) {
+            key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+        } else {
+            nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
+        }
+
+        return new TerminalPolicyStep(key, nextKey);
+    }
+
+    @Override
+    public TerminalPolicyStep copyInstance(TerminalPolicyStep instance) {
+        return new TerminalPolicyStep(instance.getKey(), instance.getNextStepKey());
+    }
+    public void testInstance() {
+        assertEquals(new Step.StepKey("completed", "completed", "completed"), TerminalPolicyStep.INSTANCE.getKey());
+        assertNull(TerminalPolicyStep.INSTANCE.getNextStepKey());
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java
new file mode 100644
index 0000000000000..f68798e9331d1
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TestLifecycleType.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class TestLifecycleType implements LifecycleType {
+    public static final TestLifecycleType INSTANCE = new TestLifecycleType();
+
+    public static final String TYPE = "test";
+
+    private TestLifecycleType() {
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+    }
+
+    @Override
+    public String getWriteableName() {
+        return TYPE;
+    }
+
+    @Override
+    public void validate(Collection<Phase> phases) {
+        // always valid
+    }
+
+    @Override
+    public List<Phase> getOrderedPhases(Map<String, Phase> phases) {
+        return new ArrayList<>(phases.values());
+    }
+
+    @Override
+    public String getNextPhaseName(String currentPhaseName, Map<String, Phase> phases) {
+        List<String> orderedPhaseNames = getOrderedPhases(phases).stream().map(Phase::getName).collect(Collectors.toList());
+        int index = orderedPhaseNames.indexOf(currentPhaseName);
+        if (index < 0) {
+            throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]");
+        } else if (index == orderedPhaseNames.size() - 1) {
+            return null;
+        } else {
+            return orderedPhaseNames.get(index + 1);
+        }
+    }
+
+    @Override
+    public String getPreviousPhaseName(String currentPhaseName, Map<String, Phase> phases) {
+        List<String> orderedPhaseNames = getOrderedPhases(phases).stream().map(Phase::getName).collect(Collectors.toList());
+        int index = orderedPhaseNames.indexOf(currentPhaseName);
+        if (index < 0) {
+            throw new IllegalArgumentException("[" + currentPhaseName + "] is not a valid phase for lifecycle type [" + TYPE + "]");
+        } else if (index == 0) {
+            return null;
+        } else {
+            return orderedPhaseNames.get(index - 1);
+        }
+    }
+
+    @Override
+    public List<LifecycleAction> getOrderedActions(Phase phase) {
+        return new ArrayList<>(phase.getActions().values());
+    }
+
+    @Override
+    public String getNextActionName(String currentActionName, Phase phase) {
+        List<String> orderedActionNames = getOrderedActions(phase).stream().map(LifecycleAction::getWriteableName)
+            .collect(Collectors.toList());
+        int index = orderedActionNames.indexOf(currentActionName);
+        if (index < 0) {
+            throw new IllegalArgumentException("[" + currentActionName + "] is not a valid action for phase [" + phase.getName()
+                + "] in lifecycle type [" + TYPE + "]");
+        } else if (index == orderedActionNames.size() - 1) {
+            return null;
+        } else {
+            return orderedActionNames.get(index + 1);
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java
new file mode 100644
index 0000000000000..2f0c2f8d18b33
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java
@@ -0,0 +1,483 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_COLD_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_DELETE_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_HOT_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.ORDERED_VALID_WARM_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_COLD_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_DELETE_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_HOT_ACTIONS; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_PHASES; +import static org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType.VALID_WARM_ACTIONS; +import static org.hamcrest.Matchers.equalTo; + +public class TimeseriesLifecycleTypeTests extends ESTestCase { + + private static final AllocateAction TEST_ALLOCATE_ACTION = + new AllocateAction(2, Collections.singletonMap("node", "node1"),null, null); + private static final DeleteAction TEST_DELETE_ACTION = new DeleteAction(); + private static final ForceMergeAction TEST_FORCE_MERGE_ACTION = new ForceMergeAction(1); + private static final RolloverAction TEST_ROLLOVER_ACTION = new RolloverAction(new ByteSizeValue(1), null, null); + private static final ShrinkAction TEST_SHRINK_ACTION = new ShrinkAction(1); + private static final ReadOnlyAction TEST_READ_ONLY_ACTION = new ReadOnlyAction(); + + public void testValidatePhases() { + boolean invalid = randomBoolean(); + String phaseName = randomFrom("hot", "warm", "cold", "delete"); + if (invalid) { + phaseName += randomAlphaOfLength(5); + } + Map phases = Collections.singletonMap(phaseName, + new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + if (invalid) { + Exception e = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE.validate(phases.values())); + assertThat(e.getMessage(), equalTo("Timeseries lifecycle does not support phase [" + phaseName + "]")); + } else { + TimeseriesLifecycleType.INSTANCE.validate(phases.values()); + } + } + + public void testValidateHotPhase() { + LifecycleAction invalidAction = null; + Map actions = VALID_HOT_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", "forcemerge", "delete", "shrink")); + actions.put(invalidAction.getWriteableName(), invalidAction); + } + Map hotPhase = Collections.singletonMap("hot", + new Phase("hot", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values())); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in 
phase [hot]")); + } else { + TimeseriesLifecycleType.INSTANCE.validate(hotPhase.values()); + } + } + + public void testValidateWarmPhase() { + LifecycleAction invalidAction = null; + Map actions = randomSubsetOf(VALID_WARM_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete")); + actions.put(invalidAction.getWriteableName(), invalidAction); + } + Map warmPhase = Collections.singletonMap("warm", + new Phase("warm", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values())); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [warm]")); + } else { + TimeseriesLifecycleType.INSTANCE.validate(warmPhase.values()); + } + } + + public void testValidateColdPhase() { + LifecycleAction invalidAction = null; + Map actions = randomSubsetOf(VALID_COLD_ACTIONS) + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink")); + actions.put(invalidAction.getWriteableName(), invalidAction); + } + Map coldPhase = Collections.singletonMap("cold", + new Phase("cold", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values())); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [cold]")); + } else { + TimeseriesLifecycleType.INSTANCE.validate(coldPhase.values()); + } + } + + public void testValidateDeletePhase() { + LifecycleAction invalidAction = null; + Map actions = VALID_DELETE_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + if (randomBoolean()) { + invalidAction = getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink")); + actions.put(invalidAction.getWriteableName(), invalidAction); + } + Map deletePhase = Collections.singletonMap("delete", + new Phase("delete", TimeValue.ZERO, actions)); + + if (invalidAction != null) { + Exception e = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.validate(deletePhase.values())); + assertThat(e.getMessage(), + equalTo("invalid action [" + invalidAction.getWriteableName() + "] defined in phase [delete]")); + } else { + TimeseriesLifecycleType.INSTANCE.validate(deletePhase.values()); + } + } + + public void testGetOrderedPhases() { + Map phaseMap = new HashMap<>(); + for (String phaseName : randomSubsetOf(randomIntBetween(0, VALID_PHASES.size()), VALID_PHASES)) { + phaseMap.put(phaseName, new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap())); + } + + + assertTrue(isSorted(TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap), Phase::getName, VALID_PHASES)); + } + + + public void testGetOrderedActionsInvalidPhase() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE + .getOrderedActions(new Phase("invalid", TimeValue.ZERO, Collections.emptyMap()))); + assertThat(exception.getMessage(), equalTo("lifecycle type[timeseries] does not support 
phase[invalid]")); + } + + public void testGetOrderedActionsHot() { + Map actions = VALID_HOT_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + Phase hotPhase = new Phase("hot", TimeValue.ZERO, actions); + List orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(hotPhase); + assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_HOT_ACTIONS)); + } + + public void testGetOrderedActionsWarm() { + Map actions = VALID_WARM_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions); + List orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(warmPhase); + assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_WARM_ACTIONS)); + } + + public void testGetOrderedActionsCold() { + Map actions = VALID_COLD_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + Phase coldPhase = new Phase("cold", TimeValue.ZERO, actions); + List orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(coldPhase); + assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_COLD_ACTIONS)); + } + + public void testGetOrderedActionsDelete() { + Map actions = VALID_DELETE_ACTIONS + .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getWriteableName, Function.identity())); + Phase deletePhase = new Phase("delete", TimeValue.ZERO, actions); + List orderedActions = TimeseriesLifecycleType.INSTANCE.getOrderedActions(deletePhase); + assertTrue(isSorted(orderedActions, LifecycleAction::getWriteableName, ORDERED_VALID_DELETE_ACTIONS)); + } + + public void testGetNextPhaseName() { + assertNextPhaseName("hot", "warm", new String[] { "hot", "warm" }); + assertNextPhaseName("hot", "warm", new String[] { "hot", "warm", "cold" }); + assertNextPhaseName("hot", "warm", new String[] { "hot", "warm", "cold", "delete" }); + assertNextPhaseName("hot", "warm", new String[] { "warm", "cold", "delete" }); + assertNextPhaseName("hot", "warm", new String[] { "warm", "cold", "delete" }); + assertNextPhaseName("hot", "warm", new String[] { "warm", "delete" }); + assertNextPhaseName("hot", "cold", new String[] { "cold", "delete" }); + assertNextPhaseName("hot", "cold", new String[] { "cold" }); + assertNextPhaseName("hot", "delete", new String[] { "hot", "delete" }); + assertNextPhaseName("hot", "delete", new String[] { "delete" }); + assertNextPhaseName("hot", null, new String[] { "hot" }); + assertNextPhaseName("hot", null, new String[] {}); + + assertNextPhaseName("warm", "cold", new String[] { "hot", "warm", "cold", "delete" }); + assertNextPhaseName("warm", "cold", new String[] { "warm", "cold", "delete" }); + assertNextPhaseName("warm", "cold", new String[] { "cold", "delete" }); + assertNextPhaseName("warm", "cold", new String[] { "cold" }); + assertNextPhaseName("warm", "delete", new String[] { "hot", "warm", "delete" }); + assertNextPhaseName("warm", null, new String[] { "hot", "warm" }); + assertNextPhaseName("warm", null, new String[] { "warm" }); + assertNextPhaseName("warm", null, new String[] { "hot" }); + assertNextPhaseName("warm", null, new String[] {}); + + assertNextPhaseName("cold", "delete", new String[] { "hot", "warm", "cold", "delete" }); + assertNextPhaseName("cold", "delete", new String[] { 
"warm", "cold", "delete" }); + assertNextPhaseName("cold", "delete", new String[] { "cold", "delete" }); + assertNextPhaseName("cold", "delete", new String[] { "delete" }); + assertNextPhaseName("cold", "delete", new String[] { "hot", "warm", "delete" }); + assertNextPhaseName("cold", null, new String[] { "hot", "warm", "cold" }); + assertNextPhaseName("cold", null, new String[] { "hot", "warm" }); + assertNextPhaseName("cold", null, new String[] { "cold" }); + assertNextPhaseName("cold", null, new String[] { "hot" }); + assertNextPhaseName("cold", null, new String[] {}); + + assertNextPhaseName("delete", null, new String[] { "hot", "warm", "cold" }); + assertNextPhaseName("delete", null, new String[] { "hot", "warm" }); + assertNextPhaseName("delete", null, new String[] { "cold" }); + assertNextPhaseName("delete", null, new String[] { "hot" }); + assertNextPhaseName("delete", null, new String[] {}); + assertNextPhaseName("delete", null, new String[] { "hot", "warm", "cold", "delete" }); + assertNextPhaseName("delete", null, new String[] { "hot", "warm", "delete" }); + assertNextPhaseName("delete", null, new String[] { "cold", "delete" }); + assertNextPhaseName("delete", null, new String[] { "delete" }); + assertNextPhaseName("delete", null, new String[] {}); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.getNextPhaseName("foo", Collections.emptyMap())); + assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage()); + exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE + .getNextPhaseName("foo", Collections.singletonMap("foo", new Phase("foo", TimeValue.ZERO, Collections.emptyMap())))); + assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage()); + } + + public void testGetPreviousPhaseName() { + assertPreviousPhaseName("hot", null, new String[] { "hot", "warm" }); + assertPreviousPhaseName("hot", null, new String[] { "hot", "warm", "cold" }); + assertPreviousPhaseName("hot", null, new String[] { "hot", "warm", "cold", "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "warm", "cold", "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "warm", "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "cold", "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "cold" }); + assertPreviousPhaseName("hot", null, new String[] { "hot", "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "delete" }); + assertPreviousPhaseName("hot", null, new String[] { "hot" }); + assertPreviousPhaseName("hot", null, new String[] {}); + + assertPreviousPhaseName("warm", "hot", new String[] { "hot", "warm", "cold", "delete" }); + assertPreviousPhaseName("warm", null, new String[] { "warm", "cold", "delete" }); + assertPreviousPhaseName("warm", "hot", new String[] { "hot", "cold", "delete" }); + assertPreviousPhaseName("warm", null, new String[] { "cold", "delete" }); + assertPreviousPhaseName("warm", "hot", new String[] { "hot", "delete" }); + assertPreviousPhaseName("warm", null, new String[] { "delete" }); + assertPreviousPhaseName("warm", "hot", new String[] { "hot" }); + assertPreviousPhaseName("warm", null, new String[] {}); + + assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm", "cold", "delete" }); + assertPreviousPhaseName("cold", "hot", new String[] { "hot", "cold", 
"delete" }); + assertPreviousPhaseName("cold", "warm", new String[] { "warm", "cold", "delete" }); + assertPreviousPhaseName("cold", null, new String[] { "cold", "delete" }); + assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm", "delete" }); + assertPreviousPhaseName("cold", "hot", new String[] { "hot", "delete" }); + assertPreviousPhaseName("cold", "warm", new String[] { "warm", "delete" }); + assertPreviousPhaseName("cold", null, new String[] { "delete" }); + assertPreviousPhaseName("cold", "warm", new String[] { "hot", "warm" }); + assertPreviousPhaseName("cold", "hot", new String[] { "hot" }); + assertPreviousPhaseName("cold", "warm", new String[] { "warm" }); + assertPreviousPhaseName("cold", null, new String[] {}); + + assertPreviousPhaseName("delete", "cold", new String[] { "hot", "warm", "cold", "delete" }); + assertPreviousPhaseName("delete", "cold", new String[] { "warm", "cold", "delete" }); + assertPreviousPhaseName("delete", "warm", new String[] { "hot", "warm", "delete" }); + assertPreviousPhaseName("delete", "hot", new String[] { "hot", "delete" }); + assertPreviousPhaseName("delete", "cold", new String[] { "cold", "delete" }); + assertPreviousPhaseName("delete", null, new String[] { "delete" }); + assertPreviousPhaseName("delete", "cold", new String[] { "hot", "warm", "cold" }); + assertPreviousPhaseName("delete", "cold", new String[] { "warm", "cold" }); + assertPreviousPhaseName("delete", "warm", new String[] { "hot", "warm" }); + assertPreviousPhaseName("delete", "hot", new String[] { "hot" }); + assertPreviousPhaseName("delete", "cold", new String[] { "cold" }); + assertPreviousPhaseName("delete", null, new String[] {}); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.getPreviousPhaseName("foo", Collections.emptyMap())); + assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage()); + exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE + .getPreviousPhaseName("foo", Collections.singletonMap("foo", new Phase("foo", TimeValue.ZERO, Collections.emptyMap())))); + assertEquals("[foo] is not a valid phase for lifecycle type [" + TimeseriesLifecycleType.TYPE + "]", exception.getMessage()); + } + + public void testGetNextActionName() { + // Hot Phase + assertNextActionName("hot", RolloverAction.NAME, null, new String[] {}); + assertNextActionName("hot", RolloverAction.NAME, null, new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", "foo", new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", AllocateAction.NAME, new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", DeleteAction.NAME, new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", ForceMergeAction.NAME, new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", ReadOnlyAction.NAME, new String[] { RolloverAction.NAME }); + assertInvalidAction("hot", ShrinkAction.NAME, new String[] { RolloverAction.NAME }); + + // Warm Phase + assertNextActionName("warm", ReadOnlyAction.NAME, AllocateAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, ShrinkAction.NAME, + new String[] { ReadOnlyAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, ForceMergeAction.NAME, + new String[] { 
ReadOnlyAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, null, new String[] { ReadOnlyAction.NAME }); + + assertNextActionName("warm", ReadOnlyAction.NAME, AllocateAction.NAME, + new String[] { AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, ShrinkAction.NAME, new String[] { ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME }); + assertNextActionName("warm", ReadOnlyAction.NAME, null, new String[] {}); + + assertNextActionName("warm", AllocateAction.NAME, ShrinkAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", AllocateAction.NAME, ForceMergeAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", AllocateAction.NAME, null, new String[] { ReadOnlyAction.NAME, AllocateAction.NAME }); + + assertNextActionName("warm", AllocateAction.NAME, ShrinkAction.NAME, new String[] { ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", AllocateAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME }); + assertNextActionName("warm", AllocateAction.NAME, null, new String[] {}); + + assertNextActionName("warm", ShrinkAction.NAME, ForceMergeAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertNextActionName("warm", ShrinkAction.NAME, null, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME }); + + assertNextActionName("warm", ShrinkAction.NAME, ForceMergeAction.NAME, new String[] { ForceMergeAction.NAME }); + assertNextActionName("warm", ShrinkAction.NAME, null, new String[] {}); + + assertNextActionName("warm", ForceMergeAction.NAME, null, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + + assertNextActionName("warm", ForceMergeAction.NAME, null, new String[] {}); + + assertInvalidAction("warm", "foo", new String[] { RolloverAction.NAME }); + assertInvalidAction("warm", DeleteAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + assertInvalidAction("warm", RolloverAction.NAME, + new String[] { ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME }); + + // Cold Phase + assertNextActionName("cold", AllocateAction.NAME, null, new String[] { AllocateAction.NAME }); + + assertNextActionName("cold", AllocateAction.NAME, null, new String[] {}); + + assertNextActionName("cold", AllocateAction.NAME, null, new String[] {}); + + assertInvalidAction("cold", "foo", new String[] { AllocateAction.NAME }); + assertInvalidAction("cold", DeleteAction.NAME, new String[] { AllocateAction.NAME }); + assertInvalidAction("cold", ForceMergeAction.NAME, new String[] { AllocateAction.NAME }); + assertInvalidAction("cold", ReadOnlyAction.NAME, new String[] { AllocateAction.NAME }); + assertInvalidAction("cold", RolloverAction.NAME, new String[] { AllocateAction.NAME }); + assertInvalidAction("cold", ShrinkAction.NAME, new String[] { AllocateAction.NAME }); + + // Delete Phase + assertNextActionName("delete", DeleteAction.NAME, null, new String[] {}); + assertNextActionName("delete", DeleteAction.NAME, null, new String[] { DeleteAction.NAME }); + 
assertInvalidAction("delete", "foo", new String[] { DeleteAction.NAME }); + assertInvalidAction("delete", AllocateAction.NAME, new String[] { DeleteAction.NAME }); + assertInvalidAction("delete", ForceMergeAction.NAME, new String[] { DeleteAction.NAME }); + assertInvalidAction("delete", ReadOnlyAction.NAME, new String[] { DeleteAction.NAME }); + assertInvalidAction("delete", RolloverAction.NAME, new String[] { DeleteAction.NAME }); + assertInvalidAction("delete", ShrinkAction.NAME, new String[] { DeleteAction.NAME }); + + Phase phase = new Phase("foo", TimeValue.ZERO, Collections.emptyMap()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.getNextActionName(ShrinkAction.NAME, phase)); + assertEquals("lifecycle type[" + TimeseriesLifecycleType.TYPE + "] does not support phase[" + phase.getName() + "]", + exception.getMessage()); + } + + private void assertNextActionName(String phaseName, String currentAction, String expectedNextAction, String... availableActionNames) { + Map availableActions = convertActionNamesToActions(availableActionNames); + Phase phase = new Phase(phaseName, TimeValue.ZERO, availableActions); + String nextAction = TimeseriesLifecycleType.INSTANCE.getNextActionName(currentAction, phase); + assertEquals(expectedNextAction, nextAction); + } + + private void assertInvalidAction(String phaseName, String currentAction, String... availableActionNames) { + Map availableActions = convertActionNamesToActions(availableActionNames); + Phase phase = new Phase(phaseName, TimeValue.ZERO, availableActions); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> TimeseriesLifecycleType.INSTANCE.getNextActionName(currentAction, phase)); + assertEquals("[" + currentAction + "] is not a valid action for phase [" + phaseName + "] in lifecycle type [" + + TimeseriesLifecycleType.TYPE + "]", exception.getMessage()); + } + + private ConcurrentMap convertActionNamesToActions(String... availableActionNames) { + return Arrays.asList(availableActionNames).stream().map(n -> { + switch (n) { + case AllocateAction.NAME: + return new AllocateAction(null, Collections.singletonMap("foo", "bar"), Collections.emptyMap(), Collections.emptyMap()); + case DeleteAction.NAME: + return new DeleteAction(); + case ForceMergeAction.NAME: + return new ForceMergeAction(1); + case ReadOnlyAction.NAME: + return new ReadOnlyAction(); + case RolloverAction.NAME: + return new RolloverAction(ByteSizeValue.parseBytesSizeValue("0b", "test"), TimeValue.ZERO, 1L); + case ShrinkAction.NAME: + return new ShrinkAction(1); + } + return new DeleteAction(); + }).collect(Collectors.toConcurrentMap(LifecycleAction::getWriteableName, Function.identity())); + } + + private void assertNextPhaseName(String currentPhase, String expectedNextPhase, String... availablePhaseNames) { + Map availablePhases = Arrays.asList(availablePhaseNames).stream() + .map(n -> new Phase(n, TimeValue.ZERO, Collections.emptyMap())) + .collect(Collectors.toMap(Phase::getName, Function.identity())); + String nextPhase = TimeseriesLifecycleType.INSTANCE.getNextPhaseName(currentPhase, availablePhases); + assertEquals(expectedNextPhase, nextPhase); + } + + private void assertPreviousPhaseName(String currentPhase, String expectedNextPhase, String... 
+ Map<String, Phase> availablePhases = Arrays.asList(availablePhaseNames).stream() + .map(n -> new Phase(n, TimeValue.ZERO, Collections.emptyMap())) + .collect(Collectors.toMap(Phase::getName, Function.identity())); + String nextPhase = TimeseriesLifecycleType.INSTANCE.getPreviousPhaseName(currentPhase, availablePhases); + assertEquals(expectedNextPhase, nextPhase); + } + + /** + * checks whether an ordered list of objects (usually Phase and LifecycleAction) are found in the same + * order as the ordered VALID_PHASES/VALID_HOT_ACTIONS/... lists + * @param orderedObjects the ordered objects to verify sort order of + * @param getKey the way to retrieve the key to sort against (Phase#getName, LifecycleAction#getName) + * @param validOrderedKeys the source of truth of the sort order + * @param <T> the type of object + */ + private <T> boolean isSorted(List<T> orderedObjects, Function<T, String> getKey, List<String> validOrderedKeys) { + int validIndex = 0; + for (T obj : orderedObjects) { + String key = getKey.apply(obj); + int i = validIndex; + for (; i < validOrderedKeys.size(); i++) { + if (validOrderedKeys.get(i).equals(key)) { + validIndex = i; + break; + } + } + if (i == validOrderedKeys.size()) { + return false; + } + } + return true; + } + + private LifecycleAction getTestAction(String actionName) { + switch (actionName) { + case AllocateAction.NAME: + return TEST_ALLOCATE_ACTION; + case DeleteAction.NAME: + return TEST_DELETE_ACTION; + case ForceMergeAction.NAME: + return TEST_FORCE_MERGE_ACTION; + case ReadOnlyAction.NAME: + return TEST_READ_ONLY_ACTION; + case RolloverAction.NAME: + return TEST_ROLLOVER_ACTION; + case ShrinkAction.NAME: + return TEST_SHRINK_ACTION; + default: + throw new IllegalArgumentException("unsupported timeseries phase action [" + actionName + "]"); + } + } +}
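[Editor's note: the `isSorted` helper above only checks relative order; the keys must appear as a subsequence of the canonical `VALID_*` list, so a partial set of phases or actions still passes. The following is a minimal, self-contained sketch of the same subsequence check in plain Java; the names here are hypothetical and are not part of the change set.]

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class OrderCheckSketch {
    // Returns true if the keys of the items appear as a subsequence of the
    // canonical ordering, i.e. the items are sorted relative to it.
    static <T> boolean isOrderedSubsequence(List<T> items, Function<T, String> key, List<String> canonical) {
        int next = 0;
        for (T item : items) {
            int i = canonical.indexOf(key.apply(item));
            if (i < next) { // unknown key (indexOf returns -1) or out of order
                return false;
            }
            next = i;
        }
        return true;
    }

    public static void main(String[] args) {
        List<String> canonical = Arrays.asList("hot", "warm", "cold", "delete");
        // A subset in canonical order passes; a reordered pair fails.
        System.out.println(isOrderedSubsequence(Arrays.asList("hot", "cold"), Function.identity(), canonical));  // true
        System.out.println(isOrderedSubsequence(Arrays.asList("cold", "warm"), Function.identity(), canonical)); // false
    }
}
---------------------------------------------------------------------------

The `indexOf` call keeps the sketch short; the test helper's forward scan instead avoids rescanning the already-matched prefix, which only matters for long canonical lists.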
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java new file mode 100644 index 0000000000000..6e492e24f9b33 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateRolloverLifecycleDateStepTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class UpdateRolloverLifecycleDateStepTests extends AbstractStepTestCase<UpdateRolloverLifecycleDateStep> { + + @Override + public UpdateRolloverLifecycleDateStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new UpdateRolloverLifecycleDateStep(stepKey, nextStepKey); + } + + @Override + public UpdateRolloverLifecycleDateStep mutateInstance(UpdateRolloverLifecycleDateStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + + if (randomBoolean()) { + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } else { + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + } + + return new UpdateRolloverLifecycleDateStep(key, nextKey); + } + + @Override + public UpdateRolloverLifecycleDateStep copyInstance(UpdateRolloverLifecycleDateStep instance) { + return new UpdateRolloverLifecycleDateStep(instance.getKey(), instance.getNextStepKey()); + } + + @SuppressWarnings("unchecked") + public void testPerformAction() { + String alias = randomAlphaOfLength(3); + long creationDate = randomLongBetween(0, 1000000); + long rolloverTime = randomValueOtherThan(creationDate, () -> randomNonNegativeLong()); + IndexMetaData newIndexMetaData = IndexMetaData.builder(randomAlphaOfLength(11)) + .settings(settings(Version.CURRENT)).creationDate(creationDate) + .putAlias(AliasMetaData.builder(alias)).numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .putRolloverInfo(new RolloverInfo(alias, Collections.emptyList(), rolloverTime)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder() + .put(indexMetaData, false) + .put(newIndexMetaData, false)).build(); + + UpdateRolloverLifecycleDateStep step = createRandomInstance(); + ClusterState newState = step.performAction(indexMetaData.getIndex(), clusterState); + long actualRolloverTime = LifecycleExecutionState + .fromIndexMetadata(newState.metaData().index(indexMetaData.getIndex())) + .getLifecycleDate(); + assertThat(actualRolloverTime, equalTo(rolloverTime)); + } + + public void testPerformActionBeforeRolloverHappened() { + String alias = randomAlphaOfLength(3); + long creationDate = randomLongBetween(0, 1000000); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(11)) + .settings(settings(Version.CURRENT).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)) + .creationDate(creationDate).putAlias(AliasMetaData.builder(alias)).numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)).build(); + ClusterState clusterState =
ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData, false)).build(); + UpdateRolloverLifecycleDateStep step = createRandomInstance(); + + IllegalStateException exceptionThrown = expectThrows(IllegalStateException.class, + () -> step.performAction(indexMetaData.getIndex(), clusterState)); + assertThat(exceptionThrown.getMessage(), + equalTo("no rollover info found for [" + indexMetaData.getIndex().getName() + "], either the index " + + "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management")); + } + + public void testPerformActionWithNoRolloverAliasSetting() { + long creationDate = randomLongBetween(0, 1000000); + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(11)) + .settings(settings(Version.CURRENT)).creationDate(creationDate).numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData, false)).build(); + UpdateRolloverLifecycleDateStep step = createRandomInstance(); + + IllegalStateException exceptionThrown = expectThrows(IllegalStateException.class, + () -> step.performAction(indexMetaData.getIndex(), clusterState)); + assertThat(exceptionThrown.getMessage(), + equalTo("setting [index.lifecycle.rollover_alias] is not set on index [" + indexMetaData.getIndex().getName() +"]")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java new file mode 100644 index 0000000000000..22908146af21f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UpdateSettingsStepTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexlifecycle; + + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.hamcrest.Matchers.equalTo; + +public class UpdateSettingsStepTests extends AbstractStepTestCase<UpdateSettingsStep> { + + private Client client; + + @Before + public void setup() { + client = Mockito.mock(Client.class); + } + + @Override + public UpdateSettingsStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + Settings settings = Settings.builder().put(randomAlphaOfLength(10), randomAlphaOfLength(10)).build(); + + return new UpdateSettingsStep(stepKey, nextStepKey, client, settings); + } + + @Override + public UpdateSettingsStep mutateInstance(UpdateSettingsStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + Settings settings = instance.getSettings(); + + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + settings = Settings.builder().put(settings).put(randomAlphaOfLength(10), randomInt()).build(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new UpdateSettingsStep(key, nextKey, client, settings); + } + + @Override + public UpdateSettingsStep copyInstance(UpdateSettingsStep instance) { + return new UpdateSettingsStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getSettings()); + } + + public void testPerformAction() throws Exception { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + UpdateSettingsStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Mockito.doAnswer(new Answer<Void>() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1]; + assertThat(request.settings(), equalTo(step.getSettings())); + assertThat(request.indices(), equalTo(new String[] {indexMetaData.getIndex().getName()})); + listener.onResponse(new AcknowledgedResponse(true)); + return null; + } + +
}).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce<Boolean> actionCompleted = new SetOnce<>(); + + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) { + actionCompleted.set(complete); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError("Unexpected method call", e); + } + }); + + assertEquals(true, actionCompleted.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } + + public void testPerformActionFailure() { + IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + Exception exception = new RuntimeException(); + UpdateSettingsStep step = createRandomInstance(); + + AdminClient adminClient = Mockito.mock(AdminClient.class); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + Mockito.doAnswer(new Answer<Void>() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + UpdateSettingsRequest request = (UpdateSettingsRequest) invocation.getArguments()[0]; + @SuppressWarnings("unchecked") + ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1]; + assertThat(request.settings(), equalTo(step.getSettings())); + assertThat(request.indices(), equalTo(new String[] {indexMetaData.getIndex().getName()})); + listener.onFailure(exception); + return null; + } + + }).when(indicesClient).updateSettings(Mockito.any(), Mockito.any()); + + SetOnce<Boolean> exceptionThrown = new SetOnce<>(); + step.performAction(indexMetaData, null, new Listener() { + + @Override + public void onResponse(boolean complete) { + throw new AssertionError("Unexpected method call"); + } + + @Override + public void onFailure(Exception e) { + assertSame(exception, e); + exceptionThrown.set(true); + } + }); + + assertEquals(true, exceptionThrown.get()); + + Mockito.verify(client, Mockito.only()).admin(); + Mockito.verify(adminClient, Mockito.only()).indices(); + Mockito.verify(indicesClient, Mockito.only()).updateSettings(Mockito.any(), Mockito.any()); + } +}
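[Editor's note: the step tests above use the standard Mockito pattern for asynchronous clients; stub the call with `doAnswer`, assert on the captured request, then complete the `ActionListener` by hand so the step's own listener fires synchronously. Below is a stripped-down sketch of that pattern assuming only `mockito-core` on the classpath, with a hypothetical `EchoService` callback API standing in for the real client.]

---------------------------------------------------------------------------
import static org.mockito.Mockito.*;

import java.util.function.Consumer;

public class DoAnswerSketch {
    // Hypothetical async API: the result is delivered to a callback, not returned.
    interface EchoService {
        void echo(String message, Consumer<String> callback);
    }

    public static void main(String[] args) {
        EchoService service = mock(EchoService.class);

        // Intercept the call, inspect the request, and drive the callback ourselves.
        doAnswer(invocation -> {
            String message = invocation.getArgument(0);
            Consumer<String> callback = invocation.getArgument(1);
            callback.accept("echo: " + message);
            return null;
        }).when(service).echo(anyString(), any());

        service.echo("hello", reply -> System.out.println(reply)); // prints "echo: hello"
        verify(service, only()).echo(anyString(), any());
    }
}
---------------------------------------------------------------------------

Completing the callback from inside the stub is what lets the tests assert on `SetOnce`-style flags immediately after `performAction` returns, with no threads or waits involved.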
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java new file mode 100644 index 0000000000000..c864bd76eeac5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleRequestTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Request; + +public class DeleteLifecycleRequestTests extends AbstractStreamableTestCase<Request> { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request mutateInstance(Request request) { + return new Request(request.getPolicyName() + randomAlphaOfLengthBetween(1, 10)); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java new file mode 100644 index 0000000000000..b671f72cf99c4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/DeleteLifecycleResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Response; + +public class DeleteLifecycleResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +}
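[Editor's note: the request/response tests in this change all follow the `AbstractStreamableTestCase` contract: `createTestInstance` builds a random value, the framework round-trips it through the wire format and checks equality, and `mutateInstance` must return a value that is not equal, so that equality failures are detectable. A toy round-trip in plain Java is sketched below; the `Request` type is hypothetical and `DataOutputStream` stands in for the real `StreamOutput`. Run with `java -ea` so the asserts fire.]

---------------------------------------------------------------------------
import java.io.*;
import java.util.Objects;

public class RoundTripSketch {
    // Minimal stand-in for a streamable request with one field.
    static final class Request {
        final String policyName;
        Request(String policyName) { this.policyName = policyName; }
        void writeTo(DataOutputStream out) throws IOException { out.writeUTF(policyName); }
        static Request readFrom(DataInputStream in) throws IOException { return new Request(in.readUTF()); }
        @Override public boolean equals(Object o) {
            return o instanceof Request && Objects.equals(policyName, ((Request) o).policyName);
        }
        @Override public int hashCode() { return Objects.hash(policyName); }
    }

    public static void main(String[] args) throws IOException {
        Request original = new Request("my-policy");

        // Serialize, then deserialize from the same bytes.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));
        Request copy = Request.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // The round-tripped copy must be equal but not the same reference,
        // and a mutated instance must not be equal.
        assert copy.equals(original) && copy != original;
        assert !new Request("my-policy-x").equals(original);
        System.out.println("round trip ok");
    }
}
---------------------------------------------------------------------------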
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java new file mode 100644 index 0000000000000..49caa0b48894e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Request; + +import java.util.Arrays; + +public class GetLifecycleRequestTests extends AbstractStreamableTestCase<Request> { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request mutateInstance(Request request) { + String[] originalPolicies = request.getPolicyNames(); + String[] newPolicies = Arrays.copyOf(originalPolicies, originalPolicies.length + 1); + newPolicies[originalPolicies.length] = randomAlphaOfLength(5); + return new Request(newPolicies); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java new file mode 100644 index 0000000000000..08688407b3db6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/GetLifecycleResponseTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.TestLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.LifecyclePolicyResponseItem; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Response; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests.randomTestLifecyclePolicy; + +public class GetLifecycleResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + String randomPrefix = randomAlphaOfLength(5); + List<LifecyclePolicyResponseItem> responseItems = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(0, 2); i++) { + responseItems.add(new LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomPrefix + i), + randomNonNegativeLong(), randomAlphaOfLength(8))); + } + return new Response(responseItems); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new), + new NamedWriteableRegistry.Entry(LifecycleType.class, TestLifecycleType.TYPE, in -> TestLifecycleType.INSTANCE))); + } + + @Override + protected Response mutateInstance(Response response) { + List<LifecyclePolicyResponseItem> responseItems = new ArrayList<>(response.getPolicies()); + if (responseItems.size() > 0) { + if (randomBoolean()) { + responseItems.add(new
LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomAlphaOfLength(5)), + randomNonNegativeLong(), randomAlphaOfLength(4))); + } else { + responseItems.remove(0); + } + } else { + responseItems.add(new LifecyclePolicyResponseItem(randomTestLifecyclePolicy(randomAlphaOfLength(2)), + randomNonNegativeLong(), randomAlphaOfLength(4))); + } + return new Response(responseItems); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java new file mode 100644 index 0000000000000..84b966b402323 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepRequestTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.StepKeyTests; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request; +import org.junit.Before; + +public class MoveToStepRequestTests extends AbstractStreamableXContentTestCase<Request> { + + private String index; + private static final StepKeyTests stepKeyTests = new StepKeyTests(); + + @Before + public void setup() { + index = randomAlphaOfLength(5); + } + + @Override + protected Request createTestInstance() { + return new Request(index, stepKeyTests.createTestInstance(), stepKeyTests.createTestInstance()); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(index, parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request mutateInstance(Request request) { + String index = request.getIndex(); + StepKey currentStepKey = request.getCurrentStepKey(); + StepKey nextStepKey = request.getNextStepKey(); + + switch (between(0, 2)) { + case 0: + index += randomAlphaOfLength(5); + break; + case 1: + currentStepKey = stepKeyTests.mutateInstance(currentStepKey); + break; + case 2: + nextStepKey = stepKeyTests.mutateInstance(nextStepKey); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new Request(index, currentStepKey, nextStepKey); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java new file mode 100644 index 0000000000000..7f9bacd35ac25 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/MoveToStepResponseTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Response; + +public class MoveToStepResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java new file mode 100644 index 0000000000000..5df60a7333143 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleRequestTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Request; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class PutLifecycleRequestTests extends AbstractStreamableXContentTestCase<Request> { + + private String lifecycleName; + + @Before + public void setup() { + lifecycleName = randomAlphaOfLength(20); + } + + @Override + protected Request createTestInstance() { + return new Request(LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(lifecycleName)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return PutLifecycleAction.Request.parseRequest(lifecycleName, parser); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + new
NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new) + )); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request mutateInstance(Request request) { + String name = randomBoolean() ? lifecycleName : randomAlphaOfLength(5); + LifecyclePolicy policy = randomValueOtherThan(request.getPolicy(), + () -> LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(name)); + return new Request(policy); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleResponseTests.java new file mode 100644 index 0000000000000..0c9acb2aa5dd2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/PutLifecycleResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Response; + +public class PutLifecycleResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response mutateInstance(Response response) { + return new Response(response.isAcknowledged() == false); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..e82f84564cae7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyRequestTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Request; + +import java.io.IOException; +import java.util.Arrays; + +public class RemoveIndexLifecyclePolicyRequestTests extends AbstractStreamableTestCase<Request> { + + @Override + protected Request createTestInstance() { + Request request = new Request(generateRandomStringArray(20, 20, false)); + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false)); + } + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request mutateInstance(Request instance) throws IOException { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 20, false)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + Request newRequest = new Request(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + public void testNullIndices() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new Request((String[]) null)); + assertEquals("indices cannot be null", exception.getMessage()); + } + + public void testValidate() { + Request request = createTestInstance(); + assertNull(request.validate()); + } +}
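[Editor's note: the `mutateInstance` implementations in this change lean on `randomValueOtherThan`/`randomValueOtherThanMany` to guarantee that the mutated field genuinely differs, since two independent random draws can collide (two random `IndicesOptions`, for instance, can be identical). Below is a sketch of that retry loop, mirroring the ESTestCase helpers of the same name; this is a simplified illustration, the real helpers live in the Elasticsearch test framework.]

---------------------------------------------------------------------------
import java.util.Random;
import java.util.function.Predicate;
import java.util.function.Supplier;

public class RandomOtherThanSketch {
    static final Random RANDOM = new Random();

    // Keep drawing until the candidate is rejected by the predicate,
    // guaranteeing the mutated value really differs from the original.
    static <T> T randomValueOtherThanMany(Predicate<T> isForbidden, Supplier<T> generator) {
        T candidate;
        do {
            candidate = generator.get();
        } while (isForbidden.test(candidate));
        return candidate;
    }

    public static void main(String[] args) {
        int original = 3;
        int mutated = randomValueOtherThanMany(v -> v == original, () -> RANDOM.nextInt(5));
        System.out.println(original + " -> " + mutated); // never prints "3 -> 3"
    }
}
---------------------------------------------------------------------------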
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..a394e593e7307 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RemoveIndexLifecyclePolicyResponseTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Response; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class RemoveIndexLifecyclePolicyResponseTests extends AbstractStreamableXContentTestCase<Response> { + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected Response createTestInstance() { + List<String> failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + return new Response(failedIndexes); + } + + @Override + protected Response mutateInstance(Response instance) throws IOException { + List<String> failedIndices = randomValueOtherThan(instance.getFailedIndexes(), + () -> Arrays.asList(generateRandomStringArray(20, 20, false))); + return new Response(failedIndices); + } + + @Override + protected Response doParseInstance(XContentParser parser) throws IOException { + return Response.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testNullFailedIndices() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new Response((List<String>) null)); + assertEquals("failed_indexes cannot be null", exception.getMessage()); + } + + public void testHasFailures() { + Response response = new Response(new ArrayList<>()); + assertFalse(response.hasFailures()); + assertEquals(Collections.emptyList(), response.getFailedIndexes()); + + int size = randomIntBetween(1, 10); + List<String> failedIndexes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + failedIndexes.add(randomAlphaOfLength(20)); + } + response = new Response(failedIndexes); + assertTrue(response.hasFailures()); + assertEquals(failedIndexes, response.getFailedIndexes()); + } + +}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java new file mode 100644 index 0000000000000..734bcf0b7df36 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/action/RetryRequestTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + * + */ +package org.elasticsearch.xpack.core.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request; + +import java.io.IOException; +import java.util.Arrays; + +public class RetryRequestTests extends AbstractStreamableTestCase<Request> { + + @Override + protected Request createTestInstance() { + Request request = new Request(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + @Override + protected Request mutateInstance(Request instance) throws IOException { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + Request newRequest = new Request(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +}
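[Editor's note: a related convention visible above is that `mutateInstance` changes exactly one randomly chosen field per call (the `switch (between(0, 1))` blocks), so a serialization bug in a single field cannot be masked by another mutated field. A compact sketch of the pattern follows, using a hypothetical two-field request; the record syntax needs Java 16 or later.]

---------------------------------------------------------------------------
import java.util.Objects;
import java.util.Random;

public class MutateOneFieldSketch {
    static final Random RANDOM = new Random();

    record Request(String index, int retries) { }

    // Flip exactly one randomly chosen field, so the result is guaranteed
    // to differ from the input in a single, known way.
    static Request mutate(Request r) {
        switch (RANDOM.nextInt(2)) {
            case 0: return new Request(r.index() + "-x", r.retries());
            case 1: return new Request(r.index(), r.retries() + 1);
            default: throw new AssertionError("Illegal randomisation branch");
        }
    }

    public static void main(String[] args) {
        Request original = new Request("logs-1", 2);
        Request mutated = mutate(original);
        System.out.println(!Objects.equals(original, mutated)); // always true
    }
}
---------------------------------------------------------------------------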
+ *
+ */
+package org.elasticsearch.xpack.core.indexlifecycle.action;
+
+import org.elasticsearch.test.AbstractStreamableTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response;
+
+public class RetryResponseTests extends AbstractStreamableTestCase<Response> {
+
+    @Override
+    protected Response createTestInstance() {
+        return new Response(randomBoolean());
+    }
+
+    @Override
+    protected Response createBlankInstance() {
+        return new Response();
+    }
+
+    @Override
+    protected Response mutateInstance(Response response) {
+        return new Response(response.isAcknowledged() == false);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
index 21f11fa5f73c7..ba69e6f750c96 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java
@@ -124,7 +124,7 @@ public void testValidateNonDelimited() {
 
     public void testValidateNonSemiStructuredText() {
         FindFileStructureAction.Request request = new FindFileStructureAction.Request();
-        request.setFormat(randomFrom(FileStructure.Format.JSON, FileStructure.Format.XML, FileStructure.Format.DELIMITED));
+        request.setFormat(randomFrom(FileStructure.Format.NDJSON, FileStructure.Format.XML, FileStructure.Format.DELIMITED));
         request.setGrokPattern(randomAlphaOfLength(80));
         request.setSample(new BytesArray("foo\n"));
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java
index 77d4d788db620..7f4866982317a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java
@@ -5,14 +5,16 @@
  */
 package org.elasticsearch.xpack.core.ml.action;
 
+import org.elasticsearch.client.ml.PutCalendarResponse;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase;
 import org.elasticsearch.xpack.core.ml.calendars.Calendar;
 import org.elasticsearch.xpack.core.ml.calendars.CalendarTests;
 
 import java.io.IOException;
 
-public class PutCalendarActionResponseTests extends AbstractStreamableXContentTestCase<PutCalendarAction.Response> {
+public class PutCalendarActionResponseTests
+    extends AbstractHlrcStreamableXContentTestCase<PutCalendarAction.Response, PutCalendarResponse> {
 
     @Override
     protected PutCalendarAction.Response createTestInstance() {
@@ -20,12 +22,29 @@ protected PutCalendarAction.Response createTestInstance() {
     }
 
     @Override
-    protected PutCalendarAction.Response createBlankInstance() {
-        return new PutCalendarAction.Response();
+    protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException {
+        return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build());
     }
 
     @Override
-    protected PutCalendarAction.Response doParseInstance(XContentParser parser) throws IOException {
-        return new PutCalendarAction.Response(Calendar.LENIENT_PARSER.parse(parser, null).build());
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+
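+    // A note on the two HLRC hooks below (describing the base test case's
+    // round-trip contract as I read it): the internal response is rendered as
+    // XContent, doHlrcParseInstance parses it into the client-side
+    // PutCalendarResponse, and convertHlrcToInternal maps it back so the result
+    // can be compared with the original server-side response.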
@Override + public PutCalendarResponse doHlrcParseInstance(XContentParser parser) throws IOException { + return PutCalendarResponse.fromXContent(parser); + } + + @Override + public PutCalendarAction.Response convertHlrcToInternal(PutCalendarResponse instance) { + org.elasticsearch.client.ml.calendars.Calendar hlrcCalendar = instance.getCalendar(); + Calendar internalCalendar = new Calendar(hlrcCalendar.getId(), hlrcCalendar.getJobIds(), hlrcCalendar.getDescription()); + return new PutCalendarAction.Response(internalCalendar); + } + + @Override + protected PutCalendarAction.Response createBlankInstance() { + return new PutCalendarAction.Response(); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index d2f7d7bdb96b9..2dcfa093c008a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -148,7 +148,7 @@ public void testPoints() throws Exception { assertEquals(Integer.BYTES, points.getBytesPerDimension()); // number of dimensions - assertEquals(1, points.getNumDimensions()); + assertEquals(1, points.getNumIndexDimensions()); // walk the trees: we should see stuff in fieldA AtomicBoolean sawDoc = new AtomicBoolean(false); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index d8e0b693f7008..2290a34752819 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -307,7 +307,7 @@ public void testReloadingKeyStoreException() throws Exception { Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.ssl"); - new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) { + new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { fail("reload should not be called! [keystore reload exception]"); @@ -348,7 +348,7 @@ public void testReloadingPEMKeyConfigException() throws Exception { Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.ssl"); - new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) { + new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { fail("reload should not be called! [pem key reload exception]"); @@ -383,7 +383,7 @@ public void testTrustStoreReloadException() throws Exception { Environment env = randomBoolean() ? 
null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.ssl"); - new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) { + new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { fail("reload should not be called! [truststore reload exception]"); @@ -415,7 +415,7 @@ public void testPEMTrustReloadException() throws Exception { Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.ssl"); - new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) { + new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { fail("reload should not be called! [pem trust reload exception]"); @@ -444,7 +444,7 @@ private void validateSSLConfigurationIsReloaded(Settings settings, Environment e final CountDownLatch reloadLatch = new CountDownLatch(1); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.ssl"); - new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) { + new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { super.reloadSSLContext(configuration); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index 88d10071e8543..545a8f91574a7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -496,8 +496,8 @@ public void testGetConfigurationByContextName() throws Exception { "transport.profiles.prof1.xpack.security.ssl", "transport.profiles.prof2.xpack.security.ssl", "transport.profiles.prof3.xpack.security.ssl", - "xpack.security.authc.realms.realm1.ssl", - "xpack.security.authc.realms.realm2.ssl", + "xpack.security.authc.realms.ldap.realm1.ssl", + "xpack.security.authc.realms.saml.realm2.ssl", "xpack.monitoring.exporters.mon1.ssl", "xpack.monitoring.exporters.mon2.ssl" }; @@ -518,7 +518,7 @@ public void testGetConfigurationByContextName() throws Exception { final Settings settings = builder // Add a realm without SSL settings. This context name should be mapped to the global configuration - .put("xpack.security.authc.realms.realm3.type", "file") + .put("xpack.security.authc.realms.file.realm3.order", 4) // Add an exporter without SSL settings. 
This context name should be mapped to the global configuration .put("xpack.monitoring.exporters.mon3.type", "http") .setSecureSettings(secureSettings) @@ -538,7 +538,7 @@ public void testGetConfigurationByContextName() throws Exception { // These contexts have no SSL settings, but for convenience we want those components to be able to access their context // by name, and get back the global configuration - final SSLConfiguration realm3Config = sslService.getSSLConfiguration("xpack.security.authc.realms.realm3.ssl"); + final SSLConfiguration realm3Config = sslService.getSSLConfiguration("xpack.security.authc.realms.file.realm3.ssl"); final SSLConfiguration mon3Config = sslService.getSSLConfiguration("xpack.monitoring.exporters.mon3.ssl."); final SSLConfiguration global = globalConfiguration(sslService); assertThat(realm3Config, sameInstance(global)); @@ -560,8 +560,7 @@ public void testReadCertificateInformation() throws Exception { .put("xpack.ssl.keystore.path", jksPath) .put("xpack.ssl.truststore.path", jksPath) .put("xpack.http.ssl.keystore.path", p12Path) - .put("xpack.security.authc.realms.ad.type", "ad") - .put("xpack.security.authc.realms.ad.ssl.certificate_authorities", pemPath) + .put("xpack.security.authc.realms.active_directory.ad.ssl.certificate_authorities", pemPath) .setSecureSettings(secureSettings) .build(); diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index 04d1bde87d979..29bd948ab6c33 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; @@ -37,11 +36,11 @@ public class TransportDeprecationInfoAction extends TransportMasterNodeReadActio private final IndexNameExpressionResolver indexNameExpressionResolver; @Inject - public TransportDeprecationInfoAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportDeprecationInfoAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, XPackLicenseState licenseState, NodeClient client) { - super(settings, DeprecationInfoAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(DeprecationInfoAction.NAME, transportService, clusterService, threadPool, actionFilters, DeprecationInfoAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.client = client; diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index 069bfa5fbbe2b..f1bac2e54d455 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -15,6 +15,15 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } +// add all sub-projects of the qa sub-project 
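+// (the block below runs once all projects are evaluated: it locates this
+// plugin's ":qa" container project and wires each qa sub-project's `check`
+// task into this project's `check`, so the qa suites run as part of a
+// top-level `./gradlew check`)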
+gradle.projectsEvaluated {
+  project.subprojects
+    .find { it.path == project.path + ":qa" }
+    .subprojects
+    .findAll { it.path.startsWith(project.path + ":qa") }
+    .each { check.dependsOn it.check }
+}
+
 run {
     plugin xpackModule('core')
 }
diff --git a/x-pack/plugin/graph/qa/build.gradle b/x-pack/plugin/graph/qa/build.gradle
new file mode 100644
index 0000000000000..35bd236df5c23
--- /dev/null
+++ b/x-pack/plugin/graph/qa/build.gradle
@@ -0,0 +1,17 @@
+import org.elasticsearch.gradle.test.RestIntegTestTask
+
+subprojects {
+  // HACK: please fix this
+  // we want to add the rest api specs for xpack to qa tests, but we
+  // need to wait until after the project is evaluated to only apply
+  // to those that run rest tests. this used to be done automatically
+  // when xpack was a plugin, but now that xpack is a module there is no
+  // place where this happens. instead, we should package these and make
+  // them easy to use for rest tests, but currently, they must be copied
+  // into the resources of the test runner.
+  project.tasks.withType(RestIntegTestTask) {
+    File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources')
+    project.copyRestSpec.from(xpackResources) {
+      include 'rest-api-spec/api/**'
+    }
+  }
+}
diff --git a/x-pack/qa/smoke-test-graph-with-security/build.gradle b/x-pack/plugin/graph/qa/with-security/build.gradle
similarity index 100%
rename from x-pack/qa/smoke-test-graph-with-security/build.gradle
rename to x-pack/plugin/graph/qa/with-security/build.gradle
diff --git a/x-pack/qa/smoke-test-graph-with-security/roles.yml b/x-pack/plugin/graph/qa/with-security/roles.yml
similarity index 100%
rename from x-pack/qa/smoke-test-graph-with-security/roles.yml
rename to x-pack/plugin/graph/qa/with-security/roles.yml
diff --git a/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java b/x-pack/plugin/graph/qa/with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java
similarity index 100%
rename from x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java
rename to x-pack/plugin/graph/qa/with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java
diff --git a/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java b/x-pack/plugin/graph/qa/with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java
similarity index 100%
rename from x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java
rename to x-pack/plugin/graph/qa/with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java
diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java
index 096f6bf63be69..d05c075d26b65 100644
--- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java
+++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import
org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -25,14 +24,14 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.protocol.xpack.graph.Connection; +import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; +import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.protocol.xpack.graph.GraphExploreResponse; import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.graph.Vertex; -import org.elasticsearch.protocol.xpack.graph.VertexRequest; -import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId; -import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.protocol.xpack.graph.Vertex.VertexId; +import org.elasticsearch.protocol.xpack.graph.VertexRequest; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; @@ -84,9 +83,9 @@ protected boolean lessThan(Vertex a, Vertex b) { } @Inject - public TransportGraphExploreAction(Settings settings, ThreadPool threadPool, NodeClient client, - TransportService transportService, ActionFilters actionFilters, XPackLicenseState licenseState) { - super(settings, GraphExploreAction.NAME, transportService, actionFilters, (Supplier)GraphExploreRequest::new); + public TransportGraphExploreAction(ThreadPool threadPool, NodeClient client, TransportService transportService, + ActionFilters actionFilters, XPackLicenseState licenseState) { + super(GraphExploreAction.NAME, transportService, actionFilters, (Supplier)GraphExploreRequest::new); this.threadPool = threadPool; this.client = client; this.licenseState = licenseState; @@ -98,7 +97,7 @@ protected void doExecute(Task task, GraphExploreRequest request, ActionListener< new AsyncGraphAction(request, listener).start(); } else { listener.onFailure(LicenseUtils.newComplianceException(XPackField.GRAPH)); - } + } } class AsyncGraphAction { @@ -181,7 +180,7 @@ synchronized void expand() { Hop lastHop = request.getHop(currentHopNumber); currentHopNumber++; Hop currentHop = request.getHop(currentHopNumber); - + final SearchRequest searchRequest = new SearchRequest(request.indices()).types(request.types()).indicesOptions( request.indicesOptions()); if (request.routing() != null) { @@ -189,15 +188,15 @@ synchronized void expand() { } BoolQueryBuilder rootBool = QueryBuilders.boolQuery(); - + // A single sample pool of docs is built at the root of the aggs tree. // For quality's sake it might have made more sense to sample top docs - // for each of the terms from the previous hop (e.g. an initial query for "beatles" + // for each of the terms from the previous hop (e.g. an initial query for "beatles" // may have separate doc-sample pools for significant root terms "john", "paul", "yoko" etc) // but I found this dramatically slowed down execution - each pool typically had different docs which // each had non-overlapping sets of terms that needed frequencies looking up for significant terms. // A common sample pool reduces the specialization that can be given to each root term but - // ultimately is much faster to run because of the shared vocabulary in a single sample set. 
+ // ultimately is much faster to run because of the shared vocabulary in a single sample set. AggregationBuilder sampleAgg = null; if (request.sampleDiversityField() != null) { DiversifiedAggregationBuilder diversifiedSampleAgg = AggregationBuilders.diversifiedSampler("sample") @@ -208,11 +207,11 @@ synchronized void expand() { }else{ sampleAgg = AggregationBuilders.sampler("sample").shardSize(request.sampleSize()); } - + // Add any user-supplied criteria to the root query as a must clause rootBool.must(currentHop.guidingQuery()); - - // Build a MUST clause that matches one of either + + // Build a MUST clause that matches one of either // a:) include clauses supplied by the client or // b:) vertex terms from the previous hop. BoolQueryBuilder sourceTermsOrClause = QueryBuilders.boolQuery(); @@ -220,10 +219,10 @@ synchronized void expand() { addBigOrClause(lastHopFindings, sourceTermsOrClause); rootBool.must(sourceTermsOrClause); - - - //Now build the agg tree that will channel the content -> - // base agg is terms agg for terms from last wave (one per field), + + + //Now build the agg tree that will channel the content -> + // base agg is terms agg for terms from last wave (one per field), // under each is a sig_terms agg to find next candidates (again, one per field)... for (int fieldNum = 0; fieldNum < lastHop.getNumberVertexRequests(); fieldNum++) { VertexRequest lastVr = lastHop.getVertexRequest(fieldNum); @@ -319,7 +318,7 @@ synchronized void expand() { public void onResponse(SearchResponse searchResponse) { // System.out.println(searchResponse); addShardFailures(searchResponse.getShardFailures()); - + ArrayList newConnections = new ArrayList(); ArrayList newVertices = new ArrayList(); Sampler sample = searchResponse.getAggregations().get("sample"); @@ -331,7 +330,7 @@ public void onResponse(SearchResponse searchResponse) { // what percentage of the total scores its own score // provides double totalSignalOutput = getExpandTotalSignalStrength(lastHop, currentHop, sample); - + // Signal output can be zero if we did not encounter any new // terms as part of this stage if (totalSignalOutput > 0) { @@ -342,7 +341,7 @@ public void onResponse(SearchResponse searchResponse) { // Potentially run another round of queries to perform next"hop" - will terminate if no new additions expand(); - + } @@ -364,7 +363,7 @@ private void addAndScoreNewVertices(Hop lastHop, Hop currentHop, Sampler sample, Vertex fromVertex = getVertex(lastVr.fieldName(), lastWaveTerm.getKeyAsString()); for (int k = 0; k < currentHop.getNumberVertexRequests(); k++) { VertexRequest vr = currentHop.getVertexRequest(k); - // As we travel further out into the graph we apply a + // As we travel further out into the graph we apply a // decay to the signals being propagated down the various channels. double decay = 0.95d; if (request.useSignificance()) { @@ -424,14 +423,14 @@ private void addAndScoreNewVertices(Hop lastHop, Hop currentHop, Sampler sample, // Having let the signals from the last results rattle around the graph - // we have adjusted weights for the various vertices we encountered. + // we have adjusted weights for the various vertices we encountered. // Now we review these new additions and remove those with the // weakest weights. // A priority queue is used to trim vertices according to the size settings // requested for each field. 
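    // (In outline, as the method below shows: for each vertex request a bounded
    // priority queue keeps only the strongest new vertices for that field;
    // whatever falls out of the queue is collected into an eviction set, and the
    // evicted vertices plus any connections dangling from them are then removed.)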
private void trimNewAdditions(Hop currentHop, ArrayList newConnections, ArrayList newVertices) { Set evictions = new HashSet<>(); - + for (int k = 0; k < currentHop.getNumberVertexRequests(); k++) { // For each of the fields VertexRequest vr = currentHop.getVertexRequest(k); @@ -450,7 +449,7 @@ private void trimNewAdditions(Hop currentHop, ArrayList newConnectio } } } - // Remove weak new nodes and their dangling connections from the main graph + // Remove weak new nodes and their dangling connections from the main graph if (evictions.size() > 0) { for (Connection connection : newConnections) { if (evictions.contains(connection.getTo())) { @@ -465,7 +464,7 @@ private void trimNewAdditions(Hop currentHop, ArrayList newConnectio // is if the "from" and "to" nodes are a client-supplied set of includes e.g. a list of // music artists then the client may be wanting to draw only the most-interesting connections // between them. See https://github.com/elastic/x-plugins/issues/518#issuecomment-160186424 - // I guess clients could trim the returned connections (which all have weights) but I wonder if + // I guess clients could trim the returned connections (which all have weights) but I wonder if // we can do something server-side here // Helper method - compute the total signal of all scores in the search results @@ -576,7 +575,7 @@ public synchronized void start() { } BoolQueryBuilder rootBool = QueryBuilders.boolQuery(); - + AggregationBuilder rootSampleAgg = null; if (request.sampleDiversityField() != null) { DiversifiedAggregationBuilder diversifiedRootSampleAgg = AggregationBuilders.diversifiedSampler("sample") @@ -587,15 +586,15 @@ public synchronized void start() { } else { rootSampleAgg = AggregationBuilders.sampler("sample").shardSize(request.sampleSize()); } - - - + + + Hop rootHop = request.getHop(0); - + // Add any user-supplied criteria to the root query as a should clause rootBool.must(rootHop.guidingQuery()); - - + + // If any of the root terms have an "include" restriction then // we add a root-level MUST clause that // mandates that at least one of the potentially many terms of @@ -605,7 +604,7 @@ public synchronized void start() { if (includesContainer.should().size() > 0) { rootBool.must(includesContainer); } - + for (int i = 0; i < rootHop.getNumberVertexRequests(); i++) { VertexRequest vr = rootHop.getVertexRequest(i); @@ -613,7 +612,7 @@ public synchronized void start() { SignificantTermsAggregationBuilder sigBuilder = AggregationBuilders.significantTerms("field" + i); sigBuilder.field(vr.fieldName()).shardMinDocCount(vr.shardMinDocCount()).minDocCount(vr.minDocCount()) // Map execution mode used because Sampler agg - // keeps us focused on smaller sets of high quality + // keeps us focused on smaller sets of high quality // docs and therefore examine smaller volumes of terms .executionHint("map").size(vr.size()); // It is feasible that clients could provide a choice of @@ -648,9 +647,9 @@ public synchronized void start() { } rootSampleAgg.subAggregation(termsBuilder); } - } - - + } + + // Run the search SearchSourceBuilder source = new SearchSourceBuilder() .query(rootBool) @@ -669,8 +668,8 @@ public void onResponse(SearchResponse searchResponse) { // Determine the total scores for all interesting terms double totalSignalStrength = getInitialTotalSignalStrength(rootHop, sample); - - + + // Now gather the best matching terms and compute signal weight according to their // share of the total signal strength for (int j = 0; j < rootHop.getNumberVertexRequests(); j++) { @@ 
-680,7 +679,7 @@ public void onResponse(SearchResponse searchResponse) { List buckets = significantTerms.getBuckets(); for (Bucket bucket : buckets) { double signalWeight = bucket.getSignificanceScore() / totalSignalStrength; - addVertex(vr.fieldName(), bucket.getKeyAsString(), signalWeight, + addVertex(vr.fieldName(), bucket.getKeyAsString(), signalWeight, currentHopNumber, bucket.getSupersetDf(), bucket.getSubsetDf()); } } else { @@ -733,11 +732,11 @@ public void onFailure(Exception e) { private void addNormalizedBoosts(BoolQueryBuilder includesContainer, VertexRequest vr) { TermBoost[] termBoosts = vr.includeValues(); - + if ((includesContainer.should().size() + termBoosts.length) > BooleanQuery.getMaxClauseCount()) { // Too many terms - we need a cheaper form of query to execute this - List termValues = new ArrayList<>(); + List termValues = new ArrayList<>(); for (TermBoost tb : termBoosts) { termValues.add(tb.getTerm()); } diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 778eb261a0705..4c1711f4214e3 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -5,10 +5,13 @@ */ package org.elasticsearch.xpack.graph.rest.action; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,10 +34,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.xpack.core.graph.action.GraphExploreAction.INSTANCE; + /** * @see GraphExploreRequest */ public class RestGraphAction extends XPackRestHandler { + private static final Logger logger = LogManager.getLogger(RestGraphAction.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + public static final ParseField TIMEOUT_FIELD = new ParseField("timeout"); public static final ParseField SIGNIFICANCE_FIELD = new ParseField("use_significance"); public static final ParseField RETURN_DETAILED_INFO = new ParseField("return_detailed_stats"); diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle new file mode 100644 index 0000000000000..71def8937817c --- /dev/null +++ b/x-pack/plugin/ilm/build.gradle @@ -0,0 +1,34 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'x-pack-ilm' + description 'Elasticsearch Expanded Pack Plugin - Index Lifecycle Management' + classname 'org.elasticsearch.xpack.indexlifecycle.IndexLifecycle' + extendedPlugins = ['x-pack-core'] + hasNativeController false + requiresKeystore true +} +archivesBaseName = 'x-pack-ilm' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: 
xpackModule('core'), configuration: 'testArtifacts')
+}
+
+// add all sub-projects of the qa sub-project
+gradle.projectsEvaluated {
+  project.subprojects
+    .find { it.path == project.path + ":qa" }
+    .subprojects
+    .findAll { it.path.startsWith(project.path + ":qa") }
+    .each { check.dependsOn it.check }
+}
+
+integTest.enabled = false
+
+run {
+  plugin xpackModule('core')
+}
diff --git a/x-pack/plugin/ilm/qa/build.gradle b/x-pack/plugin/ilm/qa/build.gradle
new file mode 100644
index 0000000000000..9525da5f4fed2
--- /dev/null
+++ b/x-pack/plugin/ilm/qa/build.gradle
@@ -0,0 +1,20 @@
+import org.elasticsearch.gradle.test.RestIntegTestTask
+
+apply plugin: 'elasticsearch.build'
+test.enabled = false
+
+dependencies {
+  compile project(':test:framework')
+}
+
+subprojects {
+  project.tasks.withType(RestIntegTestTask) {
+    final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources')
+    project.copyRestSpec.from(xPackResources) {
+      include 'rest-api-spec/api/**'
+    }
+  }
+}
+
+// the qa module does not have any source files
+licenseHeaders.enabled = false
diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle
new file mode 100644
index 0000000000000..edd7f3aad472e
--- /dev/null
+++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle
@@ -0,0 +1,20 @@
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+
+dependencies {
+  testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts')
+}
+
+integTestCluster {
+  numNodes = 4
+  clusterName = 'ilm'
+
+  setting 'xpack.ilm.enabled', 'true'
+  setting 'xpack.security.enabled', 'false'
+  setting 'xpack.watcher.enabled', 'false'
+  setting 'xpack.monitoring.enabled', 'false'
+  setting 'xpack.ml.enabled', 'false'
+  setting 'xpack.license.self_generated.type', 'trial'
+  setting 'indices.lifecycle.poll_interval', '1000ms'
+}
diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java
new file mode 100644
index 0000000000000..0589da29312f8
--- /dev/null
+++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
+import org.elasticsearch.xpack.core.indexlifecycle.Phase;
+import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction;
+import org.elasticsearch.xpack.core.indexlifecycle.RolloverStep;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static java.util.Collections.singletonMap;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public class ChangePolicyforIndexIT extends ESRestTestCase {
+
+    /**
+     * This test aims to prove that an index will finish the current phase on an
+     * existing definition when the policy is changed for that index, and that
+     * after completing the current phase the new policy will be used for
+     * subsequent phases.
+     *
+     * The test creates two policies, one with a hot phase requiring 1 document
+     * to roll over and a warm phase with an impossible allocation action. The
+     * second policy has a rollover action requiring 1000 documents and a warm
+     * phase that moves the index to known nodes that will succeed. An index is
+     * created with the first policy set and the test ensures the policy is in
+     * the rollover step. It then changes the policy for the index to the second
+     * policy.
It indexes a single document and checks that the index moves past
+     * the hot phase and through the warm phase (proving the hot phase
+     * definition from the first policy was used) and then checks the allocation
+     * settings from the second policy are set on the index (proving the second
+     * policy was used for the warm phase)
+     */
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35244")
+    public void testChangePolicyForIndex() throws Exception {
+        String indexName = "test-000001";
+        // create policy_1 and policy_2
+        Map<String, Phase> phases1 = new HashMap<>();
+        phases1.put("hot", new Phase("hot", TimeValue.ZERO, singletonMap(RolloverAction.NAME, new RolloverAction(null, null, 1L))));
+        phases1.put("warm", new Phase("warm", TimeValue.ZERO,
+                singletonMap(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "foobarbaz"), null, null))));
+        LifecyclePolicy lifecyclePolicy1 = new LifecyclePolicy("policy_1", phases1);
+        Map<String, Phase> phases2 = new HashMap<>();
+        phases2.put("hot", new Phase("hot", TimeValue.ZERO, singletonMap(RolloverAction.NAME, new RolloverAction(null, null, 1000L))));
+        phases2.put("warm", new Phase("warm", TimeValue.ZERO,
+                singletonMap(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "node-1,node-2"), null, null))));
+        LifecyclePolicy lifecyclePolicy2 = new LifecyclePolicy("policy_2", phases2);
+        // PUT policy_1 and policy_2
+        XContentBuilder builder1 = jsonBuilder();
+        lifecyclePolicy1.toXContent(builder1, null);
+        final StringEntity entity1 = new StringEntity("{ \"policy\":" + Strings.toString(builder1) + "}", ContentType.APPLICATION_JSON);
+        Request request1 = new Request("PUT", "_ilm/policy/" + "policy_1");
+        request1.setEntity(entity1);
+        assertOK(client().performRequest(request1));
+        XContentBuilder builder2 = jsonBuilder();
+        lifecyclePolicy2.toXContent(builder2, null);
+        final StringEntity entity2 = new StringEntity("{ \"policy\":" + Strings.toString(builder2) + "}", ContentType.APPLICATION_JSON);
+        Request request2 = new Request("PUT", "_ilm/policy/" + "policy_2");
+        request2.setEntity(entity2);
+        assertOK(client().performRequest(request2));
+
+        // create the test-index index and set the policy to policy_1
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.allocation.include._name", "node-0")
+                .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias").put(LifecycleSettings.LIFECYCLE_NAME, "policy_1").build();
+        Request createIndexRequest = new Request("PUT", "/" + indexName);
+        createIndexRequest.setJsonEntity(
+                "{\n \"settings\": " + Strings.toString(settings) + ", \"aliases\" : { \"alias\": { \"is_write_index\": true } } }");
+        client().performRequest(createIndexRequest);
+        // wait for the shards to initialize
+        ensureGreen(indexName);
+
+        // Check the index is on the attempt rollover step
+        assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME)));
+
+        // Change the policy to policy_2
+        Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings");
+        final StringEntity changePolicyEntity = new StringEntity("{ \"index.lifecycle.name\": \"policy_2\" }",
+                ContentType.APPLICATION_JSON);
+        changePolicyRequest.setEntity(changePolicyEntity);
+        assertOK(client().performRequest(changePolicyRequest));
+
+        // Check the index is still on the attempt rollover step
+        assertBusy(() -> assertStep(indexName, new StepKey("hot", RolloverAction.NAME, RolloverStep.NAME)));
+
+        // Index a
single document + XContentBuilder document = jsonBuilder().startObject(); + document.field("foo", "bar"); + document.endObject(); + final Request request = new Request("POST", "/" + indexName + "/_doc/1"); + request.setJsonEntity(Strings.toString(document)); + assertOK(client().performRequest(request)); + + // Check the index goes to the warm phase and completes + assertBusy(() -> assertStep(indexName, TerminalPolicyStep.KEY)); + + // Check index is allocated on node-1 and node-2 as per policy_2 + Request getSettingsRequest = new Request("GET", "/" + indexName + "/_settings"); + Response getSettingsResponse = client().performRequest(getSettingsRequest); + assertOK(getSettingsResponse); + Map getSettingsResponseMap = entityAsMap(getSettingsResponse); + @SuppressWarnings("unchecked") + Map indexSettings = (Map) ((Map) getSettingsResponseMap.get(indexName)) + .get("settings"); + @SuppressWarnings("unchecked") + Map routingSettings = (Map) ((Map) indexSettings.get("index")).get("routing"); + @SuppressWarnings("unchecked") + String includesAllocation = (String) ((Map) ((Map) routingSettings.get("allocation")) + .get("include")).get("_name"); + assertEquals("node-1,node-2", includesAllocation); + } + + public void testTempAwaitFix() { + // this is a test stub since there is only one test in this class and it is + // awaits-fixed. This test is to be removed once testChangePolicyForIndex is resolved + } + + private void assertStep(String indexName, StepKey expectedStep) throws IOException { + Response explainResponse = client().performRequest(new Request("GET", "/" + indexName + "/_ilm/explain")); + assertOK(explainResponse); + Map explainResponseMap = entityAsMap(explainResponse); + @SuppressWarnings("unchecked") + Map indexExplainResponse = (Map) ((Map) explainResponseMap.get("indices")) + .get(indexName); + assertEquals(expectedStep.getPhase(), indexExplainResponse.get("phase")); + assertEquals(expectedStep.getAction(), indexExplainResponse.get("action")); + assertEquals(expectedStep.getName(), indexExplainResponse.get("step")); + } +} diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java new file mode 100644 index 0000000000000..8dc8427bc7640 --- /dev/null +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -0,0 +1,518 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.not; + +public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { + private String index; + private String policy; + + @Before + public void refreshIndex() { + index = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + policy = randomAlphaOfLength(5); + } + + public static void updatePolicy(String indexName, String policy) throws IOException { + + Request changePolicyRequest = new Request("PUT", "/" + indexName + "/_settings"); + final StringEntity changePolicyEntity = new StringEntity("{ \"index.lifecycle.name\": \"" + policy + "\" }", + ContentType.APPLICATION_JSON); + changePolicyRequest.setEntity(changePolicyEntity); + assertOK(client().performRequest(changePolicyRequest)); + } + + public void testFullPolicy() throws Exception { + String originalIndex = index + "-000001"; + String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createFullPolicy(TimeValue.ZERO); + // update policy on index + updatePolicy(originalIndex, policy); + // index document {"foo": "bar"} to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + + /* + * These asserts are in the order 
that they should be satisfied in, in + * order to maximize the time for all operations to complete. + * An "out of order" assert here may result in this test occasionally + * timing out and failing inappropriately. + */ + // asserts that rollover was called + assertBusy(() -> assertTrue(indexExists(secondIndex))); + // asserts that shrink deleted the original index + assertBusy(() -> assertFalse(indexExists(originalIndex))); + // asserts that the delete phase completed for the managed shrunken index + assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); + } + + public void testMoveToAllocateStep() throws Exception { + String originalIndex = index + "-000001"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createFullPolicy(TimeValue.timeValueHours(10)); + // update policy on index + updatePolicy(originalIndex, policy); + + // move to a step + Request moveToStepRequest = new Request("POST", "_ilm/move/" + originalIndex); + assertBusy(() -> assertTrue(getStepKeyForIndex(originalIndex).equals(new StepKey("new", "complete", "complete")))); + moveToStepRequest.setJsonEntity("{\n" + + " \"current_step\": {\n" + + " \"phase\": \"new\",\n" + + " \"action\": \"complete\",\n" + + " \"name\": \"complete\"\n" + + " },\n" + + " \"next_step\": {\n" + + " \"phase\": \"cold\",\n" + + " \"action\": \"allocate\",\n" + + " \"name\": \"allocate\"\n" + + " }\n" + + "}"); + client().performRequest(moveToStepRequest); + assertBusy(() -> assertFalse(indexExists(originalIndex))); + } + + + public void testMoveToRolloverStep() throws Exception { + String originalIndex = index + "-000001"; + String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", "node-0") + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + createFullPolicy(TimeValue.timeValueHours(10)); + // update policy on index + updatePolicy(originalIndex, policy); + + // move to a step + Request moveToStepRequest = new Request("POST", "_ilm/move/" + originalIndex); + // index document to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + logger.info(getStepKeyForIndex(originalIndex)); + moveToStepRequest.setJsonEntity("{\n" + + " \"current_step\": {\n" + + " \"phase\": \"new\",\n" + + " \"action\": \"complete\",\n" + + " \"name\": \"complete\"\n" + + " },\n" + + " \"next_step\": {\n" + + " \"phase\": \"hot\",\n" + + " \"action\": \"rollover\",\n" + + " \"name\": \"attempt_rollover\"\n" + + " }\n" + + "}"); + client().performRequest(moveToStepRequest); + + /* + * These asserts are in the order that they should be satisfied in, in + * order to maximize the time for all operations to complete. + * An "out of order" assert here may result in this test occasionally + * timing out and failing inappropriately. 
+ */ + // asserts that rollover was called + assertBusy(() -> assertTrue(indexExists(secondIndex))); + // asserts that shrink deleted the original index + assertBusy(() -> assertFalse(indexExists(originalIndex))); + // asserts that the delete phase completed for the managed shrunken index + assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); + } + + public void testRolloverAction() throws Exception { + String originalIndex = index + "-000001"; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + // update policy on index + updatePolicy(originalIndex, policy); + // index document {"foo": "bar"} to trigger rollover + index(client(), originalIndex, "_id", "foo", "bar"); + assertBusy(() -> assertTrue(indexExists(secondIndex))); + assertBusy(() -> assertTrue(indexExists(originalIndex))); + } + + public void testRolloverAlreadyExists() throws Exception { + String originalIndex = index + "-000001"; + String secondIndex = index + "-000002"; + createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + // create policy + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + // update policy on index + updatePolicy(originalIndex, policy); + + // Manually create the new index + Request request = new Request("PUT", "/" + secondIndex); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()) + "}"); + client().performRequest(request); + // wait for the shards to initialize + ensureGreen(secondIndex); + + // index another doc to trigger the policy + index(client(), originalIndex, "_id", "foo", "bar"); + + assertBusy(() -> { + logger.info(originalIndex + ": " + getStepKeyForIndex(originalIndex)); + logger.info(secondIndex + ": " + getStepKeyForIndex(secondIndex)); + assertThat(getStepKeyForIndex(originalIndex), equalTo(new StepKey("hot", RolloverAction.NAME, ErrorStep.NAME))); + assertThat(getFailedStepForIndex(originalIndex), equalTo("update-rollover-lifecycle-date")); + assertThat(getReasonForIndex(originalIndex), equalTo("no rollover info found for [" + originalIndex + "], either the index " + + "has not yet rolled over or a subsequent index was created outside of Index Lifecycle Management")); + }); + } + + public void testAllocateOnlyAllocation() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + String allocateNodeName = "node-" + randomFrom(0, 1); + AllocateAction allocateAction = new AllocateAction(null, null, null, singletonMap("_name", allocateNodeName)); + createNewSingletonPolicy(randomFrom("warm", "cold"), allocateAction); + updatePolicy(index, policy); + assertBusy(() -> { + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + }); + ensureGreen(index); + } + + public void testAllocateActionOnlyReplicas() throws Exception { + int numShards = randomFrom(1, 5); + int numReplicas = randomFrom(0, 1); + int finalNumReplicas = (numReplicas + 
1) % 2; + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)); + AllocateAction allocateAction = new AllocateAction(finalNumReplicas, null, null, null); + createNewSingletonPolicy(randomFrom("warm", "cold"), allocateAction); + updatePolicy(index, policy); + assertBusy(() -> { + Map settings = getOnlyIndexSettings(index); + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey()), equalTo(String.valueOf(finalNumReplicas))); + }); + } + + public void testDelete() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("delete", new DeleteAction()); + updatePolicy(index, policy); + assertBusy(() -> assertFalse(indexExists(index))); + } + + public void testDeleteOnlyShouldNotMakeIndexReadonly() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("delete", new DeleteAction(), TimeValue.timeValueHours(1)); + updatePolicy(index, policy); + assertBusy(() -> { + assertThat(getStepKeyForIndex(index).getAction(), equalTo("complete")); + Map settings = getOnlyIndexSettings(index); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), not("true")); + }); + indexDocument(); + } + + public void testReadOnly() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("warm", new ReadOnlyAction()); + updatePolicy(index, policy); + assertBusy(() -> { + Map settings = getOnlyIndexSettings(index); + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + } + + @SuppressWarnings("unchecked") + public void testForceMergeAction() throws Exception { + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + for (int i = 0; i < randomIntBetween(2, 10); i++) { + Request request = new Request("PUT", index + "/_doc/" + i); + request.addParameter("refresh", "true"); + request.setEntity(new StringEntity("{\"a\": \"test\"}", ContentType.APPLICATION_JSON)); + client().performRequest(request); + } + + Supplier numSegments = () -> { + try { + Map segmentResponse = getAsMap(index + "/_segments"); + segmentResponse = (Map) segmentResponse.get("indices"); + segmentResponse = (Map) segmentResponse.get(index); + segmentResponse = (Map) segmentResponse.get("shards"); + List> shards = (List>) segmentResponse.get("0"); + return (Integer) shards.get(0).get("num_search_segments"); + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + assertThat(numSegments.get(), greaterThan(1)); + + createNewSingletonPolicy("warm", new ForceMergeAction(1)); + updatePolicy(index, policy); + + assertBusy(() -> { + assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); + Map settings = getOnlyIndexSettings(index); + assertThat(numSegments.get(), equalTo(1)); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), 
equalTo("true")); + }); + expectThrows(ResponseException.class, this::indexDocument); + } + + public void testShrinkAction() throws Exception { + int numShards = 6; + int divisor = randomFrom(2, 3, 6); + int expectedFinalShards = numShards / divisor; + String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index; + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("warm", new ShrinkAction(expectedFinalShards)); + updatePolicy(index, policy); + assertBusy(() -> { + assertTrue(indexExists(shrunkenIndex)); + assertTrue(aliasExists(shrunkenIndex, index)); + Map settings = getOnlyIndexSettings(shrunkenIndex); + assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards))); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + expectThrows(ResponseException.class, this::indexDocument); + } + + @SuppressWarnings("unchecked") + public void testNonexistentPolicy() throws Exception { + String indexPrefix = randomAlphaOfLengthBetween(5,15).toLowerCase(Locale.ROOT); + final StringEntity template = new StringEntity("{\n" + + " \"index_patterns\": \"" + indexPrefix + "*\",\n" + + " \"settings\": {\n" + + " \"index\": {\n" + + " \"lifecycle\": {\n" + + " \"name\": \"does_not_exist\",\n" + + " \"rollover_alias\": \"test_alias\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); + Request templateRequest = new Request("PUT", "_template/test"); + templateRequest.setEntity(template); + client().performRequest(templateRequest); + + policy = randomAlphaOfLengthBetween(5,20); + createNewSingletonPolicy("hot", new RolloverAction(null, null, 1L)); + + index = indexPrefix + "-000001"; + final StringEntity putIndex = new StringEntity("{\n" + + " \"aliases\": {\n" + + " \"test_alias\": {\n" + + " \"is_write_index\": true\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); + Request putIndexRequest = new Request("PUT", index); + putIndexRequest.setEntity(putIndex); + client().performRequest(putIndexRequest); + indexDocument(); + + assertBusy(() -> { + Request explainRequest = new Request("GET", index + "/_ilm/explain"); + Response response = client().performRequest(explainRequest); + Map responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + logger.info(responseMap); + Map indexStatus = (Map)((Map) responseMap.get("indices")).get(index); + assertNull(indexStatus.get("phase")); + assertNull(indexStatus.get("action")); + assertNull(indexStatus.get("step")); + Map stepInfo = (Map) indexStatus.get("step_info"); + assertNotNull(stepInfo); + assertEquals("policy [does_not_exist] does not exist", stepInfo.get("reason")); + assertEquals("illegal_argument_exception", stepInfo.get("type")); + }); + + } + + private void createFullPolicy(TimeValue hotTime) throws IOException { + Map warmActions = new HashMap<>(); + warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1)); + warmActions.put(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "node-1,node-2"), null, null)); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + Map phases = new HashMap<>(); + phases.put("hot", new Phase("hot", hotTime, singletonMap(RolloverAction.NAME, + new 
RolloverAction(null, null, 1L)))); + phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions)); + phases.put("cold", new Phase("cold", TimeValue.ZERO, singletonMap(AllocateAction.NAME, + new AllocateAction(0, singletonMap("_name", "node-3"), null, null)))); + phases.put("delete", new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, new DeleteAction()))); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); + // PUT policy + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + assertOK(client().performRequest(request)); + } + + private void createNewSingletonPolicy(String phaseName, LifecycleAction action) throws IOException { + createNewSingletonPolicy(phaseName, action, TimeValue.ZERO); + } + + private void createNewSingletonPolicy(String phaseName, LifecycleAction action, TimeValue after) throws IOException { + Phase phase = new Phase(phaseName, after, singletonMap(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + client().performRequest(request); + } + + private void createIndexWithSettings(String index, Settings.Builder settings) throws IOException { + // create the given index with the provided settings and a write alias + Request request = new Request("PUT", "/" + index); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings.build()) + + ", \"aliases\" : { \"alias\": { \"is_write_index\": true } } }"); + client().performRequest(request); + // wait for the shards to initialize + ensureGreen(index); + } + + private static void index(RestClient client, String index, String id, Object...
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } + + @SuppressWarnings("unchecked") + private Map<String, Object> getOnlyIndexSettings(String index) throws IOException { + Map<String, Object> response = (Map<String, Object>) getIndexSettings(index).get(index); + if (response == null) { + return Collections.emptyMap(); + } + return (Map<String, Object>) response.get("settings"); + } + + private StepKey getStepKeyForIndex(String indexName) throws IOException { + Map<String, Object> indexResponse = explainIndex(indexName); + if (indexResponse == null) { + return new StepKey(null, null, null); + } + + String phase = (String) indexResponse.get("phase"); + String action = (String) indexResponse.get("action"); + String step = (String) indexResponse.get("step"); + return new StepKey(phase, action, step); + } + + private String getFailedStepForIndex(String indexName) throws IOException { + Map<String, Object> indexResponse = explainIndex(indexName); + if (indexResponse == null) return null; + + return (String) indexResponse.get("failed_step"); + } + + @SuppressWarnings("unchecked") + private String getReasonForIndex(String indexName) throws IOException { + Map<String, Object> indexResponse = explainIndex(indexName); + if (indexResponse == null) return null; + + return ((Map<String, String>) indexResponse.get("step_info")).get("reason"); + } + + private Map<String, Object> explainIndex(String indexName) throws IOException { + Request explainRequest = new Request("GET", indexName + "/_ilm/explain"); + Response response = client().performRequest(explainRequest); + Map<String, Object> responseMap; + try (InputStream is = response.getEntity().getContent()) { + responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + @SuppressWarnings("unchecked") Map<String, Object> indexResponse = ((Map<String, Map<String, Object>>) responseMap.get("indices")) + .get(indexName); + return indexResponse; + } + + private void indexDocument() throws IOException { + Request indexRequest = new Request("POST", index + "/_doc"); + indexRequest.setEntity(new StringEntity("{\"a\": \"test\"}", ContentType.APPLICATION_JSON)); + Response response = client().performRequest(indexRequest); + logger.info(response.getStatusLine()); + } +} diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle new file mode 100644 index 0000000000000..0f1e277e70d26 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -0,0 +1,45 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ilm'), configuration: 'runtime') +} + +task restTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +def clusterCredentials = [username: System.getProperty('tests.rest.cluster.username', 'test_admin'), + password: System.getProperty('tests.rest.cluster.password', 'x-pack-test-password')] + + +restTestRunner { + systemProperty 'tests.rest.cluster.username', clusterCredentials.username + systemProperty 'tests.rest.cluster.password', clusterCredentials.password +} + +restTestCluster { + distribution 'zip' + setting 'xpack.ilm.enabled', 'true' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled',
'true' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setup-admin-user', + 'bin/elasticsearch-users', 'useradd', clusterCredentials.username, '-p', clusterCredentials.password, '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: clusterCredentials.username, + password: clusterCredentials.password, + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +check.dependsOn restTest +test.enabled = false diff --git a/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java new file mode 100644 index 0000000000000..f784e2b940bfe --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRestIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import java.util.Objects; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) // the default timeout does not seem to be enough on the Jenkins VMs +public class IndexLifecycleRestIT extends ESClientYamlSuiteTestCase { + + private static final String USER = Objects.requireNonNull(System.getProperty("tests.rest.cluster.username")); + private static final String PASS = Objects.requireNonNull(System.getProperty("tests.rest.cluster.password")); + + public IndexLifecycleRestIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(super.restClientSettings()) + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml new file mode 100644 index 0000000000000..385430c1bf704 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/10_basic.yml @@ -0,0 +1,218 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Basic Policy CRUD": + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch:
missing + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 1 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + +--- +"Test Policy Update": + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 1 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "300s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "600s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.version: 2 } + - is_true: my_timeseries_lifecycle.modified_date + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "300s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "600s" } + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + +--- +"Test Undeletable Policy In Use": + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { my_timeseries_lifecycle.policy.phases.warm.min_age: "10s" } + - match: { my_timeseries_lifecycle.policy.phases.delete.min_age: "30s" } + + - do: + indices.create: + index: my_timeseries_index + body: + settings: + index.lifecycle.name: "my_timeseries_lifecycle" + + - do: + catch: bad_request + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Cannot delete policy [my_timeseries_lifecycle]. 
It is being used by at least one index [my_timeseries_index]" } + + - do: + ilm.remove_policy: + index: my_timeseries_index + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml new file mode 100644 index 0000000000000..57223188d655b --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/20_move_to_step.yml @@ -0,0 +1,179 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + +--- +"Test Basic Move To Step": + + - do: + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "new" + action: "complete" + name: "complete" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.step: "forcemerge" } + - match: { indices.my_index.action: "forcemerge" } + - match: { indices.my_index.phase: "warm" } + +--- +"Test Invalid Move To Step With Incorrect Current Step": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [my_index] is not on current step [{\"phase\":\"warm\",\"action\":\"forcemerge\",\"name\":\"forcemerge\"}]" } + + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + +--- +"Test Invalid Move To Step With Invalid Next Step": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index" + body: + current_step: + phase: "new" + action: "complete" + name: "complete" + next_step: + phase: "invalid" + action: "invalid" + name: "invalid" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "step [{\"phase\":\"invalid\",\"action\":\"invalid\",\"name\":\"invalid\"}] for index [my_index] with policy [my_moveable_timeseries_lifecycle] does not exist" } + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - 
match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + +--- +"Test Invalid Move To Step With Invalid Policy": + + - do: + catch: bad_request + ilm.move_to_step: + index: "my_index_no_policy" + body: + current_step: + phase: "hot" + action: "pre-pre-readonly" + name: "after" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [my_index_no_policy] is not associated with an Index Lifecycle Policy" } + +--- +"Test Invalid Move To Step With Invalid Index": + + - do: + catch: bad_request + ilm.move_to_step: + index: "does_not_exist" + body: + current_step: + phase: "hot" + action: "pre-pre-readonly" + name: "after" + next_step: + phase: "warm" + action: "forcemerge" + name: "forcemerge" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [does_not_exist] does not exist" } diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml new file mode 100644 index 0000000000000..c6bdfb2a05e14 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/30_retry.yml @@ -0,0 +1,101 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + + - do: + ilm.put_lifecycle: + policy: "my_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_lifecycle" + +--- +teardown: + + - do: + indices.delete: + index: my_index + + - do: + ilm.delete_lifecycle: + policy: "my_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_lifecycle" + +--- +"Test Invalid Retry With Non-errored Policy": + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_lifecycle" + + - do: + catch: bad_request + ilm.retry: + index: "my_index" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "cannot retry an action for an index [my_index] that has not encountered an error when running a Lifecycle Policy" } + + - do: + ilm.explain_lifecycle: + index: "my_index" + - match: { indices.my_index.policy: "my_lifecycle" } + - match: { indices.my_index.step: "complete" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.phase: "new" } + + +--- +"Test Invalid Retry With No Policy": + + - do: + indices.create: + index: my_index + + - do: + catch: bad_request + ilm.retry: + index: "my_index" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "cannot retry an action for an index [my_index] that has not encountered an error when running a Lifecycle Policy" } + +--- +"Test Invalid Re-run With Invalid Index": + - do: + indices.create: + index: my_index + + - do: + catch: bad_request + ilm.retry: + index: "does_not_exist" + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "index [does_not_exist] does not exist" } diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml 
b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml new file mode 100644 index 0000000000000..8c8206d861152 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml @@ -0,0 +1,263 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: another_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: unmanaged_index + body: + settings: {} + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + - do: + indices.delete: + index: another_index + - do: + indices.delete: + index: unmanaged_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + +--- +"Test Basic Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "my_index" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: indices.my_index.phase_time_millis + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_false: indices.my_index2 + - is_false: indices.another_index + - is_false: indices.unmanaged_index + +--- +"Test Wildcard Index Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "my_*" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: indices.my_index.phase_time_millis + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_true: indices.my_index2.managed + - match: { indices.my_index2.index: "my_index2" } + - match: { indices.my_index2.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index2.phase: "new" } + - match: { indices.my_index2.action: "complete" } + - match: { indices.my_index2.step: "complete" } + - is_true: indices.my_index2.phase_time_millis + - is_false: indices.my_index2.failed_step + - is_false: indices.my_index2.step_info + - is_false: indices.my_index2.phase_execution + + - is_false: indices.another_index + - is_false: indices.unmanaged_index + + +--- +"Test All Indexes 
Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "*" + + - is_true: indices.my_index.managed + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index.phase: "new" } + - match: { indices.my_index.action: "complete" } + - match: { indices.my_index.step: "complete" } + - is_true: indices.my_index.phase_time_millis + - is_false: indices.my_index.failed_step + - is_false: indices.my_index.step_info + - is_false: indices.my_index.phase_execution + + - is_true: indices.my_index2.managed + - match: { indices.my_index2.index: "my_index2" } + - match: { indices.my_index2.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.my_index2.phase: "new" } + - match: { indices.my_index2.action: "complete" } + - match: { indices.my_index2.step: "complete" } + - is_true: indices.my_index2.phase_time_millis + - is_false: indices.my_index2.failed_step + - is_false: indices.my_index2.step_info + - is_false: indices.my_index2.phase_execution + + - is_true: indices.another_index.managed + - match: { indices.another_index.index: "another_index" } + - match: { indices.another_index.policy: "my_moveable_timeseries_lifecycle" } + - match: { indices.another_index.phase: "new" } + - match: { indices.another_index.action: "complete" } + - match: { indices.another_index.step: "complete" } + - is_true: indices.another_index.phase_time_millis + - is_false: indices.another_index.failed_step + - is_false: indices.another_index.step_info + - is_false: indices.another_index.phase_execution + + - match: { indices.unmanaged_index.index: "unmanaged_index" } + - is_false: indices.unmanaged_index.managed + - is_false: indices.unmanaged_index.policy + - is_false: indices.unmanaged_index.phase + - is_false: indices.unmanaged_index.action + - is_false: indices.unmanaged_index.step + - is_false: indices.unmanaged_index.failed_step + - is_false: indices.unmanaged_index.step_info + +--- +"Test Unmanaged Index Lifecycle Explain": + + - do: + ilm.explain_lifecycle: + index: "unmanaged_index" + + - match: { indices.unmanaged_index.index: "unmanaged_index" } + - is_false: indices.unmanaged_index.managed + - is_false: indices.unmanaged_index.policy + - is_false: indices.unmanaged_index.phase + - is_false: indices.unmanaged_index.action + - is_false: indices.unmanaged_index.step + - is_false: indices.unmanaged_index.phase_execution + - is_false: indices.unmanaged_index.failed_step + - is_false: indices.unmanaged_index.step_info + - is_false: indices.my_index + - is_false: indices.my_index2 + - is_false: indices.another_index + +--- +"Test new phase still has phase_time": + + - do: + ilm.put_lifecycle: + policy: "mypolicy" + body: | + { + "policy": { + "phases": { + "hot": { + "min_age": "1000s", + "actions": {} + }, + "warm": { + "min_age": "2000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + } + } + } + } + + - do: + indices.create: + index: foo + body: + settings: + index.lifecycle.name: "mypolicy" + + - do: + ilm.explain_lifecycle: + index: "foo" + + - is_true: indices.foo.managed + - match: { indices.foo.index: "foo" } + - match: { indices.foo.policy: "mypolicy" } + - match: { indices.foo.phase: "new" } + - match: { indices.foo.action: "complete" } + - match: { indices.foo.step: "complete" } + - is_true: indices.foo.phase_time_millis + - is_false: indices.foo.failed_step + - is_false: indices.foo.step_info + - is_false: indices.foo.phase_execution diff --git
a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml new file mode 100644 index 0000000000000..e8abc5b2c8d4e --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_operation_mode.yml @@ -0,0 +1,63 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Changing Operation Modes": + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.put_lifecycle: + policy: "my_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "10s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "delete": { + "min_age": "30s", + "actions": { + "delete": {} + } + } + } + } + } + + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.stop: {} + + - do: + ilm.get_status: {} + - match: { operation_mode: /STOPP(ED|ING)/ } + + - do: + ilm.start: {} + + - do: + ilm.get_status: {} + - match: { operation_mode: "RUNNING" } + + - do: + ilm.delete_lifecycle: + policy: "my_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_timeseries_lifecycle" diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml new file mode 100644 index 0000000000000..c9537d9779733 --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/60_remove_policy_for_index.yml @@ -0,0 +1,203 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + ilm.put_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + ilm.put_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + body: | + { + "policy": { + "phases": { + "warm": { + "min_age": "1000s", + "actions": { + "forcemerge": { + "max_num_segments": 10000 + } + } + }, + "hot": { + "min_age": "1000s", + "actions": { } + } + } + } + } + + - do: + ilm.get_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + + - do: + indices.create: + index: my_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: my_index2 + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: another_index + body: + settings: + index.lifecycle.name: "my_moveable_timeseries_lifecycle" + + - do: + indices.create: + index: unmanaged_index + body: + settings: {} + + - do: + indices.create: + index: my_index_no_policy + +--- +teardown: + + - do: + indices.delete: + index: my_index + - do: + indices.delete: + index: my_index2 + - do: + indices.delete: + index: another_index + - do: + indices.delete: + index: unmanaged_index + + - do: + indices.delete: + index: my_index_no_policy + + - do: + ilm.delete_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + catch: missing + ilm.get_lifecycle: + policy: "my_moveable_timeseries_lifecycle" + + - do: + ilm.delete_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + + - do: + catch: missing + 
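# a 404 here confirms the teardown really removed the alternative policy +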
ilm.get_lifecycle: + policy: "my_alternative_timeseries_lifecycle" + +--- +"Test Remove Policy Single Index": + + - do: + indices.get_settings: + index: "another_index" + + - match: { another_index.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + + - do: + ilm.remove_policy: + index: "another_index" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "another_index" + + - is_false: another_index.settings.index.lifecycle + +--- +"Test Remove Policy Index Pattern": + + - do: + indices.get_settings: + index: "my_*" + + - match: { my_index.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + - match: { my_index2.settings.index.lifecycle.name: my_moveable_timeseries_lifecycle } + + - do: + ilm.remove_policy: + index: "my_*" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "my_*" + + - is_false: my_index.settings.index.lifecycle + - is_false: my_index2.settings.index.lifecycle + +--- +"Test Remove Policy Unmanaged Index": + + - do: + indices.get_settings: + index: "unmanaged_index" + + - is_false: unmanaged_index.settings.index.lifecycle.name + + - do: + ilm.remove_policy: + index: "unmanaged_index" + + - is_false: has_failures + - length: { failed_indexes: 0 } + + - do: + indices.get_settings: + index: "unmanaged_index" + + - is_false: unmanaged_index.settings.index.lifecycle + +--- +"Test Remove Policy Index Does Not Exist": + + - do: + catch: missing + ilm.remove_policy: + index: "doesnt_exist" diff --git a/x-pack/plugin/ilm/qa/with-security/build.gradle b/x-pack/plugin/ilm/qa/with-security/build.gradle new file mode 100644 index 0000000000000..f1b972012e7f8 --- /dev/null +++ b/x-pack/plugin/ilm/qa/with-security/build.gradle @@ -0,0 +1,43 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') +} + +def clusterCredentials = [username: System.getProperty('tests.rest.cluster.username', 'test_admin'), + password: System.getProperty('tests.rest.cluster.password', 'x-pack-test-password')] + +integTestRunner { + systemProperty 'tests.rest.cluster.username', clusterCredentials.username + systemProperty 'tests.rest.cluster.password', clusterCredentials.password +} + +integTestCluster { + setting 'xpack.ilm.enabled', 'true' + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupIlmUser', + 'bin/elasticsearch-users', + 'useradd', "test_ilm", + '-p', 'x-pack-test-password', '-r', "ilm" + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', + 'useradd', clusterCredentials.username, + '-p', clusterCredentials.password, + '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: clusterCredentials.username, + password: clusterCredentials.password, + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/plugin/ilm/qa/with-security/roles.yml b/x-pack/plugin/ilm/qa/with-security/roles.yml new file mode 100644 index 0000000000000..baf89bea34568 --- /dev/null +++ 
b/x-pack/plugin/ilm/qa/with-security/roles.yml @@ -0,0 +1,11 @@ +ilm: + cluster: + - monitor + - manage + indices: + - names: [ 'ilm-*' ] + privileges: + - monitor + - manage + - read + - write \ No newline at end of file diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java new file mode 100644 index 0000000000000..01eb07bb35b4f --- /dev/null +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -0,0 +1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class PermissionsIT extends ESRestTestCase { + + private String deletePolicy = "deletePolicy"; + private Settings indexSettingsWithPolicy; + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_ilm", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Before + public void init() throws Exception { + Request request = new Request("PUT", "/_cluster/settings"); + XContentBuilder pollIntervalEntity = JsonXContent.contentBuilder(); + pollIntervalEntity.startObject(); + pollIntervalEntity.startObject("transient"); + pollIntervalEntity.field(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s"); + pollIntervalEntity.endObject(); + 
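// close the root object; shortening the poll interval to 1s lets the zero-delay delete policy used by these tests fire almost immediately +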
pollIntervalEntity.endObject(); + request.setJsonEntity(Strings.toString(pollIntervalEntity)); + assertOK(adminClient().performRequest(request)); + indexSettingsWithPolicy = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, deletePolicy) + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createNewSingletonPolicy(deletePolicy, "delete", new DeleteAction()); + } + + /** + * Tests that a policy that simply deletes an index after 0s succeeds when the index + * is created by user `test_admin` and references a policy created by `test_ilm`, with both + * users having read/write permissions on the index. The goal is to verify that one + * does not need to be the same user who created both the policy and the index to have the + * index be properly managed by ILM. + */ + public void testCanManageIndexAndPolicyDifferentUsers() throws Exception { + String index = "ilm-00001"; + createIndexAsAdmin(index, indexSettingsWithPolicy, ""); + assertBusy(() -> assertFalse(indexExists(index))); + } + + /** + * This tests the awkward behavior where an admin can have permissions to create a policy, + * but then not have permissions to operate on an index that was later associated with that policy by another + * user. + */ + @SuppressWarnings("unchecked") + public void testCanManageIndexWithNoPermissions() throws Exception { + createIndexAsAdmin("not-ilm", indexSettingsWithPolicy, ""); + Request request = new Request("GET", "/not-ilm/_ilm/explain"); + // test_ilm user does not have permissions on this index + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.FORBIDDEN.getStatus())); + + assertBusy(() -> { + Response response = adminClient().performRequest(request); + assertOK(response); + try (InputStream is = response.getEntity().getContent()) { + Map<String, Object> mapResponse = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + Map<String, Object> indexExplain = (Map<String, Object>) ((Map<String, Object>) mapResponse.get("indices")).get("not-ilm"); + assertThat(indexExplain.get("managed"), equalTo(true)); + assertThat(indexExplain.get("step"), equalTo("ERROR")); + assertThat(indexExplain.get("failed_step"), equalTo("delete")); + Map<String, Object> stepInfo = (Map<String, Object>) indexExplain.get("step_info"); + assertThat(stepInfo.get("type"), equalTo("security_exception")); + assertThat(stepInfo.get("reason"), equalTo("action [indices:admin/delete] is unauthorized for user [test_ilm]")); + } + }); + } + + private void createNewSingletonPolicy(String policy, String phaseName, LifecycleAction action) throws IOException { + Phase phase = new Phase(phaseName, TimeValue.ZERO, singletonMap(action.getWriteableName(), action)); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request request = new Request("PUT", "_ilm/policy/" + policy); + request.setEntity(entity); + client().performRequest(request); + } + + private void createIndexAsAdmin(String name, Settings settings, String mapping) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings) + + ", \"mappings\" : {" + mapping + "} }"); + assertOK(adminClient().performRequest(request)); + } +} diff
--git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java new file mode 100644 index 0000000000000..9e5ef7b01c5f6 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; + +import java.io.IOException; +import java.util.function.LongSupplier; + +public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(ExecuteStepsUpdateTask.class); + private final String policy; + private final Index index; + private final Step startStep; + private final PolicyStepsRegistry policyStepsRegistry; + private final IndexLifecycleRunner lifecycleRunner; + private LongSupplier nowSupplier; + private Step.StepKey nextStepKey = null; + + public ExecuteStepsUpdateTask(String policy, Index index, Step startStep, PolicyStepsRegistry policyStepsRegistry, + IndexLifecycleRunner lifecycleRunner, LongSupplier nowSupplier) { + this.policy = policy; + this.index = index; + this.startStep = startStep; + this.policyStepsRegistry = policyStepsRegistry; + this.nowSupplier = nowSupplier; + this.lifecycleRunner = lifecycleRunner; + } + + String getPolicy() { + return policy; + } + + Index getIndex() { + return index; + } + + Step getStartStep() { + return startStep; + } + + Step.StepKey getNextStepKey() { + return nextStepKey; + } + + /** + * {@link Step}s for the current index and policy are executed in succession until the next step to be + * executed is neither a {@link ClusterStateActionStep} nor a {@link ClusterStateWaitStep}, or does not + * belong to the same phase as the previously executed step. All other types of steps are executed outside of this + * {@link ClusterStateUpdateTask}, so they are of no concern here.
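+ * For example (an illustrative sketch, not an exhaustive contract): several cluster-state steps within one + * phase can run back to back inside a single update task, while an asynchronous step such as a force-merge + * is dispatched separately by the lifecycle runner once the new state has been applied.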
+ * + * @param currentState The current state to execute the startStep with + * @return the new cluster state after cluster-state operations and step transitions are applied + * @throws IOException if an I/O exception occurs while executing a step + */ + @Override + public ClusterState execute(final ClusterState currentState) throws IOException { + Step currentStep = startStep; + IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData == null) { + logger.debug("lifecycle for index [{}] executed but index no longer exists", index.getName()); + // This index doesn't exist any more, there's nothing to execute currently + return currentState; + } + Step registeredCurrentStep = IndexLifecycleRunner.getCurrentStep(policyStepsRegistry, policy, indexMetaData, + LifecycleExecutionState.fromIndexMetadata(indexMetaData)); + if (currentStep.equals(registeredCurrentStep)) { + ClusterState state = currentState; + // We can do cluster state steps all together until we + // either get to a step that isn't a cluster state step, or a + // cluster state wait step returns not completed + while (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) { + nextStepKey = currentStep.getNextStepKey(); + if (currentStep instanceof ClusterStateActionStep) { + // cluster state action step, so do the action and + // move the cluster state to the next step + logger.trace("[{}] performing cluster state action ({}) [{}], next: [{}]", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + try { + state = ((ClusterStateActionStep) currentStep).performAction(index, state); + } catch (Exception exception) { + return moveToErrorStep(state, currentStep.getKey(), exception); + } + if (currentStep.getNextStepKey() == null) { + return state; + } else { + state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), + currentStep.getNextStepKey(), nowSupplier); + } + } else { + // cluster state wait step, so evaluate the + // condition; if the condition is met, move to the + // next step; if it's not met, return the current + // cluster state so it can be applied and we will + // wait for the next trigger to evaluate the + // condition again + logger.trace("[{}] waiting for cluster state step condition ({}) [{}], next: [{}]", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + ClusterStateWaitStep.Result result; + try { + result = ((ClusterStateWaitStep) currentStep).isConditionMet(index, state); + } catch (Exception exception) { + return moveToErrorStep(state, currentStep.getKey(), exception); + } + if (result.isComplete()) { + logger.trace("[{}] cluster state step condition met successfully ({}) [{}], moving to next step {}", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey(), currentStep.getNextStepKey()); + if (currentStep.getNextStepKey() == null) { + return state; + } else { + state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), + currentStep.getNextStepKey(), nowSupplier); + } + } else { + logger.trace("[{}] condition not met ({}) [{}], returning existing state", + index.getName(), currentStep.getClass().getSimpleName(), currentStep.getKey()); + // We may have executed a step and set "nextStepKey" to + // a value, but in this case, since the condition was + // not met, we can't advance anyway, so don't attempt + // to run the current step + nextStepKey = null; +
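// attach the wait step's informational context (if present) to the index so the explain API can report why the index is still waiting +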
ToXContentObject stepInfo = result.getInfomationContext(); + if (stepInfo == null) { + return state; + } else { + return IndexLifecycleRunner.addStepInfoToClusterState(index, state, stepInfo); + } + } + } + // There are actions we need to take in the event a phase + // transition happens, so even if we would continue in the while + // loop, if we are about to go into a new phase, return so that + // other processing can occur + if (currentStep.getKey().getPhase().equals(currentStep.getNextStepKey().getPhase()) == false) { + return state; + } + currentStep = policyStepsRegistry.getStep(indexMetaData, currentStep.getNextStepKey()); + } + return state; + } else { + // either we are no longer the master or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState.equals(newState) == false) { + IndexMetaData indexMetaData = newState.metaData().index(index); + if (nextStepKey != null && nextStepKey != TerminalPolicyStep.KEY && indexMetaData != null) { + logger.trace("[{}] step sequence starting with {} has completed, running next step {} if it is an async action", + index.getName(), startStep.getKey(), nextStepKey); + // After the cluster state has been processed and we have moved + // to a new step, we need to conditionally execute the step iff + // it is an `AsyncAction` so that it is executed exactly once. + lifecycleRunner.maybeRunAsyncAction(newState, indexMetaData, policy, nextStepKey); + } + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException( + "policy [" + policy + "] for index [" + index.getName() + "] failed on step [" + startStep.getKey() + "].", e); + } + + private ClusterState moveToErrorStep(final ClusterState state, Step.StepKey currentStepKey, Exception cause) throws IOException { + logger.error("policy [{}] for index [{}] failed on cluster state step [{}]. Moving to ERROR step", policy, index.getName(), + currentStepKey); + MoveToErrorStepUpdateTask moveToErrorStepUpdateTask = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, + nowSupplier); + return moveToErrorStepUpdateTask.execute(state); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java new file mode 100644 index 0000000000000..1e42846b317d3 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycle.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestDeleteLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestExplainLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestGetLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestGetStatusAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestMoveToStepAction; +import 
org.elasticsearch.xpack.indexlifecycle.action.RestPutLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestRetryAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestStartILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.RestStopAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportDeleteLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportExplainLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportGetLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportGetStatusAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportMoveToStepAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportPutLifecycleAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportRemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportRetryAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportStartILMAction; +import org.elasticsearch.xpack.indexlifecycle.action.TransportStopILMAction; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; + +public class IndexLifecycle extends Plugin implements ActionPlugin { + private final SetOnce<IndexLifecycleService> indexLifecycleInitialisationService = new SetOnce<>(); + private Settings settings; + private boolean enabled; + private boolean transportClientMode; + + public IndexLifecycle(Settings settings) { + this.settings = settings; + this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.transportClientMode = XPackPlugin.transportClientMode(settings); + } + + // overridable by tests + protected Clock getClock() { + return Clock.systemUTC(); + } + + public Collection<Module> createGuiceModules() { + List<Module> modules = new ArrayList<>(); + + if (transportClientMode) { + return modules; + } + + modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexLifecycleFeatureSet.class)); + + return modules; + } + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList( + LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, + LifecycleSettings.LIFECYCLE_NAME_SETTING, + RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING); + } + + @Override + public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + indexLifecycleInitialisationService + .set(new IndexLifecycleService(settings, client, clusterService, getClock(), System::currentTimeMillis, xContentRegistry)); + return Collections.singletonList(indexLifecycleInitialisationService.get()); + } + + @Override + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return Arrays.asList(); + } + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContent() { + return Arrays.asList( + // Custom Metadata + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexLifecycleMetadata.TYPE), + parser -> IndexLifecycleMetadata.PARSER.parse(parser, null)), + // Lifecycle Types + new
NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p, c) -> TimeseriesLifecycleType.INSTANCE), + // Lifecycle Actions + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse) + ); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new RestPutLifecycleAction(settings, restController), + new RestGetLifecycleAction(settings, restController), + new RestDeleteLifecycleAction(settings, restController), + new RestExplainLifecycleAction(settings, restController), + new RestRemoveIndexLifecyclePolicyAction(settings, restController), + new RestMoveToStepAction(settings, restController), + new RestRetryAction(settings, restController), + new RestStopAction(settings, restController), + new RestStartILMAction(settings, restController), + new RestGetStatusAction(settings, restController) + ); + } + + @Override + public List> getActions() { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new ActionHandler<>(PutLifecycleAction.INSTANCE, TransportPutLifecycleAction.class), + new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class), + new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class), + new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class), + new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class), + new ActionHandler<>(MoveToStepAction.INSTANCE, TransportMoveToStepAction.class), + new ActionHandler<>(RetryAction.INSTANCE, TransportRetryAction.class), + new ActionHandler<>(StartILMAction.INSTANCE, TransportStartILMAction.class), + new ActionHandler<>(StopILMAction.INSTANCE, TransportStopILMAction.class), + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class)); + } + + @Override + public void close() { + IndexLifecycleService lifecycleService = indexLifecycleInitialisationService.get(); + if (lifecycleService != null) { + lifecycleService.close(); + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java new file mode 100644 index 0000000000000..2469621316889 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSet.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class IndexLifecycleFeatureSet implements XPackFeatureSet { + + private final boolean enabled; + private final XPackLicenseState licenseState; + private ClusterService clusterService; + + @Inject + public IndexLifecycleFeatureSet(Settings settings, @Nullable XPackLicenseState licenseState, ClusterService clusterService) { + this.clusterService = clusterService; + this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.licenseState = licenseState; + } + + @Override + public String name() { + return XPackField.INDEX_LIFECYCLE; + } + + @Override + public String description() { + return "Index lifecycle management for the Elastic Stack"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isIndexLifecycleAllowed(); + } + + @Override + public boolean enabled() { + return enabled; + } + + @Override + public Map nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener listener) { + MetaData metaData = clusterService.state().metaData(); + IndexLifecycleMetadata lifecycleMetadata = metaData.custom(IndexLifecycleMetadata.TYPE); + if (enabled() && lifecycleMetadata != null) { + Map policyUsage = new HashMap<>(); + metaData.indices().forEach(entry -> { + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(entry.value.getSettings()); + Integer indicesManaged = policyUsage.get(policyName); + if (indicesManaged == null) { + indicesManaged = 1; + } else { + indicesManaged = indicesManaged + 1; + } + policyUsage.put(policyName, indicesManaged); + }); + List policyStats = lifecycleMetadata.getPolicies().values().stream().map(policy -> { + Map phaseStats = policy.getPhases().values().stream().map(phase -> { + String[] actionNames = phase.getActions().keySet().toArray(new String[phase.getActions().size()]); + return new Tuple(phase.getName(), new PhaseStats(phase.getMinimumAge(), actionNames)); + }).collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + return new PolicyStats(phaseStats, policyUsage.getOrDefault(policy.getName(), 0)); + }).collect(Collectors.toList()); + listener.onResponse(new IndexLifecycleFeatureSetUsage(available(), enabled(), policyStats)); + } else { + listener.onResponse(new IndexLifecycleFeatureSetUsage(available(), enabled())); + } + } + 
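+
+    // Hedged aside, not part of this change: the per-policy tally in usage() above
+    // could equivalently use Map.merge instead of the explicit null check.
+    // "countIndicesPerPolicy" is a hypothetical helper name, shown for illustration.
+    private static Map<String, Integer> countIndicesPerPolicy(MetaData metaData) {
+        Map<String, Integer> policyUsage = new HashMap<>();
+        metaData.indices().forEach(entry -> {
+            String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(entry.value.getSettings());
+            // merge() starts each count at 1 and sums on subsequent hits
+            policyUsage.merge(policyName, 1, Integer::sum);
+        });
+        return policyUsage;
+    }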
+} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java new file mode 100644 index 0000000000000..f5e33fdb98079 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -0,0 +1,510 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseCompleteStep; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; + +import java.io.IOException; +import java.util.List; +import java.util.function.LongSupplier; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; + +public class IndexLifecycleRunner { + private static final Logger logger = LogManager.getLogger(IndexLifecycleRunner.class); + private PolicyStepsRegistry stepRegistry; + private ClusterService clusterService; + private LongSupplier nowSupplier; + + public IndexLifecycleRunner(PolicyStepsRegistry stepRegistry, ClusterService clusterService, LongSupplier nowSupplier) { + this.stepRegistry = stepRegistry; + this.clusterService = clusterService; + this.nowSupplier = nowSupplier; + } + + /** + * 
Return true or false depending on whether the index is ready to be in {@code phase} + */ + boolean isReadyToTransitionToThisPhase(final String policy, final IndexMetaData indexMetaData, final String phase) { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + if (lifecycleState.getLifecycleDate() == null) { + logger.trace("no index creation date has been set yet"); + return true; + } + final Long lifecycleDate = lifecycleState.getLifecycleDate(); + assert lifecycleDate != null && lifecycleDate >= 0 : "expected index to have a lifecycle date but it did not"; + final TimeValue after = stepRegistry.getIndexAgeForPhase(policy, phase); + final long now = nowSupplier.getAsLong(); + final TimeValue age = new TimeValue(now - lifecycleDate); + if (logger.isTraceEnabled()) { + logger.trace("[{}] checking for index age to be at least [{}] before performing actions in " + + "the \"{}\" phase. Now: {}, lifecycle date: {}, age: [{}/{}s]", + indexMetaData.getIndex().getName(), after, phase, + new TimeValue(now).seconds(), + new TimeValue(lifecycleDate).seconds(), + age, age.seconds()); + } + return now >= lifecycleDate + after.getMillis(); + } + + /** + * Run the current step, only if it is an asynchronous wait step. These + * wait criteria are checked periodically from the ILM scheduler + */ + public void runPeriodicStep(String policy, IndexMetaData indexMetaData) { + String index = indexMetaData.getIndex().getName(); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState); + if (currentStep == null) { + if (stepRegistry.policyExists(policy) == false) { + markPolicyDoesNotExist(policy, indexMetaData.getIndex(), lifecycleState); + return; + } else { + logger.error("current step [{}] for index [{}] with policy [{}] is not recognized", + getCurrentStepKey(lifecycleState), index, policy); + return; + } + } + + if (currentStep instanceof TerminalPolicyStep) { + logger.debug("policy [{}] for index [{}] complete, skipping execution", policy, index); + return; + } else if (currentStep instanceof ErrorStep) { + logger.debug("policy [{}] for index [{}] on an error step, skipping execution", policy, index); + return; + } + + logger.trace("[{}] maybe running periodic step ({}) with current step {}", + index, currentStep.getClass().getSimpleName(), currentStep.getKey()); + // Only phase changing and async wait steps should be run through periodic polling + if (currentStep instanceof PhaseCompleteStep) { + // Only proceed to the next step if enough time has elapsed to go into the next phase + if (isReadyToTransitionToThisPhase(policy, indexMetaData, currentStep.getNextStepKey().getPhase())) { + moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey()); + } + } else if (currentStep instanceof AsyncWaitStep) { + logger.debug("[{}] running periodic policy with current-step [{}]", index, currentStep.getKey()); + ((AsyncWaitStep) currentStep).evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + + @Override + public void onResponse(boolean conditionMet, ToXContentObject stepInfo) { + logger.trace("cs-change-async-wait-callback, [{}] current-step: {}", index, currentStep.getKey()); + if (conditionMet) { + moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey()); + } else if (stepInfo != null) { + setStepInfo(indexMetaData.getIndex(), policy, 
currentStep.getKey(), stepInfo); + } + } + + @Override + public void onFailure(Exception e) { + moveToErrorStep(indexMetaData.getIndex(), policy, currentStep.getKey(), e); + } + }); + } else { + logger.trace("[{}] ignoring non periodic step execution from step transition [{}]", index, currentStep.getKey()); + } + } + + /** + * If the current step (matching the expected step key) is an asynchronous action step, run it + */ + public void maybeRunAsyncAction(ClusterState currentState, IndexMetaData indexMetaData, String policy, StepKey expectedStepKey) { + String index = indexMetaData.getIndex().getName(); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState); + if (currentStep == null) { + logger.warn("current step [{}] for index [{}] with policy [{}] is not recognized", + getCurrentStepKey(lifecycleState), index, policy); + return; + } + + logger.trace("[{}] maybe running async action step ({}) with current step {}", + index, currentStep.getClass().getSimpleName(), currentStep.getKey()); + if (currentStep.getKey().equals(expectedStepKey) == false) { + throw new IllegalStateException("expected index [" + indexMetaData.getIndex().getName() + "] with policy [" + policy + + "] to have current step consistent with provided step key (" + expectedStepKey + ") but it was " + currentStep.getKey()); + } + if (currentStep instanceof AsyncActionStep) { + logger.debug("[{}] running policy with async action step [{}]", index, currentStep.getKey()); + ((AsyncActionStep) currentStep).performAction(indexMetaData, currentState, new AsyncActionStep.Listener() { + + @Override + public void onResponse(boolean complete) { + logger.trace("cs-change-async-action-callback, [{}], current-step: {}", index, currentStep.getKey()); + if (complete && ((AsyncActionStep) currentStep).indexSurvives()) { + moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey()); + } + } + + @Override + public void onFailure(Exception e) { + moveToErrorStep(indexMetaData.getIndex(), policy, currentStep.getKey(), e); + } + }); + } else { + logger.trace("[{}] ignoring non async action step execution from step transition [{}]", index, currentStep.getKey()); + } + } + + /** + * Run the current step that either waits for index age, or updates/waits-on cluster state. 
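+ * <p>Hedged usage sketch (the index name and surrounding variables are illustrative):
+ * <pre>{@code
+ * IndexMetaData idxMeta = clusterState.metaData().index("my-index");
+ * String policy = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings());
+ * runner.runPolicyAfterStateChange(policy, idxMeta);
+ * }</pre>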
+ * Invoked after the cluster state has been changed + */ + public void runPolicyAfterStateChange(String policy, IndexMetaData indexMetaData) { + String index = indexMetaData.getIndex().getName(); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + Step currentStep = getCurrentStep(stepRegistry, policy, indexMetaData, lifecycleState); + if (currentStep == null) { + if (stepRegistry.policyExists(policy) == false) { + markPolicyDoesNotExist(policy, indexMetaData.getIndex(), lifecycleState); + return; + } else { + logger.error("current step [{}] for index [{}] with policy [{}] is not recognized", + getCurrentStepKey(lifecycleState), index, policy); + return; + } + } + + if (currentStep instanceof TerminalPolicyStep) { + logger.debug("policy [{}] for index [{}] complete, skipping execution", policy, index); + return; + } else if (currentStep instanceof ErrorStep) { + logger.debug("policy [{}] for index [{}] on an error step, skipping execution", policy, index); + return; + } + + logger.trace("[{}] maybe running step ({}) after state change: {}", + index, currentStep.getClass().getSimpleName(), currentStep.getKey()); + if (currentStep instanceof PhaseCompleteStep) { + // Only proceed to the next step if enough time has elapsed to go into the next phase + if (isReadyToTransitionToThisPhase(policy, indexMetaData, currentStep.getNextStepKey().getPhase())) { + moveToStep(indexMetaData.getIndex(), policy, currentStep.getKey(), currentStep.getNextStepKey()); + } + } else if (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) { + logger.debug("[{}] running policy with current-step [{}]", indexMetaData.getIndex().getName(), currentStep.getKey()); + clusterService.submitStateUpdateTask("ilm-execute-cluster-state-steps", + new ExecuteStepsUpdateTask(policy, indexMetaData.getIndex(), currentStep, stepRegistry, this, nowSupplier)); + } else { + logger.trace("[{}] ignoring step execution from cluster state change event [{}]", index, currentStep.getKey()); + } + } + + /** + * Retrieves the current {@link StepKey} from the index settings. Note that + * it is illegal for the step to be set with the phase and/or action unset, + * or for the step to be unset with the phase and/or action set. All three + * settings must be either present or missing. + * + * @param lifecycleState the index custom data to extract the {@link StepKey} from. 
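+ * @return the current {@link StepKey}, or null if no step information is present.
+ *         Hedged illustration (values hypothetical): phase "hot", action "rollover",
+ *         step "attempt-rollover" yields {@code new StepKey("hot", "rollover", "attempt-rollover")};
+ *         all three unset yields {@code null}.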
+ */ + public static StepKey getCurrentStepKey(LifecycleExecutionState lifecycleState) { + String currentPhase = lifecycleState.getPhase(); + String currentAction = lifecycleState.getAction(); + String currentStep = lifecycleState.getStep(); + if (Strings.isNullOrEmpty(currentStep)) { + assert Strings.isNullOrEmpty(currentPhase) : "Current phase is not empty: " + currentPhase; + assert Strings.isNullOrEmpty(currentAction) : "Current action is not empty: " + currentAction; + return null; + } else { + assert Strings.isNullOrEmpty(currentPhase) == false; + assert Strings.isNullOrEmpty(currentAction) == false; + return new StepKey(currentPhase, currentAction, currentStep); + } + } + + static Step getCurrentStep(PolicyStepsRegistry stepRegistry, String policy, IndexMetaData indexMetaData, + LifecycleExecutionState lifecycleState) { + StepKey currentStepKey = getCurrentStepKey(lifecycleState); + logger.trace("[{}] retrieved current step key: {}", indexMetaData.getIndex().getName(), currentStepKey); + if (currentStepKey == null) { + return stepRegistry.getFirstStep(policy); + } else { + return stepRegistry.getStep(indexMetaData, currentStepKey); + } + } + + /** + * This method is intended for handling moving to different steps from {@link TransportAction} executions. + * For this reason, it is reasonable to throw {@link IllegalArgumentException} when state is not as expected. + * + * @param indexName The index whose step is to change + * @param currentState The current {@link ClusterState} + * @param currentStepKey The current {@link StepKey} found for the index in the current cluster state + * @param nextStepKey The next step to move the index into + * @param nowSupplier The current-time supplier for updating when steps changed + * @param stepRegistry The steps registry to check a step-key's existence in the index's current policy + * @return The updated cluster state where the index moved to nextStepKey + */ + static ClusterState moveClusterStateToStep(String indexName, ClusterState currentState, StepKey currentStepKey, + StepKey nextStepKey, LongSupplier nowSupplier, + PolicyStepsRegistry stepRegistry) { + IndexMetaData idxMeta = currentState.getMetaData().index(indexName); + Settings indexSettings = idxMeta.getSettings(); + String indexPolicySetting = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings); + + // policy could be updated in-between execution + if (Strings.isNullOrEmpty(indexPolicySetting)) { + throw new IllegalArgumentException("index [" + indexName + "] is not associated with an Index Lifecycle Policy"); + } + + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta); + if (currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(lifecycleState)) == false) { + throw new IllegalArgumentException("index [" + indexName + "] is not on current step [" + currentStepKey + "]"); + } + + if (stepRegistry.stepExists(indexPolicySetting, nextStepKey) == false) { + throw new IllegalArgumentException("step [" + nextStepKey + "] for index [" + idxMeta.getIndex().getName() + + "] with policy [" + indexPolicySetting + "] does not exist"); + } + + return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, nextStepKey, nowSupplier); + } + + static ClusterState moveClusterStateToNextStep(Index index, ClusterState clusterState, StepKey currentStep, StepKey nextStep, + LongSupplier nowSupplier) { + IndexMetaData idxMeta = clusterState.getMetaData().index(index); + IndexLifecycleMetadata ilmMeta = 
clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); + LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas() + .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings())); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta); + LifecycleExecutionState newLifecycleState = moveExecutionStateToNextStep(policyMetadata, + lifecycleState, currentStep, nextStep, nowSupplier); + ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newLifecycleState); + + return newClusterStateBuilder.build(); + } + + static ClusterState moveClusterStateToErrorStep(Index index, ClusterState clusterState, StepKey currentStep, Exception cause, + LongSupplier nowSupplier) throws IOException { + IndexMetaData idxMeta = clusterState.getMetaData().index(index); + IndexLifecycleMetadata ilmMeta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); + LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas() + .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings())); + XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder(); + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause); + causeXContentBuilder.endObject(); + LifecycleExecutionState nextStepState = moveExecutionStateToNextStep(policyMetadata, + LifecycleExecutionState.fromIndexMetadata(idxMeta), currentStep, new StepKey(currentStep.getPhase(), + currentStep.getAction(), ErrorStep.NAME), nowSupplier); + LifecycleExecutionState.Builder failedState = LifecycleExecutionState.builder(nextStepState); + failedState.setFailedStep(currentStep.getName()); + failedState.setStepInfo(BytesReference.bytes(causeXContentBuilder).utf8ToString()); + ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, failedState.build()); + return newClusterStateBuilder.build(); + } + + ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) { + ClusterState newState = currentState; + for (String index : indices) { + IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData == null) { + throw new IllegalArgumentException("index [" + index + "] does not exist"); + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + String failedStep = lifecycleState.getFailedStep(); + if (currentStepKey != null && ErrorStep.NAME.equals(currentStepKey.getName()) + && Strings.isNullOrEmpty(failedStep) == false) { + StepKey nextStepKey = new StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), failedStep); + newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry); + } else { + throw new IllegalArgumentException("cannot retry an action for an index [" + + index + "] that has not encountered an error when running a Lifecycle Policy"); + } + } + return newState; + } + + private static LifecycleExecutionState moveExecutionStateToNextStep(LifecyclePolicyMetadata policyMetadata, + LifecycleExecutionState existingState, + StepKey currentStep, StepKey nextStep, + LongSupplier nowSupplier) { + long nowAsMillis = nowSupplier.getAsLong(); + LifecycleExecutionState.Builder updatedState = LifecycleExecutionState.builder(existingState); + 
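+        // The remainder of this method keeps the existing behavior; summarized:
+        // 1. record the new phase/action/step and stamp the step entry time,
+        // 2. clear failed-step and step-info carried over from the previous step,
+        // 3. on a phase change, cache the new phase definition and stamp the phase time.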
updatedState.setPhase(nextStep.getPhase()); + updatedState.setAction(nextStep.getAction()); + updatedState.setStep(nextStep.getName()); + updatedState.setStepTime(nowAsMillis); + + // clear any step info or error-related settings from the current step + updatedState.setFailedStep(null); + updatedState.setStepInfo(null); + + if (currentStep.getPhase().equals(nextStep.getPhase()) == false) { + final String newPhaseDefinition; + final Phase nextPhase; + if ("new".equals(nextStep.getPhase()) || TerminalPolicyStep.KEY.equals(nextStep)) { + nextPhase = null; + } else { + nextPhase = policyMetadata.getPolicy().getPhases().get(nextStep.getPhase()); + } + PhaseExecutionInfo phaseExecutionInfo = new PhaseExecutionInfo(policyMetadata.getName(), nextPhase, + policyMetadata.getVersion(), policyMetadata.getModifiedDate()); + newPhaseDefinition = Strings.toString(phaseExecutionInfo, false, false); + updatedState.setPhaseDefinition(newPhaseDefinition); + updatedState.setPhaseTime(nowAsMillis); + } else if (currentStep.getPhase().equals(InitializePolicyContextStep.INITIALIZATION_PHASE)) { + // The "new" phase is the initialization phase, usually the phase + // time would be set on phase transition, but since there is no + // transition into the "new" phase, we set it any time in the "new" + // phase + updatedState.setPhaseTime(nowAsMillis); + } + + if (currentStep.getAction().equals(nextStep.getAction()) == false) { + updatedState.setActionTime(nowAsMillis); + } + return updatedState.build(); + } + + static ClusterState.Builder newClusterStateWithLifecycleState(Index index, ClusterState clusterState, + LifecycleExecutionState lifecycleState) { + ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); + newClusterStateBuilder.metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.asMap()))); + return newClusterStateBuilder; + } + + /** + * Conditionally updates cluster state with new step info. 
The new cluster state is only + * built if the step info has changed, otherwise the same old clusterState is + * returned + * + * @param index the index to modify + * @param clusterState the cluster state to modify + * @param stepInfo the new step info to update + * @return Updated cluster state with stepInfo if changed, otherwise the same cluster state + * if no changes to step info exist + * @throws IOException if parsing step info fails + */ + static ClusterState addStepInfoToClusterState(Index index, ClusterState clusterState, ToXContentObject stepInfo) throws IOException { + IndexMetaData indexMetaData = clusterState.getMetaData().index(index); + if (indexMetaData == null) { + // This index doesn't exist anymore, we can't do anything + return clusterState; + } + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); + final String stepInfoString; + try (XContentBuilder infoXContentBuilder = JsonXContent.contentBuilder()) { + stepInfo.toXContent(infoXContentBuilder, ToXContent.EMPTY_PARAMS); + stepInfoString = BytesReference.bytes(infoXContentBuilder).utf8ToString(); + } + if (stepInfoString.equals(lifecycleState.getStepInfo())) { + return clusterState; + } + LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(lifecycleState); + newState.setStepInfo(stepInfoString); + ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newState.build()); + return newClusterStateBuilder.build(); + } + + private void moveToStep(Index index, String policy, StepKey currentStepKey, StepKey nextStepKey) { + logger.debug("[{}] moving to step [{}] {} -> {}", index.getName(), policy, currentStepKey, nextStepKey); + clusterService.submitStateUpdateTask("ilm-move-to-step", + new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey, nowSupplier, clusterState -> + { + IndexMetaData indexMetaData = clusterState.metaData().index(index); + if (nextStepKey != null && nextStepKey != TerminalPolicyStep.KEY && indexMetaData != null) { + maybeRunAsyncAction(clusterState, indexMetaData, policy, nextStepKey); + } + })); + } + + private void moveToErrorStep(Index index, String policy, StepKey currentStepKey, Exception e) { + logger.error(new ParameterizedMessage("policy [{}] for index [{}] failed on step [{}]. 
Moving to ERROR step", + policy, index.getName(), currentStepKey), e); + clusterService.submitStateUpdateTask("ilm-move-to-error-step", + new MoveToErrorStepUpdateTask(index, policy, currentStepKey, e, nowSupplier)); + } + + private void setStepInfo(Index index, String policy, StepKey currentStepKey, ToXContentObject stepInfo) { + clusterService.submitStateUpdateTask("ilm-set-step-info", new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo)); + } + + public static ClusterState removePolicyForIndexes(final Index[] indices, ClusterState currentState, List failedIndexes) { + MetaData.Builder newMetadata = MetaData.builder(currentState.getMetaData()); + boolean clusterStateChanged = false; + for (Index index : indices) { + IndexMetaData indexMetadata = currentState.getMetaData().index(index); + if (indexMetadata == null) { + // Index doesn't exist so fail it + failedIndexes.add(index.getName()); + } else { + IndexMetaData.Builder newIdxMetadata = IndexLifecycleRunner.removePolicyForIndex(indexMetadata); + if (newIdxMetadata != null) { + newMetadata.put(newIdxMetadata); + clusterStateChanged = true; + } + } + } + if (clusterStateChanged) { + ClusterState.Builder newClusterState = ClusterState.builder(currentState); + newClusterState.metaData(newMetadata); + return newClusterState.build(); + } else { + return currentState; + } + } + + private static IndexMetaData.Builder removePolicyForIndex(IndexMetaData indexMetadata) { + Settings idxSettings = indexMetadata.getSettings(); + Settings.Builder newSettings = Settings.builder().put(idxSettings); + boolean notChanged = true; + + notChanged &= Strings.isNullOrEmpty(newSettings.remove(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + notChanged &= Strings.isNullOrEmpty(newSettings.remove(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.getKey())); + long newSettingsVersion = notChanged ? indexMetadata.getSettingsVersion() : 1 + indexMetadata.getSettingsVersion(); + + IndexMetaData.Builder builder = IndexMetaData.builder(indexMetadata); + builder.removeCustom(ILM_CUSTOM_METADATA_KEY); + return builder.settings(newSettings).settingsVersion(newSettingsVersion); + } + + private void markPolicyDoesNotExist(String policyName, Index index, LifecycleExecutionState executionState) { + logger.debug("policy [{}] for index [{}] does not exist, recording this in step_info for this index", + policyName, index.getName()); + setStepInfo(index, policyName, getCurrentStepKey(executionState), + new SetStepInfoUpdateTask.ExceptionWrapper( + new IllegalArgumentException("policy [" + policyName + "] does not exist"))); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java new file mode 100644 index 0000000000000..637c1855dd04b --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -0,0 +1,266 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; + +import java.io.Closeable; +import java.time.Clock; +import java.util.Collections; +import java.util.Set; +import java.util.function.LongSupplier; + +/** + * A service which runs the {@link LifecyclePolicy}s associated with indexes. + */ +public class IndexLifecycleService + implements ClusterStateListener, ClusterStateApplier, SchedulerEngine.Listener, Closeable, LocalNodeMasterListener { + private static final Logger logger = LogManager.getLogger(IndexLifecycleService.class); + private static final Set IGNORE_ACTIONS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkAction.NAME); + private volatile boolean isMaster = false; + private volatile TimeValue pollInterval; + + private final SetOnce scheduler = new SetOnce<>(); + private final Clock clock; + private final PolicyStepsRegistry policyRegistry; + private final IndexLifecycleRunner lifecycleRunner; + private final Settings settings; + private Client client; + private ClusterService clusterService; + private LongSupplier nowSupplier; + private SchedulerEngine.Job scheduledJob; + + public IndexLifecycleService(Settings settings, Client client, ClusterService clusterService, Clock clock, LongSupplier nowSupplier, + NamedXContentRegistry xContentRegistry) { + super(); + this.settings = settings; + this.client = client; + this.clusterService = clusterService; + this.clock = clock; + this.nowSupplier = nowSupplier; + this.scheduledJob = null; + this.policyRegistry = new PolicyStepsRegistry(xContentRegistry, client); + this.lifecycleRunner = new IndexLifecycleRunner(policyRegistry, clusterService, nowSupplier); + this.pollInterval = LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); + clusterService.addStateApplier(this); + clusterService.addListener(this); + clusterService.addLocalNodeMasterListener(this); + clusterService.getClusterSettings().addSettingsUpdateConsumer(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, + this::updatePollInterval); + } + + public void maybeRunAsyncAction(ClusterState clusterState, 
IndexMetaData indexMetaData, StepKey nextStepKey) { + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetaData.getSettings()); + lifecycleRunner.maybeRunAsyncAction(clusterState, indexMetaData, policyName, nextStepKey); + } + + public ClusterState moveClusterStateToStep(ClusterState currentState, String indexName, StepKey currentStepKey, StepKey nextStepKey) { + return IndexLifecycleRunner.moveClusterStateToStep(indexName, currentState, currentStepKey, nextStepKey, + nowSupplier, policyRegistry); + } + + public ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) { + return lifecycleRunner.moveClusterStateToFailedStep(currentState, indices); + } + + @Override + public void onMaster() { + this.isMaster = true; + maybeScheduleJob(); + + ClusterState clusterState = clusterService.state(); + IndexLifecycleMetadata currentMetadata = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata != null) { + OperationMode currentMode = currentMetadata.getOperationMode(); + if (OperationMode.STOPPED.equals(currentMode)) { + return; + } + + boolean safeToStop = true; // true until proven false by a run policy + + // If we just became master, we need to kick off any async actions that + // may have not been run due to master rollover + for (ObjectCursor cursor : clusterState.metaData().indices().values()) { + IndexMetaData idxMeta = cursor.value; + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); + if (Strings.isNullOrEmpty(policyName) == false) { + StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta)); + if (OperationMode.STOPPING == currentMode && + stepKey != null && + IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) { + logger.info("skipping policy [{}] for index [{}]. stopping Index Lifecycle execution", + policyName, idxMeta.getIndex().getName()); + continue; + } + lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); + safeToStop = false; // proven false! 
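+                    // This index still has a policy mid-flight, so it is not yet safe to
+                    // move a STOPPING ILM to STOPPED; that transition only happens below
+                    // once an entire pass over the indices leaves safeToStop untouched.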
+ } + } + if (safeToStop && OperationMode.STOPPING == currentMode) { + submitOperationModeUpdate(OperationMode.STOPPED); + } + } + } + + @Override + public void offMaster() { + this.isMaster = false; + cancelJob(); + } + + @Override + public String executorName() { + return ThreadPool.Names.MANAGEMENT; + } + + private void updatePollInterval(TimeValue newInterval) { + this.pollInterval = newInterval; + maybeScheduleJob(); + } + + // pkg-private for testing + SchedulerEngine getScheduler() { + return scheduler.get(); + } + + // pkg-private for testing + SchedulerEngine.Job getScheduledJob() { + return scheduledJob; + } + + private void maybeScheduleJob() { + if (this.isMaster) { + if (scheduler.get() == null) { + scheduler.set(new SchedulerEngine(settings, clock)); + scheduler.get().register(this); + } + scheduledJob = new SchedulerEngine.Job(XPackField.INDEX_LIFECYCLE, new TimeValueSchedule(pollInterval)); + scheduler.get().add(scheduledJob); + } + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + IndexLifecycleMetadata lifecycleMetadata = event.state().metaData().custom(IndexLifecycleMetadata.TYPE); + if (this.isMaster && lifecycleMetadata != null) { + triggerPolicies(event.state(), true); + } + } + + @Override + public void applyClusterState(ClusterChangedEvent event) { + if (event.localNodeMaster()) { // only act if we are master, otherwise + // keep idle until elected + if (event.state().metaData().custom(IndexLifecycleMetadata.TYPE) != null) { + policyRegistry.update(event.state()); + } + } + } + + private void cancelJob() { + if (scheduler.get() != null) { + scheduler.get().remove(XPackField.INDEX_LIFECYCLE); + scheduledJob = null; + } + } + + @Override + public void triggered(SchedulerEngine.Event event) { + if (event.getJobName().equals(XPackField.INDEX_LIFECYCLE)) { + logger.trace("job triggered: " + event.getJobName() + ", " + event.getScheduledTime() + ", " + event.getTriggeredTime()); + triggerPolicies(clusterService.state(), false); + } + } + + /** + * executes the policy execution on the appropriate indices by running cluster-state tasks per index. + * + * If stopping ILM was requested, and it is safe to stop, this will also be done here + * when possible after no policies are executed. 
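+     * <p>Hedged sketch of the two call sites, both in this class:
+     * <pre>{@code
+     * // from the cluster-state listener (clusterChanged):
+     * triggerPolicies(event.state(), true);
+     * // from the scheduler (triggered):
+     * triggerPolicies(clusterService.state(), false);
+     * }</pre>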
+ * + * @param clusterState the current cluster state + * @param fromClusterStateChange whether things are triggered from the cluster-state-listener or the scheduler + */ + void triggerPolicies(ClusterState clusterState, boolean fromClusterStateChange) { + IndexLifecycleMetadata currentMetadata = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); + + if (currentMetadata == null) { + return; + } + + OperationMode currentMode = currentMetadata.getOperationMode(); + + if (OperationMode.STOPPED.equals(currentMode)) { + return; + } + + boolean safeToStop = true; // true until proven false by a run policy + + // loop through all indices in cluster state and filter for ones that are + // managed by the Index Lifecycle Service they have a index.lifecycle.name setting + // associated to a policy + for (ObjectCursor cursor : clusterState.metaData().indices().values()) { + IndexMetaData idxMeta = cursor.value; + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); + if (Strings.isNullOrEmpty(policyName) == false) { + StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta)); + if (OperationMode.STOPPING == currentMode && stepKey != null + && IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) { + logger.info("skipping policy [" + policyName + "] for index [" + idxMeta.getIndex().getName() + + "]. stopping Index Lifecycle execution"); + continue; + } + if (fromClusterStateChange) { + lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta); + } else { + lifecycleRunner.runPeriodicStep(policyName, idxMeta); + } + safeToStop = false; // proven false! + } + } + if (safeToStop && OperationMode.STOPPING == currentMode) { + submitOperationModeUpdate(OperationMode.STOPPED); + } + } + + @Override + public void close() { + SchedulerEngine engine = scheduler.get(); + if (engine != null) { + engine.stop(); + } + } + + public void submitOperationModeUpdate(OperationMode mode) { + clusterService.submitStateUpdateTask("ilm_operation_mode_update", + new OperationModeUpdateTask(mode)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java new file mode 100644 index 0000000000000..b923700d25f68 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicySecurityClient.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.util.Map; + +/** + * This class wraps a client and calls the client using the headers provided in + * constructor. The intent is to abstract away the fact that there are headers + * so {@link Step}s etc. can call this client as if it was a normal client. 
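+ * <p>Hedged construction sketch, mirroring how {@code PolicyStepsRegistry} wires one up
+ * (the {@code policyMetadata} variable is assumed to be in scope):
+ * <pre>{@code
+ * Client wrapped = new LifecyclePolicySecurityClient(client,
+ *     ClientHelper.INDEX_LIFECYCLE_ORIGIN, policyMetadata.getHeaders());
+ * // steps can then use "wrapped" exactly like a normal client
+ * }</pre>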
+ * + * Note: This client will not close the wrapped {@link Client} instance since + * the intent is that the wrapped client is shared between multiple instances of + * this class. + */ +public class LifecyclePolicySecurityClient extends AbstractClient { + + private Client client; + private Map headers; + private String origin; + + public LifecyclePolicySecurityClient(Client client, String origin, Map headers) { + super(client.settings(), client.threadPool()); + this.client = client; + this.origin = origin; + this.headers = headers; + } + + @Override + public void close() { + // Doesn't close the wrapped client since this client object is shared + // among multiple instances + } + + @Override + protected void doExecute(Action action, Request request, + ActionListener listener) { + ClientHelper.executeWithHeadersAsync(headers, origin, client, action, request, listener); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java new file mode 100644 index 0000000000000..5af1a05309e7e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTask.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.io.IOException; +import java.util.function.LongSupplier; + +public class MoveToErrorStepUpdateTask extends ClusterStateUpdateTask { + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private LongSupplier nowSupplier; + private Exception cause; + + public MoveToErrorStepUpdateTask(Index index, String policy, Step.StepKey currentStepKey, Exception cause, LongSupplier nowSupplier) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.cause = cause; + this.nowSupplier = nowSupplier; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + Exception getCause() { + return cause; + } + + @Override + public ClusterState execute(ClusterState currentState) throws IOException { + IndexMetaData idxMeta = currentState.getMetaData().index(index); + if (idxMeta == null) { + // Index must have been since deleted, ignore it + return currentState; + } + Settings indexSettings = idxMeta.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(idxMeta); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + return IndexLifecycleRunner.moveClusterStateToErrorStep(index, 
currentState, currentStepKey, cause, nowSupplier); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + + "] failed trying to move from step [" + currentStepKey + "] to the ERROR step.", e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java new file mode 100644 index 0000000000000..750fd1af5da42 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.util.function.Consumer; +import java.util.function.LongSupplier; + +public class MoveToNextStepUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(MoveToNextStepUpdateTask.class); + + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private final Step.StepKey nextStepKey; + private final LongSupplier nowSupplier; + private final Consumer stateChangeConsumer; + + public MoveToNextStepUpdateTask(Index index, String policy, Step.StepKey currentStepKey, Step.StepKey nextStepKey, + LongSupplier nowSupplier, Consumer stateChangeConsumer) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.nextStepKey = nextStepKey; + this.nowSupplier = nowSupplier; + this.stateChangeConsumer = stateChangeConsumer; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + Step.StepKey getNextStepKey() { + return nextStepKey; + } + + @Override + public ClusterState execute(ClusterState currentState) { + IndexMetaData indexMetaData = currentState.getMetaData().index(index); + if (indexMetaData == null) { + // Index must have been since deleted, ignore it + return currentState; + } + Settings indexSettings = indexMetaData.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(currentState.getMetaData().index(index)); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + logger.trace("moving [{}] to next step 
({})", index.getName(), nextStepKey); + return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task. In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState.equals(newState) == false) { + stateChangeConsumer.accept(newState); + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + + currentStepKey + "] to step [" + nextStepKey + "].", e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java new file mode 100644 index 0000000000000..0cf24300831cd --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTask.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; + +public class OperationModeUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(OperationModeUpdateTask.class); + private final OperationMode mode; + + public OperationModeUpdateTask(OperationMode mode) { + this.mode = mode; + } + + OperationMode getOperationMode() { + return mode; + } + + @Override + public ClusterState execute(ClusterState currentState) { + IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) { + return currentState; + } else if (currentMetadata == null) { + currentMetadata = IndexLifecycleMetadata.EMPTY; + } + + final OperationMode newMode; + if (currentMetadata.getOperationMode().isValidChange(mode)) { + newMode = mode; + } else { + newMode = currentMetadata.getOperationMode(); + } + + ClusterState.Builder builder = new ClusterState.Builder(currentState); + MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); + metadataBuilder.putCustom(IndexLifecycleMetadata.TYPE, + new IndexLifecycleMetadata(currentMetadata.getPolicyMetadatas(), newMode)); + builder.metaData(metadataBuilder.build()); + return builder.build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("unable to update lifecycle metadata with new mode [" + mode + "]", e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java new file mode 100644 index 0000000000000..d753a5035f756 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java @@ -0,0 +1,256 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Collectors; + +public class PolicyStepsRegistry { + private static final Logger logger = LogManager.getLogger(PolicyStepsRegistry.class); + + private final Client client; + // keeps track of existing policies in the cluster state + private final SortedMap lifecyclePolicyMap; + // keeps track of what the first step in a policy is, the key is policy name + private final Map firstStepMap; + // keeps track of a mapping from policy/step-name to respective Step, the key is policy name + private final Map> stepMap; + private final NamedXContentRegistry xContentRegistry; + + public PolicyStepsRegistry(NamedXContentRegistry xContentRegistry, Client client) { + this(new TreeMap<>(), new HashMap<>(), new HashMap<>(), xContentRegistry, client); + } + + PolicyStepsRegistry(SortedMap lifecyclePolicyMap, + Map firstStepMap, Map> stepMap, + NamedXContentRegistry xContentRegistry, Client client) { + this.lifecyclePolicyMap = lifecyclePolicyMap; + this.firstStepMap = 
firstStepMap; + this.stepMap = stepMap; + this.xContentRegistry = xContentRegistry; + this.client = client; + } + + SortedMap getLifecyclePolicyMap() { + return lifecyclePolicyMap; + } + + Map getFirstStepMap() { + return firstStepMap; + } + + Map> getStepMap() { + return stepMap; + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void update(ClusterState clusterState) { + final IndexLifecycleMetadata meta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); + + assert meta != null : "IndexLifecycleMetadata cannot be null when updating the policy steps registry"; + + Diff> diff = DiffableUtils.diff(lifecyclePolicyMap, meta.getPolicyMetadatas(), + DiffableUtils.getStringKeySerializer(), + // Use a non-diffable value serializer. Otherwise, changed actions within the + // same phase show up as diffs instead of upserts. + // We want to treat any change in the policy as an upsert so the map is + // correctly rebuilt. + new DiffableUtils.NonDiffableValueSerializer() { + @Override + public void write(LifecyclePolicyMetadata value, StreamOutput out) { + // This is never called + throw new UnsupportedOperationException("should never be called"); + } + + @Override + public LifecyclePolicyMetadata read(StreamInput in, String key) { + // This is never called + throw new UnsupportedOperationException("should never be called"); + } + }); + DiffableUtils.MapDiff> mapDiff = (DiffableUtils.MapDiff) diff; + + for (String deletedPolicyName : mapDiff.getDeletes()) { + lifecyclePolicyMap.remove(deletedPolicyName); + firstStepMap.remove(deletedPolicyName); + stepMap.remove(deletedPolicyName); + } + + if (mapDiff.getUpserts().isEmpty() == false) { + for (LifecyclePolicyMetadata policyMetadata : mapDiff.getUpserts().values()) { + LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + policyMetadata.getHeaders()); + lifecyclePolicyMap.put(policyMetadata.getName(), policyMetadata); + List policyAsSteps = policyMetadata.getPolicy().toSteps(policyClient); + if (policyAsSteps.isEmpty() == false) { + firstStepMap.put(policyMetadata.getName(), policyAsSteps.get(0)); + final Map stepMapForPolicy = new HashMap<>(); + for (Step step : policyAsSteps) { + assert ErrorStep.NAME.equals(step.getKey().getName()) == false : "unexpected error step in policy"; + stepMapForPolicy.put(step.getKey(), step); + } + logger.trace("updating cached steps for [{}] policy, new steps: {}", + policyMetadata.getName(), stepMapForPolicy.keySet()); + stepMap.put(policyMetadata.getName(), stepMapForPolicy); + } + } + } + } + + private List parseStepsFromPhase(String policy, String currentPhase, String phaseDef) throws IOException { + final PhaseExecutionInfo phaseExecutionInfo; + LifecyclePolicyMetadata policyMetadata = lifecyclePolicyMap.get(policy); + if (policyMetadata == null) { + throw new IllegalStateException("unable to parse steps for policy [" + policy + "] as it doesn't exist"); + } + LifecyclePolicy currentPolicy = policyMetadata.getPolicy(); + final LifecyclePolicy policyToExecute; + if (InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phaseDef) + || TerminalPolicyStep.COMPLETED_PHASE.equals(phaseDef)) { + // It is OK to re-use the potentially modified policy here since we are in an initialization or completed phase + policyToExecute = currentPolicy; + } else { + // the phase definition is a concrete phase, so parse it instead of re-using the (potentially updated) policy + try (XContentParser parser =
JsonXContent.jsonXContent.createParser(xContentRegistry, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, phaseDef)) { + phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase); + } + Map phaseMap = new HashMap<>(currentPolicy.getPhases()); + if (phaseExecutionInfo.getPhase() != null) { + phaseMap.put(currentPhase, phaseExecutionInfo.getPhase()); + } + policyToExecute = new LifecyclePolicy(currentPolicy.getType(), currentPolicy.getName(), phaseMap); + } + LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, + ClientHelper.INDEX_LIFECYCLE_ORIGIN, lifecyclePolicyMap.get(policy).getHeaders()); + final List steps = policyToExecute.toSteps(policyClient); + // Build a list of steps that correspond with the phase the index is currently in + final List phaseSteps; + if (steps == null) { + phaseSteps = new ArrayList<>(); + } else { + phaseSteps = steps.stream() + .filter(e -> e.getKey().getPhase().equals(currentPhase)) + .collect(Collectors.toList()); + } + logger.trace("parsed steps for policy [{}] in phase [{}], definition: [{}], steps: [{}]", + policy, currentPhase, phaseDef, phaseSteps); + return phaseSteps; + } + + @Nullable + public Step getStep(final IndexMetaData indexMetaData, final Step.StepKey stepKey) { + if (ErrorStep.NAME.equals(stepKey.getName())) { + return new ErrorStep(new Step.StepKey(stepKey.getPhase(), stepKey.getAction(), ErrorStep.NAME)); + } + + final String phase = stepKey.getPhase(); + final String policyName = indexMetaData.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + final Index index = indexMetaData.getIndex(); + + if (policyName == null) { + throw new IllegalArgumentException("failed to retrieve step " + stepKey + " as index [" + index.getName() + "] has no policy"); + } + + // parse phase steps from the phase definition in the index settings + final String phaseJson = Optional.ofNullable(LifecycleExecutionState.fromIndexMetadata(indexMetaData).getPhaseDefinition()) + .orElse(InitializePolicyContextStep.INITIALIZATION_PHASE); + + final List phaseSteps; + try { + phaseSteps = parseStepsFromPhase(policyName, phase, phaseJson); + } catch (IOException e) { + throw new ElasticsearchException("failed to load cached steps for " + stepKey, e); + } catch (XContentParseException parseErr) { + throw new XContentParseException(parseErr.getLocation(), + "failed to load steps for " + stepKey + " from [" + phaseJson + "]", parseErr); + } + + assert phaseSteps.stream().allMatch(step -> step.getKey().getPhase().equals(phase)) : + "expected phase steps loaded from phase definition for [" + index.getName() + "] to be in phase [" + phase + + "] but they were not, steps: " + phaseSteps; + + // Return the step that matches the given stepKey or else null if we couldn't find it + return phaseSteps.stream().filter(step -> step.getKey().equals(stepKey)).findFirst().orElse(null); + } + + /** + * Given a policy and step key, return true if the step exists, false otherwise + */ + public boolean stepExists(final String policy, final Step.StepKey stepKey) { + Map steps = stepMap.get(policy); + if (steps == null) { + return false; + } else { + return steps.containsKey(stepKey); + } + } + + public boolean policyExists(final String policy) { + return lifecyclePolicyMap.containsKey(policy); + } + + public Step getFirstStep(String policy) { + return firstStepMap.get(policy); + } + + public TimeValue getIndexAgeForPhase(final String policy, final String phase) { + // These built-in phases should never wait + if
(InitializePolicyContextStep.INITIALIZATION_PHASE.equals(phase) || TerminalPolicyStep.COMPLETED_PHASE.equals(phase)) { + return TimeValue.ZERO; + } + final LifecyclePolicyMetadata meta = lifecyclePolicyMap.get(policy); + if (meta == null) { + throw new IllegalArgumentException("no policy found with name \"" + policy + "\""); + } else { + final Phase retrievedPhase = meta.getPolicy().getPhases().get(phase); + if (retrievedPhase == null) { + // We don't have that phase registered, proceed right through it + return TimeValue.ZERO; + } else { + return retrievedPhase.getMinimumAge(); + } + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java new file mode 100644 index 0000000000000..72c7aa81b9d77 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTask.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.Step; + +import java.io.IOException; +import java.util.Objects; + +public class SetStepInfoUpdateTask extends ClusterStateUpdateTask { + private final Index index; + private final String policy; + private final Step.StepKey currentStepKey; + private ToXContentObject stepInfo; + + public SetStepInfoUpdateTask(Index index, String policy, Step.StepKey currentStepKey, ToXContentObject stepInfo) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.stepInfo = stepInfo; + } + + Index getIndex() { + return index; + } + + String getPolicy() { + return policy; + } + + Step.StepKey getCurrentStepKey() { + return currentStepKey; + } + + ToXContentObject getStepInfo() { + return stepInfo; + } + + @Override + public ClusterState execute(ClusterState currentState) throws IOException { + IndexMetaData idxMeta = currentState.getMetaData().index(index); + if (idxMeta == null) { + // Index must have since been deleted, ignore it + return currentState; + } + Settings indexSettings = idxMeta.getSettings(); + LifecycleExecutionState indexILMData = LifecycleExecutionState.fromIndexMetadata(idxMeta); + if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) + && Objects.equals(currentStepKey, IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { + return IndexLifecycleRunner.addStepInfoToClusterState(index, currentState, stepInfo); + } else { + // either the policy has changed or the step is now + // not the same as when we submitted the update task.
In + // either case we don't want to do anything now + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new ElasticsearchException("policy [" + policy + "] for index [" + index.getName() + + "] failed trying to set step info for step [" + currentStepKey + "].", e); + } + + public static class ExceptionWrapper implements ToXContentObject { + private final Throwable exception; + + public ExceptionWrapper(Throwable exception) { + this.exception = exception; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + ElasticsearchException.generateThrowableXContent(builder, params, exception); + builder.endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java new file mode 100644 index 0000000000000..436f8637a0228 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/TimeValueSchedule.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Schedule; + +import java.util.Objects; + +public class TimeValueSchedule implements Schedule { + + private TimeValue interval; + + public TimeValueSchedule(TimeValue interval) { + if (interval.millis() <= 0) { + throw new IllegalArgumentException("interval must be greater than 0 milliseconds"); + } + this.interval = interval; + } + + public TimeValue getInterval() { + return interval; + } + + @Override + public long nextScheduledTimeAfter(long startTime, long time) { + assert time >= startTime; + if (startTime == time) { + time++; + } + long delta = time - startTime; + return startTime + (delta / interval.millis() + 1) * interval.millis(); + } + + @Override + public int hashCode() { + return Objects.hash(interval); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeValueSchedule other = (TimeValueSchedule) obj; + return Objects.equals(interval, other.interval); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java new file mode 100644 index 0000000000000..081e7d1565f79 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestDeleteLifecycleAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; + +import java.io.IOException; + +public class RestDeleteLifecycleAction extends BaseRestHandler { + + public RestDeleteLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.DELETE, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_delete_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String lifecycleName = restRequest.param("name"); + DeleteLifecycleAction.Request deleteLifecycleRequest = new DeleteLifecycleAction.Request(lifecycleName); + deleteLifecycleRequest.timeout(restRequest.paramAsTime("timeout", deleteLifecycleRequest.timeout())); + deleteLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(DeleteLifecycleAction.INSTANCE, deleteLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java new file mode 100644 index 0000000000000..96be5f0fc0337 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestExplainLifecycleAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
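RestDeleteLifecycleAction above wires `DELETE /_ilm/policy/{name}` to DeleteLifecycleAction and forwards the optional `timeout` and `master_timeout` parameters. A minimal client-side sketch with the JDK 11 `java.net.http` client follows; the host, port, and policy name are assumptions for illustration. TransportDeleteLifecycleAction, further down in this patch, rejects the deletion with an IllegalArgumentException while any index still references the policy, which a client sees as an error response rather than `{"acknowledged":true}`.

-----------------------------------------------
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeletePolicyExample {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_ilm/policy/my_policy?master_timeout=30s"))
            .DELETE()
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        // Success acknowledges the removal; a policy still attached to an index fails.
        System.out.println(response.statusCode() + " " + response.body());
    }
}
-----------------------------------------------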
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; + +import java.io.IOException; + +public class RestExplainLifecycleAction extends BaseRestHandler { + + public RestExplainLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/{index}/_ilm/explain", this); + } + + @Override + public String getName() { + return "ilm_explain_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); + ExplainLifecycleRequest explainLifecycleRequest = new ExplainLifecycleRequest(); + explainLifecycleRequest.indices(indexes); + explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); + String masterNodeTimeout = restRequest.param("master_timeout"); + if (masterNodeTimeout != null) { + explainLifecycleRequest.masterNodeTimeout(masterNodeTimeout); + } + + return channel -> client.execute(ExplainLifecycleAction.INSTANCE, explainLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java new file mode 100644 index 0000000000000..b518fe2f08698 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetLifecycleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
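RestExplainLifecycleAction above accepts comma-separated index names and wildcards in the `{index}` position and reads the standard indices options (`expand_wildcards`, `ignore_unavailable`, and so on) from the request via `IndicesOptions.fromRequest`. A sketch with the low-level Elasticsearch REST client, assuming that client is on the classpath and a node is listening on localhost:9200:

-----------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ExplainIlmExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Wildcards and comma-separated lists are both accepted for {index}.
            Request request = new Request("GET", "/logs-*/_ilm/explain");
            request.addParameter("expand_wildcards", "open");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
-----------------------------------------------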
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; + +import java.io.IOException; + +public class RestGetLifecycleAction extends BaseRestHandler { + + public RestGetLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/policy", this); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_get_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] lifecycleNames = Strings.splitStringByCommaToArray(restRequest.param("name")); + GetLifecycleAction.Request getLifecycleRequest = new GetLifecycleAction.Request(lifecycleNames); + getLifecycleRequest.timeout(restRequest.paramAsTime("timeout", getLifecycleRequest.timeout())); + getLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", getLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(GetLifecycleAction.INSTANCE, getLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java new file mode 100644 index 0000000000000..be2d16ee0be76 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestGetStatusAction.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; + +public class RestGetStatusAction extends BaseRestHandler { + + public RestGetStatusAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ilm/status", this); + } + + @Override + public String getName() { + return "ilm_get_operation_mode_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + GetStatusAction.Request request = new GetStatusAction.Request(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java new file mode 100644 index 0000000000000..41228041679e7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestMoveToStepAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
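RestGetStatusAction above surfaces the OperationMode that OperationModeUpdateTask maintains, and TransportGetStatusAction (later in this patch) reports RUNNING when no lifecycle metadata has been installed yet. The transition rules themselves live in `OperationMode#isValidChange` in x-pack core, which is not part of this diff, so the sketch below is an assumed reading of them: ILM stops through an intermediate STOPPING state and can be restarted from either non-running state.

-----------------------------------------------
public enum Mode {
    RUNNING, STOPPING, STOPPED;

    // Assumed semantics of OperationMode#isValidChange (defined in x-pack core,
    // not shown in this diff): stopping passes through STOPPING, and ILM can be
    // restarted from either STOPPING or STOPPED.
    public boolean isValidChange(Mode next) {
        switch (this) {
            case RUNNING:  return next == STOPPING;
            case STOPPING: return next == RUNNING || next == STOPPED;
            case STOPPED:  return next == RUNNING;
            default:       return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(RUNNING.isValidChange(STOPPED));  // false: must pass through STOPPING
        System.out.println(STOPPING.isValidChange(STOPPED)); // true
    }
}
-----------------------------------------------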
+ * + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; + +import java.io.IOException; + +public class RestMoveToStepAction extends BaseRestHandler { + + public RestMoveToStepAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST,"/_ilm/move/{name}", this); + } + + @Override + public String getName() { + return "ilm_move_to_step_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String index = restRequest.param("name"); + XContentParser parser = restRequest.contentParser(); + MoveToStepAction.Request request = MoveToStepAction.Request.parseRequest(index, parser); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(MoveToStepAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java new file mode 100644 index 0000000000000..586c3c683264e --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestPutLifecycleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
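RestMoveToStepAction above treats the `{name}` path segment as an index name and parses the body with `MoveToStepAction.Request.parseRequest`, which is defined in x-pack core and not shown in this diff. The request shape below (`current_step` and `next_step` objects, each with `phase`, `action`, and `name`) is therefore an assumption, as are the host and index name; the current step must match the index's actual position for the move to be accepted.

-----------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class MoveToStepExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_ilm/move/my-index");
            // Assumed body shape; the parser lives in x-pack core, not in this diff.
            request.setJsonEntity(
                "{" +
                "  \"current_step\": {\"phase\": \"new\",  \"action\": \"complete\",   \"name\": \"complete\"}," +
                "  \"next_step\":    {\"phase\": \"warm\", \"action\": \"forcemerge\", \"name\": \"forcemerge\"}" +
                "}");
            System.out.println(client.performRequest(request).getStatusLine());
        }
    }
}
-----------------------------------------------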
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; + +import java.io.IOException; + +public class RestPutLifecycleAction extends BaseRestHandler { + + public RestPutLifecycleAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/_ilm/policy/{name}", this); + } + + @Override + public String getName() { + return "ilm_put_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String lifecycleName = restRequest.param("name"); + XContentParser parser = restRequest.contentParser(); + PutLifecycleAction.Request putLifecycleRequest = PutLifecycleAction.Request.parseRequest(lifecycleName, parser); + putLifecycleRequest.timeout(restRequest.paramAsTime("timeout", putLifecycleRequest.timeout())); + putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + + return channel -> client.execute(PutLifecycleAction.INSTANCE, putLifecycleRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..d077b732341ca --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
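RestPutLifecycleAction above parses the request body with `PutLifecycleAction.Request.parseRequest` from x-pack core, so the exact policy schema is not visible in this diff. The body below is a representative guess at a minimal hot/delete policy; the host, port, policy name, and action fields are all illustrative assumptions. Re-issuing the same PUT upserts the policy, and TransportPutLifecycleAction (later in this patch) bumps the stored version by one each time.

-----------------------------------------------
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PutPolicyExample {
    public static void main(String[] args) throws Exception {
        // Assumed policy document shape: phases -> actions -> per-action settings.
        String policy =
            "{ \"policy\": { \"phases\": {" +
            "    \"hot\":    { \"actions\": { \"rollover\": { \"max_size\": \"50gb\" } } }," +
            "    \"delete\": { \"min_age\": \"30d\", \"actions\": { \"delete\": {} } }" +
            "} } }";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_ilm/policy/my_policy"))
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(policy))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
-----------------------------------------------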
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; + +import java.io.IOException; + +public class RestRemoveIndexLifecyclePolicyAction extends BaseRestHandler { + + public RestRemoveIndexLifecyclePolicyAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ilm/remove", this); + } + + @Override + public String getName() { + return "ilm_remove_policy_for_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); + RemoveIndexLifecyclePolicyAction.Request changePolicyRequest = new RemoveIndexLifecyclePolicyAction.Request(indexes); + changePolicyRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", changePolicyRequest.masterNodeTimeout())); + changePolicyRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, changePolicyRequest.indicesOptions())); + + return channel -> + client.execute(RemoveIndexLifecyclePolicyAction.INSTANCE, changePolicyRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java new file mode 100644 index 0000000000000..9e12c3cc34ed7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestRetryAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ * + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; + +public class RestRetryAction extends BaseRestHandler { + + public RestRetryAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ilm/retry", this); + } + + @Override + public String getName() { + return "ilm_retry_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); + RetryAction.Request request = new RetryAction.Request(indices); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.indices(indices); + request.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); + return channel -> client.execute(RetryAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java new file mode 100644 index 0000000000000..84f46a30406fd --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStartILMAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; + +public class RestStartILMAction extends BaseRestHandler { + + public RestStartILMAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/_ilm/start", this); + } + + @Override + public String getName() { + return "ilm_start_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + StartILMRequest request = new StartILMRequest(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(StartILMAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java new file mode 100644 index 0000000000000..2f8d3c5e43037 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/RestStopAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; + +public class RestStopAction extends BaseRestHandler { + + public RestStopAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/_ilm/stop", this); + } + + @Override + public String getName() { + return "ilm_stop_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + StopILMRequest request = new StopILMRequest(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(StopILMAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java new file mode 100644 index 0000000000000..839952943088a --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportDeleteLifecycleAction.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
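RestStartILMAction and RestStopAction above only submit the mode change; stopping is asynchronous, since in-flight steps are allowed to complete while the mode sits at STOPPING. That suggests the client-side pattern sketched below with the JDK 11 HTTP client; the endpoint addresses follow the routes registered in this patch, and the `operation_mode` response field is an assumption about how the status response serializes.

-----------------------------------------------
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class StopIlmExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        client.send(HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:9200/_ilm/stop"))
                .POST(HttpRequest.BodyPublishers.noBody())
                .build(),
            HttpResponse.BodyHandlers.ofString());
        // Stopping is asynchronous: the POST merely flips the mode to STOPPING,
        // so poll the status endpoint until in-flight steps have finished.
        String body;
        do {
            Thread.sleep(1000);
            body = client.send(HttpRequest.newBuilder()
                    .uri(URI.create("http://localhost:9200/_ilm/status"))
                    .GET().build(),
                HttpResponse.BodyHandlers.ofString()).body();
            System.out.println(body);
        } while (body.contains("STOPPED") == false);
    }
}
-----------------------------------------------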
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.DeleteLifecycleAction.Response; + +import java.util.Iterator; +import java.util.SortedMap; +import java.util.TreeMap; + +public class TransportDeleteLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportDeleteLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(DeleteLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + clusterService.submitStateUpdateTask("delete-lifecycle-" + request.getPolicyName(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + Iterator indicesIt = currentState.metaData().indices().valuesIt(); + while(indicesIt.hasNext()) { + IndexMetaData idxMeta = indicesIt.next(); + String indexPolicy = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); + if (request.getPolicyName().equals(indexPolicy)) { + throw new IllegalArgumentException("Cannot delete policy [" + request.getPolicyName() + + "]. 
It is being used by at least one index [" + idxMeta.getIndex().getName() + "]"); + } + + } + ClusterState.Builder newState = ClusterState.builder(currentState); + IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata == null + || currentMetadata.getPolicyMetadatas().containsKey(request.getPolicyName()) == false) { + throw new ResourceNotFoundException("Lifecycle policy not found: {}", request.getPolicyName()); + } + SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); + newPolicies.remove(request.getPolicyName()); + IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentMetadata.getOperationMode()); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); + return newState.build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java new file mode 100644 index 0000000000000..0f69b1f21dce7 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class TransportExplainLifecycleAction + extends TransportClusterInfoAction { + + private final NamedXContentRegistry xContentRegistry; + + @Inject + public TransportExplainLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + NamedXContentRegistry xContentRegistry) { + super(ExplainLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, + ExplainLifecycleRequest::new, indexNameExpressionResolver); + this.xContentRegistry = xContentRegistry; + } + + @Override + protected ExplainLifecycleResponse newResponse() { + return new ExplainLifecycleResponse(); + } + + @Override + protected String executor() { + // very lightweight operation, no need to fork + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterBlockException checkBlock(ExplainLifecycleRequest request, ClusterState state) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, + indexNameExpressionResolver.concreteIndexNames(state, request)); + } + + @Override + protected void doMasterOperation(ExplainLifecycleRequest request, String[] concreteIndices, ClusterState state, + ActionListener listener) { + Map indexResponses = new HashMap<>(); + for (String index : concreteIndices) { + IndexMetaData idxMetadata = state.metaData().index(index); + Settings idxSettings = idxMetadata.getSettings(); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMetadata); + String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxSettings); + String currentPhase = lifecycleState.getPhase(); + String
stepInfo = lifecycleState.getStepInfo(); + BytesArray stepInfoBytes = null; + if (stepInfo != null) { + stepInfoBytes = new BytesArray(stepInfo); + } + // parse existing phase steps from the phase definition in the index settings + String phaseDef = lifecycleState.getPhaseDefinition(); + PhaseExecutionInfo phaseExecutionInfo = null; + if (Strings.isNullOrEmpty(phaseDef) == false) { + try (XContentParser parser = JsonXContent.jsonXContent.createParser(xContentRegistry, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, phaseDef)) { + phaseExecutionInfo = PhaseExecutionInfo.parse(parser, currentPhase); + } catch (IOException e) { + listener.onFailure(new ElasticsearchParseException( + "failed to parse phase definition for index [" + index + "]", e)); + return; + } + } + final IndexLifecycleExplainResponse indexResponse; + if (Strings.hasLength(policyName)) { + indexResponse = IndexLifecycleExplainResponse.newManagedIndexResponse(index, policyName, + lifecycleState.getLifecycleDate(), + lifecycleState.getPhase(), + lifecycleState.getAction(), + lifecycleState.getStep(), + lifecycleState.getFailedStep(), + lifecycleState.getPhaseTime(), + lifecycleState.getActionTime(), + lifecycleState.getStepTime(), + stepInfoBytes, + phaseExecutionInfo); + } else { + indexResponse = IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); + } + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + listener.onResponse(new ExplainLifecycleResponse(indexResponses)); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java new file mode 100644 index 0000000000000..4c3657b76cba3 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetLifecycleAction.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.LifecyclePolicyResponseItem; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction.Response; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class TransportGetLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportGetLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexLifecycleMetadata metadata = clusterService.state().metaData().custom(IndexLifecycleMetadata.TYPE); + if (metadata == null) { + if (request.getPolicyNames().length == 0) { + listener.onResponse(new Response(Collections.emptyList())); + } else { + listener.onFailure(new ResourceNotFoundException("Lifecycle policy not found: {}", + Arrays.toString(request.getPolicyNames()))); + } + } else { + List requestedPolicies; + // if no policies explicitly provided, behave as if `*` was specified + if (request.getPolicyNames().length == 0) { + requestedPolicies = new ArrayList<>(metadata.getPolicyMetadatas().size()); + for (LifecyclePolicyMetadata policyMetadata : metadata.getPolicyMetadatas().values()) { + requestedPolicies.add(new LifecyclePolicyResponseItem(policyMetadata.getPolicy(), + policyMetadata.getVersion(), policyMetadata.getModifiedDateString())); + } + } else { + requestedPolicies = new ArrayList<>(request.getPolicyNames().length); + for (String name : request.getPolicyNames()) { + LifecyclePolicyMetadata policyMetadata = metadata.getPolicyMetadatas().get(name); + if (policyMetadata == null) { + listener.onFailure(new ResourceNotFoundException("Lifecycle policy not found: {}", name)); + return; + } + requestedPolicies.add(new LifecyclePolicyResponseItem(policyMetadata.getPolicy(), + policyMetadata.getVersion(), policyMetadata.getModifiedDateString())); + } + } + listener.onResponse(new Response(requestedPolicies)); + } + } 
+ + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java new file mode 100644 index 0000000000000..4a76120545087 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportGetStatusAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetStatusAction.Response; + +public class TransportGetStatusAction extends TransportMasterNodeAction { + + @Inject + public TransportGetStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexLifecycleMetadata metadata = state.metaData().custom(IndexLifecycleMetadata.TYPE); + final Response response; + if (metadata == null) { + // no need to actually install metadata just yet, but safe to say it is not stopped + response = new Response(OperationMode.RUNNING); + } else { + response = new Response(metadata.getOperationMode()); + } + listener.onResponse(response); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java new file mode 100644 index 0000000000000..57f08eba76490 --- /dev/null +++ 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.MoveToStepAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; + +public class TransportMoveToStepAction extends TransportMasterNodeAction { + IndexLifecycleService indexLifecycleService; + @Inject + public TransportMoveToStepAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IndexLifecycleService indexLifecycleService) { + super(MoveToStepAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + this.indexLifecycleService = indexLifecycleService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + IndexMetaData indexMetaData = state.metaData().index(request.getIndex()); + if (indexMetaData == null) { + listener.onFailure(new IllegalArgumentException("index [" + request.getIndex() + "] does not exist")); + return; + } + clusterService.submitStateUpdateTask("index[" + request.getIndex() + "]-move-to-step", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return indexLifecycleService.moveClusterStateToStep(currentState, request.getIndex(), request.getCurrentStepKey(), + request.getNextStepKey()); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + IndexMetaData newIndexMetaData = newState.metaData().index(indexMetaData.getIndex()); + if (newIndexMetaData == null) { + // The index has somehow been deleted - there shouldn't be any opportunity for this to happen, but just in case. 
+ logger.debug("index [" + indexMetaData.getIndex() + "] has been deleted after moving to step [" + + request.getNextStepKey() + "], skipping async action check"); + return; + } + indexLifecycleService.maybeRunAsyncAction(newState, newIndexMetaData, request.getNextStepKey()); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java new file mode 100644 index 0000000000000..dd4196c0a88d6 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportPutLifecycleAction.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction.Response; + +import java.time.Instant; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * This class is responsible for bootstrapping {@link IndexLifecycleMetadata} into the cluster-state, as well + * as adding the desired new policy to be inserted. 
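The bootstrap-and-upsert behaviour described in this javadoc is easy to state in isolation. Below is a paraphrase of the versioning rule from `masterOperation` in plain Java, with a hypothetical `PolicyMeta` record standing in for LifecyclePolicyMetadata: the first insert of a name gets version 1, and every later PUT of the same name bumps the version by one.

-----------------------------------------------
import java.time.Instant;
import java.util.Map;
import java.util.TreeMap;

public class PolicyUpsertSketch {
    record PolicyMeta(String name, long version, long modifiedDate) {}

    // Mirrors the versioning rule in masterOperation: version 1 on first insert,
    // then existing version + 1 on every subsequent upsert of the same name.
    static Map<String, PolicyMeta> upsert(Map<String, PolicyMeta> current, String name) {
        PolicyMeta existing = current.get(name);
        long nextVersion = (existing == null) ? 1L : existing.version() + 1L;
        // Copy rather than mutate, matching the immutable cluster-state convention.
        Map<String, PolicyMeta> copy = new TreeMap<>(current);
        copy.put(name, new PolicyMeta(name, nextVersion, Instant.now().toEpochMilli()));
        return copy;
    }

    public static void main(String[] args) {
        Map<String, PolicyMeta> policies = upsert(new TreeMap<>(), "my_policy");
        policies = upsert(policies, "my_policy");
        System.out.println(policies.get("my_policy").version()); // 2
    }
}
-----------------------------------------------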
+ */ +public class TransportPutLifecycleAction extends TransportMasterNodeAction { + + @Inject + public TransportPutLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(PutLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + // headers from the thread context stored by the AuthenticationService to be shared between the + // REST layer and the Transport layer here must be accessed within this thread and not in the + // cluster state thread in the ClusterStateUpdateTask below since that thread does not share the + // same context, and therefore does not have access to the appropriate security headers. + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + clusterService.submitStateUpdateTask("put-lifecycle-" + request.getPolicy().getName(), + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + ClusterState.Builder newState = ClusterState.builder(currentState); + IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); + if (currentMetadata == null) { // first time using index-lifecycle feature, bootstrap metadata + currentMetadata = IndexLifecycleMetadata.EMPTY; + } + LifecyclePolicyMetadata existingPolicyMetadata = currentMetadata.getPolicyMetadatas() + .get(request.getPolicy().getName()); + long nextVersion = (existingPolicyMetadata == null) ? 1L : existingPolicyMetadata.getVersion() + 1L; + SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); + LifecyclePolicyMetadata lifecyclePolicyMetadata = new LifecyclePolicyMetadata(request.getPolicy(), filteredHeaders, + nextVersion, Instant.now().toEpochMilli()); + newPolicies.put(lifecyclePolicyMetadata.getName(), lifecyclePolicyMetadata); + IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, OperationMode.RUNNING); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); + return newState.build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java new file mode 100644 index 0000000000000..4e608b511d3e9 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRemoveIndexLifecyclePolicyAction.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.Index; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.RemoveIndexLifecyclePolicyAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunner; + +import java.util.ArrayList; +import java.util.List; + +public class TransportRemoveIndexLifecyclePolicyAction extends TransportMasterNodeAction { + + @Inject + public TransportRemoveIndexLifecyclePolicyAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(RemoveIndexLifecyclePolicyAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, Request::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + final Index[] indices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices()); + clusterService.submitStateUpdateTask("remove-lifecycle-for-index", + new AckedClusterStateUpdateTask(request, listener) { + + private final List failedIndexes = new ArrayList<>(); + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return IndexLifecycleRunner.removePolicyForIndexes(indices, currentState, failedIndexes); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(failedIndexes); + } + }); + } + +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java new file mode 100644 index 0000000000000..b0f25ed795144 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java @@ -0,0 +1,69 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request; +import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; + +public class TransportRetryAction extends TransportMasterNodeAction { + + IndexLifecycleService indexLifecycleService; + + @Inject + public TransportRetryAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IndexLifecycleService indexLifecycleService) { + super(RetryAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + Request::new); + this.indexLifecycleService = indexLifecycleService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) { + clusterService.submitStateUpdateTask("ilm-re-run", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return indexLifecycleService.moveClusterStateToFailedStep(currentState, request.indices()); + } + + @Override + protected Response newResponse(boolean acknowledged) { + return new Response(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java new file mode 100644 index 0000000000000..aab9ef90e03d5 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStartILMAction.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.StartILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.action.StartILMAction; +import org.elasticsearch.xpack.indexlifecycle.OperationModeUpdateTask; + +public class TransportStartILMAction extends TransportMasterNodeAction { + + @Inject + public TransportStartILMAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(StartILMAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + StartILMRequest::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(StartILMRequest request, ClusterState state, ActionListener listener) { + clusterService.submitStateUpdateTask("ilm_operation_mode_update", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return (new OperationModeUpdateTask(OperationMode.RUNNING)).execute(currentState); + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(StartILMRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java new file mode 100644 index 0000000000000..f27373353ecda --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportStopILMAction.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.StopILMRequest; +import org.elasticsearch.xpack.core.indexlifecycle.action.StopILMAction; +import org.elasticsearch.xpack.indexlifecycle.OperationModeUpdateTask; + +public class TransportStopILMAction extends TransportMasterNodeAction { + + @Inject + public TransportStopILMAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(StopILMAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + StopILMRequest::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(StopILMRequest request, ClusterState state, ActionListener listener) { + clusterService.submitStateUpdateTask("ilm_operation_mode_update", + new AckedClusterStateUpdateTask(request, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return (new OperationModeUpdateTask(OperationMode.STOPPING)).execute(currentState); + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(StopILMRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java new file mode 100644 index 0000000000000..3776363cf175e --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/LifecyclePolicyTestsUtils.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.indexlifecycle; + +import org.elasticsearch.xpack.indexlifecycle.LockableLifecycleType; + +import java.util.Map; + +/** + * This class is here for constructing instances of {@link LifecyclePolicy} that differ from + * the main {@link TimeseriesLifecycleType} one.
The more generic constructor is package-private so + * that users are not exposed to {@link LifecycleType}, but it is still useful to construct policies with other types for + * testing purposes. + */ +public class LifecyclePolicyTestsUtils { + + public static LifecyclePolicy newTestLifecyclePolicy(String policyName, Map phases) { + return new LifecyclePolicy(TestLifecycleType.INSTANCE, policyName, phases); + } + + public static LifecyclePolicy newLockableLifecyclePolicy(String policyName, Map phases) { + return new LifecyclePolicy(LockableLifecycleType.INSTANCE, policyName, phases); + } + + public static LifecyclePolicy randomTimeseriesLifecyclePolicy(String policyName) { + return LifecyclePolicyTests.randomTimeseriesLifecyclePolicy(policyName); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java new file mode 100644 index 0000000000000..21f2b0e70939f --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTaskTests.java @@ -0,0 +1,307 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.Index; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.MockStep; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateActionStep; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleRunnerTests.MockClusterStateWaitStep; +import org.junit.Before; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static
org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class ExecuteStepsUpdateTaskTests extends ESTestCase { + + private static final StepKey firstStepKey = new StepKey("first_phase", "action_1", "step_1"); + private static final StepKey secondStepKey = new StepKey("first_phase", "action_1", "step_2"); + private static final StepKey thirdStepKey = new StepKey("first_phase", "action_1", "step_3"); + private static final StepKey invalidStepKey = new StepKey("invalid", "invalid", "invalid"); + private ClusterState clusterState; + private PolicyStepsRegistry policyStepsRegistry; + private String mixedPolicyName; + private String allClusterPolicyName; + private String invalidPolicyName; + private Index index; + private IndexMetaData indexMetaData; + private MockClusterStateActionStep firstStep; + private MockClusterStateWaitStep secondStep; + private MockClusterStateWaitStep allClusterSecondStep; + private MockStep thirdStep; + private Client client; + private IndexLifecycleMetadata lifecycleMetadata; + private String indexName; + + @Before + public void prepareState() throws IOException { + client = Mockito.mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + firstStep = new MockClusterStateActionStep(firstStepKey, secondStepKey); + secondStep = new MockClusterStateWaitStep(secondStepKey, thirdStepKey); + secondStep.setWillComplete(true); + allClusterSecondStep = new MockClusterStateWaitStep(secondStepKey, TerminalPolicyStep.KEY); + allClusterSecondStep.setWillComplete(true); + thirdStep = new MockStep(thirdStepKey, null); + mixedPolicyName = randomAlphaOfLengthBetween(5, 10); + allClusterPolicyName = randomAlphaOfLengthBetween(1, 4); + invalidPolicyName = randomAlphaOfLength(11); + Phase mixedPhase = new Phase("first_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(firstStep, secondStep, thirdStep)))); + Phase allClusterPhase = new Phase("first_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(firstStep, allClusterSecondStep)))); + Phase invalidPhase = new Phase("invalid_phase", TimeValue.ZERO, Collections.singletonMap(MockAction.NAME, + new MockAction(Arrays.asList(new MockClusterStateActionStep(firstStepKey, invalidStepKey))))); + LifecyclePolicy mixedPolicy = newTestLifecyclePolicy(mixedPolicyName, + Collections.singletonMap(mixedPhase.getName(), mixedPhase)); + LifecyclePolicy allClusterPolicy = newTestLifecyclePolicy(allClusterPolicyName, + Collections.singletonMap(allClusterPhase.getName(), allClusterPhase)); + LifecyclePolicy invalidPolicy = newTestLifecyclePolicy(invalidPolicyName, + Collections.singletonMap(invalidPhase.getName(), invalidPhase)); + Map policyMap = new HashMap<>(); + policyMap.put(mixedPolicyName, new LifecyclePolicyMetadata(mixedPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyMap.put(allClusterPolicyName, new LifecyclePolicyMetadata(allClusterPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyMap.put(invalidPolicyName, new LifecyclePolicyMetadata(invalidPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + policyStepsRegistry 
= new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client); + + indexName = randomAlphaOfLength(5); + lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); + indexMetaData = setupIndexPolicy(mixedPolicyName); + } + + private IndexMetaData setupIndexPolicy(String policyName) { + // Initialize the index on the given policy with a fresh execution state (phase "new", action/step "init") + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase("new"); + lifecycleState.setAction("init"); + lifecycleState.setStep("init"); + IndexMetaData indexMetadata = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .put(IndexMetaData.builder(indexMetadata)) + .build(); + String nodeId = randomAlphaOfLength(10); + DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(Node.NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + policyStepsRegistry.update(clusterState); + return indexMetadata; + } + + public void testNeverExecuteNonClusterStateStep() throws IOException { + setStateToKey(thirdStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, thirdStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + assertThat(task.execute(clusterState), sameInstance(clusterState)); + } + + public void testSuccessThenFailureUnsetNextKey() throws IOException { + secondStep.setWillComplete(false); + setStateToKey(firstStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(task.getNextStepKey(), nullValue()); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteUntilFirstNonClusterStateStep() throws IOException { + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState
lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(thirdStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteInvalidStartStep() throws IOException { + // Unset the index's phase/action/step to simulate starting from scratch + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index))); + lifecycleState.setPhase(null); + lifecycleState.setAction(null); + lifecycleState.setStep(null); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + + policyStepsRegistry.update(clusterState); + + Step invalidStep = new MockClusterStateActionStep(firstStepKey, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(invalidPolicyName, index, + invalidStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + assertSame(newState, clusterState); + } + + public void testExecuteIncompleteWaitStepNoInfo() throws IOException { + secondStep.setWillComplete(false); + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), nullValue()); + } + + public void testExecuteIncompleteWaitStepWithInfo() throws IOException { + secondStep.setWillComplete(false); + RandomStepInfo stepInfo = new RandomStepInfo(() -> randomAlphaOfLength(10)); + secondStep.expectedInfo(stepInfo); + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(secondStepKey)); + assertThat(firstStep.getExecuteCount(), equalTo(0L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + 
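// the wait step reported "incomplete": the index stays on the same step, the timestamps stay unset, and only the step info is recorded +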
assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo(stepInfo.toString())); + } + + public void testOnFailure() throws IOException { + setStateToKey(secondStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, secondStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + mixedPolicyName + "] for index [" + index.getName() + "] failed on step [" + startStep.getKey() + "].", + exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + public void testClusterActionStepThrowsException() throws IOException { + RuntimeException thrownException = new RuntimeException("error"); + firstStep.setException(thrownException); + setStateToKey(firstStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(new StepKey(firstStepKey.getPhase(), firstStepKey.getAction(), ErrorStep.NAME))); + assertThat(firstStep.getExecuteCount(), equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(0L)); + assertThat(task.getNextStepKey(), equalTo(secondStep.getKey())); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + } + + public void testClusterWaitStepThrowsException() throws IOException { + RuntimeException thrownException = new RuntimeException("error"); + secondStep.setException(thrownException); + setStateToKey(firstStepKey); + Step startStep = policyStepsRegistry.getStep(indexMetaData, firstStepKey); + long now = randomNonNegativeLong(); + ExecuteStepsUpdateTask task = new ExecuteStepsUpdateTask(mixedPolicyName, index, startStep, policyStepsRegistry, null, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(currentStepKey, equalTo(new StepKey(firstStepKey.getPhase(), firstStepKey.getAction(), ErrorStep.NAME))); + assertThat(firstStep.getExecuteCount(), equalTo(1L)); + assertThat(secondStep.getExecuteCount(), equalTo(1L)); + assertThat(task.getNextStepKey(), equalTo(thirdStepKey)); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepInfo(), equalTo("{\"type\":\"runtime_exception\",\"reason\":\"error\"}")); + } + + private void setStateToKey(StepKey stepKey) throws IOException { + 
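// helper that rewrites the index's ILM custom metadata so it appears to be on the given step, then refreshes the registry with the new state +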
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.getMetaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setStep(stepKey.getName()); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + .put(IndexMetaData.builder(clusterState.getMetaData().index(index)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build(); + policyStepsRegistry.update(clusterState); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java new file mode 100644 index 0000000000000..d83a41b4e60bc --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexLifecycleFeatureSetTests extends ESTestCase { + + private XPackLicenseState licenseState; + private ClusterService clusterService; + + @Before + public void init() throws Exception { + licenseState = mock(XPackLicenseState.class); + clusterService = mock(ClusterService.class); + } + + public void testAvailable() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + + when(licenseState.isIndexLifecycleAllowed()).thenReturn(false); + assertThat(featureSet.available(), equalTo(false)); + + when(licenseState.isIndexLifecycleAllowed()).thenReturn(true); + assertThat(featureSet.available(), equalTo(true)); + + 
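// a feature set constructed without a license state must report ILM as unavailable +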
featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, null, clusterService); + assertThat(featureSet.available(), equalTo(false)); + } + + public void testEnabled() { + Settings.Builder settings = Settings.builder().put("xpack.ilm.enabled", false); + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(false)); + + settings = Settings.builder().put("xpack.ilm.enabled", true); + featureSet = new IndexLifecycleFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(true)); + } + + public void testName() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + assertThat(featureSet.name(), equalTo("ilm")); + } + + public void testNativeCodeInfo() { + IndexLifecycleFeatureSet featureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + assertNull(featureSet.nativeCodeInfo()); + } + + public void testUsageStats() throws Exception { + Map indexPolicies = new HashMap<>(); + List policies = new ArrayList<>(); + String policy1Name = randomAlphaOfLength(10); + String policy2Name = randomAlphaOfLength(10); + String policy3Name = randomAlphaOfLength(10); + indexPolicies.put("index_1", policy1Name); + indexPolicies.put("index_2", policy1Name); + indexPolicies.put("index_3", policy1Name); + indexPolicies.put("index_4", policy1Name); + indexPolicies.put("index_5", policy3Name); + LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Collections.emptyMap()); + policies.add(policy1); + PolicyStats policy1Stats = new PolicyStats(Collections.emptyMap(), 4); + + Map phases1 = new HashMap<>(); + LifecyclePolicy policy2 = new LifecyclePolicy(policy2Name, phases1); + policies.add(policy2); + PolicyStats policy2Stats = new PolicyStats(Collections.emptyMap(), 0); + + LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Collections.emptyMap()); + policies.add(policy3); + PolicyStats policy3Stats = new PolicyStats(Collections.emptyMap(), 1); + + ClusterState clusterState = buildClusterState(policies, indexPolicies); + Mockito.when(clusterService.state()).thenReturn(clusterState); + + PlainActionFuture future = new PlainActionFuture<>(); + IndexLifecycleFeatureSet ilmFeatureSet = new IndexLifecycleFeatureSet(Settings.EMPTY, licenseState, clusterService); + ilmFeatureSet.usage(future); + IndexLifecycleFeatureSetUsage ilmUsage = (IndexLifecycleFeatureSetUsage) future.get(); + assertThat(ilmUsage.enabled(), equalTo(ilmFeatureSet.enabled())); + assertThat(ilmUsage.available(), equalTo(ilmFeatureSet.available())); + + List policyStatsList = ilmUsage.getPolicyStats(); + assertThat(policyStatsList.size(), equalTo(policies.size())); + assertTrue(policyStatsList.contains(policy1Stats)); + assertTrue(policyStatsList.contains(policy2Stats)); + assertTrue(policyStatsList.contains(policy3Stats)); + + } + + private ClusterState buildClusterState(List lifecyclePolicies, Map indexPolicies) { + Map lifecyclePolicyMetadatasMap = lifecyclePolicies.stream() + .map(p -> new LifecyclePolicyMetadata(p, Collections.emptyMap(), 1, 0L)) + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING); + + MetaData.Builder metadata = MetaData.builder().putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata); + 
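// create one single-shard index per entry, tagged with its policy via the LifecycleSettings.LIFECYCLE_NAME setting +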
indexPolicies.forEach((indexName, policyName) -> { + Settings indexSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetaData.Builder indexMetadata = IndexMetaData.builder(indexName).settings(indexSettings); + metadata.put(indexMetadata); + }); + + return ClusterState.builder(new ClusterName("my_cluster")).metaData(metadata).build(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java new file mode 100644 index 0000000000000..7bd974d31c176 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleFeatureSetUsageTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class IndexLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected IndexLifecycleFeatureSetUsage createTestInstance() { + boolean available = randomBoolean(); + boolean enabled = randomBoolean(); + List policyStats = new ArrayList<>(); + int size = randomIntBetween(0, 10); + for (int i = 0; i < size; i++) { + policyStats.add(PolicyStatsTests.randomPolicyStats()); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected IndexLifecycleFeatureSetUsage mutateInstance(IndexLifecycleFeatureSetUsage instance) throws IOException { + boolean available = instance.available(); + boolean enabled = instance.enabled(); + List policyStats = instance.getPolicyStats(); + switch (between(0, 2)) { + case 0: + available = available == false; + break; + case 1: + enabled = enabled == false; + break; + case 2: + policyStats = new ArrayList<>(policyStats); + policyStats.add(PolicyStatsTests.randomPolicyStats()); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new IndexLifecycleFeatureSetUsage(available, enabled, policyStats); + } + + @Override + protected Reader instanceReader() { + return IndexLifecycleFeatureSetUsage::new; + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java new file mode 100644 index 0000000000000..a041232d8a7e7 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -0,0 +1,492 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.GetLifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; +import org.junit.Before; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.Requests.clusterHealthRequest; +import static org.elasticsearch.client.Requests.createIndexRequest; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static 
org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newLockableLifecyclePolicy; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.core.CombinableMatcher.both; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNull.nullValue; + +@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class IndexLifecycleInitialisationTests extends ESIntegTestCase { + private Settings settings; + private LifecyclePolicy lifecyclePolicy; + private Phase mockPhase; + private static final ObservableAction OBSERVABLE_ACTION; + static { + List steps = new ArrayList<>(); + Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME); + steps.add(new ObservableClusterStateWaitStep(key, TerminalPolicyStep.KEY)); + OBSERVABLE_ACTION = new ObservableAction(steps, true); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + settings.put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s"); + return settings.build(); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Settings transportClientSettings() { + Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + return settings.build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Before + public void init() { + settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0).put(LifecycleSettings.LIFECYCLE_NAME, "test").build(); + List steps = new ArrayList<>(); + Step.StepKey key = new Step.StepKey("mock", ObservableAction.NAME, ObservableClusterStateWaitStep.NAME); + steps.add(new ObservableClusterStateWaitStep(key, TerminalPolicyStep.KEY)); + Map actions = Collections.singletonMap(ObservableAction.NAME, OBSERVABLE_ACTION); + mockPhase = new Phase("mock", TimeValue.timeValueSeconds(0), actions); + Map phases = Collections.singletonMap("mock", mockPhase); + lifecyclePolicy = newLockableLifecyclePolicy("test", phases); + } + + public void testSingleNodeCluster() throws 
Exception { + settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build(); + // start master node + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + final String node1 = getLocalNodeId(server_1); + + // test get-lifecycle behavior when IndexLifecycleMetaData is null + GetLifecycleAction.Response getUninitializedLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getUninitializedLifecycleResponse.getPolicies().size(), equalTo(0)); + ExecutionException exception = expectThrows(ExecutionException.class,() -> client() + .execute(GetLifecycleAction.INSTANCE, new GetLifecycleAction.Request("non-existent-policy")).get()); + assertThat(exception.getMessage(), containsString("Lifecycle policy not found: [non-existent-policy]")); + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + long lowerBoundModifiedDate = Instant.now().toEpochMilli(); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + long upperBoundModifiedDate = Instant.now().toEpochMilli(); + + // assert version and modified_date + GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1)); + GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0); + assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy)); + assertThat(responseItem.getVersion(), equalTo(1L)); + long actualModifiedDate = Instant.parse(responseItem.getModifiedDate()).toEpochMilli(); + assertThat(actualModifiedDate, + is(both(greaterThanOrEqualTo(lowerBoundModifiedDate)).and(lessThanOrEqualTo(upperBoundModifiedDate)))); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); + assertBusy(() -> { + assertEquals(true, client().admin().indices().prepareExists("test").get().isExists()); + }); + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + assertNotNull(indexLifecycleService.getScheduledJob()); + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testExplainExecution() throws Exception { + // start node + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = 
client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + + GetLifecycleAction.Response getLifecycleResponse = client().execute(GetLifecycleAction.INSTANCE, + new GetLifecycleAction.Request()).get(); + assertThat(getLifecycleResponse.getPolicies().size(), equalTo(1)); + GetLifecycleAction.LifecyclePolicyResponseItem responseItem = getLifecycleResponse.getPolicies().get(0); + assertThat(responseItem.getLifecyclePolicy(), equalTo(lifecyclePolicy)); + assertThat(responseItem.getVersion(), equalTo(1L)); + long actualModifiedDate = Instant.parse(responseItem.getModifiedDate()).toEpochMilli(); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + { + PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), mockPhase, 1L, actualModifiedDate); + assertBusy(() -> { + ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); + ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); + assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); + IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + assertThat(indexResponse.getStep(), equalTo("observable_cluster_state_action")); + assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); + }); + } + + // complete the step + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap("index.lifecycle.test.complete", true)).get(); + + { + PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), null, 1L, actualModifiedDate); + assertBusy(() -> { + ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); + ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); + assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); + IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + assertThat(indexResponse.getPhase(), equalTo(TerminalPolicyStep.COMPLETED_PHASE)); + assertThat(indexResponse.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); + }); + } + } + + public void testMasterDedicatedDataDedicated() throws Exception { + settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build(); + // start master node + logger.info("Starting master-only server1"); + final String server_1 = internalCluster().startMasterOnlyNode(); + // start data node + logger.info("Starting data-only server2"); + final String server_2 = internalCluster().startDataOnlyNode(); + final String node2 = getLocalNodeId(server_2); + + // check that the scheduler was started on the appropriate node + { + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + assertNotNull(indexLifecycleService.getScheduledJob()); + } + { + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_2); + assertNull(indexLifecycleService.getScheduler()); + assertNull(indexLifecycleService.getScheduledJob()); 
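+ // only the elected master runs the ILM scheduler; the data-only node must have neither a scheduler nor a scheduled job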
+ } + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node2); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); + + assertBusy(() -> { + assertEquals(true, client().admin().indices().prepareExists("test").get().isExists()); + }); + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testMasterFailover() throws Exception { + // start one server + logger.info("Starting server1"); + final String server_1 = internalCluster().startNode(); + final String node1 = getLocalNodeId(server_1); + + logger.info("Creating lifecycle [test_lifecycle]"); + PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); + PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); + assertAcked(putLifecycleResponse); + + logger.info("Creating index [test]"); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)) + .actionGet(); + assertAcked(createIndexResponse); + + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); + assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); + + logger.info("Starting server2"); + // start another server + internalCluster().startNode(); + + // first wait for 2 nodes in the cluster + logger.info("Waiting for replicas to be assigned"); + ClusterHealthResponse clusterHealth = client().admin().cluster() + .health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); + + // check step in progress in lifecycle + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(ObservableClusterStateWaitStep.NAME)); + }); + + if (randomBoolean()) { + // this checks that the phase execution is picked up from the phase definition settings + logger.info("updating lifecycle [test_lifecycle] to be empty"); + PutLifecycleAction.Request updateLifecycleRequest = new PutLifecycleAction.Request + (newLockableLifecyclePolicy(lifecyclePolicy.getName(), Collections.emptyMap())); + PutLifecycleAction.Response updateLifecycleResponse = client() +
.execute(PutLifecycleAction.INSTANCE, updateLifecycleRequest).get(); + assertAcked(updateLifecycleResponse); + } + + + logger.info("Closing server1"); + // kill the first server + internalCluster().stopCurrentMasterNode(); + + // check that index lifecycle picked back up where it left off + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(ObservableClusterStateWaitStep.NAME)); + }); + + logger.info("new master is operational"); + // complete the step + AcknowledgedResponse response = client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap("index.lifecycle.test.complete", true)).get(); + + assertBusy(() -> { + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(client().admin().cluster() + .prepareState().execute().actionGet().getState().getMetaData().index("test")); + assertThat(lifecycleState.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); + }); + } + + public void testPollIntervalUpdate() throws Exception { + TimeValue pollInterval = TimeValue.timeValueSeconds(randomLongBetween(1, 5)); + final String server_1 = internalCluster().startMasterOnlyNode( + Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, pollInterval.getStringRep()).build()); + IndexLifecycleService indexLifecycleService = internalCluster().getInstance(IndexLifecycleService.class, server_1); + assertBusy(() -> { + assertNotNull(indexLifecycleService.getScheduler()); + assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); + }); + { + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + assertThat(schedule.getInterval(), equalTo(pollInterval)); + } + + // update the poll interval + TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000)); + Settings newIntervalSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, + newPollInterval.getStringRep()).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(newIntervalSettings)); + { + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + assertThat(schedule.getInterval(), equalTo(newPollInterval)); + } + } + + private String getLocalNodeId(String name) { + TransportService transportService = internalCluster().getInstance(TransportService.class, name); + String nodeId = transportService.getLocalNode().getId(); + assertThat(nodeId, not(nullValue())); + return nodeId; + } + + public static class TestILMPlugin extends Plugin { + public TestILMPlugin() { + } + + public List<Setting<?>> getSettings() { + final Setting<Boolean> COMPLETE_SETTING = Setting.boolSetting("index.lifecycle.test.complete", false, + Setting.Property.Dynamic, Setting.Property.IndexScope); + return Collections.singletonList(COMPLETE_SETTING); + } + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContent() { + return Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ObservableAction.NAME), (p) -> { + MockAction.parse(p); + return OBSERVABLE_ACTION; + }) + ); + } + + @Override + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return Arrays.asList(new NamedWriteableRegistry.Entry(LifecycleType.class, LockableLifecycleType.TYPE, + (in) -> LockableLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class,
ObservableAction.NAME, ObservableAction::readObservableAction), + new NamedWriteableRegistry.Entry(ObservableClusterStateWaitStep.class, ObservableClusterStateWaitStep.NAME, + ObservableClusterStateWaitStep::new)); + } + } + + public static class ObservableClusterStateWaitStep extends ClusterStateWaitStep implements NamedWriteable { + public static final String NAME = "observable_cluster_state_action"; + + public ObservableClusterStateWaitStep(StepKey current, StepKey next) { + super(current, next); + } + + public ObservableClusterStateWaitStep(StreamInput in) throws IOException { + this(new StepKey(in.readString(), in.readString(), in.readString()), readOptionalNextStepKey(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getKey().getPhase()); + out.writeString(getKey().getAction()); + out.writeString(getKey().getName()); + boolean hasNextStep = getNextStepKey() != null; + out.writeBoolean(hasNextStep); + if (hasNextStep) { + out.writeString(getNextStepKey().getPhase()); + out.writeString(getNextStepKey().getAction()); + out.writeString(getNextStepKey().getName()); + } + } + + private static StepKey readOptionalNextStepKey(StreamInput in) throws IOException { + if (in.readBoolean()) { + return new StepKey(in.readString(), in.readString(), in.readString()); + } + return null; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + boolean complete = clusterState.metaData().index("test").getSettings() + .getAsBoolean("index.lifecycle.test.complete", false); + return new Result(complete, null); + } + } + + public static class ObservableAction extends MockAction { + + ObservableAction(List<Step> steps, boolean safe) { + super(steps, safe); + } + + public static ObservableAction readObservableAction(StreamInput in) throws IOException { + List<Step> steps = in.readList(ObservableClusterStateWaitStep::new); + boolean safe = in.readBoolean(); + return new ObservableAction(steps, safe); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(getSteps().stream().map(s -> (ObservableClusterStateWaitStep) s).collect(Collectors.toList())); + out.writeBoolean(isSafeAction()); + } + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java new file mode 100644 index 0000000000000..97f78c89dc9ea --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.test.AbstractDiffableSerializationTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; +import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; +import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata.IndexLifecycleMetadataDiff; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.randomTimeseriesLifecyclePolicy; + +public class IndexLifecycleMetadataTests extends AbstractDiffableSerializationTestCase<Custom> { + + @Override + protected IndexLifecycleMetadata createTestInstance() { + int numPolicies = randomIntBetween(1, 5); + Map<String, LifecyclePolicyMetadata> policies = new HashMap<>(numPolicies); + for (int i = 0; i < numPolicies; i++) { + LifecyclePolicy policy = randomTimeseriesLifecyclePolicy(randomAlphaOfLength(4) + i); + policies.put(policy.getName(), new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } + return new IndexLifecycleMetadata(policies, randomFrom(OperationMode.values())); + } + + @Override + protected IndexLifecycleMetadata doParseInstance(XContentParser parser) throws IOException { + return IndexLifecycleMetadata.PARSER.apply(parser, null); + } + + @Override + protected Reader<Custom> instanceReader() { + return IndexLifecycleMetadata::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, + (in) -> TimeseriesLifecycleType.INSTANCE), + new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), + new
NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new) + )); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), + (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } + + @Override + protected MetaData.Custom mutateInstance(MetaData.Custom instance) { + IndexLifecycleMetadata metadata = (IndexLifecycleMetadata) instance; + Map<String, LifecyclePolicyMetadata> policies = metadata.getPolicyMetadatas(); + policies = new TreeMap<>(policies); + OperationMode mode = metadata.getOperationMode(); + if (randomBoolean()) { + String policyName = randomAlphaOfLength(10); + policies.put(policyName, new LifecyclePolicyMetadata(randomTimeseriesLifecyclePolicy(policyName), Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } else { + mode = randomValueOtherThan(metadata.getOperationMode(), () -> randomFrom(OperationMode.values())); + } + return new IndexLifecycleMetadata(policies, mode); + } + + @Override + protected Custom makeTestChanges(Custom testInstance) { + return mutateInstance(testInstance); + } + + @Override + protected Reader<Diff<Custom>> diffReader() { + return IndexLifecycleMetadataDiff::new; + } + + public void testMinimumSupportedVersion() { + assertEquals(Version.V_7_0_0_alpha1, createTestInstance().getMinimalSupportedVersion()); + } + + public void testcontext() { + assertEquals(MetaData.ALL_CONTEXTS, createTestInstance().context()); + } + + public static IndexLifecycleMetadata createTestInstance(int numPolicies, OperationMode mode) { + SortedMap<String, LifecyclePolicyMetadata> policies = new TreeMap<>(); + for (int i = 0; i < numPolicies; i++) { + int numberPhases = randomInt(5); + Map<String, Phase> phases = new HashMap<>(numberPhases); + for (int j = 0; j < numberPhases; j++) { + TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + Map<String, LifecycleAction> actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + } + String phaseName = randomAlphaOfLength(10); + phases.put(phaseName, new Phase(phaseName, after, actions)); + } + String policyName = randomAlphaOfLength(10); + policies.put(policyName, new
LifecyclePolicyMetadata(newTestLifecyclePolicy(policyName, phases), Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + } + return new IndexLifecycleMetadata(policies, mode); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java new file mode 100644 index 0000000000000..a8a896b5c4ea5 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -0,0 +1,1479 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexlifecycle.AbstractStepTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.AsyncWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateActionStep; +import org.elasticsearch.xpack.core.indexlifecycle.ClusterStateWaitStep; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.MockStep; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import 
org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; +import org.mockito.ArgumentMatcher; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class IndexLifecycleRunnerTests extends ESTestCase { + private static final NamedXContentRegistry REGISTRY; + + static { + try (IndexLifecycle indexLifecycle = new IndexLifecycle(Settings.EMPTY)) { + List<NamedXContentRegistry.Entry> entries = new ArrayList<>(indexLifecycle.getNamedXContent()); + REGISTRY = new NamedXContentRegistry(entries); + } + } + + /** A real policy steps registry where getStep can be overridden so that JSON doesn't have to be parsed */ + private class MockPolicyStepsRegistry extends PolicyStepsRegistry { + private BiFunction<IndexMetaData, StepKey, Step> fn = null; + + MockPolicyStepsRegistry(SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap, Map<String, Step> firstStepMap, + Map<String, Map<StepKey, Step>> stepMap, NamedXContentRegistry xContentRegistry, Client client) { + super(lifecyclePolicyMap, firstStepMap, stepMap, xContentRegistry, client); + } + + public void setResolver(BiFunction<IndexMetaData, StepKey, Step> fn) { + this.fn = fn; + } + + @Override + public Step getStep(IndexMetaData indexMetaData, StepKey stepKey) { + if (fn == null) { + logger.info("--> retrieving step {}", stepKey); + return super.getStep(indexMetaData, stepKey); + } else { + logger.info("--> returning mock step"); + return fn.apply(indexMetaData, stepKey); + } + } + } + + private MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step) { + return createOneStepPolicyStepRegistry(policyName, step, "test"); + } + + private MockPolicyStepsRegistry createOneStepPolicyStepRegistry(String policyName, Step step, String indexName) { + LifecyclePolicy policy = new LifecyclePolicy(policyName, new HashMap<>()); + SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap = new TreeMap<>(); + lifecyclePolicyMap.put(policyName, new LifecyclePolicyMetadata(policy, new HashMap<>(), 1, 1)); + Map<String, Step> firstStepMap = new HashMap<>(); + firstStepMap.put(policyName, step); + Map<String, Map<StepKey, Step>> stepMap = new HashMap<>(); + Map<StepKey, Step> policySteps = new HashMap<>(); + policySteps.put(step.getKey(), step); + stepMap.put(policyName, policySteps); + Map<Index, List<Step>> indexSteps = new HashMap<>(); + List<Step> steps = new ArrayList<>(); + steps.add(step); + Index index = new Index(indexName, indexName + "uuid"); + indexSteps.put(index, steps); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + return new MockPolicyStepsRegistry(lifecyclePolicyMap, firstStepMap, stepMap, REGISTRY, client); + } + + public void testRunPolicyTerminalPolicyStep() { + String policyName = "async_action_policy"; + TerminalPolicyStep step = TerminalPolicyStep.INSTANCE; + PolicyStepsRegistry stepRegistry =
createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunPolicyErrorStep() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder(); + newState.setPhase(stepKey.getPhase()); + newState.setAction(stepKey.getAction()); + newState.setStep(ErrorStep.NAME); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .putCustom(ILM_CUSTOM_METADATA_KEY, newState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunStateChangePolicyWithNoNextStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ThreadPool threadPool = new TestThreadPool("name"); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + latch.await(5, TimeUnit.SECONDS); + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunStateChangePolicyWithNextStep() throws Exception { + String policyName = "foo"; + 
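+ // chain two cluster state action steps; the runner should execute both and leave the index on the second step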
StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey); + MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if (nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + long stepTime = randomLong(); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> stepTime); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + nextStep.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + // The cluster state can take a few extra milliseconds to update after the steps are executed + assertBusy(() -> assertNotEquals(before, clusterService.state())); + LifecycleExecutionState newExecutionState = LifecycleExecutionState + .fromIndexMetadata(clusterService.state().metaData().index(indexMetaData.getIndex())); + assertThat(newExecutionState.getPhase(), equalTo("phase")); + assertThat(newExecutionState.getAction(), equalTo("action")); + assertThat(newExecutionState.getStep(), equalTo("next_cluster_state_action_step")); + assertThat(newExecutionState.getStepTime(), equalTo(stepTime)); + assertThat(step.getExecuteCount(), equalTo(1L)); + assertThat(nextStep.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunAsyncActionDoesNotRun() { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ThreadPool threadPool = new TestThreadPool("name"); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + 
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + // State changes should not run AsyncAction steps + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(0L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "async_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey); + MockAsyncActionStep nextStep = new MockAsyncActionStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if (nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + logger.info("--> state: {}", state); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + // Wait for the cluster state action 
step + latch.await(5, TimeUnit.SECONDS); + + CountDownLatch asyncLatch = new CountDownLatch(1); + nextStep.setLatch(asyncLatch); + + // Wait for the async action step + asyncLatch.await(5, TimeUnit.SECONDS); + ClusterState after = clusterService.state(); + + assertNotEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + assertThat(nextStep.getExecuteCount(), equalTo(1L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunPeriodicStep() throws Exception { + String policyName = "foo"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + StepKey nextStepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, nextStepKey); + MockAsyncWaitStep nextStep = new MockAsyncWaitStep(nextStepKey, null); + MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + stepRegistry.setResolver((i, k) -> { + if (stepKey.equals(k)) { + return step; + } else if (nextStepKey.equals(k)) { + return nextStep; + } else { + fail("should not try to retrieve different step"); + return null; + } + }); + ThreadPool threadPool = new TestThreadPool("name"); + LifecycleExecutionState les = LifecycleExecutionState.builder() + .setPhase("phase") + .setAction("action") + .setStep("cluster_state_action_step") + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap()) + .build(); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + DiscoveryNode node = clusterService.localNode(); + IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .metaData(MetaData.builder() + .put(indexMetaData, true) + .putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .nodes(DiscoveryNodes.builder() + .add(node) + .masterNodeId(node.getId()) + .localNodeId(node.getId())) + .build(); + logger.info("--> state: {}", state); + ClusterServiceUtils.setState(clusterService, state); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + + ClusterState before = clusterService.state(); + CountDownLatch latch = new CountDownLatch(1); + step.setLatch(latch); + runner.runPeriodicStep(policyName, indexMetaData); + latch.await(5, TimeUnit.SECONDS); + + ClusterState after = clusterService.state(); + + assertEquals(before, after); + assertThat(step.getExecuteCount(), equalTo(1L)); + assertThat(nextStep.getExecuteCount(), equalTo(0L)); + clusterService.close(); + threadPool.shutdownNow(); + } + + public void testRunPolicyClusterStateActionStep() { + String policyName = "cluster_state_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = 
IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-execute-cluster-state-steps"), + Mockito.argThat(new ExecuteStepsUpdateTaskMatcher(indexMetaData.getIndex(), policyName, step))); + Mockito.verifyNoMoreInteractions(clusterService); + } + + public void testRunPolicyClusterStateWaitStep() { + String policyName = "cluster_state_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step"); + MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null); + step.setWillComplete(true); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-execute-cluster-state-steps"), + Mockito.argThat(new ExecuteStepsUpdateTaskMatcher(indexMetaData.getIndex(), policyName, step))); + Mockito.verifyNoMoreInteractions(clusterService); + } + + public void testRunPolicyAsyncActionStepClusterStateChangeIgnored() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", "action", "async_action_step"); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + Exception expectedException = new RuntimeException(); + step.setException(expectedException); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + assertEquals(0, step.getExecuteCount()); + Mockito.verifyZeroInteractions(clusterService); + } + + public void testRunPolicyAsyncWaitStepClusterStateChangeIgnored() { + String policyName = "async_wait_policy"; + StepKey stepKey = new StepKey("phase", "action", "async_wait_step"); + MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, null); + Exception expectedException = new RuntimeException(); + step.setException(expectedException); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + + runner.runPolicyAfterStateChange(policyName, indexMetaData); + + assertEquals(0, step.getExecuteCount()); + Mockito.verifyZeroInteractions(clusterService); + } + + public void 
testRunPolicyThatDoesntExist() { + String policyName = "cluster_state_action_policy"; + ClusterService clusterService = mock(ClusterService.class); + IndexLifecycleRunner runner = new IndexLifecycleRunner(new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, null), + clusterService, () -> 0L); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + // verify that no exception is thrown + runner.runPolicyAfterStateChange(policyName, indexMetaData); + Mockito.verify(clusterService, Mockito.times(1)).submitStateUpdateTask(Mockito.matches("ilm-set-step-info"), + Mockito.argThat(new SetStepInfoUpdateTaskMatcher(indexMetaData.getIndex(), policyName, null, + (builder, params) -> { + builder.startObject(); + builder.field("reason", "policy [does_not_exist] does not exist"); + builder.field("type", "illegal_argument_exception"); + builder.endObject(); + return builder; + }))); + Mockito.verifyNoMoreInteractions(clusterService); + } + + public void testGetCurrentStepKey() { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState.build()); + assertNull(stepKey); + + String phase = randomAlphaOfLength(20); + String action = randomAlphaOfLength(20); + String step = randomAlphaOfLength(20); + LifecycleExecutionState.Builder lifecycleState2 = LifecycleExecutionState.builder(); + lifecycleState2.setPhase(phase); + lifecycleState2.setAction(action); + lifecycleState2.setStep(step); + stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState2.build()); + assertNotNull(stepKey); + assertEquals(phase, stepKey.getPhase()); + assertEquals(action, stepKey.getAction()); + assertEquals(step, stepKey.getName()); + + phase = randomAlphaOfLength(20); + action = randomAlphaOfLength(20); + step = null; + LifecycleExecutionState.Builder lifecycleState3 = LifecycleExecutionState.builder(); + lifecycleState3.setPhase(phase); + lifecycleState3.setAction(action); + lifecycleState3.setStep(step); + AssertionError error3 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState3.build())); + assertEquals("Current phase is not empty: " + phase, error3.getMessage()); + + phase = null; + action = randomAlphaOfLength(20); + step = null; + LifecycleExecutionState.Builder lifecycleState4 = LifecycleExecutionState.builder(); + lifecycleState4.setPhase(phase); + lifecycleState4.setAction(action); + lifecycleState4.setStep(step); + AssertionError error4 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState4.build())); + assertEquals("Current action is not empty: " + action, error4.getMessage()); + + phase = null; + action = randomAlphaOfLength(20); + step = randomAlphaOfLength(20); + LifecycleExecutionState.Builder lifecycleState5 = LifecycleExecutionState.builder(); + lifecycleState5.setPhase(phase); + lifecycleState5.setAction(action); + lifecycleState5.setStep(step); + AssertionError error5 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState5.build())); + assertEquals(null, error5.getMessage()); + + phase = null; + action = null; + step = randomAlphaOfLength(20); + LifecycleExecutionState.Builder lifecycleState6 = LifecycleExecutionState.builder(); + lifecycleState6.setPhase(phase); + lifecycleState6.setAction(action); + lifecycleState6.setStep(step); + 
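+ // a step name without a phase or action is inconsistent lifecycle state and should trip an assertion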
AssertionError error6 = expectThrows(AssertionError.class, () -> IndexLifecycleRunner.getCurrentStepKey(lifecycleState6.build())); + assertEquals(null, error6.getMessage()); + } + + public void testGetCurrentStep() { + String policyName = "policy"; + StepKey firstStepKey = new StepKey("phase_1", "action_1", "step_1"); + StepKey secondStepKey = new StepKey("phase_1", "action_1", "step_2"); + Step firstStep = new MockStep(firstStepKey, secondStepKey); + Map<String, Step> firstStepMap = new HashMap<>(); + firstStepMap.put(policyName, firstStep); + Map<String, Map<StepKey, Step>> stepMap = new HashMap<>(); + Index index = new Index("test", "uuid"); + + Step.StepKey MOCK_STEP_KEY = new Step.StepKey("mock", "mock", "mock"); + Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong()); + String phaseName = randomFrom(policy.getPhases().keySet()); + Phase phase = policy.getPhases().get(phaseName); + PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong()); + String phaseJson = Strings.toString(pei); + LifecycleAction action = randomFrom(phase.getActions().values()); + Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY)); + Settings indexSettings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policyName) + .build(); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhaseDefinition(phaseJson); + lifecycleState.setPhase(step.getKey().getPhase()); + lifecycleState.setAction(step.getKey().getAction()); + lifecycleState.setStep(step.getKey().getName()); + IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()) + .settings(indexSettings) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + SortedMap<String, LifecyclePolicyMetadata> metas = new TreeMap<>(); + metas.put(policyName, policyMetadata); + PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, firstStepMap, stepMap, REGISTRY, client); + + // First step is retrieved because there are no settings for the index + Step stepFromNoSettings = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetaData, + LifecycleExecutionState.builder().build()); + assertEquals(firstStep, stepFromNoSettings); + + // The step that was written into the metadata is retrieved + Step currentStep = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetaData, lifecycleState.build()); + assertEquals(step.getKey(), currentStep.getKey()); + } + + public void testMoveClusterStateToNextStep() { + String indexName = "my_index"; + LifecyclePolicy policy = randomValueOtherThanMany(p -> p.getPhases().size() == 0, + () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")); + Phase nextPhase = policy.getPhases().values().stream().findFirst().get(); + List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList( + new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())); + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey(nextPhase.getName(), "next_action", "next_step"); + long now = randomNonNegativeLong(); + + // test going from
null lifecycle settings to next step + ClusterState clusterState = buildClusterState(indexName, + Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy.getName()), LifecycleExecutionState.builder().build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + // test going from set currentStep settings to nextStep + Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + + clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testMoveClusterStateToNextStepSamePhase() { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey("current_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + + clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testMoveClusterStateToNextStepSameAction() { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStep = new StepKey("current_phase", "current_action", "next_step"); + long now = randomNonNegativeLong(); + + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, 
currentStep, nextStep, + () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + if (randomBoolean()) { + lifecycleState.setStepInfo(randomAlphaOfLength(20)); + } + clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + index = clusterState.metaData().index(indexName).getIndex(); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); + } + + public void testSuccessfulValidatedMoveClusterStateToNextStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + LifecyclePolicy policy = randomValueOtherThanMany(p -> p.getPhases().size() == 0, + () -> LifecyclePolicyTests.randomTestLifecyclePolicy(policyName)); + Phase nextPhase = policy.getPhases().values().stream().findFirst().get(); + List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList( + new LifecyclePolicyMetadata(policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong())); + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey(nextPhase.getName(), "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry); + assertClusterStateOnNextStep(clusterState, index, currentStepKey, nextStepKey, newClusterState, now); + } + + public void testValidatedMoveClusterStateToNextStepWithoutPolicy() { + String indexName = "my_index"; + String policyName = "policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, randomBoolean() ?
"" : null); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), equalTo("index [my_index] is not associated with an Index Lifecycle Policy")); + } + + public void testValidatedMoveClusterStateToNextStepInvalidCurrentStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey notCurrentStepKey = new StepKey("not_current_phase", "not_current_action", "not_current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(nextStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, notCurrentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), equalTo("index [my_index] is not on current step " + + "[{\"phase\":\"not_current_phase\",\"action\":\"not_current_action\",\"name\":\"not_current_step\"}]")); + } + + public void testValidatedMoveClusterStateToNextStepInvalidNextStep() { + String indexName = "my_index"; + String policyName = "my_policy"; + StepKey currentStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey nextStepKey = new StepKey("next_phase", "next_action", "next_step"); + long now = randomNonNegativeLong(); + Step step = new MockStep(currentStepKey, nextStepKey); + PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step); + + Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, + nextStepKey, () -> now, stepRegistry)); + assertThat(exception.getMessage(), + equalTo("step 
[{\"phase\":\"next_phase\",\"action\":\"next_action\",\"name\":\"next_step\"}] " + + "for index [my_index] with policy [my_policy] does not exist")); + } + + public void testMoveClusterStateToErrorStep() throws IOException { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + + ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); + assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, + "{\"type\":\"exception\",\"reason\":\"THIS IS AN EXPECTED CAUSE\"}"); + + cause = new IllegalArgumentException("non elasticsearch-exception"); + newClusterState = IndexLifecycleRunner.moveClusterStateToErrorStep(index, clusterState, currentStep, cause, () -> now); + assertClusterStateOnErrorStep(clusterState, index, currentStep, newClusterState, now, + "{\"type\":\"illegal_argument_exception\",\"reason\":\"non elasticsearch-exception\"}"); + } + + public void testMoveClusterStateToFailedStep() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setAction(errorStepKey.getAction()); + lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setFailedStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + ClusterState nextClusterState = runner.moveClusterStateToFailedStep(clusterState, indices); + IndexLifecycleRunnerTests.assertClusterStateOnNextStep(clusterState, index, errorStepKey, failedStepKey, + nextClusterState, now); + } + + public void testMoveClusterStateToFailedStepIndexNotFound() { + String existingIndexName = "my_index"; + String invalidIndexName = "does_not_exist"; + ClusterState clusterState = buildClusterState(existingIndexName, Settings.builder(), LifecycleExecutionState.builder().build(), + Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(null, null, () -> 0L); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> 
runner.moveClusterStateToFailedStep(clusterState, new String[] { invalidIndexName })); + assertThat(exception.getMessage(), equalTo("index [" + invalidIndexName + "] does not exist")); + } + + public void testMoveClusterStateToFailedStepInvalidPolicySetting() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, (String) null); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setAction(errorStepKey.getAction()); + lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setFailedStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, indices)); + assertThat(exception.getMessage(), equalTo("index [" + indexName + "] is not associated with an Index Lifecycle Policy")); + } + + public void testMoveClusterStateToFailedNotOnError() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + Step step = new MockStep(failedStepKey, null); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, (String) null); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(failedStepKey.getPhase()); + lifecycleState.setAction(failedStepKey.getAction()); + lifecycleState.setStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, indices)); + assertThat(exception.getMessage(), equalTo("cannot retry an action for an index [" + indices[0] + + "] that has not encountered an error when running a Lifecycle Policy")); + } + + public void testAddStepInfoToClusterState() throws IOException { + String indexName = "my_index"; + StepKey currentStep = new StepKey("current_phase", "current_action", "current_step"); + RandomStepInfo stepInfo = new RandomStepInfo(() -> randomAlphaOfLength(10)); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + 
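+ // Builds an index that is already on the current step, attaches the step info, and then verifies that re-applying identical step info is a no-op: the runner should return the very same ClusterState instance.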
ClusterState clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + ClusterState newClusterState = IndexLifecycleRunner.addStepInfoToClusterState(index, clusterState, stepInfo); + assertClusterStateStepInfo(clusterState, index, currentStep, newClusterState, stepInfo); + ClusterState runAgainClusterState = IndexLifecycleRunner.addStepInfoToClusterState(index, newClusterState, stepInfo); + assertSame(newClusterState, runAgainClusterState); + } + + private ClusterState buildClusterState(String indexName, Settings.Builder indexSettingsBuilder, + LifecycleExecutionState lifecycleState, + List lifecyclePolicyMetadatas) { + Settings indexSettings = indexSettingsBuilder.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetaData indexMetadata = IndexMetaData.builder(indexName) + .settings(indexSettings) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.asMap()) + .build(); + + Map lifecyclePolicyMetadatasMap = lifecyclePolicyMetadatas.stream() + .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())); + IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING); + + MetaData metadata = MetaData.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata) + .build(); + return ClusterState.builder(new ClusterName("my_cluster")).metaData(metadata).build(); + } + + private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep, StepKey unsafeStep) { + Map phases = new HashMap<>(); + if (safeStep != null) { + assert MockAction.NAME.equals(safeStep.getAction()) : "The safe action needs to be MockAction.NAME"; + assert unsafeStep == null + || safeStep.getPhase().equals(unsafeStep.getPhase()) == false : "safe and unsafe actions must be in different phases"; + Map actions = new HashMap<>(); + List steps = Collections.singletonList(new MockStep(safeStep, null)); + MockAction safeAction = new MockAction(steps, true); + actions.put(safeAction.getWriteableName(), safeAction); + Phase phase = new Phase(safeStep.getPhase(), TimeValue.timeValueMillis(0), actions); + phases.put(phase.getName(), phase); + } + if (unsafeStep != null) { + assert MockAction.NAME.equals(unsafeStep.getAction()) : "The unsafe action needs to be MockAction.NAME"; + Map actions = new HashMap<>(); + List steps = Collections.singletonList(new MockStep(unsafeStep, null)); + MockAction unsafeAction = new MockAction(steps, false); + actions.put(unsafeAction.getWriteableName(), unsafeAction); + Phase phase = new Phase(unsafeStep.getPhase(), TimeValue.timeValueMillis(0), actions); + phases.put(phase.getName(), phase); + } + return newTestLifecyclePolicy(policyName, phases); + } + + public void testRemovePolicyForIndex() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + StepKey currentStep = new StepKey(randomAlphaOfLength(10), MockAction.NAME, randomAlphaOfLength(10)); + LifecyclePolicy oldPolicy = createPolicy(oldPolicyName, currentStep, null); + Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + 
lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testRemovePolicyForIndexNoCurrentPolicy() { + String indexName = randomAlphaOfLength(10); + Settings.Builder indexSettingsBuilder = Settings.builder(); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, LifecycleExecutionState.builder().build(), + Collections.emptyList()); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testRemovePolicyForIndexIndexDoesntExist() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + LifecyclePolicy oldPolicy = newTestLifecyclePolicy(oldPolicyName, Collections.emptyMap()); + StepKey currentStep = AbstractStepTestCase.randomStepKey(); + Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = new Index("doesnt_exist", "im_not_here"); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertEquals(1, failedIndexes.size()); + assertEquals("doesnt_exist", failedIndexes.get(0)); + assertSame(clusterState, newClusterState); + } + + public void testRemovePolicyForIndexIndexInUnsafe() { + String indexName = randomAlphaOfLength(10); + String oldPolicyName = "old_policy"; + StepKey currentStep = new StepKey(randomAlphaOfLength(10), MockAction.NAME, randomAlphaOfLength(10)); + LifecyclePolicy oldPolicy = createPolicy(oldPolicyName, null, currentStep); + Settings.Builder indexSettingsBuilder = Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, oldPolicyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStep.getPhase()); + lifecycleState.setAction(currentStep.getAction()); + lifecycleState.setStep(currentStep.getName()); + List policyMetadatas = new 
ArrayList<>(); + policyMetadatas.add(new LifecyclePolicyMetadata(oldPolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); + Index index = clusterState.metaData().index(indexName).getIndex(); + Index[] indices = new Index[] { index }; + List failedIndexes = new ArrayList<>(); + + ClusterState newClusterState = IndexLifecycleRunner.removePolicyForIndexes(indices, clusterState, failedIndexes); + + assertTrue(failedIndexes.isEmpty()); + assertIndexNotManagedByILM(newClusterState, index); + } + + public void testIsReadyToTransition() { + String policyName = "async_action_policy"; + StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME); + MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null); + step.setWillComplete(true); + SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap = new TreeMap<>(Collections.singletonMap(policyName, + new LifecyclePolicyMetadata(createPolicy(policyName, null, step.getKey()), new HashMap<>(), + randomNonNegativeLong(), randomNonNegativeLong()))); + Map<String, Step> firstStepMap = Collections.singletonMap(policyName, step); + Map<StepKey, Step> policySteps = Collections.singletonMap(step.getKey(), step); + Map<String, Map<StepKey, Step>> stepMap = Collections.singletonMap(policyName, policySteps); + PolicyStepsRegistry policyStepsRegistry = new PolicyStepsRegistry(lifecyclePolicyMap, firstStepMap, + stepMap, NamedXContentRegistry.EMPTY, null); + ClusterService clusterService = mock(ClusterService.class); + final AtomicLong now = new AtomicLong(5); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyStepsRegistry, clusterService, now::get); + IndexMetaData indexMetaData = IndexMetaData.builder("my_index").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)) + .numberOfReplicas(randomIntBetween(0, 5)) + .build(); + // With no creation date, always transition + assertTrue("index should be able to transition with no creation date", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setIndexCreationDate(10L); + indexMetaData = IndexMetaData.builder(indexMetaData) + .settings(Settings.builder() + .put(indexMetaData.getSettings()) + .build()) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + // Index is not old enough to transition + assertFalse("index is not able to transition if it isn't old enough", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + + // Set the clock far into the future + now.set(Long.MAX_VALUE); + assertTrue("index should be able to transition past phase's age", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + } + + + public static void assertIndexNotManagedByILM(ClusterState clusterState, Index index) { + MetaData metadata = clusterState.metaData(); + assertNotNull(metadata); + IndexMetaData indexMetadata = metadata.getIndexSafe(index); + assertNotNull(indexMetadata); + Settings indexSettings = indexMetadata.getSettings(); + assertNotNull(indexSettings); + assertFalse(LifecycleSettings.LIFECYCLE_NAME_SETTING.exists(indexSettings)); + assertFalse(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING.exists(indexSettings)); + } + + public static void assertClusterStateOnPolicy(ClusterState oldClusterState, Index index, String expectedPolicy, StepKey previousStep, + StepKey expectedStep, ClusterState
newClusterState, long now) { + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(expectedStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(expectedStep.getAction(), newLifecycleState.getAction()); + assertEquals(expectedStep.getName(), newLifecycleState.getStep()); + if (Objects.equals(previousStep.getPhase(), expectedStep.getPhase())) { + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + } else { + assertEquals(now, newLifecycleState.getPhaseTime().longValue()); + } + if (Objects.equals(previousStep.getAction(), expectedStep.getAction())) { + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + } else { + assertEquals(now, newLifecycleState.getActionTime().longValue()); + } + if (Objects.equals(previousStep.getName(), expectedStep.getName())) { + assertEquals(oldLifecycleState.getStepTime(), newLifecycleState.getStepTime()); + } else { + assertEquals(now, newLifecycleState.getStepTime().longValue()); + } + assertEquals(null, newLifecycleState.getFailedStep()); + assertEquals(null, newLifecycleState.getStepInfo()); + } + + public static void assertClusterStateOnNextStep(ClusterState oldClusterState, Index index, StepKey currentStep, StepKey nextStep, + ClusterState newClusterState, long now) { + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(nextStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(nextStep.getAction(), newLifecycleState.getAction()); + assertEquals(nextStep.getName(), newLifecycleState.getStep()); + if (currentStep.getPhase().equals(nextStep.getPhase())) { + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + } else { + assertEquals(now, newLifecycleState.getPhaseTime().longValue()); + } + if (currentStep.getAction().equals(nextStep.getAction())) { + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + } else { + assertEquals(now, newLifecycleState.getActionTime().longValue()); + } + assertEquals(now, newLifecycleState.getStepTime().longValue()); + assertEquals(null, newLifecycleState.getFailedStep()); + assertEquals(null, newLifecycleState.getStepInfo()); + } + + private void assertClusterStateOnErrorStep(ClusterState oldClusterState, Index index, StepKey currentStep, + ClusterState newClusterState, long now, String expectedCauseValue) throws IOException { +
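+ // A move to ERROR should keep the phase and action, set the step to ErrorStep.NAME, record the name of the failed step, and store the serialized cause in step info; only the step time changes.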
assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(currentStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(currentStep.getAction(), newLifecycleState.getAction()); + assertEquals(ErrorStep.NAME, newLifecycleState.getStep()); + assertEquals(currentStep.getName(), newLifecycleState.getFailedStep()); + assertEquals(expectedCauseValue, newLifecycleState.getStepInfo()); + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + assertEquals(now, newLifecycleState.getStepTime().longValue()); + } + + private void assertClusterStateStepInfo(ClusterState oldClusterState, Index index, StepKey currentStep, ClusterState newClusterState, + ToXContentObject stepInfo) throws IOException { + XContentBuilder stepInfoXContentBuilder = JsonXContent.contentBuilder(); + stepInfo.toXContent(stepInfoXContentBuilder, ToXContent.EMPTY_PARAMS); + String expectedStepInfoValue = BytesReference.bytes(stepInfoXContentBuilder).utf8ToString(); + assertNotSame(oldClusterState, newClusterState); + MetaData newMetadata = newClusterState.metaData(); + assertNotSame(oldClusterState.metaData(), newMetadata); + IndexMetaData newIndexMetadata = newMetadata.getIndexSafe(index); + assertNotSame(oldClusterState.metaData().index(index), newIndexMetadata); + LifecycleExecutionState newLifecycleState = LifecycleExecutionState + .fromIndexMetadata(newClusterState.metaData().index(index)); + LifecycleExecutionState oldLifecycleState = LifecycleExecutionState + .fromIndexMetadata(oldClusterState.metaData().index(index)); + assertNotSame(oldLifecycleState, newLifecycleState); + assertEquals(currentStep.getPhase(), newLifecycleState.getPhase()); + assertEquals(currentStep.getAction(), newLifecycleState.getAction()); + assertEquals(currentStep.getName(), newLifecycleState.getStep()); + assertEquals(expectedStepInfoValue, newLifecycleState.getStepInfo()); + assertEquals(oldLifecycleState.getPhaseTime(), newLifecycleState.getPhaseTime()); + assertEquals(oldLifecycleState.getActionTime(), newLifecycleState.getActionTime()); + assertEquals(oldLifecycleState.getStepTime(), newLifecycleState.getStepTime()); + } + + private static class MockAsyncActionStep extends AsyncActionStep { + + private Exception exception; + private boolean willComplete; + private boolean indexSurvives = true; + private long executeCount = 0; + private CountDownLatch latch; + + MockAsyncActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey, null); + } + + void setException(Exception exception) { + this.exception = exception; + } + + @Override + public boolean indexSurvives() { + return indexSurvives; + } + + void setWillComplete(boolean willComplete) { + this.willComplete = willComplete; + } + + long getExecuteCount() { + return executeCount; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void
performAction(IndexMetaData indexMetaData, ClusterState currentState, Listener listener) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception == null) { + listener.onResponse(willComplete); + } else { + listener.onFailure(exception); + } + } + + } + + private static class MockAsyncWaitStep extends AsyncWaitStep { + + private Exception exception; + private boolean willComplete; + private long executeCount = 0; + private ToXContentObject expectedInfo = null; + private CountDownLatch latch; + + MockAsyncWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey, null); + } + + void setException(Exception exception) { + this.exception = exception; + } + + long getExecuteCount() { + return executeCount; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + @Override + public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception == null) { + listener.onResponse(willComplete, expectedInfo); + } else { + listener.onFailure(exception); + } + } + + } + + static class MockClusterStateActionStep extends ClusterStateActionStep { + + private RuntimeException exception; + private long executeCount = 0; + private CountDownLatch latch; + + MockClusterStateActionStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public void setException(RuntimeException exception) { + this.exception = exception; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + public long getExecuteCount() { + return executeCount; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + executeCount++; + if (latch != null) { + latch.countDown(); + } + if (exception != null) { + throw exception; + } + return clusterState; + } + } + + static class MockClusterStateWaitStep extends ClusterStateWaitStep { + + private RuntimeException exception; + private boolean willComplete; + private long executeCount = 0; + private ToXContentObject expectedInfo = null; + + MockClusterStateWaitStep(StepKey key, StepKey nextStepKey) { + super(key, nextStepKey); + } + + public void setException(RuntimeException exception) { + this.exception = exception; + } + + public void setWillComplete(boolean willComplete) { + this.willComplete = willComplete; + } + + void expectedInfo(ToXContentObject expectedInfo) { + this.expectedInfo = expectedInfo; + } + + public long getExecuteCount() { + return executeCount; + } + + @Override + public Result isConditionMet(Index index, ClusterState clusterState) { + executeCount++; + if (exception != null) { + throw exception; + } + return new Result(willComplete, expectedInfo); + } + + } + + private static class SetStepInfoUpdateTaskMatcher extends ArgumentMatcher { + + private Index index; + private String policy; + private StepKey currentStepKey; + private ToXContentObject stepInfo; + + SetStepInfoUpdateTaskMatcher(Index index, String policy, StepKey currentStepKey, ToXContentObject stepInfo) { + this.index = index; + this.policy = policy; + this.currentStepKey = currentStepKey; + this.stepInfo = stepInfo; + } + + @Override + public boolean matches(Object argument) { + if (argument == null || argument instanceof SetStepInfoUpdateTask == false) { + return false; + } + SetStepInfoUpdateTask task = (SetStepInfoUpdateTask) argument; + return Objects.equals(index, task.getIndex()) && + Objects.equals(policy, task.getPolicy())&& + Objects.equals(currentStepKey, 
task.getCurrentStepKey()) && + Objects.equals(xContentToString(stepInfo), xContentToString(task.getStepInfo())); + } + + private String xContentToString(ToXContentObject xContent) { + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + xContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + return BytesReference.bytes(builder).utf8ToString(); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + } + + private static class ExecuteStepsUpdateTaskMatcher extends ArgumentMatcher { + + private Index index; + private String policy; + private Step startStep; + + ExecuteStepsUpdateTaskMatcher(Index index, String policy, Step startStep) { + this.index = index; + this.policy = policy; + this.startStep = startStep; + } + + @Override + public boolean matches(Object argument) { + if (argument == null || argument instanceof ExecuteStepsUpdateTask == false) { + return false; + } + ExecuteStepsUpdateTask task = (ExecuteStepsUpdateTask) argument; + return Objects.equals(index, task.getIndex()) && + Objects.equals(policy, task.getPolicy()) && + Objects.equals(startStep, task.getStartStep()); + } + + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java new file mode 100644 index 0000000000000..13fe9c1c69002 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.MockAction; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.Step; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; + +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneId; +import java.util.Collections; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; + +import static org.elasticsearch.node.Node.NODE_MASTER_SETTING; +import static org.elasticsearch.xpack.core.indexlifecycle.AbstractStepTestCase.randomStepKey; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexLifecycleServiceTests extends ESTestCase { + + private ClusterService clusterService; + private IndexLifecycleService indexLifecycleService; + private String nodeId; + private DiscoveryNode masterNode; + private IndicesAdminClient indicesClient; + private long now; + + @Before + public void prepareServices() { + nodeId = randomAlphaOfLength(10); + ExecutorService executorService = mock(ExecutorService.class); + clusterService = mock(ClusterService.class); + masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT) + .put(NODE_MASTER_SETTING.getKey(), true).build(), + new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId); + now = randomNonNegativeLong(); + Clock clock = Clock.fixed(Instant.ofEpochMilli(now), ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds()))); + 
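+ // Stub listener registration and make the mocked executor run submitted tasks inline, so the service under test executes deterministically on the test thread.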
+ doAnswer(invocationOnMock -> null).when(clusterService).addListener(any()); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[0]; + runnable.run(); + return null; + }).when(executorService).execute(any()); + Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build(); + when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, + Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))); + + Client client = mock(Client.class); + AdminClient adminClient = mock(AdminClient.class); + indicesClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesClient); + when(client.settings()).thenReturn(Settings.EMPTY); + + indexLifecycleService = new IndexLifecycleService(Settings.EMPTY, client, clusterService, clock, () -> now, null); + Mockito.verify(clusterService).addListener(indexLifecycleService); + Mockito.verify(clusterService).addStateApplier(indexLifecycleService); + } + + @After + public void cleanup() { + indexLifecycleService.close(); + } + + + public void testStoppedModeSkip() { + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(randomStepKey(), randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPED)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, randomBoolean()); + assertThat(mockStep.getExecuteCount(), equalTo(0L)); + } + + public void testRequestedStopOnShrink() { + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, randomAlphaOfLength(5)); + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(mockShrinkStep.getPhase()); + lifecycleState.setAction(mockShrinkStep.getAction()); + lifecycleState.setStep(mockShrinkStep.getName()); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + SetOnce executedShrink = new SetOnce<>(); + doAnswer(invocationOnMock -> { + executedShrink.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, true); + assertTrue(executedShrink.get()); + } + + public void testRequestedStopOnSafeAction() { + String policyName = randomAlphaOfLengthBetween(1, 20); + Step.StepKey currentStepKey = randomStepKey(); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(currentStepKey, randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(currentStepKey.getPhase()); + lifecycleState.setAction(currentStepKey.getAction()); + lifecycleState.setStep(currentStepKey.getName()); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + + SetOnce ranPolicy = new SetOnce<>(); + SetOnce moveToMaintenance = new SetOnce<>(); + doAnswer(invocationOnMock -> { + ranPolicy.set(true); + throw new AssertionError("invalid invocation"); + }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + + doAnswer(invocationOnMock -> { + OperationModeUpdateTask task = (OperationModeUpdateTask) invocationOnMock.getArguments()[1]; + assertThat(task.getOperationMode(), equalTo(OperationMode.STOPPED)); + moveToMaintenance.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any(OperationModeUpdateTask.class)); + + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, randomBoolean()); + assertNull(ranPolicy.get()); + assertTrue(moveToMaintenance.get()); + } + + public void testTriggeredDifferentJob() { + Mockito.reset(clusterService); + SchedulerEngine.Event schedulerEvent = new SchedulerEngine.Event("foo", randomLong(), randomLong()); + indexLifecycleService.triggered(schedulerEvent); + Mockito.verifyZeroInteractions(indicesClient, clusterService); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java new file mode 100644 index 0000000000000..4fec7ba80db8e --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LifecyclePolicyClientTests.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LifecyclePolicyClientTests extends ESTestCase { + + public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + + try (LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + Collections.emptyMap())) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); + } + + public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedException { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("foo", "foo"); + headers.put("bar", "bar"); + + try (LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + headers)) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); 
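+ // If either countDown() path were skipped, await() would block indefinitely and the test would fail by timeout rather than by assertion.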
+ } + + public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + Mockito.when(client.settings()).thenReturn(Settings.EMPTY); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertThat(threadContext.getHeaders().size(), equalTo(2)); + assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo")); + assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar")); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("es-security-runas-user", "foo"); + headers.put("_xpack_security_authentication", "bar"); + + try (LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, + headers)) { + policyClient.execute(SearchAction.INSTANCE, request, listener); + } + + latch.await(); + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java new file mode 100644 index 0000000000000..3e09133c435a8 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/LockableLifecycleType.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.Phase; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * This {@link LifecycleType} is used for encapsulating test policies + * used in integration tests where the underlying {@link LifecycleAction}s are + * able to communicate with the test + */ +public class LockableLifecycleType implements LifecycleType { + public static final String TYPE = "lockable"; + public static final LockableLifecycleType INSTANCE = new LockableLifecycleType(); + + @Override + public List getOrderedPhases(Map phases) { + return new ArrayList<>(phases.values()); + } + + @Override + public String getNextPhaseName(String currentPhaseName, Map phases) { + return null; + } + + @Override + public String getPreviousPhaseName(String currentPhaseName, Map phases) { + return null; + } + + @Override + public List getOrderedActions(Phase phase) { + return new ArrayList<>(phase.getActions().values()); + } + + @Override + public String getNextActionName(String currentActionName, Phase phase) { + return null; + } + + @Override + public void validate(Collection phases) { + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) { + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java new file mode 100644 index 0000000000000..dc3a6602f39ba --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToErrorStepUpdateTaskTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.indexlifecycle; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class MoveToErrorStepUpdateTaskTests extends ESTestCase { + + String policy; + ClusterState clusterState; + Index index; + + @Before + public void setupClusterState() { + policy = randomAlphaOfLength(10); + LifecyclePolicy lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy); + IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(settings(Version.CURRENT) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + index = indexMetadata.getIndex(); + IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata( + Collections.singletonMap(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())), + OperationMode.RUNNING); + MetaData metaData = MetaData.builder() + .persistentSettings(settings(Version.CURRENT).build()) + .put(IndexMetaData.builder(indexMetadata)) + .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .build(); + clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); + } + + public void testExecuteSuccessfullyMoved() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + setStateToKey(currentStepKey); + + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index)); + StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + assertThat(actualKey, equalTo(new 
StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), ErrorStep.NAME))); + assertThat(lifecycleState.getFailedStep(), equalTo(currentStepKey.getName())); + assertThat(lifecycleState.getPhaseTime(), nullValue()); + assertThat(lifecycleState.getActionTime(), nullValue()); + assertThat(lifecycleState.getStepTime(), equalTo(now)); + + XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder(); + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, ToXContent.EMPTY_PARAMS, cause); + causeXContentBuilder.endObject(); + String expectedCauseValue = BytesReference.bytes(causeXContentBuilder).utf8ToString(); + assertThat(lifecycleState.getStepInfo(), equalTo(expectedCauseValue)); + } + + public void testExecuteNoopDifferentStep() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + setStateToKey(notCurrentStepKey); + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void testExecuteNoopDifferentPolicy() throws IOException { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + setStateToKey(currentStepKey); + setStatePolicy("not-" + policy); + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + ClusterState newState = task.execute(clusterState); + assertThat(newState, sameInstance(clusterState)); + } + + public void testOnFailure() { + StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name"); + long now = randomNonNegativeLong(); + Exception cause = new ElasticsearchException("THIS IS AN EXPECTED CAUSE"); + + setStateToKey(currentStepKey); + + MoveToErrorStepUpdateTask task = new MoveToErrorStepUpdateTask(index, policy, currentStepKey, cause, () -> now); + Exception expectedException = new RuntimeException(); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> task.onFailure(randomAlphaOfLength(10), expectedException)); + assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + currentStepKey + + "] to the ERROR step.", exception.getMessage()); + assertSame(expectedException, exception.getCause()); + } + + private void setStatePolicy(String policy) { + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .updateSettings(Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build(); + + } + private void setStateToKey(StepKey stepKey) { + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder( + LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index))); + lifecycleState.setPhase(stepKey.getPhase()); + lifecycleState.setAction(stepKey.getAction()); + lifecycleState.setStep(stepKey.getName()); + + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.getMetaData()) + 
+                .put(IndexMetaData.builder(clusterState.getMetaData().index(index))
+                    .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build();
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java
new file mode 100644
index 0000000000000..f166bba25c986
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTaskTests.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
+import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
+import org.elasticsearch.xpack.core.indexlifecycle.Step;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+import org.junit.Before;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MoveToNextStepUpdateTaskTests extends ESTestCase {
+
+    String policy;
+    ClusterState clusterState;
+    Index index;
+    LifecyclePolicy lifecyclePolicy;
+
+    @Before
+    public void setupClusterState() {
+        policy = randomAlphaOfLength(10);
+        IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5))
+            .settings(settings(Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, policy))
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        index = indexMetadata.getIndex();
+        lifecyclePolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policy);
+        IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata(
+            Collections.singletonMap(policy, new LifecyclePolicyMetadata(lifecyclePolicy, Collections.emptyMap(),
+                randomNonNegativeLong(), randomNonNegativeLong())),
+            OperationMode.RUNNING);
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(indexMetadata))
+            .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta)
+            .build();
+        clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+    }
+
+    public void testExecuteSuccessfullyMoved() {
+        long now = randomNonNegativeLong();
+        List<Step> steps = lifecyclePolicy.toSteps(null);
+        StepKey currentStepKey = steps.get(0).getKey();
+        StepKey nextStepKey = steps.get(0).getNextStepKey();
+
+        setStateToKey(currentStepKey, now);
+
+        AtomicBoolean changed = new AtomicBoolean(false);
+        MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey,
+            () -> now, state -> changed.set(true));
+        ClusterState newState = task.execute(clusterState);
+        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index));
+        StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState);
+        assertThat(actualKey, equalTo(nextStepKey));
+        assertThat(lifecycleState.getPhaseTime(), equalTo(now));
+        assertThat(lifecycleState.getActionTime(), equalTo(now));
+        assertThat(lifecycleState.getStepTime(), equalTo(now));
+        task.clusterStateProcessed("source", clusterState, newState);
+        assertTrue(changed.get());
+    }
+
+    public void testExecuteDifferentCurrentStep() {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current");
+        long now = randomNonNegativeLong();
+        setStateToKey(notCurrentStepKey, now);
+        MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, null, () -> now, null);
+        ClusterState newState = task.execute(clusterState);
+        assertSame(newState, clusterState);
+    }
+
+    public void testExecuteDifferentPolicy() {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        long now = randomNonNegativeLong();
+        setStateToKey(currentStepKey, now);
+        setStatePolicy("not-" + policy);
+        MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, null, () -> now, null);
+        ClusterState newState = task.execute(clusterState);
+        assertSame(newState, clusterState);
+    }
+
+    public void testExecuteSuccessfulMoveWithInvalidNextStep() {
+        long now = randomNonNegativeLong();
+        List<Step> steps = lifecyclePolicy.toSteps(null);
+        StepKey currentStepKey = steps.get(0).getKey();
+        StepKey invalidNextStep = new StepKey("next-invalid", "next-invalid", "next-invalid");
+
+        setStateToKey(currentStepKey, now);
+
+        SetOnce<Boolean> changed = new SetOnce<>();
+        MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey,
+            invalidNextStep, () -> now, s -> changed.set(true));
+        ClusterState newState = task.execute(clusterState);
+        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index));
+        StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState);
+        assertThat(actualKey, equalTo(invalidNextStep));
+        assertThat(lifecycleState.getPhaseTime(), equalTo(now));
+        assertThat(lifecycleState.getActionTime(), equalTo(now));
+        assertThat(lifecycleState.getStepTime(), equalTo(now));
+        task.clusterStateProcessed("source", clusterState, newState);
+        assertTrue(changed.get());
+    }
+
+    public void testOnFailure() {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        StepKey nextStepKey = new StepKey("next-phase", "next-action", "next-name");
+        long now = randomNonNegativeLong();
+
+        setStateToKey(currentStepKey, now);
+
+        MoveToNextStepUpdateTask task = new MoveToNextStepUpdateTask(index, policy, currentStepKey, nextStepKey, () -> now, state -> {});
+        Exception expectedException = new RuntimeException();
+        ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+            () -> task.onFailure(randomAlphaOfLength(10), expectedException));
+        assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to move from step [" + currentStepKey
+            + "] to step [" + nextStepKey + "].", exception.getMessage());
+        assertSame(expectedException, exception.getCause());
+    }
+
+    private void setStatePolicy(String policy) {
+        clusterState = ClusterState.builder(clusterState)
+            .metaData(MetaData.builder(clusterState.metaData())
+                .updateSettings(Settings.builder()
+                    .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build();
+    }
+
+    private void setStateToKey(StepKey stepKey, long now) {
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(
+            LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index)));
+        lifecycleState.setPhase(stepKey.getPhase());
+        lifecycleState.setPhaseTime(now);
+        lifecycleState.setAction(stepKey.getAction());
+        lifecycleState.setActionTime(now);
+        lifecycleState.setStep(stepKey.getName());
+        lifecycleState.setStepTime(now);
+        lifecycleState.setPhaseDefinition("{\"actions\":{\"TEST_ACTION\":{}}}");
+        clusterState = ClusterState.builder(clusterState)
+            .metaData(MetaData.builder(clusterState.getMetaData())
+                .put(IndexMetaData.builder(clusterState.getMetaData().index(index))
+                    .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build();
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java
new file mode 100644
index 0000000000000..dccd12e15f114
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/OperationModeUpdateTaskTests.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
+
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class OperationModeUpdateTaskTests extends ESTestCase {
+
+    public void testExecute() {
+        assertMove(OperationMode.RUNNING, OperationMode.STOPPING);
+        assertMove(OperationMode.STOPPING, randomFrom(OperationMode.RUNNING, OperationMode.STOPPED));
+        assertMove(OperationMode.STOPPED, OperationMode.RUNNING);
+
+        OperationMode mode = randomFrom(OperationMode.values());
+        assertNoMove(mode, mode);
+        assertNoMove(OperationMode.STOPPED, OperationMode.STOPPING);
+        assertNoMove(OperationMode.RUNNING, OperationMode.STOPPED);
+    }
+
+    public void testExecuteWithEmptyMetadata() {
+        OperationMode requestedMode = OperationMode.STOPPING;
+        OperationMode newMode = executeUpdate(false, IndexLifecycleMetadata.EMPTY.getOperationMode(),
+            requestedMode, false);
+        assertThat(newMode, equalTo(requestedMode));
+
+        requestedMode = randomFrom(OperationMode.RUNNING, OperationMode.STOPPED);
+        newMode = executeUpdate(false, IndexLifecycleMetadata.EMPTY.getOperationMode(),
+            requestedMode, false);
+        assertThat(newMode, equalTo(OperationMode.RUNNING));
+    }
+
+    private void assertMove(OperationMode currentMode, OperationMode requestedMode) {
+        OperationMode newMode = executeUpdate(true, currentMode, requestedMode, false);
+        assertThat(newMode, equalTo(requestedMode));
+    }
+
+    private void assertNoMove(OperationMode currentMode, OperationMode requestedMode) {
+        OperationMode newMode = executeUpdate(true, currentMode, requestedMode, true);
+        assertThat(newMode, equalTo(currentMode));
+    }
+
+    private OperationMode executeUpdate(boolean metadataInstalled, OperationMode currentMode, OperationMode requestMode,
+                                        boolean assertSameClusterState) {
+        IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode);
+        ImmutableOpenMap.Builder<String, MetaData.Custom> customsMapBuilder = ImmutableOpenMap.builder();
+        MetaData.Builder metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build());
+        if (metadataInstalled) {
+            metaData.customs(customsMapBuilder.fPut(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata).build());
+        }
+        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+        OperationModeUpdateTask task = new OperationModeUpdateTask(requestMode);
+        ClusterState newState = task.execute(state);
+        if (assertSameClusterState) {
+            assertSame(state, newState);
+        } else {
+            assertThat(state, not(equalTo(newState)));
+        }
+        IndexLifecycleMetadata newMetaData = newState.metaData().custom(IndexLifecycleMetadata.TYPE);
+        assertThat(newMetaData.getPolicyMetadatas(), equalTo(indexLifecycleMetadata.getPolicyMetadatas()));
+        return newMetaData.getOperationMode();
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java
new file mode 100644
index 0000000000000..fe7fd1fca05d3
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PhaseStatsTests.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public class PhaseStatsTests extends AbstractWireSerializingTestCase<PhaseStats> {
+
+    @Override
+    protected PhaseStats createTestInstance() {
+        return randomPhaseStats();
+    }
+
+    static PhaseStats randomPhaseStats() {
+        TimeValue minimumAge = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+        String[] actionNames = generateRandomStringArray(10, 20, false);
+        return new PhaseStats(minimumAge, actionNames);
+    }
+
+    @Override
+    protected PhaseStats mutateInstance(PhaseStats instance) throws IOException {
+        TimeValue minimumAge = instance.getAfter();
+        String[] actionNames = instance.getActionNames();
+        switch (between(0, 1)) {
+        case 0:
+            minimumAge = randomValueOtherThan(minimumAge,
+                () -> TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"));
+            break;
+        case 1:
+            actionNames = Arrays.copyOf(actionNames, actionNames.length + 1);
+            actionNames[actionNames.length - 1] = randomAlphaOfLengthBetween(10, 20);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new PhaseStats(minimumAge, actionNames);
+    }
+
+    @Override
+    protected Reader<PhaseStats> instanceReader() {
+        return PhaseStats::new;
+    }
+
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java
new file mode 100644
index 0000000000000..5ced745c2fb3f
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStatsTests.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PhaseStats;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleFeatureSetUsage.PolicyStats;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class PolicyStatsTests extends AbstractWireSerializingTestCase<PolicyStats> {
+
+    @Override
+    protected PolicyStats createTestInstance() {
+        return randomPolicyStats();
+    }
+
+    static PolicyStats randomPolicyStats() {
+        Map<String, PhaseStats> phaseStats = new HashMap<>();
+        int size = randomIntBetween(0, 10);
+        for (int i = 0; i < size; i++) {
+            phaseStats.put(randomAlphaOfLength(10), PhaseStatsTests.randomPhaseStats());
+        }
+        int numberIndicesManaged = randomIntBetween(0, 1000);
+        return new PolicyStats(phaseStats, numberIndicesManaged);
+    }
+
+    @Override
+    protected PolicyStats mutateInstance(PolicyStats instance) throws IOException {
+        Map<String, PhaseStats> phaseStats = instance.getPhaseStats();
+        int numberIndicesManaged = instance.getIndicesManaged();
+        switch (between(0, 1)) {
+        case 0:
+            phaseStats = new HashMap<>(phaseStats);
+            phaseStats.put(randomAlphaOfLength(11), PhaseStatsTests.randomPhaseStats());
+            break;
+        case 1:
+            numberIndicesManaged += randomIntBetween(1, 10);
+            break;
+        default:
+            throw new AssertionError("Illegal randomisation branch");
+        }
+        return new PolicyStats(phaseStats, numberIndicesManaged);
+    }
+
+    @Override
+    protected Reader<PolicyStats> instanceReader() {
+        return PolicyStats::new;
+    }
+
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java
new file mode 100644
index 0000000000000..611522a59b0d4
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistryTests.java
@@ -0,0 +1,417 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
+import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata;
+import org.elasticsearch.xpack.core.indexlifecycle.InitializePolicyContextStep;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyTests;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
+import org.elasticsearch.xpack.core.indexlifecycle.MockStep;
+import org.elasticsearch.xpack.core.indexlifecycle.OperationMode;
+import org.elasticsearch.xpack.core.indexlifecycle.Phase;
+import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo;
+import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction;
+import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep;
+import org.elasticsearch.xpack.core.indexlifecycle.Step;
+import org.mockito.Mockito;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.mockito.Mockito.mock;
+
+public class PolicyStepsRegistryTests extends ESTestCase {
+    private static final Step.StepKey MOCK_STEP_KEY = new Step.StepKey("mock", "mock", "mock");
+    private static final NamedXContentRegistry REGISTRY = new NamedXContentRegistry(new IndexLifecycle(Settings.EMPTY).getNamedXContent());
+
+    private IndexMetaData emptyMetaData(Index index) {
+        return IndexMetaData.builder(index.getName()).settings(settings(Version.CURRENT))
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+    }
+
+    public void testGetFirstStep() {
+        String policyName = randomAlphaOfLengthBetween(2, 10);
+        Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null);
+        Map<String, Step> firstStepMap = Collections.singletonMap(policyName, expectedFirstStep);
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null);
+        Step actualFirstStep = registry.getFirstStep(policyName);
+        assertThat(actualFirstStep, sameInstance(expectedFirstStep));
+    }
+
+    public void testGetFirstStepUnknownPolicy() {
+        String policyName = randomAlphaOfLengthBetween(2, 10);
+        Step expectedFirstStep = new MockStep(MOCK_STEP_KEY, null);
+        Map<String, Step> firstStepMap = Collections.singletonMap(policyName, expectedFirstStep);
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(null, firstStepMap, null, NamedXContentRegistry.EMPTY, null);
+        Step actualFirstStep = registry.getFirstStep(policyName + "unknown");
+        assertNull(actualFirstStep);
+    }
+
+    public void testGetStep() {
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy");
+        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong());
+        String phaseName = randomFrom(policy.getPhases().keySet());
+        Phase phase = policy.getPhases().get(phaseName);
+        PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
+        String phaseJson = Strings.toString(pei);
+        LifecycleAction action = randomFrom(phase.getActions().values());
+        Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY));
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setPhaseDefinition(phaseJson);
+        IndexMetaData indexMetaData = IndexMetaData.builder("test")
+            .settings(Settings.builder()
+                .put("index.number_of_shards", 1)
+                .put("index.number_of_replicas", 0)
+                .put("index.version.created", Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, "policy")
+                .build())
+            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
+            .build();
+        SortedMap<String, LifecyclePolicyMetadata> metas = new TreeMap<>();
+        metas.put("policy", policyMetadata);
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client);
+        Step actualStep = registry.getStep(indexMetaData, step.getKey());
+        assertThat(actualStep.getKey(), equalTo(step.getKey()));
+    }
+
+    public void testGetStepErrorStep() {
+        Step.StepKey errorStepKey = new Step.StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), ErrorStep.NAME);
+        Step expectedStep = new ErrorStep(errorStepKey);
+        Index index = new Index("test", "uuid");
+        Map<Index, List<Step>> indexSteps = Collections.singletonMap(index, Collections.singletonList(expectedStep));
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null);
+        Step actualStep = registry.getStep(emptyMetaData(index), errorStepKey);
+        assertThat(actualStep, equalTo(expectedStep));
+    }
+
+    public void testGetStepUnknownPolicy() {
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(null, null, null, NamedXContentRegistry.EMPTY, null);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> registry.getStep(emptyMetaData(new Index("test", "uuid")), MOCK_STEP_KEY));
+        assertThat(e.getMessage(),
+            containsString("failed to retrieve step {\"phase\":\"mock\",\"action\":\"mock\",\"name\":\"mock\"}"
+                + " as index [test] has no policy"));
+    }
+
+    public void testGetStepForIndexWithNoPhaseGetsInitializationStep() {
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicy("policy");
+        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy,
+            Collections.emptyMap(), 1, randomNonNegativeLong());
+        IndexMetaData indexMetaData = IndexMetaData.builder("test")
+            .settings(Settings.builder()
+                .put("index.number_of_shards", 1)
+                .put("index.number_of_replicas", 0)
+                .put("index.version.created", Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, "policy")
+                .build())
+            .build();
+        SortedMap<String, LifecyclePolicyMetadata> metas = new TreeMap<>();
+        metas.put("policy", policyMetadata);
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client);
+        Step step = registry.getStep(indexMetaData, InitializePolicyContextStep.KEY);
+        assertNotNull(step);
+    }
+
+    public void testGetStepUnknownStepKey() {
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases("policy");
+        LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), 1, randomNonNegativeLong());
+        String phaseName = randomFrom(policy.getPhases().keySet());
+        Phase phase = policy.getPhases().get(phaseName);
+        PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
+        String phaseJson = Strings.toString(pei);
+        LifecycleAction action = randomFrom(phase.getActions().values());
+        Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY));
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setPhaseDefinition(phaseJson);
+        IndexMetaData indexMetaData = IndexMetaData.builder("test")
+            .settings(Settings.builder()
+                .put("index.number_of_shards", 1)
+                .put("index.number_of_replicas", 0)
+                .put("index.version.created", Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, "policy")
+                .build())
+            .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
+            .build();
+        SortedMap<String, LifecyclePolicyMetadata> metas = new TreeMap<>();
+        metas.put("policy", policyMetadata);
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, null, null, REGISTRY, client);
+        Step actualStep = registry.getStep(indexMetaData,
+            new Step.StepKey(step.getKey().getPhase(), step.getKey().getAction(), step.getKey().getName() + "-bad"));
+        assertNull(actualStep);
+    }
+
+    public void testUpdateFromNothingToSomethingToNothing() throws Exception {
+        Index index = new Index("test", "uuid");
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        String policyName = randomAlphaOfLength(5);
+        LifecyclePolicy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName);
+        logger.info("--> policy: {}", newPolicy);
+        List<Step> policySteps = newPolicy.toSteps(client);
+        Map<String, String> headers = new HashMap<>();
+        if (randomBoolean()) {
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+        }
+        Map<String, LifecyclePolicyMetadata> policyMap = Collections.singletonMap(newPolicy.getName(),
+            new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()));
+        IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setPhase("new");
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)
+            .put(IndexMetaData.builder("test")
+                .settings(Settings.builder()
+                    .put("index.uuid", "uuid")
+                    .put("index.number_of_shards", 1)
+                    .put("index.number_of_replicas", 0)
+                    .put("index.version.created", Version.CURRENT.id)
+                    .put(LifecycleSettings.LIFECYCLE_NAME, policyName))
+                .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))
+            .build();
+        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+            builder.startObject();
+            metaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            logger.info("--> metadata: {}", Strings.toString(builder));
+        }
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+        ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .build();
+
+        // start with empty registry
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client);
+
+        // add new policy
+        registry.update(currentState);
+
+        assertThat(registry.getFirstStep(newPolicy.getName()), equalTo(policySteps.get(0)));
+        assertThat(registry.getLifecyclePolicyMap().size(), equalTo(1));
+        assertNotNull(registry.getLifecyclePolicyMap().get(newPolicy.getName()));
+        assertThat(registry.getLifecyclePolicyMap().get(newPolicy.getName()).getHeaders(), equalTo(headers));
+        assertThat(registry.getFirstStepMap().size(), equalTo(1));
+        assertThat(registry.getStepMap().size(), equalTo(1));
+        Map<Step.StepKey, Step> registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName());
+        assertThat(registeredStepsForPolicy.size(), equalTo(policySteps.size()));
+        for (Step step : policySteps) {
+            LifecycleExecutionState.Builder newIndexState = LifecycleExecutionState.builder();
+            newIndexState.setPhase(step.getKey().getPhase());
+            currentState = ClusterState.builder(currentState)
+                .metaData(MetaData.builder(currentState.metaData())
+                    .put(IndexMetaData.builder(currentState.metaData().index("test"))
+                        .settings(Settings.builder().put(currentState.metaData().index("test").getSettings()))
+                        .putCustom(ILM_CUSTOM_METADATA_KEY, newIndexState.build().asMap())))
+                .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+                .build();
+            registry.update(currentState);
+            assertThat(registeredStepsForPolicy.get(step.getKey()), equalTo(step));
+            assertThat(registry.getStep(metaData.index(index), step.getKey()), equalTo(step));
+        }
+
+        Map<String, LifecyclePolicyMetadata> registryPolicyMap = registry.getLifecyclePolicyMap();
+        Map<String, Step> registryFirstStepMap = registry.getFirstStepMap();
+        Map<String, Map<Step.StepKey, Step>> registryStepMap = registry.getStepMap();
+        registry.update(currentState);
+        assertThat(registry.getLifecyclePolicyMap(), equalTo(registryPolicyMap));
+        assertThat(registry.getFirstStepMap(), equalTo(registryFirstStepMap));
+        assertThat(registry.getStepMap(), equalTo(registryStepMap));
+
+        // remove policy
+        lifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING);
+        currentState = ClusterState.builder(currentState)
+            .metaData(
+                MetaData.builder(metaData)
+                    .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)).build();
+        registry.update(currentState);
+        assertTrue(registry.getLifecyclePolicyMap().isEmpty());
+        assertTrue(registry.getFirstStepMap().isEmpty());
+        assertTrue(registry.getStepMap().isEmpty());
+    }
+
+    public void testUpdateChangedPolicy() {
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        String policyName = randomAlphaOfLengthBetween(5, 10);
+        LifecyclePolicy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName);
+        Map<String, String> headers = new HashMap<>();
+        if (randomBoolean()) {
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+        }
+        Map<String, LifecyclePolicyMetadata> policyMap = Collections.singletonMap(newPolicy.getName(),
+            new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()));
+        IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)
+            .build();
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+        ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .build();
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client);
+        // add new policy
+        registry.update(currentState);
+
+        // swap out policy
+        newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName);
+        lifecycleMetadata = new IndexLifecycleMetadata(Collections.singletonMap(policyName,
+            new LifecyclePolicyMetadata(newPolicy, Collections.emptyMap(),
+                randomNonNegativeLong(), randomNonNegativeLong())), OperationMode.RUNNING);
+        currentState = ClusterState.builder(currentState)
+            .metaData(MetaData.builder(metaData).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)).build();
+        registry.update(currentState);
+        // TODO(talevy): assert changes... right now we do not support updates to policies. will require internal cleanup
+    }
+
+    public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Exception {
+        Index index = new Index("test", "uuid");
+        Client client = mock(Client.class);
+        Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
+        String policyName = randomAlphaOfLength(5);
+        Map<String, LifecycleAction> actions = new HashMap<>();
+        actions.put("shrink", new ShrinkAction(1));
+        Map<String, Phase> phases = new HashMap<>();
+        Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions);
+        PhaseExecutionInfo pei = new PhaseExecutionInfo(policyName, warmPhase, 1, randomNonNegativeLong());
+        String phaseJson = Strings.toString(pei);
+        phases.put("warm", new Phase("warm", TimeValue.ZERO, actions));
+        LifecyclePolicy newPolicy = new LifecyclePolicy(policyName, phases);
+        // Modify the policy
+        actions = new HashMap<>();
+        actions.put("shrink", new ShrinkAction(2));
+        phases = new HashMap<>();
+        phases.put("warm", new Phase("warm", TimeValue.ZERO, actions));
+        LifecyclePolicy updatedPolicy = new LifecyclePolicy(policyName, phases);
+        logger.info("--> policy: {}", newPolicy);
+        logger.info("--> updated policy: {}", updatedPolicy);
+        List<Step> policySteps = newPolicy.toSteps(client);
+        Map<String, String> headers = new HashMap<>();
+        if (randomBoolean()) {
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+            headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
+        }
+        Map<String, LifecyclePolicyMetadata> policyMap = Collections.singletonMap(newPolicy.getName(),
+            new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()));
+        IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
+        lifecycleState.setPhase("warm");
+        lifecycleState.setPhaseDefinition(phaseJson);
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)
+            .put(IndexMetaData.builder("test")
+                .settings(Settings.builder()
+                    .put("index.uuid", "uuid")
+                    .put("index.number_of_shards", 1)
+                    .put("index.number_of_replicas", 0)
+                    .put("index.version.created", Version.CURRENT.id)
+                    .put(LifecycleSettings.LIFECYCLE_NAME, policyName))
+                .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))
+            .build();
+        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+            builder.startObject();
+            metaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            logger.info("--> metadata: {}", Strings.toString(builder));
+        }
+        String nodeId = randomAlphaOfLength(10);
+        DiscoveryNode masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
+                .put(Node.NODE_MASTER_SETTING.getKey(), true).build(),
+            new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
+        ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
+            .metaData(metaData)
+            .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
+            .build();
+
+        // start with empty registry
+        PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client);
+
+        // add new policy
+        registry.update(currentState);
+
+        Map<Step.StepKey, Step> registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName());
+        Step shrinkStep = registeredStepsForPolicy.entrySet().stream()
+            .filter(e -> e.getKey().getPhase().equals("warm") && e.getKey().getName().equals("shrink"))
+            .findFirst().get().getValue();
+        Step gotStep = registry.getStep(metaData.index(index), shrinkStep.getKey());
+        assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(1));
+        assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1));
+
+        // Update the policy with the new policy, but keep the phase the same
+        policyMap = Collections.singletonMap(updatedPolicy.getName(), new LifecyclePolicyMetadata(updatedPolicy, headers,
+            randomNonNegativeLong(), randomNonNegativeLong()));
+        lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
+        metaData = MetaData.builder(metaData)
+            .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)
+            .build();
+        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+            builder.startObject();
+            metaData.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            logger.info("--> metadata: {}", Strings.toString(builder));
+        }
+        currentState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+
+        // Update the policies
+        registry.update(currentState);
+
+        registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName());
+        shrinkStep = registeredStepsForPolicy.entrySet().stream()
+            .filter(e -> e.getKey().getPhase().equals("warm") && e.getKey().getName().equals("shrink"))
+            .findFirst().get().getValue();
+        gotStep = registry.getStep(metaData.index(index), shrinkStep.getKey());
+        assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(2));
+        assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1));
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java
new file mode 100644
index 0000000000000..85084223481c3
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/RandomStepInfo.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+public class RandomStepInfo implements ToXContentObject {
+
+    private final String key;
+    private final String value;
+
+    public RandomStepInfo(Supplier<String> randomStringSupplier) {
+        this.key = randomStringSupplier.get();
+        this.value = randomStringSupplier.get();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(key, value);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(key, value);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        RandomStepInfo other = (RandomStepInfo) obj;
+        return Objects.equals(key, other.key) && Objects.equals(value, other.value);
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java
new file mode 100644
index 0000000000000..a8b16d3ecfdf9
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/SetStepInfoUpdateTaskTests.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
+import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings;
+import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
+import org.junit.Before;
+
+import java.io.IOException;
+
+import static org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+public class SetStepInfoUpdateTaskTests extends ESTestCase {
+
+    String policy;
+    ClusterState clusterState;
+    Index index;
+
+    @Before
+    public void setupClusterState() {
+        policy = randomAlphaOfLength(10);
+        IndexMetaData indexMetadata = IndexMetaData.builder(randomAlphaOfLength(5))
+            .settings(settings(Version.CURRENT)
+                .put(LifecycleSettings.LIFECYCLE_NAME, policy))
+            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
+        index = indexMetadata.getIndex();
+        MetaData metaData = MetaData.builder()
+            .persistentSettings(settings(Version.CURRENT).build())
+            .put(IndexMetaData.builder(indexMetadata))
+            .build();
+        clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
+    }
+
+    public void testExecuteSuccessfullySet() throws IOException {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        ToXContentObject stepInfo = getRandomStepInfo();
+        setStateToKey(currentStepKey);
+
+        SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo);
+        ClusterState newState = task.execute(clusterState);
+        LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(newState.getMetaData().index(index));
+        StepKey actualKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState);
+        assertThat(actualKey, equalTo(currentStepKey));
+        assertThat(lifecycleState.getPhaseTime(), nullValue());
+        assertThat(lifecycleState.getActionTime(), nullValue());
+        assertThat(lifecycleState.getStepTime(), nullValue());
+
+        XContentBuilder infoXContentBuilder = JsonXContent.contentBuilder();
+        stepInfo.toXContent(infoXContentBuilder, ToXContent.EMPTY_PARAMS);
+        String expectedCauseValue = BytesReference.bytes(infoXContentBuilder).utf8ToString();
+        assertThat(lifecycleState.getStepInfo(), equalTo(expectedCauseValue));
+    }
+
+    private ToXContentObject getRandomStepInfo() {
+        String key = randomAlphaOfLength(20);
+        String value = randomAlphaOfLength(20);
+        return (b, p) -> {
+            b.startObject();
+            b.field(key, value);
+            b.endObject();
+            return b;
+        };
+    }
+
+    public void testExecuteNoopDifferentStep() throws IOException {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        StepKey notCurrentStepKey = new StepKey("not-current", "not-current", "not-current");
+        ToXContentObject stepInfo = getRandomStepInfo();
+        setStateToKey(notCurrentStepKey);
+        SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo);
+        ClusterState newState = task.execute(clusterState);
+        assertThat(newState, sameInstance(clusterState));
+    }
+
+    public void testExecuteNoopDifferentPolicy() throws IOException {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        ToXContentObject stepInfo = getRandomStepInfo();
+        setStateToKey(currentStepKey);
+        setStatePolicy("not-" + policy);
+        SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo);
+        ClusterState newState = task.execute(clusterState);
+        assertThat(newState, sameInstance(clusterState));
+    }
+
+    public void testOnFailure() {
+        StepKey currentStepKey = new StepKey("current-phase", "current-action", "current-name");
+        ToXContentObject stepInfo = getRandomStepInfo();
+
+        setStateToKey(currentStepKey);
+
+        SetStepInfoUpdateTask task = new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo);
+        Exception expectedException = new RuntimeException();
+        ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+            () -> task.onFailure(randomAlphaOfLength(10), expectedException));
+        assertEquals("policy [" + policy + "] for index [" + index.getName() + "] failed trying to set step info for step ["
+            + currentStepKey + "].", exception.getMessage());
+        assertSame(expectedException, exception.getCause());
+    }
+
+    private void setStatePolicy(String policy) {
+        clusterState = ClusterState.builder(clusterState)
+            .metaData(MetaData.builder(clusterState.metaData())
+                .updateSettings(Settings.builder()
+                    .put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), index.getName())).build();
+    }
+
+    private void setStateToKey(StepKey stepKey) {
+        LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(
+            LifecycleExecutionState.fromIndexMetadata(clusterState.metaData().index(index)));
+        lifecycleState.setPhase(stepKey.getPhase());
+        lifecycleState.setAction(stepKey.getAction());
+        lifecycleState.setStep(stepKey.getName());
+
+        clusterState = ClusterState.builder(clusterState)
+            .metaData(MetaData.builder(clusterState.getMetaData())
+                .put(IndexMetaData.builder(clusterState.getMetaData().index(index))
+                    .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()))).build();
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java
new file mode 100644
index 0000000000000..f34a2aa458d16
--- /dev/null
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeValueScheduleTests.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.indexlifecycle;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+
+import java.util.concurrent.TimeUnit;
+
+public class TimeValueScheduleTests extends ESTestCase {
+
+    public TimeValueSchedule createRandomInstance() {
+        return new TimeValueSchedule(createRandomTimeValue());
+    }
+
+    private TimeValue createRandomTimeValue() {
+        return new TimeValue(randomLongBetween(1, 10000), randomFrom(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS));
+    }
+
+    public void testHashcodeAndEquals() {
+        for (int i = 0; i < 20; i++) {
+            EqualsHashCodeTestUtils.checkEqualsAndHashCode(createRandomInstance(),
+                instance -> new TimeValueSchedule(instance.getInterval()),
+                instance -> new TimeValueSchedule(randomValueOtherThan(instance.getInterval(), () -> createRandomTimeValue())));
+        }
+    }
+
+    public void testNextScheduledTimeFirstTriggerNotReached() {
+        long start = randomNonNegativeLong();
+        TimeValue interval = createRandomTimeValue();
+        long triggerTime = start + interval.millis();
+        long now = start + randomLongBetween(0, interval.millis() - 1);
+        TimeValueSchedule schedule = new TimeValueSchedule(interval);
+        assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now));
+    }
+
+    public void testNextScheduledTimeAtFirstInterval() {
+        long start = randomNonNegativeLong();
+        TimeValue interval = createRandomTimeValue();
+        long triggerTime = start + 2 * interval.millis();
+        long now = start + interval.millis();
+        TimeValueSchedule schedule = new TimeValueSchedule(interval);
+        assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now));
+    }
+
+    public void testNextScheduledTimeAtStartTime() {
+        long start = randomNonNegativeLong();
+        TimeValue interval = createRandomTimeValue();
+        long triggerTime = start + interval.millis();
+        TimeValueSchedule schedule = new TimeValueSchedule(interval);
+        assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, start));
+    }
+
+    public void testNextScheduledTimeAfterFirstTrigger() {
+        long start = randomNonNegativeLong();
+        TimeValue interval = createRandomTimeValue();
+        long numberIntervalsPassed = randomLongBetween(0, 10000);
+        long triggerTime = start + (numberIntervalsPassed + 1) * interval.millis();
+        long now = start
+            + randomLongBetween(numberIntervalsPassed * interval.millis(), (numberIntervalsPassed + 1) * interval.millis() - 1);
+        TimeValueSchedule schedule = new TimeValueSchedule(interval);
+        assertEquals(triggerTime, schedule.nextScheduledTimeAfter(start, now));
+    }
+
+    public void testInvalidInterval() {
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new TimeValueSchedule(new TimeValue(0)));
+        assertEquals("interval must be greater than 0 milliseconds", exception.getMessage());
+    }
+}
diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java
index eddedda59b3e8..ec4b96cb52bea 100644
--- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java
+++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.logstash;
 
+import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
 import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -59,7 +59,7 @@ public Collection<Module> createGuiceModules() {
     public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
         return templates -> {
             TemplateUtils.loadTemplateIntoMap("/" + LOGSTASH_TEMPLATE_NAME + ".json", templates, LOGSTASH_TEMPLATE_NAME,
-                Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN, Loggers.getLogger(Logstash.class));
+                Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN, LogManager.getLogger(Logstash.class));
             return templates;
         };
     }
diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle
index cc5a2cd68dde5..b47016c134459 100644
--- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle
+++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle
@@ -7,6 +7,7 @@ dependencies {
 }
 
 integTestCluster {
+  setting 'xpack.ilm.enabled', 'false'
   setting 'xpack.security.enabled', 'false'
   setting 'xpack.monitoring.enabled', 'false'
   setting 'xpack.watcher.enabled', 'false'
diff --git a/x-pack/plugin/ml/qa/build.gradle b/x-pack/plugin/ml/qa/build.gradle
index 5b3dcd7c8508a..35bd236df5c23 100644
--- a/x-pack/plugin/ml/qa/build.gradle
+++ b/x-pack/plugin/ml/qa/build.gradle
@@ -15,16 +15,3 @@ subprojects {
     }
   }
 }
-
-gradle.projectsEvaluated {
-  subprojects {
-    Task assemble = project.tasks.findByName('assemble')
-    if (assemble) {
-      assemble.enabled = false
-    }
-    Task dependenciesInfo = project.tasks.findByName('dependenciesInfo')
-    if (dependenciesInfo) {
-      dependenciesInfo.enabled = false
-    }
-  }
-}
diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle
index a24036651d504..2aa5d47acef0d 100644
--- a/x-pack/plugin/ml/qa/disabled/build.gradle
+++ b/x-pack/plugin/ml/qa/disabled/build.gradle
@@ -7,6 +7,7 @@ dependencies {
 }
 
 integTestCluster {
+  setting 'xpack.ilm.enabled', 'false'
   setting 'xpack.security.enabled', 'false'
   setting 'xpack.ml.enabled', 'false'
   numNodes = 1
diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
index 33a611fcb57e2..9c19ffe639f7d 100644
--- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java
@@ -5,11 +5,11 @@
  */
 package org.elasticsearch.xpack.ml.integration;
 
+import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.xpack.ml.MachineLearning;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
@@ -204,7 +204,7 @@ public void testCategorizationPerformance() {
         flushJob(jobId, false);
 
         long duration = System.currentTimeMillis() - startTime;
-        Loggers.getLogger(CategorizationIT.class).info("Performance test with tokenization in " +
+        LogManager.getLogger(CategorizationIT.class).info("Performance test with tokenization in " +
                 (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA ? "Java" : "C++") + " took " + duration + "ms");
     }
 
diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
index 5772d0be42853..9825189ba64cd 100644
--- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner;
 import org.elasticsearch.xpack.core.ml.notifications.AuditorField;
+import org.elasticsearch.xpack.core.rollup.job.RollupJob;
 import org.elasticsearch.xpack.ml.MachineLearning;
 import org.junit.After;
 import org.junit.Before;
@@ -27,6 +28,7 @@
 import java.util.Date;
 import java.util.List;
 import java.util.Locale;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
@@ -63,6 +65,16 @@ private void setupDataAccessRole(String index) throws IOException {
         client().performRequest(request);
     }
 
+    private void setupFullAccessRole(String index) throws IOException {
+        Request request = new Request("PUT", "/_xpack/security/role/test_data_access");
+        request.setJsonEntity("{" +
+            "  \"indices\" : [" +
+            "    { \"names\": [\"" + index + "\"], \"privileges\": [\"all\"] }" +
+            "  ]" +
+            "}");
+        client().performRequest(request);
+    }
+
     private void setupUser(String user, List<String> roles) throws IOException {
         String password = new String(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING.getChars());
 
@@ -359,7 +371,75 @@ public void testInsufficientSearchPrivilegesOnPut() throws Exception {
         assertThat(e.getMessage(), containsString("Cannot create datafeed"));
         assertThat(e.getMessage(),
-            containsString("user ml_admin lacks permissions on the indices to be searched"));
+            containsString("user ml_admin lacks permissions on the indices"));
+    }
+
+    public void testInsufficientSearchPrivilegesOnPutWithRollup() throws Exception {
+        setupDataAccessRole("airline-data-aggs-rollup");
+        String jobId = "privs-put-job-rollup";
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n" +
+            "  \"description\": \"Aggs job\",\n" +
+            "  \"analysis_config\": {\n" +
+            "    \"bucket_span\": \"1h\",\n" +
+            "    \"summary_count_field_name\": \"doc_count\",\n" +
+            "    \"detectors\": [\n" +
+            "      {\n" +
+            "        \"function\": \"mean\",\n" +
+            "        \"field_name\": \"responsetime\",\n" +
+            "        \"by_field_name\": \"airline\"\n" +
+            "      }\n" +
+            "    ]\n" +
+            "  },\n" +
+            "  \"data_description\": {\"time_field\": \"time stamp\"}\n" +
+            "}");
+        client().performRequest(createJobRequest);
+
+        String rollupJobId = "rollup-" + jobId;
+        Request createRollupRequest = new Request("PUT", "/_xpack/rollup/job/" + rollupJobId);
+        createRollupRequest.setJsonEntity("{\n" +
+            "\"index_pattern\": \"airline-data-aggs\",\n" +
+            "  \"rollup_index\": \"airline-data-aggs-rollup\",\n" +
+            "  \"cron\": \"*/30 * * * * ?\",\n" +
+            "  \"page_size\" :1000,\n" +
+            "  \"groups\" : {\n" +
+            "    \"date_histogram\": {\n" +
+            "      \"field\": \"time stamp\",\n" +
+            "      \"interval\": \"2m\",\n" +
+            "      \"delay\": \"7d\"\n" +
+            "    },\n" +
+            "    \"terms\": {\n" +
+            "      \"fields\": [\"airline\"]\n" +
+            "    }" +
+            "  },\n" +
+            "  \"metrics\": [\n" +
+            "    {\n" +
+            "      \"field\": \"responsetime\",\n" +
+            "      \"metrics\": [\"avg\",\"min\",\"max\",\"sum\"]\n" +
+            "    },\n" +
+            "    {\n" +
+            "      \"field\": \"time stamp\",\n" +
+            "      \"metrics\": [\"min\",\"max\"]\n" +
+            "    }\n" +
+            "  ]\n" +
+            "}");
+        client().performRequest(createRollupRequest);
+
+        String datafeedId = "datafeed-" + jobId;
+        String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," +
+            "\"aggregations\":{" +
+            "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," +
+            "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}";
+
+
+        ResponseException e = expectThrows(ResponseException.class, () ->
+            new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "doc")
+                .setAggregations(aggregations)
+                .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) //want to search, but no admin access
+                .build());
+        assertThat(e.getMessage(), containsString("Cannot create datafeed"));
+        assertThat(e.getMessage(),
+            containsString("user ml_admin_plus_data lacks permissions on the indices"));
     }
 
     public void testInsufficientSearchPrivilegesOnPreview() throws Exception {
@@ -615,7 +695,7 @@ public void testLookbackWithoutPermissions() throws Exception {
         // There should be a notification saying that there was a problem extracting data
         client().performRequest(new Request("POST", "/_refresh"));
         Response notificationsResponse = client().performRequest(
-            new Request("GET", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId));
+            new Request("GET", AuditorField.NOTIFICATIONS_INDEX + "/_search?size=1000&q=job_id:" + jobId));
         String notificationsResponseAsString = EntityUtils.toString(notificationsResponse.getEntity());
         assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: " +
             "action [indices:data/read/search] is unauthorized for user [ml_admin_plus_data]\""));
@@ -663,6 +743,171 @@ public void testLookbackWithPipelineBucketAgg() throws Exception {
         assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0"));
     }
 
+    public void testLookbackOnlyGivenAggregationsWithHistogramAndRollupIndex() throws Exception {
+        String jobId = "aggs-histogram-rollup-job";
+        Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        createJobRequest.setJsonEntity("{\n" +
+            "  \"description\": \"Aggs job\",\n" +
+            "  \"analysis_config\": {\n" +
+            "    \"bucket_span\": \"1h\",\n" +
+            "    \"summary_count_field_name\": \"doc_count\",\n" +
+            "    \"detectors\": [\n" +
+            "      {\n" +
+            "        \"function\": \"mean\",\n" +
+            "        \"field_name\": \"responsetime\",\n" +
+            "        \"by_field_name\": \"airline\"\n" +
+            "      }\n" +
+            "    ]\n" +
+            "  },\n" +
+            "  \"data_description\": {\"time_field\": \"time stamp\"}\n" +
+            "}");
+        client().performRequest(createJobRequest);
+
+        String rollupJobId = "rollup-" + jobId;
+        Request createRollupRequest = new Request("PUT", "/_xpack/rollup/job/" + rollupJobId);
+        createRollupRequest.setJsonEntity("{\n" +
+            "\"index_pattern\": \"airline-data-aggs\",\n" +
+            "  \"rollup_index\": \"airline-data-aggs-rollup\",\n" +
+            "  \"cron\": \"*/30 * * * * ?\",\n" +
+            "  \"page_size\" :1000,\n" +
+            "  \"groups\" : {\n" +
+            "    \"date_histogram\": {\n" +
+            "      \"field\": \"time stamp\",\n" +
+            "      \"interval\": \"2m\",\n" +
+            "      \"delay\": \"7d\"\n" +
+            "    },\n" +
+            "    \"terms\": {\n" +
+            "      \"fields\": [\"airline\"]\n" +
+            "    }" +
+            "
},\n" + + " \"metrics\": [\n" + + " {\n" + + " \"field\": \"responsetime\",\n" + + " \"metrics\": [\"avg\",\"min\",\"max\",\"sum\"]\n" + + " },\n" + + " {\n" + + " \"field\": \"time stamp\",\n" + + " \"metrics\": [\"min\",\"max\"]\n" + + " }\n" + + " ]\n" + + "}"); + client().performRequest(createRollupRequest); + client().performRequest(new Request("POST", "/_xpack/rollup/job/" + rollupJobId + "/_start")); + + assertBusy(() -> { + Response getRollup = client().performRequest(new Request("GET", "/_xpack/rollup/job/" + rollupJobId)); + String body = EntityUtils.toString(getRollup.getEntity()); + assertThat(body, containsString("\"job_state\":\"started\"")); + assertThat(body, containsString("\"rollups_indexed\":4")); + }, 60, TimeUnit.SECONDS); + + client().performRequest(new Request("POST", "/_xpack/rollup/job/" + rollupJobId + "/_stop")); + assertBusy(() -> { + Response getRollup = client().performRequest(new Request("GET", "/_xpack/rollup/job/" + rollupJobId)); + assertThat(EntityUtils.toString(getRollup.getEntity()), containsString("\"job_state\":\"stopped\"")); + }, 60, TimeUnit.SECONDS); + + final Request refreshRollupIndex = new Request("POST", "airline-data-aggs-rollup/_refresh"); + client().performRequest(refreshRollupIndex); + + String datafeedId = "datafeed-" + jobId; + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + + "\"aggregations\":{" + + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "response").setAggregations(aggregations).build(); + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest(new Request("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + String jobStatsResponseAsString = EntityUtils.toString(jobStatsResponse.getEntity()); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); + } + + public void testLookbackWithoutPermissionsAndRollup() throws Exception { + setupFullAccessRole("airline-data-aggs-rollup"); + String jobId = "rollup-permission-test-network-job"; + Request createJobRequest = new Request("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + createJobRequest.setJsonEntity("{\n" + + " \"description\": \"Aggs job\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"summary_count_field_name\": \"doc_count\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"mean\",\n" + + " \"field_name\": \"responsetime\",\n" + + " \"by_field_name\": \"airline\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\"time_field\": \"time stamp\"}\n" + + "}"); + client().performRequest(createJobRequest); + + String rollupJobId = "rollup-" + jobId; + Request createRollupRequest = new Request("PUT", "/_xpack/rollup/job/" + rollupJobId); + createRollupRequest.setJsonEntity("{\n" + + "\"index_pattern\": \"airline-data-aggs\",\n" + + " \"rollup_index\": \"airline-data-aggs-rollup\",\n" + + " \"cron\": \"*/30 * * * * ?\",\n" + + " \"page_size\" :1000,\n" + + " \"groups\" : {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time stamp\",\n" + + " \"interval\": \"2m\",\n" + + " \"delay\": \"7d\"\n" + + " },\n" + + " \"terms\": {\n" + + " \"fields\": 
[\"airline\"]\n" + + " }" + + " },\n" + + " \"metrics\": [\n" + + " {\n" + + " \"field\": \"responsetime\",\n" + + " \"metrics\": [\"avg\",\"min\",\"max\",\"sum\"]\n" + + " },\n" + + " {\n" + + " \"field\": \"time stamp\",\n" + + " \"metrics\": [\"min\",\"max\"]\n" + + " }\n" + + " ]\n" + + "}"); + client().performRequest(createRollupRequest); + + String datafeedId = "datafeed-" + jobId; + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + + "\"aggregations\":{" + + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; + + + // At the time we create the datafeed the user can access the network-data index that we have access to + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "doc") + .setAggregations(aggregations) + .setChunkingTimespan("300s") + .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) + .build(); + + // Change the role so that the user can no longer access network-data + setupFullAccessRole("some-other-data"); + + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS); + waitUntilJobIsClosed(jobId); + // There should be a notification saying that there was a problem extracting data + client().performRequest(new Request("POST", "/_refresh")); + Response notificationsResponse = client().performRequest( + new Request("GET", AuditorField.NOTIFICATIONS_INDEX + "/_search?size=1000&q=job_id:" + jobId)); + String notificationsResponseAsString = EntityUtils.toString(notificationsResponse.getEntity()); + assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: " + + "action [indices:admin/xpack/rollup/search] is unauthorized for user [ml_admin_plus_data]\"")); + } + public void testRealtime() throws Exception { String jobId = "job-realtime-1"; createJob(jobId, "airline"); @@ -882,7 +1127,8 @@ public static void openJob(RestClient client, String jobId) throws IOException { @After public void clearMlState() throws Exception { new MlRestTestStateCleaner(logger, adminClient()).clearMlMetadata(); - ESRestTestCase.waitForPendingTasks(adminClient()); + // Don't check rollup jobs because we clear them in the superclass. + waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(RollupJob.NAME)); } private static class DatafeedBuilder { diff --git a/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java b/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java index 6701a27a361ff..5127b36b00701 100644 --- a/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java +++ b/x-pack/plugin/ml/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java @@ -321,6 +321,9 @@ public void testOpenForOutput() throws IOException, InterruptedException { OutputStream os = NAMED_PIPE_HELPER.openNamedPipeOutputStream(pipeName, Duration.ofSeconds(10)); assertNotNull(os); + // In some rare cases writer can close before the reader has had a chance + // to read what is written. 
On Windows this can cause ConnectNamedPipe to + // error with ERROR_NO_DATA try (OutputStreamWriter writer = new OutputStreamWriter(os, StandardCharsets.UTF_8)) { writer.write(GOODBYE_WORLD); writer.write('\n'); diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index 88ca4dd118ea4..f856c3d4c5ff4 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -7,6 +7,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java index 7c9f163a2250d..21e06362a70a3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; @@ -20,9 +19,8 @@ public class InvalidLicenseEnforcer extends AbstractComponent { private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; - InvalidLicenseEnforcer(Settings settings, XPackLicenseState licenseState, ThreadPool threadPool, + InvalidLicenseEnforcer(XPackLicenseState licenseState, ThreadPool threadPool, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager) { - super(settings); this.threadPool = threadPool; this.licenseState = licenseState; this.datafeedManager = datafeedManager; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 2e90e678351c4..086754054b494 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; @@ -22,7 +23,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -264,7 +264,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MAX_LAZY_ML_NODES = Setting.intSetting("xpack.ml.max_lazy_ml_nodes", 0, 0, 3, Property.Dynamic, Property.NodeScope); - private static final Logger logger = Loggers.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(XPackPlugin.class); private final Settings settings; private final Environment env; @@ -363,13 +363,13 @@ public Collection createComponents(Client client, 
ClusterService cluster return emptyList(); } - Auditor auditor = new Auditor(client, clusterService.nodeName()); + Auditor auditor = new Auditor(client, clusterService.getNodeName()); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); - UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(settings, client, clusterService, threadPool); + UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(client, clusterService, threadPool); JobManager jobManager = new JobManager(env, settings, jobResultsProvider, clusterService, auditor, client, notifier); - JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(settings, client); - JobResultsPersister jobResultsPersister = new JobResultsPersister(settings, client); + JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); + JobResultsPersister jobResultsPersister = new JobResultsPersister(client); AutodetectProcessFactory autodetectProcessFactory; NormalizerProcessFactory normalizerProcessFactory; @@ -412,7 +412,7 @@ public Collection createComponents(Client client, ClusterService cluster autodetectProcessManager); // This object's constructor attaches to the license state, so there's no need to retain another reference to it - new InvalidLicenseEnforcer(settings, getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); // run node startup tasks autodetectProcessManager.onNodeStartup(); @@ -422,11 +422,11 @@ public Collection createComponents(Client client, ClusterService cluster jobResultsProvider, jobManager, autodetectProcessManager, - new MlInitializationService(settings, threadPool, clusterService, client), + new MlInitializationService(threadPool, clusterService, client), jobDataCountsPersister, datafeedManager, auditor, - new MlAssignmentNotifier(settings, auditor, clusterService) + new MlAssignmentNotifier(auditor, clusterService) ); } @@ -438,7 +438,7 @@ public List> getPersistentTasksExecutor(ClusterServic return Arrays.asList( new TransportOpenJobAction.OpenJobPersistentTasksExecutor(settings, clusterService, autodetectProcessManager.get()), - new TransportStartDatafeedAction.StartDatafeedPersistentTasksExecutor(settings, datafeedManager.get()) + new TransportStartDatafeedAction.StartDatafeedPersistentTasksExecutor(datafeedManager.get()) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index d9b8ea7cd4226..e499f554641ce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.Constants; import org.apache.lucene.util.Counter; import org.elasticsearch.ElasticsearchException; @@ -16,7 +17,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Platforms; @@ -82,7 +82,7 @@ public MachineLearningFeatureSet(Environment environment, ClusterService 
cluster } } } catch (IOException | TimeoutException e) { - Loggers.getLogger(MachineLearningFeatureSet.class).error("Cannot get native code info for Machine Learning", e); + LogManager.getLogger(MachineLearningFeatureSet.class).error("Cannot get native code info for Machine Learning", e); throw new ElasticsearchException("Cannot communicate with Machine Learning native code"); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 37d714d1777da..1bd4157ed48b4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -32,8 +31,7 @@ public class MlAssignmentNotifier extends AbstractComponent implements ClusterSt private final AtomicBoolean enabled = new AtomicBoolean(false); - MlAssignmentNotifier(Settings settings, Auditor auditor, ClusterService clusterService) { - super(settings); + MlAssignmentNotifier(Auditor auditor, ClusterService clusterService) { this.auditor = auditor; this.clusterService = clusterService; clusterService.addLocalNodeMasterListener(this); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 4d0911f6691e1..190933b1e9316 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.ml; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -32,7 +32,7 @@ */ public class MlDailyMaintenanceService implements Releasable { - private static final Logger LOGGER = Loggers.getLogger(MlDailyMaintenanceService.class); + private static final Logger LOGGER = LogManager.getLogger(MlDailyMaintenanceService.class); private static final int MAX_TIME_OFFSET_MINUTES = 120; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index c96a12ffa1047..016fcd5e9287b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import 
org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; @@ -23,8 +22,7 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL private volatile MlDailyMaintenanceService mlDailyMaintenanceService; - MlInitializationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, Client client) { - super(settings); + MlInitializationService(ThreadPool threadPool, ClusterService clusterService, Client client) { this.threadPool = threadPool; this.clusterService = clusterService; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index efc0517900ec4..261534556968a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -28,7 +28,6 @@ public MlLifeCycleService(Environment environment, ClusterService clusterService public MlLifeCycleService(Environment environment, ClusterService clusterService, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager) { - super(environment.settings()); this.environment = environment; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 229abc3843eb1..95000665bccad 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -19,10 +19,11 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -37,8 +38,6 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.notifications.Auditor; @@ -65,11 +64,11 @@ public class TransportCloseJobAction extends TransportTasksAction) DeleteCalendarAction.Request::new); this.client = client; this.jobManager = jobManager; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java index 854ff19e16f41..c1c2dbfe64703 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -41,10 +40,9 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction listener) { - Auditor auditor = new Auditor(client, clusterService.nodeName()); + Auditor auditor = new Auditor(client, clusterService.getNodeName()); List dataRemovers = Arrays.asList( new ExpiredResultsRemover(client, clusterService, auditor), new ExpiredForecastsRemover(client, threadPool), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index 54acdf3712c7c..75f4bdf85f0ec 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -45,9 +44,9 @@ public class TransportDeleteFilterAction extends HandledTransportAction) DeleteFilterAction.Request::new); this.clusterService = clusterService; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java index e91f75964fca2..2575e71444447 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -73,8 +72,8 @@ public class TransportDeleteForecastAction extends HandledTransportAction>> listenersByJobId; @Inject - public TransportDeleteJobAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportDeleteJobAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, PersistentTasksService persistentTasksService, Client client, Auditor auditor, JobResultsProvider jobResultsProvider) { - super(settings, DeleteJobAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(DeleteJobAction.NAME, transportService, 
clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteJobAction.Request::new); this.client = client; this.persistentTasksService = persistentTasksService; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java index 6d0721b03d972..ac0cbd6ce0478 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteModelSnapshotAction.java @@ -14,15 +14,14 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.JobManager; +import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.notifications.Auditor; @@ -38,11 +37,10 @@ public class TransportDeleteModelSnapshotAction extends HandledTransportAction { @Inject - public TransportFinalizeJobExecutionAction(Settings settings, TransportService transportService, - ClusterService clusterService, ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, FinalizeJobExecutionAction.NAME, transportService, clusterService, threadPool, actionFilters, + public TransportFinalizeJobExecutionAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(FinalizeJobExecutionAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, FinalizeJobExecutionAction.Request::new); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java index 0906af9a80d46..41b9cab23e1ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -25,9 +24,8 @@ public class TransportFindFileStructureAction private final ThreadPool threadPool; @Inject - public TransportFindFileStructureAction(Settings settings, TransportService transportService, ActionFilters actionFilters, - ThreadPool threadPool) { - 
super(settings, FindFileStructureAction.NAME, transportService, actionFilters, FindFileStructureAction.Request::new); + public TransportFindFileStructureAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool) { + super(FindFileStructureAction.NAME, transportService, actionFilters, FindFileStructureAction.Request::new); this.threadPool = threadPool; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFlushJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFlushJobAction.java index f5aa98bc36147..74f84d86a48e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFlushJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFlushJobAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.FlushJobAction; @@ -23,10 +22,9 @@ public class TransportFlushJobAction extends TransportJobTaskAction { @Inject - public TransportFlushJobAction(Settings settings, TransportService transportService, - ClusterService clusterService, ActionFilters actionFilters, + public TransportFlushJobAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, AutodetectProcessManager processManager) { - super(settings, FlushJobAction.NAME, clusterService, transportService, actionFilters, + super(FlushJobAction.NAME, clusterService, transportService, actionFilters, FlushJobAction.Request::new, FlushJobAction.Response::new, ThreadPool.Names.SAME, processManager); // ThreadPool.Names.SAME, because operations is executed by autodetect worker thread } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index ad9e6a7c2630a..c6f560997e3c0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -42,10 +41,9 @@ public class TransportForecastJobAction extends TransportJobTaskAction) GetBucketsAction.Request::new); + super(GetBucketsAction.NAME, transportService, actionFilters, (Supplier) GetBucketsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.jobManager = jobManager; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index 96ba9e6fbbebf..9a4dc49521b6f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -35,10 +34,9 @@ public class TransportGetCalendarEventsAction extends HandledTransportAction) GetCalendarEventsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.clusterService = clusterService; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java index e9a9cd06d92c5..5406a06fd25bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarsAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; @@ -26,9 +25,9 @@ public class TransportGetCalendarsAction extends HandledTransportAction) GetCategoriesAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index b4e3851eda820..8a6a9cb8f810b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -14,11 +14,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -29,10 +28,10 @@ public class TransportGetDatafeedsAction extends TransportMasterNodeReadAction { @Inject - public TransportGetDatafeedsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetDatafeedsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetDatafeedsAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetDatafeedsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetDatafeedsAction.Request::new, 
indexNameExpressionResolver); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 1f7e55fc488a6..28192f58e6583 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; import java.util.Set; @@ -34,10 +33,10 @@ public class TransportGetDatafeedsStatsAction extends TransportMasterNodeReadAct GetDatafeedsStatsAction.Response> { @Inject - public TransportGetDatafeedsStatsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetDatafeedsStatsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetDatafeedsStatsAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetDatafeedsStatsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetDatafeedsStatsAction.Request::new, indexNameExpressionResolver); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java index 55fbdfa0f55db..2ff18b689a7da 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -49,10 +48,8 @@ public class TransportGetFiltersAction extends HandledTransportAction) GetInfluencersAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java index 0932d19723237..478c364b5263a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsAction.java @@ -14,24 
+14,23 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; -import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.ml.job.JobManager; public class TransportGetJobsAction extends TransportMasterNodeReadAction { private final JobManager jobManager; @Inject - public TransportGetJobsAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportGetJobsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager) { - super(settings, GetJobsAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(GetJobsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetJobsAction.Request::new, indexNameExpressionResolver); this.jobManager = jobManager; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 7217fcc6ec9a7..83fce9326b028 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -55,12 +54,10 @@ public class TransportGetJobsStatsAction extends TransportTasksAction(Collections.singletonList(jobStats), 1, Job.RESULTS_FIELD)); }, listener::onFailure); - + } else { listener.onResponse(new QueryPage<>(Collections.emptyList(), 0, Job.RESULTS_FIELD)); } @@ -160,7 +157,7 @@ void gatherStatsForClosedJobs(MlMetadata mlMetadata, GetJobsStatsAction.Request void gatherForecastStats(String jobId, Consumer handler, Consumer errorHandler) { jobResultsProvider.getForecastStats(jobId, handler, errorHandler); } - + void gatherDataCountsAndModelSizeStats(String jobId, BiConsumer handler, Consumer errorHandler) { jobResultsProvider.dataCounts(jobId, dataCounts -> { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java index 7a0e0b1c4deb1..590cafabd8ebe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetModelSnapshotsAction.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; @@ -27,10 +26,9 @@ public class TransportGetModelSnapshotsAction extends HandledTransportAction) GetOverallBucketsAction.Request::new); this.threadPool = threadPool; this.clusterService = clusterService; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java index 15a78efd9fda2..c17577cbaa6a2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetRecordsAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; @@ -27,11 +26,9 @@ public class TransportGetRecordsAction extends HandledTransportAction) GetRecordsAction.Request::new); + super(GetRecordsAction.NAME, transportService, actionFilters, (Supplier) GetRecordsAction.Request::new); this.jobResultsProvider = jobResultsProvider; this.jobManager = jobManager; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java index 252cf97d0c519..5ff43c6dc7e84 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -30,11 +29,9 @@ public class TransportIsolateDatafeedAction extends TransportTasksAction { @Inject - public TransportIsolateDatafeedAction(Settings settings, TransportService transportService, - ActionFilters actionFilters, ClusterService clusterService) { - super(settings, IsolateDatafeedAction.NAME, clusterService, transportService, actionFilters, - IsolateDatafeedAction.Request::new, IsolateDatafeedAction.Response::new, - 
MachineLearning.UTILITY_THREAD_POOL_NAME); + public TransportIsolateDatafeedAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService) { + super(IsolateDatafeedAction.NAME, clusterService, transportService, actionFilters, IsolateDatafeedAction.Request::new, + IsolateDatafeedAction.Response::new, MachineLearning.UTILITY_THREAD_POOL_NAME); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java index 07bb6152e8c1d..f610e88adcefc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -38,11 +37,11 @@ public abstract class TransportJobTaskAction requestSupplier, Supplier responseSupplier, String nodeExecutor, AutodetectProcessManager processManager) { - super(settings, actionName, clusterService, transportService, actionFilters, + super(actionName, clusterService, transportService, actionFilters, requestSupplier, responseSupplier, nodeExecutor); this.processManager = processManager; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java index a9b43c3bcc47d..52899a90ade7a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -31,11 +30,10 @@ public class TransportKillProcessAction extends TransportJobTaskAction) MlInfoAction.Request::new); + public TransportMlInfoAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService) { + super(MlInfoAction.NAME, transportService, actionFilters, (Supplier) MlInfoAction.Request::new); this.clusterService = clusterService; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index b620816cc8252..a3128f507d09e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import 
org.elasticsearch.ElasticsearchException; @@ -34,7 +35,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -104,12 +104,12 @@ public class TransportOpenJobAction extends TransportMasterNodeAction { @Inject - public TransportPersistJobAction(Settings settings, TransportService transportService, - ClusterService clusterService, ActionFilters actionFilters, AutodetectProcessManager processManager) { - super(settings, PersistJobAction.NAME, clusterService, transportService, actionFilters, + public TransportPersistJobAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, + AutodetectProcessManager processManager) { + super(PersistJobAction.NAME, clusterService, transportService, actionFilters, PersistJobAction.Request::new, PersistJobAction.Response::new, ThreadPool.Names.SAME, processManager); // ThreadPool.Names.SAME, because operations is executed by autodetect worker thread } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java index 7284a490eaa8f..fd9a22e74dddf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -44,11 +43,9 @@ public class TransportPostCalendarEventsAction extends HandledTransportAction) PreviewDatafeedAction.Request::new); this.threadPool = threadPool; this.client = client; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 7611a27cd5a1d..7f21bc562649d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -39,9 +38,8 @@ public class TransportPutCalendarAction extends HandledTransportAction) PutCalendarAction.Request::new); this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 60b8235ec84b7..4aabcb6114edd 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; @@ -32,6 +33,8 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; +import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; +import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; @@ -42,6 +45,9 @@ import java.io.IOException; import java.util.Map; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + public class TransportPutDatafeedAction extends TransportMasterNodeAction { private final XPackLicenseState licenseState; @@ -54,8 +60,8 @@ public TransportPutDatafeedAction(Settings settings, TransportService transportS ClusterService clusterService, ThreadPool threadPool, Client client, XPackLicenseState licenseState, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PutDatafeedAction.NAME, transportService, clusterService, threadPool, - actionFilters, indexNameExpressionResolver, PutDatafeedAction.Request::new); + super(PutDatafeedAction.NAME, transportService, clusterService, threadPool, + actionFilters, indexNameExpressionResolver, PutDatafeedAction.Request::new); this.licenseState = licenseState; this.client = client; this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? @@ -78,23 +84,48 @@ protected void masterOperation(PutDatafeedAction.Request request, ClusterState s // If security is enabled only create the datafeed if the user requesting creation has // permission to read the indices the datafeed is going to read from if (licenseState.isAuthAllowed()) { - final String username = securityContext.getUser().principal(); - ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, request, r, listener), - listener::onFailure); - HasPrivilegesRequest privRequest = new HasPrivilegesRequest(); + final String[] indices = request.getDatafeed().getIndices().toArray(new String[0]); + + final String username = securityContext.getUser().principal(); + final HasPrivilegesRequest privRequest = new HasPrivilegesRequest(); + privRequest.applicationPrivileges(new RoleDescriptor.ApplicationResourcePrivileges[0]); privRequest.username(username); privRequest.clusterPrivileges(Strings.EMPTY_ARRAY); - // We just check for permission to use the search action. In reality we'll also - // use the scroll action, but that's considered an implementation detail. 
- privRequest.indexPrivileges(RoleDescriptor.IndicesPrivileges.builder() - .indices(request.getDatafeed().getIndices().toArray(new String[0])) - .privileges(SearchAction.NAME) - .build()); - privRequest.applicationPrivileges(new RoleDescriptor.ApplicationResourcePrivileges[0]); - client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); + final RoleDescriptor.IndicesPrivileges.Builder indicesPrivilegesBuilder = RoleDescriptor.IndicesPrivileges.builder() + .indices(indices); + + ActionListener privResponseListener = ActionListener.wrap( + r -> handlePrivsResponse(username, request, r, listener), + listener::onFailure); + + ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap( + response -> { + if (response.getJobs().isEmpty()) { // This means no rollup indexes are in the config + indicesPrivilegesBuilder.privileges(SearchAction.NAME); + } else { + indicesPrivilegesBuilder.privileges(SearchAction.NAME, RollupSearchAction.NAME); + } + privRequest.indexPrivileges(indicesPrivilegesBuilder.build()); + client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); + }, + e -> { + if (e instanceof IndexNotFoundException) { + indicesPrivilegesBuilder.privileges(SearchAction.NAME); + privRequest.indexPrivileges(indicesPrivilegesBuilder.build()); + client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); + } else { + listener.onFailure(e); + } + } + ); + + executeAsyncWithOrigin(client, + ML_ORIGIN, + GetRollupIndexCapsAction.INSTANCE, + new GetRollupIndexCapsAction.Request(indices), + getRollupIndexCapsActionHandler); } else { putDatafeed(request, threadPool.getThreadContext().getHeaders(), listener); } @@ -115,8 +146,7 @@ private void handlePrivsResponse(String username, PutDatafeedAction.Request requ builder.endObject(); listener.onFailure(Exceptions.authorizationError("Cannot create datafeed [{}]" + - " because user {} lacks permissions on the indices to be" + - " searched: {}", + " because user {} lacks permissions on the indices: {}", request.getDatafeed().getId(), username, Strings.toString(builder))); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index 19bf35aaed617..f88eb150ba5af 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -40,9 +39,8 @@ public class TransportPutFilterAction extends HandledTransportAction) PutFilterAction.Request::new); + public TransportPutFilterAction(TransportService transportService, ActionFilters actionFilters, Client client) { + super(PutFilterAction.NAME, transportService, actionFilters, (Supplier) PutFilterAction.Request::new); this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java index eb8fbb86fc9e3..c1244649bffca 100644 
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutJobAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; @@ -32,11 +31,11 @@ public class TransportPutJobAction extends TransportMasterNodeAction { @Inject - public TransportUpdateDatafeedAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportUpdateDatafeedAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, UpdateDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(UpdateDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, UpdateDatafeedAction.Request::new); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index c8dbf9273829f..682016bac4d79 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -54,10 +53,10 @@ public class TransportUpdateFilterAction extends HandledTransportAction) UpdateFilterAction.Request::new); + super(UpdateFilterAction.NAME, transportService, actionFilters, + (Supplier) UpdateFilterAction.Request::new); this.client = client; this.jobManager = jobManager; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java index c3c1a7c44b46d..3e9abf3ae4179 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateJobAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.PutJobAction; @@ -26,10 +25,10 @@ public class TransportUpdateJobAction extends TransportMasterNodeAction { @Inject - public TransportUpdateProcessAction(Settings settings, TransportService transportService, ClusterService clusterService, + public 
TransportUpdateProcessAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, AutodetectProcessManager processManager) { - super(settings, UpdateProcessAction.NAME, clusterService, transportService, actionFilters, + super(UpdateProcessAction.NAME, clusterService, transportService, actionFilters, UpdateProcessAction.Request::new, UpdateProcessAction.Response::new, ThreadPool.Names.SAME, processManager); // ThreadPool.Names.SAME, because the operation is executed by the autodetect worker thread } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java index 71b8ca5d6dc57..197f9e3284823 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateDetectorAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; @@ -20,8 +19,8 @@ public class TransportValidateDetectorAction extends HandledTransportAction { @Inject - public TransportValidateDetectorAction(Settings settings, TransportService transportService, ActionFilters actionFilters) { - super(settings, ValidateDetectorAction.NAME, transportService, actionFilters, + public TransportValidateDetectorAction(TransportService transportService, ActionFilters actionFilters) { + super(ValidateDetectorAction.NAME, transportService, actionFilters, (Supplier<ValidateDetectorAction.Request>) ValidateDetectorAction.Request::new); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java index c0347aa27d1d6..7311ab2502f25 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportValidateJobConfigAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; @@ -20,8 +19,8 @@ public class TransportValidateJobConfigAction extends HandledTransportAction { @Inject - public TransportValidateJobConfigAction(Settings settings, TransportService transportService, ActionFilters actionFilters) { - super(settings, ValidateJobConfigAction.NAME, transportService, actionFilters, + public TransportValidateJobConfigAction(TransportService transportService, ActionFilters actionFilters) { + super(ValidateJobConfigAction.NAME, transportService, actionFilters, (Supplier<ValidateJobConfigAction.Request>) ValidateJobConfigAction.Request::new); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 1fa402f4e2485..54a79ee199ee1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.ml.datafeed; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; @@ -20,10 +20,10 @@ import org.elasticsearch.xpack.core.ml.action.PersistJobAction; import org.elasticsearch.xpack.core.ml.action.PostDataAction; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; -import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; import java.io.ByteArrayOutputStream; @@ -39,7 +39,7 @@ class DatafeedJob { - private static final Logger LOGGER = Loggers.getLogger(DatafeedJob.class); + private static final Logger LOGGER = LogManager.getLogger(DatafeedJob.class); private static final int NEXT_TASK_DELAY_MS = 100; private final Auditor auditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 3d4d66eba92a3..9f4191b38f2de 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -67,7 +66,6 @@ public class DatafeedManager extends AbstractComponent { public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clusterService, DatafeedJobBuilder datafeedJobBuilder, Supplier currentTimeSupplier, Auditor auditor) { - super(Settings.EMPTY); this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); this.threadPool = threadPool; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index ce3f611b2227a..24b108d694a61 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.ml.datafeed; +import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -26,7 +26,7 @@ public class DatafeedNodeSelector { - private static final Logger LOGGER = Loggers.getLogger(DatafeedNodeSelector.class); + private static final Logger LOGGER = LogManager.getLogger(DatafeedNodeSelector.class); private final DatafeedConfig datafeed; private final PersistentTasksCustomMetaData.PersistentTask jobTask; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index 8fd1ced17293a..77e2c695db7d5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -5,14 +5,19 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.AggregationDataExtractorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.chunked.ChunkedDataExtractorFactory; +import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.RollupDataExtractorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.scroll.ScrollDataExtractorFactory; -import org.elasticsearch.xpack.core.ml.job.config.Job; public interface DataExtractorFactory { DataExtractor newExtractor(long start, long end); @@ -22,16 +27,44 @@ public interface DataExtractorFactory { */ static void create(Client client, DatafeedConfig datafeed, Job job, ActionListener listener) { ActionListener factoryHandler = ActionListener.wrap( - factory -> listener.onResponse(datafeed.getChunkingConfig().isEnabled() - ? new ChunkedDataExtractorFactory(client, datafeed, job, factory) : factory) - , listener::onFailure + factory -> listener.onResponse(datafeed.getChunkingConfig().isEnabled() + ? 
new ChunkedDataExtractorFactory(client, datafeed, job, factory) : factory) + , listener::onFailure ); - boolean isScrollSearch = datafeed.hasAggregations() == false; - if (isScrollSearch) { - ScrollDataExtractorFactory.create(client, datafeed, job, factoryHandler); - } else { - factoryHandler.onResponse(new AggregationDataExtractorFactory(client, datafeed, job)); - } + ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap( + response -> { + if (response.getJobs().isEmpty()) { // This means no rollup indexes are in the config + if (datafeed.hasAggregations()) { + factoryHandler.onResponse(new AggregationDataExtractorFactory(client, datafeed, job)); + } else { + ScrollDataExtractorFactory.create(client, datafeed, job, factoryHandler); + } + } else { + if (datafeed.hasAggregations()) { // Rollup indexes require aggregations + RollupDataExtractorFactory.create(client, datafeed, job, response.getJobs(), factoryHandler); + } else { + listener.onFailure(new IllegalArgumentException("Aggregations are required when using Rollup indices")); + } + } + }, + e -> { + if (e instanceof IndexNotFoundException) { + listener.onFailure(new ResourceNotFoundException("datafeed [" + datafeed.getId() + + "] cannot retrieve data because index " + ((IndexNotFoundException)e).getIndex() + " does not exist")); + } else { + listener.onFailure(e); + } + } + ); + + GetRollupIndexCapsAction.Request request = new GetRollupIndexCapsAction.Request(datafeed.getIndices().toArray(new String[0])); + + ClientHelper.executeAsyncWithOrigin( + client, + ClientHelper.ML_ORIGIN, + GetRollupIndexCapsAction.INSTANCE, + request, + getRollupIndexCapsActionHandler); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java new file mode 100644 index 0000000000000..df858f45c825e --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; +import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Abstract class for aggregated data extractors, e.g. 
{@link RollupDataExtractor} + * + * @param <T> The request builder type for getting data from Elasticsearch + */ +abstract class AbstractAggregationDataExtractor<T extends ActionRequestBuilder<SearchRequest, SearchResponse>> + implements DataExtractor { + + private static final Logger LOGGER = LogManager.getLogger(AbstractAggregationDataExtractor.class); + + /** + * The number of key-value pairs written in each batch to process. + * This has to be a number that is small enough to allow for responsive + * cancelling and big enough to not cause overhead by calling the + * post data action too often. The value of 1000 was determined via + * such testing. + */ + private static int BATCH_KEY_VALUE_PAIRS = 1000; + + protected final Client client; + protected final AggregationDataExtractorContext context; + private boolean hasNext; + private boolean isCancelled; + private AggregationToJsonProcessor aggregationToJsonProcessor; + private ByteArrayOutputStream outputStream; + + AbstractAggregationDataExtractor(Client client, AggregationDataExtractorContext dataExtractorContext) { + this.client = Objects.requireNonNull(client); + context = Objects.requireNonNull(dataExtractorContext); + hasNext = true; + isCancelled = false; + outputStream = new ByteArrayOutputStream(); + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public boolean isCancelled() { + return isCancelled; + } + + @Override + public void cancel() { + LOGGER.debug("[{}] Data extractor received cancel request", context.jobId); + isCancelled = true; + hasNext = false; + } + + @Override + public Optional<InputStream> next() throws IOException { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + if (aggregationToJsonProcessor == null) { + Aggregations aggs = search(); + if (aggs == null) { + hasNext = false; + return Optional.empty(); + } + initAggregationProcessor(aggs); + } + + return Optional.ofNullable(processNextBatch()); + } + + private Aggregations search() throws IOException { + LOGGER.debug("[{}] Executing aggregated search", context.jobId); + SearchResponse searchResponse = executeSearchRequest(buildSearchRequest(buildBaseSearchSource())); + LOGGER.debug("[{}] Search response was obtained", context.jobId); + ExtractorUtils.checkSearchWasSuccessful(context.jobId, searchResponse); + return validateAggs(searchResponse.getAggregations()); + } + + private void initAggregationProcessor(Aggregations aggs) throws IOException { + aggregationToJsonProcessor = new AggregationToJsonProcessor(context.timeField, context.fields, context.includeDocCount, + context.start); + aggregationToJsonProcessor.process(aggs); + } + + protected SearchResponse executeSearchRequest(T searchRequestBuilder) { + return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); + } + + private SearchSourceBuilder buildBaseSearchSource() { + // For derivative aggregations the first bucket will always be null + // so query one extra histogram bucket back and hope there is data + // in that bucket + long histogramSearchStartTime = Math.max(0, context.start - ExtractorUtils.getHistogramIntervalMillis(context.aggs)); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder() + .size(0) + .query(ExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, histogramSearchStartTime, context.end)); + + context.aggs.getAggregatorFactories().forEach(searchSourceBuilder::aggregation); + context.aggs.getPipelineAggregatorFactories().forEach(searchSourceBuilder::aggregation); + return searchSourceBuilder; + } + + protected abstract T 
buildSearchRequest(SearchSourceBuilder searchRequestBuilder); + + private Aggregations validateAggs(@Nullable Aggregations aggs) { + if (aggs == null) { + return null; + } + List aggsAsList = aggs.asList(); + if (aggsAsList.isEmpty()) { + return null; + } + if (aggsAsList.size() > 1) { + throw new IllegalArgumentException("Multiple top level aggregations not supported; found: " + + aggsAsList.stream().map(Aggregation::getName).collect(Collectors.toList())); + } + + return aggs; + } + + private InputStream processNextBatch() throws IOException { + outputStream.reset(); + + hasNext = aggregationToJsonProcessor.writeDocs(BATCH_KEY_VALUE_PAIRS, outputStream); + return new ByteArrayInputStream(outputStream.toByteArray()); + } + + protected long getHistogramInterval() { + return ExtractorUtils.getHistogramIntervalMillis(context.aggs); + } + + public AggregationDataExtractorContext getContext() { + return context; + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java index 6f68239a765ff..8705c1beee867 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java @@ -5,28 +5,10 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; -import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Objects; -import java.util.Optional; -import java.util.stream.Collectors; +import org.elasticsearch.search.builder.SearchSourceBuilder; /** * An implementation that extracts data from elasticsearch using search with aggregations on a client. @@ -34,132 +16,19 @@ * stored and they are then processed in batches. Cancellation is supported between batches. * Note that this class is NOT thread-safe. */ -class AggregationDataExtractor implements DataExtractor { - - private static final Logger LOGGER = Loggers.getLogger(AggregationDataExtractor.class); - - /** - * The number of key-value pairs written in each batch to process. - * This has to be a number that is small enough to allow for responsive - * cancelling and big enough to not cause overhead by calling the - * post data action too often. The value of 1000 was determined via - * such testing. 
- */ - private static int BATCH_KEY_VALUE_PAIRS = 1000; - - private final Client client; - private final AggregationDataExtractorContext context; - private boolean hasNext; - private boolean isCancelled; - private AggregationToJsonProcessor aggregationToJsonProcessor; - private ByteArrayOutputStream outputStream; +class AggregationDataExtractor extends AbstractAggregationDataExtractor { AggregationDataExtractor(Client client, AggregationDataExtractorContext dataExtractorContext) { - this.client = Objects.requireNonNull(client); - context = Objects.requireNonNull(dataExtractorContext); - hasNext = true; - isCancelled = false; - outputStream = new ByteArrayOutputStream(); + super(client, dataExtractorContext); } @Override - public boolean hasNext() { - return hasNext; - } + protected SearchRequestBuilder buildSearchRequest(SearchSourceBuilder searchSourceBuilder) { - @Override - public boolean isCancelled() { - return isCancelled; - } - - @Override - public void cancel() { - LOGGER.trace("[{}] Data extractor received cancel request", context.jobId); - isCancelled = true; - hasNext = false; + return new SearchRequestBuilder(client, SearchAction.INSTANCE) + .setSource(searchSourceBuilder) + .setIndices(context.indices) + .setTypes(context.types); } - @Override - public Optional next() throws IOException { - if (!hasNext()) { - throw new NoSuchElementException(); - } - - if (aggregationToJsonProcessor == null) { - Aggregations aggs = search(); - if (aggs == null) { - hasNext = false; - return Optional.empty(); - } - initAggregationProcessor(aggs); - } - - return Optional.ofNullable(processNextBatch()); - } - - private Aggregations search() throws IOException { - LOGGER.debug("[{}] Executing aggregated search", context.jobId); - SearchResponse searchResponse = executeSearchRequest(buildSearchRequest()); - LOGGER.debug("[{}] Search response was obtained", context.jobId); - ExtractorUtils.checkSearchWasSuccessful(context.jobId, searchResponse); - return validateAggs(searchResponse.getAggregations()); - } - - private void initAggregationProcessor(Aggregations aggs) throws IOException { - aggregationToJsonProcessor = new AggregationToJsonProcessor(context.timeField, context.fields, context.includeDocCount, - context.start); - aggregationToJsonProcessor.process(aggs); - } - - protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); - } - - private SearchRequestBuilder buildSearchRequest() { - // For derivative aggregations the first bucket will always be null - // so query one extra histogram bucket back and hope there is data - // in that bucket - long histogramSearchStartTime = Math.max(0, context.start - getHistogramInterval()); - - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE) - .setIndices(context.indices) - .setTypes(context.types) - .setSize(0) - .setQuery(ExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, histogramSearchStartTime, context.end)); - - context.aggs.getAggregatorFactories().forEach(searchRequestBuilder::addAggregation); - context.aggs.getPipelineAggregatorFactories().forEach(searchRequestBuilder::addAggregation); - return searchRequestBuilder; - } - - private Aggregations validateAggs(@Nullable Aggregations aggs) { - if (aggs == null) { - return null; - } - List aggsAsList = aggs.asList(); - if (aggsAsList.isEmpty()) { - return null; - } - if 
(aggsAsList.size() > 1) { - throw new IllegalArgumentException("Multiple top level aggregations not supported; found: " - + aggsAsList.stream().map(Aggregation::getName).collect(Collectors.toList())); - } - - return aggs; - } - - private InputStream processNextBatch() throws IOException { - outputStream.reset(); - - hasNext = aggregationToJsonProcessor.writeDocs(BATCH_KEY_VALUE_PAIRS, outputStream); - return new ByteArrayInputStream(outputStream.toByteArray()); - } - - private long getHistogramInterval() { - return ExtractorUtils.getHistogramIntervalMillis(context.aggs); - } - - AggregationDataExtractorContext getContext() { - return context; - } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index 864a83afae7e7..c934653a6268e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -5,17 +5,17 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.Max; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -42,7 +42,7 @@ */ class AggregationToJsonProcessor { - private static final Logger LOGGER = Loggers.getLogger(AggregationToJsonProcessor.class); + private static final Logger LOGGER = LogManager.getLogger(AggregationToJsonProcessor.class); private final String timeField; private final Set fields; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java new file mode 100644 index 0000000000000..f5de574e99a96 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractor.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; + +/** + * An implementation that extracts data from Elasticsearch using search with aggregations against rollup indices on a client. + * The first time {@link #next()} is called, the search is executed. The result aggregations are + * stored and they are then processed in batches. Cancellation is supported between batches. + * Note that this class is NOT thread-safe. + */ +class RollupDataExtractor extends AbstractAggregationDataExtractor<RollupSearchAction.RequestBuilder> { + + RollupDataExtractor(Client client, AggregationDataExtractorContext dataExtractorContext) { + super(client, dataExtractorContext); + } + + @Override + protected RollupSearchAction.RequestBuilder buildSearchRequest(SearchSourceBuilder searchSourceBuilder) { + SearchRequest searchRequest = new SearchRequest().indices(context.indices).source(searchSourceBuilder); + + return new RollupSearchAction.RequestBuilder(client, searchRequest); + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java new file mode 100644 index 0000000000000..c8a96d6c306af --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.Intervals; +import org.elasticsearch.xpack.core.rollup.action.RollableIndexCaps; +import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps.RollupFieldCaps; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils.getHistogramAggregation; +import static org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils.getHistogramIntervalMillis; +import static org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils.validateAndGetCalendarInterval; + +public class RollupDataExtractorFactory implements DataExtractorFactory { + + private final Client client; + private final DatafeedConfig datafeedConfig; + private final Job job; + + private RollupDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job) { + this.client = Objects.requireNonNull(client); + this.datafeedConfig = Objects.requireNonNull(datafeedConfig); + this.job = Objects.requireNonNull(job); + } + + @Override + public DataExtractor newExtractor(long start, long end) { + long histogramInterval = datafeedConfig.getHistogramIntervalMillis(); + AggregationDataExtractorContext dataExtractorContext = new AggregationDataExtractorContext( + job.getId(), + job.getDataDescription().getTimeField(), + job.getAnalysisConfig().analysisFields(), + datafeedConfig.getIndices(), + datafeedConfig.getTypes(), + datafeedConfig.getQuery(), + datafeedConfig.getAggregations(), + Intervals.alignToCeil(start, histogramInterval), + Intervals.alignToFloor(end, histogramInterval), + job.getAnalysisConfig().getSummaryCountFieldName().equals(DatafeedConfig.DOC_COUNT), + datafeedConfig.getHeaders()); + return new RollupDataExtractor(client, dataExtractorContext); + } + + public static void create(Client client, + DatafeedConfig datafeed, + Job job, + Map<String, RollableIndexCaps> rollupJobsWithCaps, + ActionListener<DataExtractorFactory> listener) { + + final AggregationBuilder datafeedHistogramAggregation = getHistogramAggregation( + datafeed.getAggregations().getAggregatorFactories()); + if ((datafeedHistogramAggregation instanceof DateHistogramAggregationBuilder) == false) { + listener.onFailure( + new IllegalArgumentException("Rollup requires that the datafeed configuration use a [date_histogram] aggregation," + + " not a [histogram] aggregation over the time field.")); + return; + } + 
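For context on the checks that follow: validInterval() accepts a rollup job only if it grouped by date_histogram, rolled up in UTC, and its interval evenly divides the datafeed's interval (datafeedInterval % jobInterval == 0). A standalone sketch of that arithmetic, with illustrative names not taken from the diff:

-------------------------------------------------
// Sketch only: the interval-alignment rule enforced by validInterval() below.
// A rollup job can serve a datafeed only when every datafeed bucket is an
// exact union of whole rollup buckets.
final class RollupIntervalRuleSketch {
    static boolean canServe(long datafeedIntervalMs, long rollupJobIntervalMs) {
        return datafeedIntervalMs % rollupJobIntervalMs == 0;
    }

    public static void main(String[] args) {
        System.out.println(canServe(3_600_000L, 60_000L)); // true: 1h datafeed over 1m rollup
        System.out.println(canServe(60_000L, 45_000L));    // false: buckets cannot align
    }
}
-------------------------------------------------

The divisibility requirement matters because pre-aggregated rollup buckets can only be summed whole; a datafeed bucket that splits a rollup bucket could not be reconstructed.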
+ final String timeField = ((ValuesSourceAggregationBuilder) datafeedHistogramAggregation).field(); + + Set<ParsedRollupCaps> rollupCapsSet = rollupJobsWithCaps.values() + .stream() + .flatMap(rollableIndexCaps -> rollableIndexCaps.getJobCaps().stream()) + .map(rollupJobCaps -> ParsedRollupCaps.fromJobFieldCaps(rollupJobCaps.getFieldCaps(), timeField)) + .collect(Collectors.toSet()); + + final long datafeedInterval = getHistogramIntervalMillis(datafeedHistogramAggregation); + + List<ParsedRollupCaps> validIntervalCaps = rollupCapsSet.stream() + .filter(rollupCaps -> validInterval(datafeedInterval, rollupCaps)) + .collect(Collectors.toList()); + + if (validIntervalCaps.isEmpty()) { + listener.onFailure( + new IllegalArgumentException( + "Rollup capabilities do not have a [date_histogram] aggregation with an interval " + + "that divides evenly into the datafeed's interval.") + ); + return; + } + final List<ValuesSourceAggregationBuilder> flattenedAggs = new ArrayList<>(); + flattenAggregations(datafeed.getAggregations().getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); + + if (validIntervalCaps.stream().noneMatch(rollupJobConfig -> hasAggregations(rollupJobConfig, flattenedAggs))) { + listener.onFailure( + new IllegalArgumentException("Rollup capabilities do not support all the datafeed aggregations at the desired interval.") + ); + return; + } + + listener.onResponse(new RollupDataExtractorFactory(client, datafeed, job)); + } + + private static boolean validInterval(long datafeedInterval, ParsedRollupCaps rollupJobGroupConfig) { + if (rollupJobGroupConfig.hasDatehistogram() == false) { + return false; + } + if ("UTC".equalsIgnoreCase(rollupJobGroupConfig.getTimezone()) == false) { + return false; + } + try { + long jobInterval = validateAndGetCalendarInterval(rollupJobGroupConfig.getInterval()); + return datafeedInterval % jobInterval == 0; + } catch (ElasticsearchStatusException exception) { + return false; + } + } + + private static void flattenAggregations(final Collection<AggregationBuilder> datafeedAggregations, + final AggregationBuilder datafeedHistogramAggregation, + final List<ValuesSourceAggregationBuilder> flattenedAggregations) { + for (AggregationBuilder aggregationBuilder : datafeedAggregations) { + if (aggregationBuilder.equals(datafeedHistogramAggregation) == false) { + flattenedAggregations.add((ValuesSourceAggregationBuilder)aggregationBuilder); + } + flattenAggregations(aggregationBuilder.getSubAggregations(), datafeedHistogramAggregation, flattenedAggregations); + } + } + + private static boolean hasAggregations(ParsedRollupCaps rollupCaps, List<ValuesSourceAggregationBuilder> datafeedAggregations) { + for (ValuesSourceAggregationBuilder aggregationBuilder : datafeedAggregations) { + String type = aggregationBuilder.getType(); + String field = aggregationBuilder.field(); + if (aggregationBuilder instanceof TermsAggregationBuilder) { + if (rollupCaps.supportedTerms.contains(field) == false) { + return false; + } + } else { + if (rollupCaps.supportedMetrics.contains(field + "_" + type) == false) { + return false; + } + } + } + return true; + } + + private static class ParsedRollupCaps { + private final Set<String> supportedMetrics; + private final Set<String> supportedTerms; + private final Map<String, Object> datehistogramAgg; + private static final List<String> aggsToIgnore = + Arrays.asList(HistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder.NAME); + + private static ParsedRollupCaps fromJobFieldCaps(Map<String, RollupFieldCaps> rollupFieldCaps, String timeField) { + Map<String, Object> datehistogram = null; + RollupFieldCaps timeFieldCaps = rollupFieldCaps.get(timeField); + if (timeFieldCaps != null) { + for (Map<String, Object> agg : timeFieldCaps.getAggs()) { + if (agg.get("agg").equals(DateHistogramAggregationBuilder.NAME)) { + datehistogram = agg; + } + } + } + Set<String> supportedMetrics = new HashSet<>(); + Set<String> supportedTerms = new HashSet<>(); + rollupFieldCaps.forEach((field, fieldCaps) -> { + fieldCaps.getAggs().forEach(agg -> { + String type = (String)agg.get("agg"); + if (type.equals(TermsAggregationBuilder.NAME)) { + supportedTerms.add(field); + } else if (aggsToIgnore.contains(type) == false) { + supportedMetrics.add(field + "_" + type); + } + }); + }); + return new ParsedRollupCaps(supportedMetrics, supportedTerms, datehistogram); + } + + private ParsedRollupCaps(Set<String> supportedMetrics, Set<String> supportedTerms, Map<String, Object> datehistogramAgg) { + this.supportedMetrics = supportedMetrics; + this.supportedTerms = supportedTerms; + this.datehistogramAgg = datehistogramAgg; + } + + private String getInterval() { + if (datehistogramAgg == null) { + return null; + } + return (String)datehistogramAgg.get(DateHistogramGroupConfig.INTERVAL); + } + + private String getTimezone() { + if (datehistogramAgg == null) { + return null; + } + return (String)datehistogramAgg.get(DateHistogramGroupConfig.TIME_ZONE); + } + + private boolean hasDatehistogram() { + return datehistogramAgg != null; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java index b1daff2b7e783..dea9aca1d48d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java @@ -5,20 +5,26 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.chunked; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Min; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; +import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; +import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.RollupDataExtractorFactory; import java.io.IOException; import java.io.InputStream; @@ -43,7 +49,14 @@ */ public class ChunkedDataExtractor implements DataExtractor { - private static final Logger LOGGER = Loggers.getLogger(ChunkedDataExtractor.class); + private interface DataSummary { + long estimateChunk(); + boolean hasData(); + long earliestTime(); + long getDataTimeSpread(); + } + + private static final Logger LOGGER = LogManager.getLogger(ChunkedDataExtractor.class); + private static 
final String EARLIEST_TIME = "earliest_time"; private static final String LATEST_TIME = "latest_time"; @@ -54,6 +67,7 @@ public class ChunkedDataExtractor implements DataExtractor { private final Client client; private final DataExtractorFactory dataExtractorFactory; private final ChunkedDataExtractorContext context; + private final DataSummaryFactory dataSummaryFactory; private long currentStart; private long currentEnd; private long chunkSpan; @@ -67,6 +81,7 @@ public ChunkedDataExtractor(Client client, DataExtractorFactory dataExtractorFac this.currentStart = context.start; this.currentEnd = context.start; this.isCancelled = false; + this.dataSummaryFactory = new DataSummaryFactory(); } @Override @@ -93,48 +108,21 @@ public Optional next() throws IOException { } private void setUpChunkedSearch() throws IOException { - DataSummary dataSummary = requestDataSummary(); - if (dataSummary.totalHits > 0) { - currentStart = context.timeAligner.alignToFloor(dataSummary.earliestTime); + DataSummary dataSummary = dataSummaryFactory.buildDataSummary(); + if (dataSummary.hasData()) { + currentStart = context.timeAligner.alignToFloor(dataSummary.earliestTime()); currentEnd = currentStart; chunkSpan = context.chunkSpan == null ? dataSummary.estimateChunk() : context.chunkSpan.getMillis(); chunkSpan = context.timeAligner.alignToCeil(chunkSpan); - LOGGER.debug("[{}]Chunked search configured: totalHits = {}, dataTimeSpread = {} ms, chunk span = {} ms", - context.jobId, dataSummary.totalHits, dataSummary.getDataTimeSpread(), chunkSpan); + LOGGER.debug("[{}]Chunked search configured: kind = {}, dataTimeSpread = {} ms, chunk span = {} ms", + context.jobId, dataSummary.getClass().getSimpleName(), dataSummary.getDataTimeSpread(), chunkSpan); } else { // search is over currentEnd = context.end; } } - private DataSummary requestDataSummary() throws IOException { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE) - .setSize(0) - .setIndices(context.indices) - .setTypes(context.types) - .setQuery(ExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, currentStart, context.end)) - .addAggregation(AggregationBuilders.min(EARLIEST_TIME).field(context.timeField)) - .addAggregation(AggregationBuilders.max(LATEST_TIME).field(context.timeField)); - - SearchResponse response = executeSearchRequest(searchRequestBuilder); - LOGGER.debug("[{}] Data summary response was obtained", context.jobId); - - ExtractorUtils.checkSearchWasSuccessful(context.jobId, response); - - Aggregations aggregations = response.getAggregations(); - long earliestTime = 0; - long latestTime = 0; - long totalHits = response.getHits().getTotalHits(); - if (totalHits > 0) { - Min min = aggregations.get(EARLIEST_TIME); - earliestTime = (long) min.getValue(); - Max max = aggregations.get(LATEST_TIME); - latestTime = (long) max.getValue(); - } - return new DataSummary(earliestTime, latestTime, totalHits); - } - - protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { + protected SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); } @@ -182,19 +170,101 @@ public void cancel() { isCancelled = true; } - private class DataSummary { + ChunkedDataExtractorContext getContext() { + return context; + } + + private class DataSummaryFactory { + + /** + * If there are aggregations, an AggregatedDataSummary object is created. 
Otherwise, a ScrolledDataSummary is returned. + * + * By default, a DatafeedConfig with aggregations should already have a manual ChunkingConfig created. + * However, the end user could have specifically set the ChunkingConfig to AUTO, which would not really work for aggregations. + * So, if we need to gather an appropriate chunk span for aggregations, we can utilize the AggregatedDataSummary. + * + * @return DataSummary object + * @throws IOException when the time field range search fails + */ + private DataSummary buildDataSummary() throws IOException { + return context.hasAggregations ? newAggregatedDataSummary() : newScrolledDataSummary(); + } + + private DataSummary newScrolledDataSummary() throws IOException { + SearchRequestBuilder searchRequestBuilder = rangeSearchRequest().setTypes(context.types); + + SearchResponse response = executeSearchRequest(searchRequestBuilder); + LOGGER.debug("[{}] Scrolling Data summary response was obtained", context.jobId); + + ExtractorUtils.checkSearchWasSuccessful(context.jobId, response); + + Aggregations aggregations = response.getAggregations(); + long earliestTime = 0; + long latestTime = 0; + long totalHits = response.getHits().getTotalHits(); + if (totalHits > 0) { + Min min = aggregations.get(EARLIEST_TIME); + earliestTime = (long) min.getValue(); + Max max = aggregations.get(LATEST_TIME); + latestTime = (long) max.getValue(); + } + return new ScrolledDataSummary(earliestTime, latestTime, totalHits); + } + + private DataSummary newAggregatedDataSummary() throws IOException { + // TODO: once RollupSearchAction is changed from indices:admin* to indices:data/read/* this branch is not needed + ActionRequestBuilder searchRequestBuilder = + dataExtractorFactory instanceof RollupDataExtractorFactory ? rollupRangeSearchRequest() : rangeSearchRequest(); + SearchResponse response = executeSearchRequest(searchRequestBuilder); + LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId); + + ExtractorUtils.checkSearchWasSuccessful(context.jobId, response); + + Aggregations aggregations = response.getAggregations(); + Min min = aggregations.get(EARLIEST_TIME); + Max max = aggregations.get(LATEST_TIME); + return new AggregatedDataSummary(min.getValue(), max.getValue(), context.histogramInterval); + } + + private SearchSourceBuilder rangeSearchBuilder() { + return new SearchSourceBuilder() + .size(0) + .query(ExtractorUtils.wrapInTimeRangeQuery(context.query, context.timeField, currentStart, context.end)) + .aggregation(AggregationBuilders.min(EARLIEST_TIME).field(context.timeField)) + .aggregation(AggregationBuilders.max(LATEST_TIME).field(context.timeField)); + } + + private SearchRequestBuilder rangeSearchRequest() { + return new SearchRequestBuilder(client, SearchAction.INSTANCE) + .setIndices(context.indices) + .setSource(rangeSearchBuilder()); + } + + private RollupSearchAction.RequestBuilder rollupRangeSearchRequest() { + SearchRequest searchRequest = new SearchRequest().indices(context.indices).source(rangeSearchBuilder()); + return new RollupSearchAction.RequestBuilder(client, searchRequest); + } + } + + private class ScrolledDataSummary implements DataSummary { private long earliestTime; private long latestTime; private long totalHits; - private DataSummary(long earliestTime, long latestTime, long totalHits) { + private ScrolledDataSummary(long earliestTime, long latestTime, long totalHits) { this.earliestTime = earliestTime; this.latestTime = latestTime; this.totalHits = totalHits; } - private long getDataTimeSpread() { + 
@Override + public long earliestTime() { + return earliestTime; + } + + @Override + public long getDataTimeSpread() { return latestTime - earliestTime; } @@ -206,7 +276,8 @@ private long getDataTimeSpread() { * However, assuming this as the chunk span may often lead to half-filled pages or empty searches. * It is beneficial to take a multiple of that. Based on benchmarking, we set this to 10x. */ - private long estimateChunk() { + @Override + public long estimateChunk() { long dataTimeSpread = getDataTimeSpread(); if (totalHits <= 0 || dataTimeSpread <= 0) { return context.end - currentEnd; @@ -214,9 +285,46 @@ private long estimateChunk() { long estimatedChunk = 10 * (context.scrollSize * getDataTimeSpread()) / totalHits; return Math.max(estimatedChunk, MIN_CHUNK_SPAN); } + + @Override + public boolean hasData() { + return totalHits > 0; + } } - ChunkedDataExtractorContext getContext() { - return context; + private class AggregatedDataSummary implements DataSummary { + + private final double earliestTime; + private final double latestTime; + private final long histogramIntervalMillis; + + private AggregatedDataSummary(double earliestTime, double latestTime, long histogramInterval) { + this.earliestTime = earliestTime; + this.latestTime = latestTime; + this.histogramIntervalMillis = histogramInterval; + } + + /** + * This heuristic is a direct copy of the manual chunking config auto-creation done in {@link DatafeedConfig.Builder} + */ + @Override + public long estimateChunk() { + return DatafeedConfig.Builder.DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis; + } + + @Override + public boolean hasData() { + return (Double.isInfinite(earliestTime) || Double.isInfinite(latestTime)) == false; + } + + @Override + public long earliestTime() { + return (long)earliestTime; + } + + @Override + public long getDataTimeSpread() { + return (long)latestTime - (long)earliestTime; + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java index 38c2efd8679c0..bb32b40f7cde3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorContext.java @@ -31,10 +31,13 @@ interface TimeAligner { final TimeValue chunkSpan; final TimeAligner timeAligner; final Map headers; + final boolean hasAggregations; + final Long histogramInterval; ChunkedDataExtractorContext(String jobId, String timeField, List indices, List types, QueryBuilder query, int scrollSize, long start, long end, @Nullable TimeValue chunkSpan, - TimeAligner timeAligner, Map headers) { + TimeAligner timeAligner, Map headers, boolean hasAggregations, + @Nullable Long histogramInterval) { this.jobId = Objects.requireNonNull(jobId); this.timeField = Objects.requireNonNull(timeField); this.indices = indices.toArray(new String[indices.size()]); @@ -46,5 +49,7 @@ interface TimeAligner { this.chunkSpan = chunkSpan; this.timeAligner = Objects.requireNonNull(timeAligner); this.headers = headers; + this.hasAggregations = hasAggregations; + this.histogramInterval = histogramInterval; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java index 7b5bac64740d6..67079cf2e6777 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java @@ -42,7 +42,10 @@ public DataExtractor newExtractor(long start, long end) { timeAligner.alignToFloor(end), datafeedConfig.getChunkingConfig().getTimeSpan(), timeAligner, - datafeedConfig.getHeaders()); + datafeedConfig.getHeaders(), + datafeedConfig.hasAggregations(), + datafeedConfig.hasAggregations() ? datafeedConfig.getHistogramIntervalMillis() : null + ); return new ChunkedDataExtractor(client, dataExtractorFactory, dataExtractorContext); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java similarity index 92% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index 710aa017e741b..232cd53a359ce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -3,10 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.datafeed.extractor.scroll; +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.joda.time.base.BaseDateTime; import java.util.List; @@ -17,7 +18,7 @@ * Represents a field to be extracted by the datafeed. * It encapsulates the extraction logic. 
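// Editor's note (hypothetical sketch, not part of the patch): how the factory's two new
// context arguments select the data summary. With aggregations, the histogram interval is
// carried into the context so the chunk span can be sized from bucket width; without them it
// stays null and the scroll-based estimate above is used. Names mirror the patch.
boolean hasAggregations = datafeedConfig.hasAggregations();
Long histogramInterval = hasAggregations ? datafeedConfig.getHistogramIntervalMillis() : null;
// In ChunkedDataExtractor.buildDataSummary():
//   context.hasAggregations == true  -> newAggregatedDataSummary()
//   context.hasAggregations == false -> newScrolledDataSummary()
// AggregatedDataSummary.estimateChunk() then returns
// DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramInterval, mirroring the auto-created
// manual ChunkingConfig in DatafeedConfig.Builder.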
*/ -abstract class ExtractedField { +public abstract class ExtractedField { public enum ExtractionMethod { SOURCE, DOC_VALUE, SCRIPT_FIELD @@ -51,6 +52,10 @@ public ExtractionMethod getExtractionMethod() { public abstract Object[] value(SearchHit hit); + public String getDocValueFormat() { + return DocValueFieldsContext.USE_DEFAULT_FORMAT; + } + public static ExtractedField newTimeField(String name, ExtractionMethod extractionMethod) { if (extractionMethod == ExtractionMethod.SOURCE) { throw new IllegalArgumentException("time field cannot be extracted from source"); @@ -93,6 +98,8 @@ public Object[] value(SearchHit hit) { private static class TimeField extends FromFields { + private static final String EPOCH_MILLIS_FORMAT = "epoch_millis"; + TimeField(String name, ExtractionMethod extractionMethod) { super(name, name, extractionMethod); } @@ -112,6 +119,11 @@ public Object[] value(SearchHit hit) { } return value; } + + @Override + public String getDocValueFormat() { + return EPOCH_MILLIS_FORMAT; + } } private static class FromSource extends ExtractedField { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFields.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFields.java new file mode 100644 index 0000000000000..f9b2467fbcfd3 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFields.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; + +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * The fields the datafeed has to extract + */ +public class ExtractedFields { + + private static final String TEXT = "text"; + + private final List allFields; + private final List docValueFields; + private final String[] sourceFields; + + public ExtractedFields(List allFields) { + this.allFields = Collections.unmodifiableList(allFields); + this.docValueFields = filterFields(ExtractedField.ExtractionMethod.DOC_VALUE, allFields); + this.sourceFields = filterFields(ExtractedField.ExtractionMethod.SOURCE, allFields).stream().map(ExtractedField::getName) + .toArray(String[]::new); + } + + public List getAllFields() { + return allFields; + } + + public String[] getSourceFields() { + return sourceFields; + } + + public List getDocValueFields() { + return docValueFields; + } + + private static List filterFields(ExtractedField.ExtractionMethod method, List fields) { + return fields.stream().filter(field -> field.getExtractionMethod() == method).collect(Collectors.toList()); + } + + public static ExtractedFields build(Collection allFields, Set scriptFields, + FieldCapabilitiesResponse fieldsCapabilities) { + ExtractionMethodDetector extractionMethodDetector = new ExtractionMethodDetector(scriptFields, fieldsCapabilities); + return new ExtractedFields(allFields.stream().map(field -> 
extractionMethodDetector.detect(field)).collect(Collectors.toList())); + } + + protected static class ExtractionMethodDetector { + + private final Set scriptFields; + private final FieldCapabilitiesResponse fieldsCapabilities; + + protected ExtractionMethodDetector(Set scriptFields, FieldCapabilitiesResponse fieldsCapabilities) { + this.scriptFields = scriptFields; + this.fieldsCapabilities = fieldsCapabilities; + } + + protected ExtractedField detect(String field) { + String internalField = field; + ExtractedField.ExtractionMethod method = ExtractedField.ExtractionMethod.SOURCE; + if (scriptFields.contains(field)) { + method = ExtractedField.ExtractionMethod.SCRIPT_FIELD; + } else if (isAggregatable(field)) { + method = ExtractedField.ExtractionMethod.DOC_VALUE; + if (isFieldOfType(field, "date")) { + return ExtractedField.newTimeField(field, method); + } + } else if (isFieldOfType(field, TEXT)) { + String parentField = MlStrings.getParentField(field); + // Field is text so check if it is a multi-field + if (Objects.equals(parentField, field) == false && fieldsCapabilities.getField(parentField) != null) { + // Field is a multi-field which means it won't be available in source. Let's take the parent instead. + internalField = parentField; + method = isAggregatable(parentField) ? ExtractedField.ExtractionMethod.DOC_VALUE + : ExtractedField.ExtractionMethod.SOURCE; + } + } + return ExtractedField.newField(field, internalField, method); + } + + protected boolean isAggregatable(String field) { + Map fieldCaps = fieldsCapabilities.getField(field); + if (fieldCaps == null || fieldCaps.isEmpty()) { + throw new IllegalArgumentException("cannot retrieve field [" + field + "] because it has no mappings"); + } + for (FieldCapabilities capsPerIndex : fieldCaps.values()) { + if (!capsPerIndex.isAggregatable()) { + return false; + } + } + return true; + } + + private boolean isFieldOfType(String field, String type) { + Map fieldCaps = fieldsCapabilities.getField(field); + if (fieldCaps != null && fieldCaps.size() == 1) { + return fieldCaps.containsKey(type); + } + return false; + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFields.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFields.java new file mode 100644 index 0000000000000..cf87671bf3301 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFields.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
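// Editor's note (same-package, test-style sketch with assumed mappings; not part of the patch):
// the classification rules implemented by ExtractionMethodDetector.detect() above.
// detect("airline_lc")  -> SCRIPT_FIELD                 (listed in the datafeed's script fields)
// detect("@timestamp")  -> DOC_VALUE time field         (aggregatable date; doc value format "epoch_millis")
// detect("airline")     -> DOC_VALUE                    (aggregatable keyword)
// detect("message")     -> SOURCE                       (text, not aggregatable)
// detect("message.eng") -> SOURCE via internal field "message" (text multi-field is absent from _source)
ExtractedField field = new ExtractionMethodDetector(scriptFields, fieldCaps).detect("message.eng"); // hypothetical inputs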
+ */ +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; + +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * The fields to extract for a datafeed that requires a time field + */ +public class TimeBasedExtractedFields extends ExtractedFields { + + private final ExtractedField timeField; + + public TimeBasedExtractedFields(ExtractedField timeField, List allFields) { + super(allFields); + if (!allFields.contains(timeField)) { + throw new IllegalArgumentException("timeField should also be contained in allFields"); + } + this.timeField = Objects.requireNonNull(timeField); + } + + public String timeField() { + return timeField.getName(); + } + + public Long timeFieldValue(SearchHit hit) { + Object[] value = timeField.value(hit); + if (value.length != 1) { + throw new RuntimeException("Time field [" + timeField.getAlias() + "] expected a single value; actual was: " + + Arrays.toString(value)); + } + if (value[0] instanceof Long) { + return (Long) value[0]; + } + throw new RuntimeException("Time field [" + timeField.getAlias() + "] expected a long value; actual was: " + value[0]); + } + + public static TimeBasedExtractedFields build(Job job, DatafeedConfig datafeed, FieldCapabilitiesResponse fieldsCapabilities) { + Set scriptFields = datafeed.getScriptFields().stream().map(sf -> sf.fieldName()).collect(Collectors.toSet()); + ExtractionMethodDetector extractionMethodDetector = new ExtractionMethodDetector(scriptFields, fieldsCapabilities); + String timeField = job.getDataDescription().getTimeField(); + if (scriptFields.contains(timeField) == false && extractionMethodDetector.isAggregatable(timeField) == false) { + throw new IllegalArgumentException("cannot retrieve time field [" + timeField + "] because it is not aggregatable"); + } + ExtractedField timeExtractedField = ExtractedField.newTimeField(timeField, scriptFields.contains(timeField) ? + ExtractedField.ExtractionMethod.SCRIPT_FIELD : ExtractedField.ExtractionMethod.DOC_VALUE); + List remainingFields = job.allInputFields().stream().filter(f -> !f.equals(timeField)).collect(Collectors.toList()); + List allExtractedFields = new ArrayList<>(remainingFields.size() + 1); + allExtractedFields.add(timeExtractedField); + remainingFields.stream().forEach(field -> allExtractedFields.add(extractionMethodDetector.detect(field))); + return new TimeBasedExtractedFields(timeExtractedField, allExtractedFields); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFields.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFields.java deleted file mode 100644 index 25cb405ec74a6..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFields.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
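// Editor's note (hypothetical objects, not part of the patch): the contract TimeBasedExtractedFields
// adds on top of ExtractedFields. The time field is validated at build time and must yield a
// single epoch-millis long per hit; job, datafeed, fieldCaps and hit are assumed to exist.
TimeBasedExtractedFields fields = TimeBasedExtractedFields.build(job, datafeed, fieldCaps);
Long epochMillis = fields.timeFieldValue(hit); // throws RuntimeException unless exactly one Long value
// build() throws IllegalArgumentException when the time field is neither a script field nor
// aggregatable, which the scroll factory below now maps to a 400 bad-request response.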
- */ -package org.elasticsearch.xpack.ml.datafeed.extractor.scroll; - -import org.elasticsearch.action.fieldcaps.FieldCapabilities; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.core.ml.utils.MlStrings; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * The fields the datafeed has to extract - */ -class ExtractedFields { - - private static final String TEXT = "text"; - - private final ExtractedField timeField; - private final List allFields; - private final String[] docValueFields; - private final String[] sourceFields; - - ExtractedFields(ExtractedField timeField, List allFields) { - if (!allFields.contains(timeField)) { - throw new IllegalArgumentException("timeField should also be contained in allFields"); - } - this.timeField = Objects.requireNonNull(timeField); - this.allFields = Collections.unmodifiableList(allFields); - this.docValueFields = filterFields(ExtractedField.ExtractionMethod.DOC_VALUE, allFields); - this.sourceFields = filterFields(ExtractedField.ExtractionMethod.SOURCE, allFields); - } - - public List getAllFields() { - return allFields; - } - - public String[] getSourceFields() { - return sourceFields; - } - - public String[] getDocValueFields() { - return docValueFields; - } - - private static String[] filterFields(ExtractedField.ExtractionMethod method, List fields) { - List result = new ArrayList<>(); - for (ExtractedField field : fields) { - if (field.getExtractionMethod() == method) { - result.add(field.getName()); - } - } - return result.toArray(new String[result.size()]); - } - - public String timeField() { - return timeField.getName(); - } - - public Long timeFieldValue(SearchHit hit) { - Object[] value = timeField.value(hit); - if (value.length != 1) { - throw new RuntimeException("Time field [" + timeField.getAlias() + "] expected a single value; actual was: " - + Arrays.toString(value)); - } - if (value[0] instanceof Long) { - return (Long) value[0]; - } - throw new RuntimeException("Time field [" + timeField.getAlias() + "] expected a long value; actual was: " + value[0]); - } - - public static ExtractedFields build(Job job, DatafeedConfig datafeed, FieldCapabilitiesResponse fieldsCapabilities) { - Set scriptFields = datafeed.getScriptFields().stream().map(sf -> sf.fieldName()).collect(Collectors.toSet()); - ExtractionMethodDetector extractionMethodDetector = new ExtractionMethodDetector(datafeed.getId(), scriptFields, - fieldsCapabilities); - String timeField = job.getDataDescription().getTimeField(); - if (scriptFields.contains(timeField) == false && extractionMethodDetector.isAggregatable(timeField) == false) { - throw ExceptionsHelper.badRequestException("datafeed [" + datafeed.getId() + "] cannot retrieve time field [" + timeField - + "] because it is not aggregatable"); - } - ExtractedField timeExtractedField = ExtractedField.newTimeField(timeField, scriptFields.contains(timeField) ? 
- ExtractedField.ExtractionMethod.SCRIPT_FIELD : ExtractedField.ExtractionMethod.DOC_VALUE); - List remainingFields = job.allInputFields().stream().filter(f -> !f.equals(timeField)).collect(Collectors.toList()); - List allExtractedFields = new ArrayList<>(remainingFields.size() + 1); - allExtractedFields.add(timeExtractedField); - remainingFields.stream().forEach(field -> allExtractedFields.add(extractionMethodDetector.detect(field))); - return new ExtractedFields(timeExtractedField, allExtractedFields); - } - - private static class ExtractionMethodDetector { - - private final String datafeedId; - private final Set scriptFields; - private final FieldCapabilitiesResponse fieldsCapabilities; - - private ExtractionMethodDetector(String datafeedId, Set scriptFields, FieldCapabilitiesResponse fieldsCapabilities) { - this.datafeedId = datafeedId; - this.scriptFields = scriptFields; - this.fieldsCapabilities = fieldsCapabilities; - } - - private ExtractedField detect(String field) { - String internalField = field; - ExtractedField.ExtractionMethod method = ExtractedField.ExtractionMethod.SOURCE; - if (scriptFields.contains(field)) { - method = ExtractedField.ExtractionMethod.SCRIPT_FIELD; - } else if (isAggregatable(field)) { - method = ExtractedField.ExtractionMethod.DOC_VALUE; - } else if (isText(field)) { - String parentField = MlStrings.getParentField(field); - // Field is text so check if it is a multi-field - if (Objects.equals(parentField, field) == false && fieldsCapabilities.getField(parentField) != null) { - // Field is a multi-field which means it won't be available in source. Let's take the parent instead. - internalField = parentField; - method = isAggregatable(parentField) ? ExtractedField.ExtractionMethod.DOC_VALUE - : ExtractedField.ExtractionMethod.SOURCE; - } - } - return ExtractedField.newField(field, internalField, method); - } - - private boolean isAggregatable(String field) { - Map fieldCaps = fieldsCapabilities.getField(field); - if (fieldCaps == null || fieldCaps.isEmpty()) { - throw ExceptionsHelper.badRequestException("datafeed [" + datafeedId + "] cannot retrieve field [" + field - + "] because it has no mappings"); - } - for (FieldCapabilities capsPerIndex : fieldCaps.values()) { - if (!capsPerIndex.isAggregatable()) { - return false; - } - } - return true; - } - - private boolean isText(String field) { - Map fieldCaps = fieldsCapabilities.getField(field); - if (fieldCaps != null && fieldCaps.size() == 1) { - return fieldCaps.containsKey(TEXT); - } - return false; - } - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 7274343a9991b..d890ce8a3fe74 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.scroll; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; @@ -15,15 +16,14 @@ import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequestBuilder; import org.elasticsearch.client.Client; -import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.StoredFieldsContext; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -42,9 +42,8 @@ */ class ScrollDataExtractor implements DataExtractor { - private static final Logger LOGGER = Loggers.getLogger(ScrollDataExtractor.class); + private static final Logger LOGGER = LogManager.getLogger(ScrollDataExtractor.class); private static final TimeValue SCROLL_TIMEOUT = new TimeValue(30, TimeUnit.MINUTES); - private static final String EPOCH_MILLIS_FORMAT = "epoch_millis"; private final Client client; private final ScrollDataExtractorContext context; @@ -112,12 +111,8 @@ private SearchRequestBuilder buildSearchRequest(long start) { .setQuery(ExtractorUtils.wrapInTimeRangeQuery( context.query, context.extractedFields.timeField(), start, context.end)); - for (String docValueField : context.extractedFields.getDocValueFields()) { - if (docValueField.equals(context.extractedFields.timeField())) { - searchRequestBuilder.addDocValueField(docValueField, EPOCH_MILLIS_FORMAT); - } else { - searchRequestBuilder.addDocValueField(docValueField, DocValueFieldsContext.USE_DEFAULT_FORMAT); - } + for (ExtractedField docValueField : context.extractedFields.getDocValueFields()) { + searchRequestBuilder.addDocValueField(docValueField.getName(), docValueField.getDocValueFormat()); } String[] sourceFields = context.extractedFields.getSourceFields(); if (sourceFields.length == 0) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java index d1666497d241c..08e693849ec05 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorContext.java @@ -7,6 +7,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.TimeBasedExtractedFields; import java.util.List; import java.util.Map; @@ -15,7 +16,7 @@ class ScrollDataExtractorContext { final String jobId; - final ExtractedFields extractedFields; + final TimeBasedExtractedFields extractedFields; final String[] indices; final String[] types; final QueryBuilder query; @@ -25,7 +26,7 @@ class ScrollDataExtractorContext { final long end; final Map headers; - ScrollDataExtractorContext(String jobId, ExtractedFields extractedFields, List indices, List types, + ScrollDataExtractorContext(String jobId, TimeBasedExtractedFields extractedFields, List indices, List types, QueryBuilder query, List scriptFields, int scrollSize, long start, long end, Map headers) { this.jobId = Objects.requireNonNull(jobId); diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 2c6e0deaebd9f..67689bd51b8b5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -16,8 +16,10 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlStrings; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.TimeBasedExtractedFields; import java.util.Objects; @@ -26,9 +28,9 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; private final Job job; - private final ExtractedFields extractedFields; + private final TimeBasedExtractedFields extractedFields; - private ScrollDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, ExtractedFields extractedFields) { + private ScrollDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, TimeBasedExtractedFields extractedFields) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); this.job = Objects.requireNonNull(job); @@ -56,12 +58,14 @@ public static void create(Client client, DatafeedConfig datafeed, Job job, Actio // Step 2. 
Construct the factory and notify listener ActionListener fieldCapabilitiesHandler = ActionListener.wrap( fieldCapabilitiesResponse -> { - ExtractedFields extractedFields = ExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); + TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, extractedFields)); }, e -> { if (e instanceof IndexNotFoundException) { listener.onFailure(new ResourceNotFoundException("datafeed [" + datafeed.getId() + "] cannot retrieve data because index " + ((IndexNotFoundException) e).getIndex() + " does not exist")); + } else if (e instanceof IllegalArgumentException) { + listener.onFailure(ExceptionsHelper.badRequestException("[" + datafeed.getId() + "] " + e.getMessage())); } else { listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessor.java index 52808ce397829..577d114d957e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessor.java @@ -9,6 +9,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedFields; import java.io.IOException; import java.io.OutputStream; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java index a508735af07f3..935af8d35ed75 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java @@ -35,7 +35,7 @@ * Runs the high-level steps needed to create ingest configs for the specified file. In order: * 1. Determine the most likely character set (UTF-8, UTF-16LE, ISO-8859-2, etc.) * 2. Load a sample of the file, consisting of the first 1000 lines of the file - * 3. Determine the most likely file structure - one of ND-JSON, XML, delimited or semi-structured text + * 3. Determine the most likely file structure - one of NDJSON, XML, delimited or semi-structured text * 4.
Create an appropriate structure object and delegate writing configs to it */ public final class FileStructureFinderManager { @@ -73,9 +73,9 @@ public final class FileStructureFinderManager { * These need to be ordered so that the more generic formats come after the more specific ones */ private static final List ORDERED_STRUCTURE_FACTORIES = Collections.unmodifiableList(Arrays.asList( - new JsonFileStructureFinderFactory(), + new NdJsonFileStructureFinderFactory(), new XmlFileStructureFinderFactory(), - // ND-JSON will often also be valid (although utterly weird) CSV, so JSON must come before CSV + // NDJSON will often also be valid (although utterly weird) CSV, so NDJSON must come before CSV new DelimitedFileStructureFinderFactory(',', '"', 2, false), new DelimitedFileStructureFinderFactory('\t', '"', 2, false), new DelimitedFileStructureFinderFactory(';', '"', 4, false), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java similarity index 83% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java index 8d58ef4e5ca8c..d7ba426d6a391 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java @@ -25,16 +25,16 @@ import static org.elasticsearch.common.xcontent.json.JsonXContent.jsonXContent; /** - * Really ND-JSON. + * Newline-delimited JSON. 
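// Editor's note (illustrative sample lines, not part of the patch): what the NDJSON finder
// accepts - exactly one complete, non-empty JSON object per line.
// {"message":"ok","status":200}   -> complete document, counts toward completeDocCount
// {}                              -> rejected ("an empty object was parsed")
// {"a":1} {"b":2}                 -> rejected (a line may contain only a single object)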
*/ -public class JsonFileStructureFinder implements FileStructureFinder { +public class NdJsonFileStructureFinder implements FileStructureFinder { private final List sampleMessages; private final FileStructure structure; - static JsonFileStructureFinder makeJsonFileStructureFinder(List explanation, String sample, String charsetName, - Boolean hasByteOrderMarker, FileStructureOverrides overrides, - TimeoutChecker timeoutChecker) throws IOException { + static NdJsonFileStructureFinder makeNdJsonFileStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker, FileStructureOverrides overrides, + TimeoutChecker timeoutChecker) throws IOException { List> sampleRecords = new ArrayList<>(); @@ -43,10 +43,10 @@ static JsonFileStructureFinder makeJsonFileStructureFinder(List explanat XContentParser parser = jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, sampleMessage); sampleRecords.add(parser.mapOrdered()); - timeoutChecker.check("JSON parsing"); + timeoutChecker.check("NDJSON parsing"); } - FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.JSON) + FileStructure.Builder structureBuilder = new FileStructure.Builder(FileStructure.Format.NDJSON) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(sampleMessages.stream().limit(2).collect(Collectors.joining("\n", "", "\n"))) @@ -84,10 +84,10 @@ static JsonFileStructureFinder makeJsonFileStructureFinder(List explanat .setExplanation(explanation) .build(); - return new JsonFileStructureFinder(sampleMessages, structure); + return new NdJsonFileStructureFinder(sampleMessages, structure); } - private JsonFileStructureFinder(List sampleMessages, FileStructure structure) { + private NdJsonFileStructureFinder(List sampleMessages, FileStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java similarity index 78% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java index e49f597a83c3b..43612890bc8a8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java @@ -17,15 +17,15 @@ import static org.elasticsearch.common.xcontent.json.JsonXContent.jsonXContent; -public class JsonFileStructureFinderFactory implements FileStructureFinderFactory { +public class NdJsonFileStructureFinderFactory implements FileStructureFinderFactory { @Override public boolean canFindFormat(FileStructure.Format format) { - return format == null || format == FileStructure.Format.JSON; + return format == null || format == FileStructure.Format.NDJSON; } /** - * This format matches if the sample consists of one or more JSON documents. + * This format matches if the sample consists of one or more NDJSON documents. * If there is more than one, they must be newline-delimited. 
The * documents must be non-empty, to prevent lines containing "{}" from matching. */ @@ -41,35 +41,35 @@ public boolean canCreateFromSample(List explanation, String sample) { DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new ContextPrintingStringReader(sampleLine))) { if (parser.map().isEmpty()) { - explanation.add("Not JSON because an empty object was parsed: [" + sampleLine + "]"); + explanation.add("Not NDJSON because an empty object was parsed: [" + sampleLine + "]"); return false; } ++completeDocCount; if (parser.nextToken() != null) { - explanation.add("Not newline delimited JSON because a line contained more than a single object: [" + + explanation.add("Not newline delimited NDJSON because a line contained more than a single object: [" + sampleLine + "]"); return false; } } } } catch (IOException | IllegalStateException e) { - explanation.add("Not JSON because there was a parsing exception: [" + e.getMessage().replaceAll("\\s?\r?\n\\s?", " ") + "]"); + explanation.add("Not NDJSON because there was a parsing exception: [" + e.getMessage().replaceAll("\\s?\r?\n\\s?", " ") + "]"); return false; } if (completeDocCount == 0) { - explanation.add("Not JSON because sample didn't contain a complete document"); + explanation.add("Not NDJSON because sample didn't contain a complete document"); return false; } - explanation.add("Deciding sample is newline delimited JSON"); + explanation.add("Deciding sample is newline delimited NDJSON"); return true; } @Override public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) throws IOException { - return JsonFileStructureFinder.makeJsonFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, overrides, + return NdJsonFileStructureFinder.makeNdJsonFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, overrides, timeoutChecker); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 1e97e98c42c3b..2e7a19bca1c09 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -83,6 +83,7 @@ public class JobManager extends AbstractComponent { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(JobManager.class)); + private final Settings settings; private final Environment environment; private final JobResultsProvider jobResultsProvider; private final ClusterService clusterService; @@ -98,7 +99,7 @@ public class JobManager extends AbstractComponent { public JobManager(Environment environment, Settings settings, JobResultsProvider jobResultsProvider, ClusterService clusterService, Auditor auditor, Client client, UpdateJobProcessNotifier updateJobProcessNotifier) { - super(settings); + this.settings = settings; this.environment = environment; this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); this.clusterService = Objects.requireNonNull(clusterService); @@ -490,7 +491,7 @@ public void revertSnapshot(RevertModelSnapshotAction.Request request, ActionList ModelSnapshot modelSnapshot) { final ModelSizeStats modelSizeStats = modelSnapshot.getModelSizeStats(); - final JobResultsPersister persister = new JobResultsPersister(settings, client); + final JobResultsPersister persister = new 
JobResultsPersister(client); // Step 3. After the model size stats is persisted, also persist the snapshot's quantiles and respond // ------- diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java index 6b871c074619e..29e98d01ca9ac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java @@ -5,16 +5,14 @@ */ package org.elasticsearch.xpack.ml.job; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; @@ -46,9 +44,9 @@ * will fetch the valid state of those external resources ensuring the process is * in sync. */ -public class UpdateJobProcessNotifier extends AbstractComponent { +public class UpdateJobProcessNotifier { - private static final Logger LOGGER = Loggers.getLogger(UpdateJobProcessNotifier.class); + private static final Logger logger = LogManager.getLogger(UpdateJobProcessNotifier.class); private final Client client; private final ClusterService clusterService; @@ -57,8 +55,7 @@ public class UpdateJobProcessNotifier extends AbstractComponent { private volatile ThreadPool.Cancellable cancellable; - public UpdateJobProcessNotifier(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool) { - super(settings); + public UpdateJobProcessNotifier(Client client, ClusterService clusterService, ThreadPool threadPool) { this.client = client; this.clusterService = clusterService; this.threadPool = threadPool; @@ -112,7 +109,7 @@ void executeProcessUpdates(Iterator updatesIterator) { if (update.isJobUpdate() && clusterService.localNode().isMasterNode() == false) { assert clusterService.localNode().isMasterNode(); - LOGGER.error("Job update was submitted to non-master node [" + clusterService.nodeName() + "]; update for job [" + logger.error("Job update was submitted to non-master node [" + clusterService.getNodeName() + "]; update for job [" + update.getJobId() + "] will be ignored"); executeProcessUpdates(updatesIterator); return; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java index a0e00ebf73353..01fa529234bf4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.categorization; -import org.elasticsearch.common.logging.Loggers; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.grok.Grok; import 
java.util.ArrayList; @@ -112,7 +112,7 @@ public static String findBestGrokMatchFromExamples(String jobId, String regex, C // We should never get here. If we do it implies a bug in the original categorization, // as it's produced a regex that doesn't match the examples. assert matcher.matches() : exampleProcessor.pattern() + " did not match " + example; - Loggers.getLogger(GrokPatternCreator.class).error("[{}] Pattern [{}] did not match example [{}]", jobId, + LogManager.getLogger(GrokPatternCreator.class).error("[{}] Pattern [{}] did not match example [{}]", jobId, exampleProcessor.pattern(), example); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java index e8655548592e1..371b7e05f6341 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.ml.job.persistence; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -29,7 +29,7 @@ * and iterate through them in batches. */ public abstract class BatchedDocumentsIterator { - private static final Logger LOGGER = Loggers.getLogger(BatchedDocumentsIterator.class); + private static final Logger LOGGER = LogManager.getLogger(BatchedDocumentsIterator.class); private static final String CONTEXT_ALIVE_DURATION = "5m"; private static final int BATCH_SIZE = 10000; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java index d3b66f47661f2..c0b8b4d93659b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -34,8 +33,7 @@ public class JobDataCountsPersister extends AbstractComponent { private final Client client; - public JobDataCountsPersister(Settings settings, Client client) { - super(settings); + public JobDataCountsPersister(Client client) { this.client = client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java index 1e0825d14f90a..400ca28d97419 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.persistence; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; @@ -14,7 +15,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -36,7 +36,7 @@ public class JobDataDeleter { - private static final Logger LOGGER = Loggers.getLogger(JobDataDeleter.class); + private static final Logger LOGGER = LogManager.getLogger(JobDataDeleter.class); private final Client client; private final String jobId; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java index f318737e0b474..5ee05feaebf7c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -49,8 +48,7 @@ public class JobRenormalizedResultsPersister extends AbstractComponent { private final Client client; private BulkRequest bulkRequest; - public JobRenormalizedResultsPersister(String jobId, Settings settings, Client client) { - super(settings); + public JobRenormalizedResultsPersister(String jobId, Client client) { this.jobId = jobId; this.client = client; bulkRequest = new BulkRequest(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index 233a2b4078ac7..782f1fc39ef24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -68,8 +67,7 @@ public class JobResultsPersister extends AbstractComponent { private final Client client; - public JobResultsPersister(Settings settings, Client client) { - super(settings); + public JobResultsPersister(Client client) { this.client = 
client; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index 9338d24dd68da..768d7bd5ec562 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.persistence; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; @@ -41,7 +42,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -129,7 +129,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class JobResultsProvider { - private static final Logger LOGGER = Loggers.getLogger(JobResultsProvider.class); + private static final Logger LOGGER = LogManager.getLogger(JobResultsProvider.class); private static final int RECORDS_SIZE_PARAM = 10000; public static final int BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE = 20; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java index a800ff95136e1..9a2c6a4938b2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.ml.job.persistence; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -33,7 +33,7 @@ */ public class StateStreamer { - private static final Logger LOGGER = Loggers.getLogger(StateStreamer.class); + private static final Logger LOGGER = LogManager.getLogger(StateStreamer.class); private final Client client; private volatile boolean isCancelled; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java index 0d2b1bb345a1b..1bc214e7b4025 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporter.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; @@ -52,10 +51,7 @@ public class DataCountsReporter extends AbstractComponent { private DataStreamDiagnostics diagnostics; - public DataCountsReporter(Settings settings, Job job, DataCounts counts, JobDataCountsPersister dataCountsPersister) { - - super(settings); - + public DataCountsReporter(Job job, DataCounts counts, JobDataCountsPersister dataCountsPersister) { this.job = job; this.dataCountsPersister = dataCountsPersister; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index 3f93d46b72737..c58e0e177a6b0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -58,7 +58,7 @@ public class AutodetectCommunicator implements Closeable { - private static final Logger LOGGER = Loggers.getLogger(AutodetectCommunicator.class); + private static final Logger LOGGER = LogManager.getLogger(AutodetectCommunicator.class); private static final Duration FLUSH_PROCESS_CHECK_FREQUENCY = Duration.ofSeconds(1); private final Job job; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 8dbc13038c7f7..887ea5262aeb2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -108,6 +108,7 @@ public class AutodetectProcessManager extends AbstractComponent { public static final Setting MIN_DISK_SPACE_OFF_HEAP = Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Property.NodeScope); + private final Settings settings; private final Client client; private final Environment environment; private final ThreadPool threadPool; @@ -136,7 +137,7 @@ public AutodetectProcessManager(Environment environment, Settings settings, Clie JobDataCountsPersister jobDataCountsPersister, AutodetectProcessFactory autodetectProcessFactory, NormalizerFactory normalizerFactory, NamedXContentRegistry xContentRegistry, Auditor auditor) { - super(settings); + this.settings = settings; this.environment = environment; this.client = client; this.threadPool = threadPool; @@ -493,12 +494,9 @@ AutodetectCommunicator create(JobTask jobTask, AutodetectParams autodetectParams 
Job job = jobManager.getJobOrThrowIfUnknown(jobId); // A TP with no queue, so that we fail immediately if there are no threads available ExecutorService autoDetectExecutorService = threadPool.executor(MachineLearning.AUTODETECT_THREAD_POOL_NAME); - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, - job, - autodetectParams.dataCounts(), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, autodetectParams.dataCounts(), jobDataCountsPersister); ScoresUpdater scoresUpdater = new ScoresUpdater(job, jobResultsProvider, - new JobRenormalizedResultsPersister(job.getId(), settings, client), normalizerFactory); + new JobRenormalizedResultsPersister(job.getId(), client), normalizerFactory); ExecutorService renormalizerExecutorService = threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME); Renormalizer renormalizer = new ShortCircuitingRenormalizer(jobId, scoresUpdater, renormalizerExecutorService); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java index ea31c5de4dffa..3185ebc6f1c7d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java @@ -5,10 +5,10 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.internal.io.IOUtils; @@ -33,7 +33,7 @@ public class NativeAutodetectProcessFactory implements AutodetectProcessFactory { - private static final Logger LOGGER = Loggers.getLogger(NativeAutodetectProcessFactory.class); + private static final Logger LOGGER = LogManager.getLogger(NativeAutodetectProcessFactory.class); private static final NamedPipeHelper NAMED_PIPE_HELPER = new NamedPipeHelper(); public static final Duration PROCESS_STARTUP_TIMEOUT = Duration.ofSeconds(10); @@ -68,7 +68,7 @@ public AutodetectProcess createAutodetectProcess(Job job, int numberOfFields = job.allInputFields().size() + (includeTokensField ? 
1 : 0) + 1; AutodetectStateProcessor stateProcessor = new AutodetectStateProcessor(client, job.getId()); - AutodetectResultsParser resultsParser = new AutodetectResultsParser(settings); + AutodetectResultsParser resultsParser = new AutodetectResultsParser(); NativeAutodetectProcess autodetect = new NativeAutodetectProcess( job.getId(), processPipes.getLogStream().get(), processPipes.getProcessInStream().get(), processPipes.getProcessOutStream().get(), processPipes.getRestoreStream().orElse(null), numberOfFields, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java index 474968adc932f..8771712dfde13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.ml.action.TransportOpenJobAction.JobTask; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -21,7 +21,7 @@ */ final class ProcessContext { - private static final Logger LOGGER = Loggers.getLogger(ProcessContext.class); + private static final Logger LOGGER = LogManager.getLogger(ProcessContext.class); private final ReentrantLock lock = new ReentrantLock(); private final JobTask jobTask; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java index b86ec4de8257f..aa045b6f80696 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.output; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -74,7 +74,7 @@ */ public class AutoDetectResultProcessor { - private static final Logger LOGGER = Loggers.getLogger(AutoDetectResultProcessor.class); + private static final Logger LOGGER = LogManager.getLogger(AutoDetectResultProcessor.class); /** * This is how far behind real-time we'll update the job with the latest established model memory. 
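// Editor's note (minimal sketch, not part of the patch): the mechanical migration repeated
// throughout this diff, shown once in isolation. Loggers.getLogger(Class) and the
// Settings-carrying AbstractComponent constructor are replaced by the plain log4j2 API.
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class SomeMlComponent { // hypothetical class name
    private static final Logger LOGGER = LogManager.getLogger(SomeMlComponent.class);
    // before: Loggers.getLogger(SomeMlComponent.class), plus super(settings) in the constructor
}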
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParser.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParser.java index d4f9c431f7bdf..8d7e64c3d45d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParser.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParser.java @@ -7,7 +7,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -27,11 +26,6 @@ * start array symbol and the data must be terminated with the end array symbol. */ public class AutodetectResultsParser extends AbstractComponent { - - public AutodetectResultsParser(Settings settings) { - super(settings); - } - public Iterator<AutodetectResult> parseResults(InputStream in) throws ElasticsearchParseException { try { XContentParser parser = XContentFactory.xContent(XContentType.JSON) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriter.java index 8734a88336876..887b43d0b5927 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvDataToProcessWriter.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.writer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -41,7 +41,7 @@ */ class CsvDataToProcessWriter extends AbstractDataToProcessWriter { - private static final Logger LOGGER = Loggers.getLogger(CsvDataToProcessWriter.class); + private static final Logger LOGGER = LogManager.getLogger(CsvDataToProcessWriter.class); /** * Maximum number of lines allowed within a single CSV record. 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java index 15a162c6a3c00..92fe2c3b0b50a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.writer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -37,7 +37,7 @@ */ class JsonDataToProcessWriter extends AbstractDataToProcessWriter { - private static final Logger LOGGER = Loggers.getLogger(JsonDataToProcessWriter.class); + private static final Logger LOGGER = LogManager.getLogger(JsonDataToProcessWriter.class); private NamedXContentRegistry xContentRegistry; JsonDataToProcessWriter(boolean includeControlField, boolean includeTokensField, AutodetectProcess autodetectProcess, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java index a225587d0bb75..f491f90c9bb08 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/diagnostics/DataStreamDiagnostics.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.process.diagnostics; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; @@ -23,7 +23,7 @@ public class DataStreamDiagnostics { */ private static final int DATA_SPARSITY_THRESHOLD = 2; - private static final Logger LOGGER = Loggers.getLogger(DataStreamDiagnostics.class); + private static final Logger LOGGER = LogManager.getLogger(DataStreamDiagnostics.class); private final BucketDiagnostics bucketDiagnostics; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java index 5d320a1bd715c..652d575b07ab0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java @@ -27,7 +27,6 @@ * - It can be used to produce results in testing that do not vary based on changes to the real normalization algorithms */ public class MultiplyingNormalizerProcess implements NormalizerProcess { - private static final Logger LOGGER = LogManager.getLogger(MultiplyingNormalizerProcess.class); private final double factor; diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizer.java index 74eb01987c562..755e3da102f63 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/Normalizer.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler; import java.io.IOException; @@ -29,7 +29,7 @@ * and in exactly the same order as the inputs. */ public class Normalizer { - private static final Logger LOGGER = Loggers.getLogger(Normalizer.class); + private static final Logger LOGGER = LogManager.getLogger(Normalizer.class); private final String jobId; private final NormalizerProcessFactory processFactory; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdater.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdater.java index c2ef2fab7f8a4..47ab2364db67e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdater.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ScoresUpdater.java @@ -5,16 +5,16 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.job.persistence.BatchedDocumentsIterator; -import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.persistence.JobRenormalizedResultsPersister; +import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import java.util.ArrayList; import java.util.Deque; @@ -27,7 +27,7 @@ * with the renormalized scores */ public class ScoresUpdater { - private static final Logger LOGGER = Loggers.getLogger(ScoresUpdater.class); + private static final Logger LOGGER = LogManager.getLogger(ScoresUpdater.class); /** * Target number of buckets to renormalize at a time diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ShortCircuitingRenormalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ShortCircuitingRenormalizer.java index 7db66387db8e0..0bd5a11609d6d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ShortCircuitingRenormalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/ShortCircuitingRenormalizer.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; import java.util.Deque; @@ -21,7 +21,7 @@ */ public class ShortCircuitingRenormalizer implements Renormalizer { - private static final Logger LOGGER = Loggers.getLogger(ShortCircuitingRenormalizer.class); + private static final Logger LOGGER = LogManager.getLogger(ShortCircuitingRenormalizer.class); private final String jobId; private final ScoresUpdater scoresUpdater; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index bb14a5fa1bdee..981d257afa1a0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -13,7 +14,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -55,7 +55,7 @@ */ public class ExpiredForecastsRemover implements MlDataRemover { - private static final Logger LOGGER = Loggers.getLogger(ExpiredForecastsRemover.class); + private static final Logger LOGGER = LogManager.getLogger(ExpiredForecastsRemover.class); private static final int MAX_FORECASTS = 10000; private static final String RESULTS_INDEX_PATTERN = AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 47a10a8aea381..15aadb0347cd7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -15,7 +16,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; @@ -45,7 +45,7 @@ */ public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover { - private static final Logger LOGGER = Loggers.getLogger(ExpiredModelSnapshotsRemover.class); + private static final Logger LOGGER = LogManager.getLogger(ExpiredModelSnapshotsRemover.class); /** * The max number of snapshots to fetch 
per job. It is set to 10K, the default for an index as diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index c882c90116880..8e6e27ab4a228 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -5,12 +5,12 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -43,7 +43,7 @@ */ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { - private static final Logger LOGGER = Loggers.getLogger(ExpiredResultsRemover.class); + private static final Logger LOGGER = LogManager.getLogger(ExpiredResultsRemover.class); private final Client client; private final Auditor auditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java index fd4085d202041..ea094dfe6b4f1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.retention; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -12,7 +13,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -35,7 +35,7 @@ */ public class UnusedStateRemover implements MlDataRemover { - private static final Logger LOGGER = Loggers.getLogger(UnusedStateRemover.class); + private static final Logger LOGGER = LogManager.getLogger(UnusedStateRemover.class); private final Client client; private final ClusterService clusterService; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java index 458ad2a8fa3e1..addf478708a9e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.ml.notifications; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; 
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -27,7 +27,7 @@ public class Auditor { - private static final Logger LOGGER = Loggers.getLogger(Auditor.class); + private static final Logger LOGGER = LogManager.getLogger(Auditor.class); private final Client client; private final String nodeName; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java index 747074028953c..721e07721e32d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.ml.process; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.ml.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; @@ -27,8 +27,8 @@ /** * Maintains the connection to the native controller daemon that can start other processes. */ public class NativeController { - private static final Logger LOGGER = Loggers.getLogger(NativeController.class); + private static final Logger LOGGER = LogManager.getLogger(NativeController.class); /** * Process controller native program name diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java index 9670fadfefff3..1f9aee7bbeea3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.ml.process; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; @@ -21,7 +21,7 @@ */ public class NativeStorageProvider { - private static final Logger LOGGER = Loggers.getLogger(NativeStorageProvider.class); + private static final Logger LOGGER = LogManager.getLogger(NativeStorageProvider.class); private static final String LOCAL_STORAGE_SUBFOLDER = "ml-local-data"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java index 341b9ae371b82..01638d7c68fc0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.process.logging; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; @@ -13,7 +14,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -47,7 +47,7 @@ */ public class CppLogMessageHandler implements Closeable { - private static final Logger LOGGER = Loggers.getLogger(CppLogMessageHandler.class); + private static final Logger LOGGER = LogManager.getLogger(CppLogMessageHandler.class); private static final int DEFAULT_READBUF_SIZE = 1024; private static final int DEFAULT_ERROR_STORE_SIZE = 5; private static final long MAX_MESSAGE_INTERVAL_SECONDS = 10; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java index 485556d8441ea..f1c1144d1f391 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java @@ -5,15 +5,29 @@ */ package org.elasticsearch.xpack.ml; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.monitoring.Monitoring; import org.elasticsearch.xpack.security.Security; import java.nio.file.Path; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; public class LocalStateMachineLearning extends LocalStateCompositeXPackPlugin { @@ -50,6 +64,38 @@ protected XPackLicenseState getLicenseState() { @Override protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); } }); + plugins.add(new MockedRollupPlugin()); } -} + /** + * This is only required because the GetRollupIndexCapsAction must now be a valid action in our node. + * The MachineLearningLicenseTests attempt to create a datafeed referencing this LocalStateMachineLearning object. + * Consequently, we need to be able to handle this rollup action (the response does not matter), + * as the datafeed extractor now depends on it. 
+ */ + public static class MockedRollupPlugin extends Plugin implements ActionPlugin { + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return Collections.singletonList( + new ActionHandler<>(GetRollupIndexCapsAction.INSTANCE, MockedRollupIndexCapsTransport.class) + ); + } + + public static class MockedRollupIndexCapsTransport + extends TransportAction<GetRollupIndexCapsAction.Request, GetRollupIndexCapsAction.Response> { + + @Inject + public MockedRollupIndexCapsTransport(TransportService transportService) { + super(GetRollupIndexCapsAction.NAME, new ActionFilters(new HashSet<>()), transportService.getTaskManager()); + } + + @Override + protected void doExecute(Task task, + GetRollupIndexCapsAction.Request request, + ActionListener<GetRollupIndexCapsAction.Response> listener) { + listener.onResponse(new GetRollupIndexCapsAction.Response()); + } + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java index 3055dc2bb37f9..7cd0d3cf00817 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -35,7 +34,7 @@ public class MlAssignmentNotifierTests extends ESTestCase { public void testClusterChanged_info() throws Exception { Auditor auditor = mock(Auditor.class); ClusterService clusterService = mock(ClusterService.class); - MlAssignmentNotifier notifier = new MlAssignmentNotifier(Settings.EMPTY, auditor, clusterService); + MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, clusterService); notifier.onMaster(); DiscoveryNode node = @@ -63,7 +62,7 @@ public void testClusterChanged_info() throws Exception { public void testClusterChanged_warning() throws Exception { Auditor auditor = mock(Auditor.class); ClusterService clusterService = mock(ClusterService.class); - MlAssignmentNotifier notifier = new MlAssignmentNotifier(Settings.EMPTY, auditor, clusterService); + MlAssignmentNotifier notifier = new MlAssignmentNotifier(auditor, clusterService); notifier.onMaster(); ClusterState previous = ClusterState.builder(new ClusterName("_name")) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index c7f50440f0e54..5ded1b205a110 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -62,7 +61,7 @@ public void setUpMocks() { } public void testInitialize() { - MlInitializationService initializationService = new 
MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); + MlInitializationService initializationService = new MlInitializationService(threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) .nodes(DiscoveryNodes.builder() @@ -77,7 +76,7 @@ public void testInitialize() { } public void testInitialize_noMasterNode() { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); + MlInitializationService initializationService = new MlInitializationService(threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) .nodes(DiscoveryNodes.builder() @@ -90,7 +89,7 @@ public void testInitialize_noMasterNode() { } public void testInitialize_alreadyInitialized() { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); + MlInitializationService initializationService = new MlInitializationService(threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) .nodes(DiscoveryNodes.builder() @@ -108,7 +107,7 @@ public void testInitialize_alreadyInitialized() { } public void testNodeGoesFromMasterToNonMasterAndBack() { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); + MlInitializationService initializationService = new MlInitializationService(threadPool, clusterService, client); MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index 98b84ed81137b..34072184ac7a5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -14,7 +14,9 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; @@ -27,9 +29,6 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; @@ -263,9 +262,8 @@ public void testDoExecute_whenNothingToClose() { ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); - TransportCloseJobAction transportAction = new 
TransportCloseJobAction(Settings.EMPTY, - mock(TransportService.class), mock(ThreadPool.class), mock(ActionFilters.class), - clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksService.class)); + TransportCloseJobAction transportAction = new TransportCloseJobAction(mock(TransportService.class), mock(ThreadPool.class), + mock(ActionFilters.class), clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksService.class)); AtomicBoolean gotResponse = new AtomicBoolean(false); CloseJobAction.Request request = new Request("foo"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 4b8ad1d08aed3..ad5d2dbe5efab 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -57,7 +56,7 @@ public class DatafeedNodeSelectorTests extends ESTestCase { @Before public void init() { - resolver = new IndexNameExpressionResolver(Settings.EMPTY); + resolver = new IndexNameExpressionResolver(); nodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node_name", "node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 11ff693bad7ed..9e229e2b057f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -14,23 +14,39 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; +import org.elasticsearch.xpack.core.rollup.action.RollableIndexCaps; +import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import 
org.elasticsearch.xpack.core.rollup.job.MetricConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.AggregationDataExtractorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.chunked.ChunkedDataExtractorFactory; +import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.RollupDataExtractorFactory; import org.elasticsearch.xpack.ml.datafeed.extractor.scroll.ScrollDataExtractorFactory; -import org.elasticsearch.xpack.core.ml.job.config.DataDescription; -import org.elasticsearch.xpack.core.ml.job.config.Job; import org.junit.Before; +import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Matchers.same; @@ -41,6 +57,7 @@ public class DataExtractorFactoryTests extends ESTestCase { private FieldCapabilitiesResponse fieldsCapabilities; + private GetRollupIndexCapsAction.Response getRollupIndexResponse; private Client client; @@ -54,12 +71,22 @@ public void setUpTests() { givenAggregatableField("time", "date"); givenAggregatableField("field", "keyword"); + getRollupIndexResponse = mock(GetRollupIndexCapsAction.Response.class); + when(getRollupIndexResponse.getJobs()).thenReturn(new HashMap<>()); + doAnswer(invocationMock -> { @SuppressWarnings("rawtypes") ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; listener.onResponse(fieldsCapabilities); return null; }).when(client).execute(same(FieldCapabilitiesAction.INSTANCE), any(), any()); + + doAnswer(invocationMock -> { + @SuppressWarnings("rawtypes") + ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(getRollupIndexResponse); + return null; + }).when(client).execute(same(GetRollupIndexCapsAction.INSTANCE), any(), any()); } public void testCreateDataExtractorFactoryGivenDefaultScroll() { @@ -165,6 +192,162 @@ public void testCreateDataExtractorFactoryGivenDefaultAggregationWithAutoChunk() DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); } + public void testCreateDataExtractorFactoryGivenRollupAndValidAggregation() { + givenAggregatableRollup("myField", "max", 5, "termField"); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); + TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); + datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); + ActionListener<DataExtractorFactory> listener = 
ActionListener.wrap( + dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(RollupDataExtractorFactory.class)), + e -> fail() + ); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + public void testCreateDataExtractorFactoryGivenRollupAndValidAggregationAndAutoChunk() { + givenAggregatableRollup("myField", "max", 5, "termField"); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newAuto()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); + TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); + datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); + ActionListener<DataExtractorFactory> listener = ActionListener.wrap( + dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(ChunkedDataExtractorFactory.class)), + e -> fail() + ); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + public void testCreateDataExtractorFactoryGivenRollupButNoAggregations() { + givenAggregatableRollup("myField", "max", 5); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); + + ActionListener<DataExtractorFactory> listener = ActionListener.wrap( + dataExtractorFactory -> fail(), + e -> { + assertThat(e.getMessage(), equalTo("Aggregations are required when using Rollup indices")); + assertThat(e, instanceOf(IllegalArgumentException.class)); + } + ); + + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + public void testCreateDataExtractorFactoryGivenRollupWithBadInterval() { + givenAggregatableRollup("myField", "max", 7, "termField"); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); + TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); + datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); + ActionListener<DataExtractorFactory> listener = ActionListener.wrap( + 
dataExtractorFactory -> fail(), + e -> { + assertThat(e.getMessage(), + containsString("Rollup capabilities do not have a [date_histogram] aggregation with an interval " + + "that is a multiple of the datafeed's interval.")); + assertThat(e, instanceOf(IllegalArgumentException.class)); + } + ); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + public void testCreateDataExtractorFactoryGivenRollupMissingTerms() { + givenAggregatableRollup("myField", "max", 5); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); + TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); + datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); + ActionListener<DataExtractorFactory> listener = ActionListener.wrap( + dataExtractorFactory -> fail(), + e -> { + assertThat(e.getMessage(), + containsString("Rollup capabilities do not support all the datafeed aggregations at the desired interval.")); + assertThat(e, instanceOf(IllegalArgumentException.class)); + } + ); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + public void testCreateDataExtractorFactoryGivenRollupMissingMetric() { + givenAggregatableRollup("myField", "max", 5, "termField"); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = DatafeedManagerTests.createDatafeedJob(); + jobBuilder.setDataDescription(dataDescription); + DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); + datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("otherField"); + TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); + datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); + ActionListener<DataExtractorFactory> listener = ActionListener.wrap( + dataExtractorFactory -> fail(), + e -> { + assertThat(e.getMessage(), + containsString("Rollup capabilities do not support all the datafeed aggregations at the desired interval.")); + assertThat(e, instanceOf(IllegalArgumentException.class)); + } + ); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + } + + private void givenAggregatableRollup(String field, String type, int minuteInterval, String... 
groupByTerms) { + List<MetricConfig> metricConfigs = Arrays.asList(new MetricConfig(field, Collections.singletonList(type)), + new MetricConfig("time", Arrays.asList("min", "max"))); + TermsGroupConfig termsGroupConfig = null; + if (groupByTerms.length > 0) { + termsGroupConfig = new TermsGroupConfig(groupByTerms); + } + RollupJobConfig rollupJobConfig = new RollupJobConfig("rollupJob1", + "myIndexes*", + "myIndex_rollup", + "*/30 * * * * ?", + 300, + new GroupConfig( + new DateHistogramGroupConfig("time", DateHistogramInterval.minutes(minuteInterval)), null, termsGroupConfig), + metricConfigs, + null); + RollupJobCaps rollupJobCaps = new RollupJobCaps(rollupJobConfig); + RollableIndexCaps rollableIndexCaps = new RollableIndexCaps("myIndex_rollup", Collections.singletonList(rollupJobCaps)); + Map<String, RollableIndexCaps> jobs = new HashMap<>(1); + jobs.put("rollupJob1", rollableIndexCaps); + when(getRollupIndexResponse.getJobs()).thenReturn(jobs); + } + private void givenAggregatableField(String field, String type) { FieldCapabilities fieldCaps = mock(FieldCapabilities.class); when(fieldCaps.isSearchable()).thenReturn(true); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java index 93bdc1258905d..6561cfd56e23b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorTests.java @@ -46,7 +46,7 @@ public class AggregationDataExtractorTests extends ESTestCase { - private Client client; + private Client testClient; private List<SearchRequestBuilder> capturedSearchRequests; private String jobId; private String timeField; @@ -61,7 +61,7 @@ private class TestDataExtractor extends AggregationDataExtractor { private SearchResponse nextResponse; TestDataExtractor(long start, long end) { - super(client, createContext(start, end)); + super(testClient, createContext(start, end)); } @Override @@ -77,7 +77,7 @@ void setNextResponse(SearchResponse searchResponse) { @Before public void setUpTests() { - client = mock(Client.class); + testClient = mock(Client.class); capturedSearchRequests = new ArrayList<>(); jobId = "test-job"; timeField = "time"; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index e85b1e3a6dfd2..4f0d3ec8e9f71 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -5,7 +5,8 @@ */ package org.elasticsearch.xpack.ml.datafeed.extractor.chunked; -import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; @@ -44,7 +45,7 @@ public class ChunkedDataExtractorTests extends ESTestCase { private Client client; - private List<SearchRequestBuilder> capturedSearchRequests; + private List<SearchRequest> capturedSearchRequests; private String 
jobId; private String timeField; private List<String> types; @@ -62,9 +63,13 @@ private class TestDataExtractor extends ChunkedDataExtractor { super(client, dataExtractorFactory, createContext(start, end)); } + TestDataExtractor(long start, long end, boolean hasAggregations, Long histogramInterval) { + super(client, dataExtractorFactory, createContext(start, end, hasAggregations, histogramInterval)); + } + @Override - protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - capturedSearchRequests.add(searchRequestBuilder); + protected SearchResponse executeSearchRequest(ActionRequestBuilder<SearchRequest, SearchResponse> searchRequestBuilder) { + capturedSearchRequests.add(searchRequestBuilder.request()); return nextResponse; } @@ -136,6 +141,89 @@ public void testExtractionGivenSpecifiedChunk() throws IOException { assertThat(searchRequest, not(containsString("\"sort\""))); } + public void testExtractionGivenSpecifiedChunkAndAggs() throws IOException { + chunkSpan = TimeValue.timeValueSeconds(1); + TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L, true, 1000L); + extractor.setNextResponse(createSearchResponse(0L, 1000L, 2200L)); + + InputStream inputStream1 = mock(InputStream.class); + InputStream inputStream2 = mock(InputStream.class); + InputStream inputStream3 = mock(InputStream.class); + + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1, inputStream2); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); + + DataExtractor subExtractor2 = new StubSubExtractor(inputStream3); + when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtractor2); + + assertThat(extractor.hasNext(), is(true)); + assertEquals(inputStream1, extractor.next().get()); + assertThat(extractor.hasNext(), is(true)); + assertEquals(inputStream2, extractor.next().get()); + assertThat(extractor.hasNext(), is(true)); + assertEquals(inputStream3, extractor.next().get()); + assertThat(extractor.hasNext(), is(true)); + assertThat(extractor.next().isPresent(), is(false)); + + verify(dataExtractorFactory).newExtractor(1000L, 2000L); + verify(dataExtractorFactory).newExtractor(2000L, 2300L); + Mockito.verifyNoMoreInteractions(dataExtractorFactory); + + assertThat(capturedSearchRequests.size(), equalTo(1)); + String searchRequest = capturedSearchRequests.get(0).toString().replaceAll("\\s", ""); + assertThat(searchRequest, containsString("\"size\":0")); + assertThat(searchRequest, containsString("\"query\":{\"bool\":{\"filter\":[{\"match_all\":{\"boost\":1.0}}," + + "{\"range\":{\"time\":{\"from\":1000,\"to\":2300,\"include_lower\":true,\"include_upper\":false," + + "\"format\":\"epoch_millis\",\"boost\":1.0}}}]")); + assertThat(searchRequest, containsString("\"aggregations\":{\"earliest_time\":{\"min\":{\"field\":\"time\"}}," + + "\"latest_time\":{\"max\":{\"field\":\"time\"}}}}")); + assertThat(searchRequest, not(containsString("\"sort\""))); + } + + public void testExtractionGivenAutoChunkAndAggs() throws IOException { + chunkSpan = null; + TestDataExtractor extractor = new TestDataExtractor(100_000L, 450_000L, true, 200L); + + extractor.setNextResponse(createSearchResponse(0L, 100_000L, 400_000L)); + + InputStream inputStream1 = mock(InputStream.class); + InputStream inputStream2 = mock(InputStream.class); + + // 200 * 1_000 == 200_000 + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtractor1); + + DataExtractor subExtractor2 = new StubSubExtractor(inputStream2); + 
when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtractor2); + + assertThat(extractor.hasNext(), is(true)); + assertEquals(inputStream1, extractor.next().get()); + assertThat(extractor.hasNext(), is(true)); + assertEquals(inputStream2, extractor.next().get()); + assertThat(extractor.next().isPresent(), is(false)); + assertThat(extractor.hasNext(), is(false)); + + verify(dataExtractorFactory).newExtractor(100_000L, 300_000L); + verify(dataExtractorFactory).newExtractor(300_000L, 450_000L); + Mockito.verifyNoMoreInteractions(dataExtractorFactory); + + assertThat(capturedSearchRequests.size(), equalTo(1)); + } + + public void testExtractionGivenAutoChunkAndAggsAndNoData() throws IOException { + chunkSpan = null; + TestDataExtractor extractor = new TestDataExtractor(100L, 500L, true, 200L); + + extractor.setNextResponse(createNullSearchResponse()); + + assertThat(extractor.next().isPresent(), is(false)); + assertThat(extractor.hasNext(), is(false)); + + Mockito.verifyNoMoreInteractions(dataExtractorFactory); + + assertThat(capturedSearchRequests.size(), equalTo(1)); + } + public void testExtractionGivenAutoChunkAndScrollSize1000() throws IOException { chunkSpan = null; scrollSize = 1000; @@ -430,6 +518,27 @@ private SearchResponse createSearchResponse(long totalHits, long earliestTime, l return searchResponse; } + private SearchResponse createNullSearchResponse() { + SearchResponse searchResponse = mock(SearchResponse.class); + when(searchResponse.status()).thenReturn(RestStatus.OK); + SearchHit[] hits = new SearchHit[0]; + SearchHits searchHits = new SearchHits(hits, 0, 1); + when(searchResponse.getHits()).thenReturn(searchHits); + + List<Aggregation> aggs = new ArrayList<>(); + Min min = mock(Min.class); + when(min.getValue()).thenReturn(Double.POSITIVE_INFINITY); + when(min.getName()).thenReturn("earliest_time"); + aggs.add(min); + Max max = mock(Max.class); + when(max.getValue()).thenReturn(Double.POSITIVE_INFINITY); + when(max.getName()).thenReturn("latest_time"); + aggs.add(max); + Aggregations aggregations = new Aggregations(aggs) {}; + when(searchResponse.getAggregations()).thenReturn(aggregations); + return searchResponse; + } + private SearchResponse createErrorResponse() { SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.status()).thenReturn(RestStatus.INTERNAL_SERVER_ERROR); @@ -445,8 +554,12 @@ private SearchResponse createResponseWithShardFailures() { } private ChunkedDataExtractorContext createContext(long start, long end) { + return createContext(start, end, false, null); + } + + private ChunkedDataExtractorContext createContext(long start, long end, boolean hasAggregations, Long histogramInterval) { return new ChunkedDataExtractorContext(jobId, timeField, indices, types, query, scrollSize, start, end, chunkSpan, - ChunkedDataExtractorFactory.newIdentityTimeAligner(), Collections.emptyMap()); + ChunkedDataExtractorFactory.newIdentityTimeAligner(), Collections.emptyMap(), hasAggregations, histogramInterval); } private static class StubSubExtractor implements DataExtractor { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java similarity index 89% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldTests.java rename to 
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java index d2e13368d0d29..1e5e6fa652db1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.datafeed.extractor.scroll; +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; import org.joda.time.DateTime; @@ -140,4 +142,14 @@ public void testAliasVersusName() { assertThat(field.getName(), equalTo("b")); assertThat(field.value(hit), equalTo(new Integer[] { 2 })); } + + public void testGetDocValueFormat() { + for (ExtractedField.ExtractionMethod method : ExtractedField.ExtractionMethod.values()) { + assertThat(ExtractedField.newField("f", method).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); + } + assertThat(ExtractedField.newTimeField("doc_value_time", ExtractedField.ExtractionMethod.DOC_VALUE).getDocValueFormat(), + equalTo("epoch_millis")); + assertThat(ExtractedField.newTimeField("source_time", ExtractedField.ExtractionMethod.SCRIPT_FIELD).getDocValueFormat(), + equalTo("epoch_millis")); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java new file mode 100644 index 0000000000000..222531141364a --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; + +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ExtractedFieldsTests extends ESTestCase { + + public void testAllTypesOfFields() { + ExtractedField docValue1 = ExtractedField.newField("doc1", ExtractedField.ExtractionMethod.DOC_VALUE); + ExtractedField docValue2 = ExtractedField.newField("doc2", ExtractedField.ExtractionMethod.DOC_VALUE); + ExtractedField scriptField1 = ExtractedField.newField("scripted1", ExtractedField.ExtractionMethod.SCRIPT_FIELD); + ExtractedField scriptField2 = ExtractedField.newField("scripted2", ExtractedField.ExtractionMethod.SCRIPT_FIELD); + ExtractedField sourceField1 = ExtractedField.newField("src1", ExtractedField.ExtractionMethod.SOURCE); + ExtractedField sourceField2 = ExtractedField.newField("src2", ExtractedField.ExtractionMethod.SOURCE); + ExtractedFields extractedFields = new ExtractedFields(Arrays.asList( + docValue1, docValue2, scriptField1, scriptField2, sourceField1, sourceField2)); + + assertThat(extractedFields.getAllFields().size(), equalTo(6)); + assertThat(extractedFields.getDocValueFields().stream().map(ExtractedField::getName).toArray(String[]::new), + equalTo(new String[] {"doc1", "doc2"})); + assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"src1", "src2"})); + } + + public void testBuildGivenMixtureOfTypes() { + Map<String, FieldCapabilities> timeCaps = new HashMap<>(); + timeCaps.put("date", createFieldCaps(true)); + Map<String, FieldCapabilities> valueCaps = new HashMap<>(); + valueCaps.put("float", createFieldCaps(true)); + valueCaps.put("keyword", createFieldCaps(true)); + Map<String, FieldCapabilities> airlineCaps = new HashMap<>(); + airlineCaps.put("text", createFieldCaps(false)); + FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); + when(fieldCapabilitiesResponse.getField("time")).thenReturn(timeCaps); + when(fieldCapabilitiesResponse.getField("value")).thenReturn(valueCaps); + when(fieldCapabilitiesResponse.getField("airline")).thenReturn(airlineCaps); + + ExtractedFields extractedFields = ExtractedFields.build(Arrays.asList("time", "value", "airline", "airport"), + new HashSet<>(Collections.singletonList("airport")), fieldCapabilitiesResponse); + + assertThat(extractedFields.getDocValueFields().size(), equalTo(2)); + assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("time")); + assertThat(extractedFields.getDocValueFields().get(0).getDocValueFormat(), equalTo("epoch_millis")); + assertThat(extractedFields.getDocValueFields().get(1).getName(), equalTo("value")); + assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); + assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"airline"})); + 
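+        // The scripted "airport" field is fetched via a script field rather than from doc values or _source, but it still counts towards getAllFields() below.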
assertThat(extractedFields.getAllFields().size(), equalTo(4)); + } + + public void testBuildGivenMultiFields() { + Job.Builder jobBuilder = new Job.Builder("foo"); + jobBuilder.setDataDescription(new DataDescription.Builder()); + Detector.Builder detector = new Detector.Builder("count", null); + detector.setByFieldName("airline.text"); + detector.setOverFieldName("airport.keyword"); + jobBuilder.setAnalysisConfig(new AnalysisConfig.Builder(Collections.singletonList(detector.build()))); + + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("feed", jobBuilder.getId()); + datafeedBuilder.setIndices(Collections.singletonList("foo")); + + Map<String, FieldCapabilities> text = new HashMap<>(); + text.put("text", createFieldCaps(false)); + Map<String, FieldCapabilities> keyword = new HashMap<>(); + keyword.put("keyword", createFieldCaps(true)); + FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); + when(fieldCapabilitiesResponse.getField("airline")).thenReturn(text); + when(fieldCapabilitiesResponse.getField("airline.text")).thenReturn(text); + when(fieldCapabilitiesResponse.getField("airport")).thenReturn(text); + when(fieldCapabilitiesResponse.getField("airport.keyword")).thenReturn(keyword); + + ExtractedFields extractedFields = ExtractedFields.build(Arrays.asList("airline.text", "airport.keyword"), + Collections.emptySet(), fieldCapabilitiesResponse); + + assertThat(extractedFields.getDocValueFields().size(), equalTo(1)); + assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("airport.keyword")); + assertThat(extractedFields.getSourceFields().length, equalTo(1)); + assertThat(extractedFields.getSourceFields()[0], equalTo("airline")); + assertThat(extractedFields.getAllFields().size(), equalTo(2)); + + assertThat(extractedFields.getAllFields().stream().filter(f -> f.getName().equals("airport.keyword")).findFirst().get().getAlias(), + equalTo("airport.keyword")); + assertThat(extractedFields.getAllFields().stream().filter(f -> f.getName().equals("airline")).findFirst().get().getAlias(), + equalTo("airline.text")); + } + + public void testBuildGivenFieldWithoutMappings() { + FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ExtractedFields.build( + Collections.singletonList("value"), Collections.emptySet(), fieldCapabilitiesResponse)); + assertThat(e.getMessage(), equalTo("cannot retrieve field [value] because it has no mappings")); + } + + private static FieldCapabilities createFieldCaps(boolean isAggregatable) { + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(isAggregatable); + return fieldCaps; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java similarity index 73% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldsTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java index 7e98dd417cd81..5e388afad282a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ExtractedFieldsTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java @@ -3,14 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.datafeed.extractor.scroll; +package org.elasticsearch.xpack.ml.datafeed.extractor.fields; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -30,20 +29,21 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class ExtractedFieldsTests extends ESTestCase { +public class TimeBasedExtractedFieldsTests extends ESTestCase { private ExtractedField timeField = ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.DOC_VALUE); public void testInvalidConstruction() { - expectThrows(IllegalArgumentException.class, () -> new ExtractedFields(timeField, Collections.emptyList())); + expectThrows(IllegalArgumentException.class, () -> new TimeBasedExtractedFields(timeField, Collections.emptyList())); } public void testTimeFieldOnly() { - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField)); assertThat(extractedFields.getAllFields(), equalTo(Arrays.asList(timeField))); assertThat(extractedFields.timeField(), equalTo("time")); - assertThat(extractedFields.getDocValueFields(), equalTo(new String[] { timeField.getName() })); + assertThat(extractedFields.getDocValueFields().stream().map(ExtractedField::getName).toArray(String[]::new), + equalTo(new String[] { timeField.getName() })); assertThat(extractedFields.getSourceFields().length, equalTo(0)); } @@ -54,41 +54,42 @@ public void testAllTypesOfFields() { ExtractedField scriptField2 = ExtractedField.newField("scripted2", ExtractedField.ExtractionMethod.SCRIPT_FIELD); ExtractedField sourceField1 = ExtractedField.newField("src1", ExtractedField.ExtractionMethod.SOURCE); ExtractedField sourceField2 = ExtractedField.newField("src2", ExtractedField.ExtractionMethod.SOURCE); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField, + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField, docValue1, docValue2, scriptField1, scriptField2, sourceField1, sourceField2)); assertThat(extractedFields.getAllFields().size(), equalTo(7)); assertThat(extractedFields.timeField(), equalTo("time")); - assertThat(extractedFields.getDocValueFields(), equalTo(new String[] {"time", "doc1", "doc2"})); + assertThat(extractedFields.getDocValueFields().stream().map(ExtractedField::getName).toArray(String[]::new), + equalTo(new String[] {"time", "doc1", "doc2"})); assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"src1", "src2"})); } public void testTimeFieldValue() { - final long millis = randomLong(); - final SearchHit hit = new 
SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); - final ExtractedFields extractedFields = new ExtractedFields(timeField, Collections.singletonList(timeField)); + long millis = randomLong(); + SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Collections.singletonList(timeField)); assertThat(extractedFields.timeFieldValue(hit), equalTo(millis)); } public void testStringTimeFieldValue() { - final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); - final ExtractedFields extractedFields = new ExtractedFields(timeField, Collections.singletonList(timeField)); + long millis = randomLong(); + SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Collections.singletonList(timeField)); assertThat(extractedFields.timeFieldValue(hit), equalTo(millis)); } public void testPre6xTimeFieldValue() { // Prior to 6.x, timestamps were simply `long` milliseconds-past-the-epoch values - final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", millis).build(); - final ExtractedFields extractedFields = new ExtractedFields(timeField, Collections.singletonList(timeField)); + long millis = randomLong(); + SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", millis).build(); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Collections.singletonList(timeField)); assertThat(extractedFields.timeFieldValue(hit), equalTo(millis)); } public void testTimeFieldValueGivenEmptyArray() { SearchHit hit = new SearchHitBuilder(1).addField("time", Collections.emptyList()).build(); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField)); expectThrows(RuntimeException.class, () -> extractedFields.timeFieldValue(hit)); } @@ -96,7 +97,7 @@ public void testTimeFieldValueGivenEmptyArray() { public void testTimeFieldValueGivenValueHasTwoElements() { SearchHit hit = new SearchHitBuilder(1).addField("time", Arrays.asList(1L, 2L)).build(); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField)); expectThrows(RuntimeException.class, () -> extractedFields.timeFieldValue(hit)); } @@ -104,7 +105,7 @@ public void testTimeFieldValueGivenValueHasTwoElements() { public void testTimeFieldValueGivenValueIsString() { SearchHit hit = new SearchHitBuilder(1).addField("time", "a string").build(); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField)); expectThrows(RuntimeException.class, () -> extractedFields.timeFieldValue(hit)); } @@ -134,13 +135,15 @@ public void testBuildGivenMixtureOfTypes() { when(fieldCapabilitiesResponse.getField("value")).thenReturn(valueCaps); when(fieldCapabilitiesResponse.getField("airline")).thenReturn(airlineCaps); - ExtractedFields extractedFields = ExtractedFields.build(jobBuilder.build(new Date()), 
datafeedBuilder.build(), + TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse); assertThat(extractedFields.timeField(), equalTo("time")); - assertThat(extractedFields.getDocValueFields().length, equalTo(2)); - assertThat(extractedFields.getDocValueFields()[0], equalTo("time")); - assertThat(extractedFields.getDocValueFields()[1], equalTo("value")); + assertThat(extractedFields.getDocValueFields().size(), equalTo(2)); + assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("time")); + assertThat(extractedFields.getDocValueFields().get(0).getDocValueFormat(), equalTo("epoch_millis")); + assertThat(extractedFields.getDocValueFields().get(1).getName(), equalTo("value")); + assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); assertThat(extractedFields.getSourceFields().length, equalTo(1)); assertThat(extractedFields.getSourceFields()[0], equalTo("airline")); assertThat(extractedFields.getAllFields().size(), equalTo(4)); @@ -170,13 +173,13 @@ public void testBuildGivenMultiFields() { when(fieldCapabilitiesResponse.getField("airport")).thenReturn(text); when(fieldCapabilitiesResponse.getField("airport.keyword")).thenReturn(keyword); - ExtractedFields extractedFields = ExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), + TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse); assertThat(extractedFields.timeField(), equalTo("time")); - assertThat(extractedFields.getDocValueFields().length, equalTo(2)); - assertThat(extractedFields.getDocValueFields()[0], equalTo("time")); - assertThat(extractedFields.getDocValueFields()[1], equalTo("airport.keyword")); + assertThat(extractedFields.getDocValueFields().size(), equalTo(2)); + assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("time")); + assertThat(extractedFields.getDocValueFields().get(1).getName(), equalTo("airport.keyword")); assertThat(extractedFields.getSourceFields().length, equalTo(1)); assertThat(extractedFields.getSourceFields()[0], equalTo("airline")); assertThat(extractedFields.getAllFields().size(), equalTo(3)); @@ -204,10 +207,9 @@ public void testBuildGivenTimeFieldIsNotAggregatable() { FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); when(fieldCapabilitiesResponse.getField("time")).thenReturn(timeCaps); - ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> ExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); - assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("datafeed [feed] cannot retrieve time field [time] because it is not aggregatable")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TimeBasedExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); + assertThat(e.getMessage(), equalTo("cannot retrieve time field [time] because it is not aggregatable")); } public void testBuildGivenTimeFieldIsNotAggregatableInSomeIndices() { @@ -226,10 +228,9 @@ public void testBuildGivenTimeFieldIsNotAggregatableInSomeIndices() { FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); 
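// A time field that is aggregatable in only some indices is treated the same as one that is not aggregatable at all: the build below must fail with the same plain IllegalArgumentException.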
when(fieldCapabilitiesResponse.getField("time")).thenReturn(timeCaps); - ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> ExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); - assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("datafeed [feed] cannot retrieve time field [time] because it is not aggregatable")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TimeBasedExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); + assertThat(e.getMessage(), equalTo("cannot retrieve time field [time] because it is not aggregatable")); } public void testBuildGivenFieldWithoutMappings() { @@ -247,10 +248,9 @@ public void testBuildGivenFieldWithoutMappings() { FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); when(fieldCapabilitiesResponse.getField("time")).thenReturn(timeCaps); - ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, - () -> ExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); - assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), equalTo("datafeed [feed] cannot retrieve field [value] because it has no mappings")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> TimeBasedExtractedFields.build(jobBuilder.build(new Date()), datafeedBuilder.build(), fieldCapabilitiesResponse)); + assertThat(e.getMessage(), equalTo("cannot retrieve field [value] because it has no mappings")); } private static FieldCapabilities createFieldCaps(boolean isAggregatable) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index f72ae9b46b13f..93a76c5402b36 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -27,6 +27,8 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.TimeBasedExtractedFields; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -61,7 +63,7 @@ public class ScrollDataExtractorTests extends ESTestCase { private List<String> capturedContinueScrollIds; private ArgumentCaptor<ClearScrollRequest> capturedClearScrollRequests; private String jobId; - private ExtractedFields extractedFields; + private TimeBasedExtractedFields extractedFields; private List<String> types; private List<String> indices; private QueryBuilder query; @@ -128,7 +130,7 @@ public void setUpTests() { capturedContinueScrollIds = new ArrayList<>(); jobId = "test-job"; ExtractedField timeField = ExtractedField.newField("time", ExtractedField.ExtractionMethod.DOC_VALUE); - extractedFields = new ExtractedFields(timeField, + extractedFields = new TimeBasedExtractedFields(timeField, Arrays.asList(timeField, ExtractedField.newField("field_1", ExtractedField.ExtractionMethod.DOC_VALUE))); indices = Arrays.asList("index-1", "index-2"); types 
= Arrays.asList("type-1", "type-2"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessorTests.java index 60e14023d3685..41a74814461c9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/SearchHitToJsonProcessorTests.java @@ -7,6 +7,9 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedFields; +import org.elasticsearch.xpack.ml.datafeed.extractor.fields.TimeBasedExtractedFields; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; import java.io.ByteArrayOutputStream; @@ -23,7 +26,8 @@ public void testProcessGivenSingleHit() throws IOException { ExtractedField missingField = ExtractedField.newField("missing", ExtractedField.ExtractionMethod.DOC_VALUE); ExtractedField singleField = ExtractedField.newField("single", ExtractedField.ExtractionMethod.DOC_VALUE); ExtractedField arrayField = ExtractedField.newField("array", ExtractedField.ExtractionMethod.DOC_VALUE); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField, missingField, singleField, arrayField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, + Arrays.asList(timeField, missingField, singleField, arrayField)); SearchHit hit = new SearchHitBuilder(8) .addField("time", 1000L) @@ -41,7 +45,8 @@ public void testProcessGivenMultipleHits() throws IOException { ExtractedField missingField = ExtractedField.newField("missing", ExtractedField.ExtractionMethod.DOC_VALUE); ExtractedField singleField = ExtractedField.newField("single", ExtractedField.ExtractionMethod.DOC_VALUE); ExtractedField arrayField = ExtractedField.newField("array", ExtractedField.ExtractionMethod.DOC_VALUE); - ExtractedFields extractedFields = new ExtractedFields(timeField, Arrays.asList(timeField, missingField, singleField, arrayField)); + TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, + Arrays.asList(timeField, missingField, singleField, arrayField)); SearchHit hit1 = new SearchHitBuilder(8) .addField("time", 1000L) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java index 53f3a2a4d4ca6..0ed3f54112617 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactoryTests.java @@ -12,7 +12,7 @@ public class DelimitedFileStructureFinderFactoryTests extends FileStructureTestC private FileStructureFinderFactory semiColonDelimitedfactory = new DelimitedFileStructureFinderFactory(';', '"', 4, false); private FileStructureFinderFactory pipeDelimitedFactory = new DelimitedFileStructureFinderFactory('|', '"', 5, true); - // CSV - no need to check JSON or XML because they come earlier in the order we check formats + // CSV - no need to 
check NDJSON or XML because they come earlier in the order we check formats public void testCanCreateCsvFromSampleGivenCsv() { @@ -39,7 +39,7 @@ public void testCanCreateCsvFromSampleGivenText() { assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); } - // TSV - no need to check JSON, XML or CSV because they come earlier in the order we check formats + // TSV - no need to check NDJSON, XML or CSV because they come earlier in the order we check formats public void testCanCreateTsvFromSampleGivenTsv() { @@ -61,7 +61,7 @@ public void testCanCreateTsvFromSampleGivenText() { assertFalse(tsvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); } - // Semi-colon delimited - no need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats + // Semi-colon delimited - no need to check NDJSON, XML, CSV or TSV because they come earlier in the order we check formats public void testCanCreateSemiColonDelimitedFromSampleGivenSemiColonDelimited() { @@ -78,7 +78,7 @@ public void testCanCreateSemiColonDelimitedFromSampleGivenText() { assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, TEXT_SAMPLE)); } - // Pipe delimited - no need to check JSON, XML, CSV, TSV or semi-colon delimited + // Pipe delimited - no need to check NDJSON, XML, CSV, TSV or semi-colon delimited // values because they come earlier in the order we check formats public void testCanCreatePipeDelimitedFromSampleGivenPipeDelimited() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java index 4329e076ce6b8..246c96011c2bf 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java @@ -73,20 +73,20 @@ public void testFindCharsetGivenBinary() throws Exception { } } - public void testMakeBestStructureGivenJson() throws Exception { - assertThat(structureFinderManager.makeBestStructureFinder(explanation, JSON_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(JsonFileStructureFinder.class)); + public void testMakeBestStructureGivenNdJson() throws Exception { + assertThat(structureFinderManager.makeBestStructureFinder(explanation, NDJSON_SAMPLE, StandardCharsets.UTF_8.name(), + randomBoolean(), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(NdJsonFileStructureFinder.class)); } - public void testMakeBestStructureGivenJsonAndDelimitedOverride() throws Exception { + public void testMakeBestStructureGivenNdJsonAndDelimitedOverride() throws Exception { // Need to change the quote character from the default of double quotes - // otherwise the quotes in the JSON will stop it parsing as CSV + // otherwise the quotes in the NDJSON will stop it parsing as CSV FileStructureOverrides overrides = FileStructureOverrides.builder() .setFormat(FileStructure.Format.DELIMITED).setQuote('\'').build(); - assertThat(structureFinderManager.makeBestStructureFinder(explanation, JSON_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - overrides, NOOP_TIMEOUT_CHECKER), instanceOf(DelimitedFileStructureFinder.class)); + assertThat(structureFinderManager.makeBestStructureFinder(explanation, NDJSON_SAMPLE, StandardCharsets.UTF_8.name(), + randomBoolean(), overrides, 
NOOP_TIMEOUT_CHECKER), instanceOf(DelimitedFileStructureFinder.class)); } public void testMakeBestStructureGivenXml() throws Exception { @@ -109,13 +109,13 @@ public void testMakeBestStructureGivenCsv() throws Exception { public void testMakeBestStructureGivenCsvAndJsonOverride() { - FileStructureOverrides overrides = FileStructureOverrides.builder().setFormat(FileStructure.Format.JSON).build(); + FileStructureOverrides overrides = FileStructureOverrides.builder().setFormat(FileStructure.Format.NDJSON).build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> structureFinderManager.makeBestStructureFinder(explanation, CSV_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), overrides, NOOP_TIMEOUT_CHECKER)); - assertEquals("Input did not match the specified format [json]", e.getMessage()); + assertEquals("Input did not match the specified format [ndjson]", e.getMessage()); } public void testMakeBestStructureGivenText() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java index 1a0da875e67e2..f47a3582c66a9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureTestCase.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; -import org.elasticsearch.common.logging.Loggers; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; @@ -27,7 +27,7 @@ public abstract class FileStructureTestCase extends ESTestCase { "2018-05-17T16:23:40,key1,42.0\n" + "2018-05-17T16:24:11,\"key with spaces\",42.0\n"; - protected static final String JSON_SAMPLE = "{\"logger\":\"controller\",\"timestamp\":1478261151445,\"level\":\"INFO\"," + + protected static final String NDJSON_SAMPLE = "{\"logger\":\"controller\",\"timestamp\":1478261151445,\"level\":\"INFO\"," + "\"pid\":42,\"thread\":\"0x7fff7d2a8000\",\"message\":\"message 1\",\"class\":\"ml\"," + "\"method\":\"core::SomeNoiseMaker\",\"file\":\"Noisemaker.cc\",\"line\":333}\n" + "{\"logger\":\"controller\",\"timestamp\":1478261151445," + @@ -80,7 +80,7 @@ public void initExplanation() { @After public void printExplanation() { - Loggers.getLogger(getClass()).info("Explanation:\n" + String.join("\n", explanation)); + LogManager.getLogger(getClass()).info("Explanation:\n" + String.join("\n", explanation)); } protected Boolean randomHasByteOrderMarker(String charset) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java similarity index 80% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java index 55074e8c38272..a220bdf3b0690 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java @@ -9,21 +9,21 @@ import 
java.util.Collections; -public class JsonFileStructureFinderTests extends FileStructureTestCase { +public class NdJsonFileStructureFinderTests extends FileStructureTestCase { - private FileStructureFinderFactory factory = new JsonFileStructureFinderFactory(); + private FileStructureFinderFactory factory = new NdJsonFileStructureFinderFactory(); public void testCreateConfigsGivenGoodJson() throws Exception { - assertTrue(factory.canCreateFromSample(explanation, JSON_SAMPLE)); + assertTrue(factory.canCreateFromSample(explanation, NDJSON_SAMPLE)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = factory.createFromSample(explanation, JSON_SAMPLE, charset, hasByteOrderMarker, + FileStructureFinder structureFinder = factory.createFromSample(explanation, NDJSON_SAMPLE, charset, hasByteOrderMarker, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); - assertEquals(FileStructure.Format.JSON, structure.getFormat()); + assertEquals(FileStructure.Format.NDJSON, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactoryTests.java similarity index 79% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactoryTests.java index 092f11676a877..63353201669b4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/JsonFileStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactoryTests.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.ml.filestructurefinder; -public class JsonFileStructureFinderFactoryTests extends FileStructureTestCase { +public class NdJsonFileStructureFinderFactoryTests extends FileStructureTestCase { - private FileStructureFinderFactory factory = new JsonFileStructureFinderFactory(); + private FileStructureFinderFactory factory = new NdJsonFileStructureFinderFactory(); - public void testCanCreateFromSampleGivenJson() { + public void testCanCreateFromSampleGivenNdJson() { - assertTrue(factory.canCreateFromSample(explanation, JSON_SAMPLE)); + assertTrue(factory.canCreateFromSample(explanation, NDJSON_SAMPLE)); } public void testCanCreateFromSampleGivenXml() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java index 8234357fe3676..0148de7fd6ae0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactoryTests.java @@ -9,7 +9,7 @@ public class TextLogFileStructureFinderFactoryTests extends FileStructureTestCas private FileStructureFinderFactory factory = new 
TextLogFileStructureFinderFactory(); - // No need to check JSON, XML, CSV, TSV, semi-colon delimited values or pipe + // No need to check NDJSON, XML, CSV, TSV, semi-colon delimited values or pipe // delimited values because they come earlier in the order we check formats public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java index e7c111818317f..28368ebf73450 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactoryTests.java @@ -9,7 +9,7 @@ public class XmlFileStructureFinderFactoryTests extends FileStructureTestCase { private FileStructureFinderFactory factory = new XmlFileStructureFinderFactory(); - // No need to check JSON because it comes earlier in the order we check formats + // No need to check NDJSON because it comes earlier in the order we check formats public void testCanCreateFromSampleGivenXml() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index 756eeb8626dc6..3191253f806eb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -103,7 +103,7 @@ public void createComponents() throws Exception { renormalizer = mock(Renormalizer.class); capturedUpdateModelSnapshotOnJobRequests = new ArrayList<>(); resultProcessor = new AutoDetectResultProcessor(client(), auditor, JOB_ID, renormalizer, - new JobResultsPersister(nodeSettings(), client()), jobResultsProvider, new ModelSizeStats.Builder(JOB_ID).build(), false) { + new JobResultsPersister(client()), jobResultsProvider, new ModelSizeStats.Builder(JOB_ID).build(), false) { @Override protected void updateModelSnapshotOnJob(ModelSnapshot modelSnapshot) { capturedUpdateModelSnapshotOnJobRequests.add(modelSnapshot); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/EstablishedMemUsageIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/EstablishedMemUsageIT.java index df3af13f71403..a026d5d6c337b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/EstablishedMemUsageIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/EstablishedMemUsageIT.java @@ -33,7 +33,7 @@ public class EstablishedMemUsageIT extends BaseMlIntegTestCase { public void createComponents() { Settings settings = nodeSettings(0); jobResultsProvider = new JobResultsProvider(client(), settings); - jobResultsPersister = new JobResultsPersister(settings, client()); + jobResultsPersister = new JobResultsPersister(client()); } public void testEstablishedMem_givenNoResults() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index e36c313b626c9..dcbed9986a862 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -542,7 +542,7 @@ private void indexScheduledEvents(List<ScheduledEvent> events) throws IOExceptio } private void indexDataCounts(DataCounts counts, String jobId) throws Exception { - JobDataCountsPersister persister = new JobDataCountsPersister(nodeSettings(), client()); + JobDataCountsPersister persister = new JobDataCountsPersister(client()); AtomicReference<Exception> errorHolder = new AtomicReference<>(); CountDownLatch latch = new CountDownLatch(1); @@ -582,17 +582,17 @@ private void indexFilters(List<MlFilter> filters) throws IOException { } private void indexModelSizeStats(ModelSizeStats modelSizeStats) { - JobResultsPersister persister = new JobResultsPersister(nodeSettings(), client()); + JobResultsPersister persister = new JobResultsPersister(client()); persister.persistModelSizeStats(modelSizeStats); } private void indexModelSnapshot(ModelSnapshot snapshot) { - JobResultsPersister persister = new JobResultsPersister(nodeSettings(), client()); + JobResultsPersister persister = new JobResultsPersister(client()); persister.persistModelSnapshot(snapshot, WriteRequest.RefreshPolicy.IMMEDIATE); } private void indexQuantiles(Quantiles quantiles) { - JobResultsPersister persister = new JobResultsPersister(nodeSettings(), client()); + JobResultsPersister persister = new JobResultsPersister(client()); persister.persistQuantiles(quantiles); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersisterTests.java index b1ec3008d33c9..f9bacee003c8b 100--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersisterTests.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.job.process.normalizer.BucketNormalizable; import org.elasticsearch.xpack.core.ml.job.results.Bucket; @@ -45,7 +44,7 @@ public void testExecuteRequestResetsBulkRequest() { public void testBulkRequestExecutesWhenReachMaxDocs() { BulkResponse bulkResponse = mock(BulkResponse.class); Client client = new MockClientBuilder("cluster").bulk(bulkResponse).build(); - JobRenormalizedResultsPersister persister = new JobRenormalizedResultsPersister("foo", Settings.EMPTY, client); + JobRenormalizedResultsPersister persister = new JobRenormalizedResultsPersister("foo", client); ModelPlot modelPlot = new ModelPlot("foo", new Date(), 123456, 0); for (int i=0; i<=JobRenormalizedResultsPersister.BULK_LIMIT; i++) { @@ -62,7 +61,7 @@ private JobRenormalizedResultsPersister createJobRenormalizedResultsPersister() when(bulkResponse.hasFailures()).thenReturn(false); Client client = new MockClientBuilder("cluster").bulk(bulkResponse).build(); - return new JobRenormalizedResultsPersister("foo", Settings.EMPTY, client); + return new JobRenormalizedResultsPersister("foo", client); } private BucketNormalizable createBucketNormalizable() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index c31ebd4bc2c86..0035531d55a57 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -61,7 +61,7 @@ public void testPersistBucket_OneRecord() throws IOException { AnomalyRecord record = new AnomalyRecord(JOB_ID, new Date(), 600); bucket.setRecords(Collections.singletonList(record)); - JobResultsPersister persister = new JobResultsPersister(Settings.EMPTY, client); + JobResultsPersister persister = new JobResultsPersister(client); persister.bulkPersisterBuilder(JOB_ID).persistBucket(bucket).executeRequest(); BulkRequest bulkRequest = captor.getValue(); assertEquals(2, bulkRequest.numberOfActions()); @@ -113,7 +113,7 @@ public void testPersistRecords() throws IOException { typicals.add(998765.3); r1.setTypical(typicals); - JobResultsPersister persister = new JobResultsPersister(Settings.EMPTY, client); + JobResultsPersister persister = new JobResultsPersister(client); persister.bulkPersisterBuilder(JOB_ID).persistRecords(records).executeRequest(); BulkRequest bulkRequest = captor.getValue(); assertEquals(1, bulkRequest.numberOfActions()); @@ -149,7 +149,7 @@ public void testPersistInfluencers() throws IOException { inf.setProbability(0.4); influencers.add(inf); - JobResultsPersister persister = new JobResultsPersister(Settings.EMPTY, client); + JobResultsPersister persister = new JobResultsPersister(client); persister.bulkPersisterBuilder(JOB_ID).persistInfluencers(influencers).executeRequest(); BulkRequest bulkRequest = captor.getValue(); assertEquals(1, bulkRequest.numberOfActions()); @@ -165,7 +165,7 @@ public void testPersistInfluencers() throws IOException { public void testExecuteRequest_ClearsBulkRequest() { ArgumentCaptor<BulkRequest> captor = ArgumentCaptor.forClass(BulkRequest.class); Client client = mockClient(captor); - JobResultsPersister persister = new JobResultsPersister(Settings.EMPTY, client); + JobResultsPersister persister = new JobResultsPersister(client); List<Influencer> influencers = new ArrayList<>(); Influencer inf = new Influencer(JOB_ID, "infName1", "infValue1", new Date(), 600); @@ -182,7 +182,7 @@ public void testExecuteRequest_ClearsBulkRequest() { public void testBulkRequestExecutesWhenReachMaxDocs() { ArgumentCaptor<BulkRequest> captor = ArgumentCaptor.forClass(BulkRequest.class); Client client = mockClient(captor); - JobResultsPersister persister = new JobResultsPersister(Settings.EMPTY, client); + JobResultsPersister persister = new JobResultsPersister(client); JobResultsPersister.Builder bulkBuilder = persister.bulkPersisterBuilder("foo"); ModelPlot modelPlot = new ModelPlot("foo", new Date(), 123456, 0); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporterTests.java index 4be8d74274cce..5415b46019196 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DataCountsReporterTests.java @@ -6,9 +6,7 @@ package org.elasticsearch.xpack.ml.job.process; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; 
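// The Settings and Environment imports above are removed because DataCountsReporter no longer takes a Settings argument, so the test needs neither Settings.EMPTY nor a fake path.home.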
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; @@ -34,14 +32,10 @@ public class DataCountsReporterTests extends ESTestCase { private Job job; private JobDataCountsPersister jobDataCountsPersister; - private Settings settings; private TimeValue bucketSpan = TimeValue.timeValueSeconds(300); @Before public void setUpMocks() { - settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build())); acBuilder.setBucketSpan(bucketSpan); acBuilder.setLatency(TimeValue.ZERO); @@ -57,8 +51,7 @@ public void setUpMocks() { } public void testSimpleConstructor() throws Exception { - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, job, new DataCounts(job.getId()), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, new DataCounts(job.getId()), jobDataCountsPersister); DataCounts stats = dataCountsReporter.incrementalStats(); assertNotNull(stats); assertAllCountFieldsEqualZero(stats); @@ -68,8 +61,7 @@ public void testComplexConstructor() throws Exception { DataCounts counts = new DataCounts("foo", 1L, 1L, 2L, 0L, 3L, 4L, 5L, 6L, 7L, 8L, new Date(), new Date(), new Date(), new Date(), new Date()); - DataCountsReporter dataCountsReporter = - new DataCountsReporter(settings, job, counts, jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, counts, jobDataCountsPersister); DataCounts stats = dataCountsReporter.incrementalStats(); assertNotNull(stats); assertAllCountFieldsEqualZero(stats); @@ -86,8 +78,7 @@ public void testComplexConstructor() throws Exception { } public void testResetIncrementalCounts() throws Exception { - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, job, new DataCounts(job.getId()), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, new DataCounts(job.getId()), jobDataCountsPersister); DataCounts stats = dataCountsReporter.incrementalStats(); assertNotNull(stats); assertAllCountFieldsEqualZero(stats); @@ -139,16 +130,14 @@ public void testResetIncrementalCounts() throws Exception { } public void testReportLatestTimeIncrementalStats() throws IOException { - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, job, new DataCounts(job.getId()), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, new DataCounts(job.getId()), jobDataCountsPersister); dataCountsReporter.startNewIncrementalCount(); dataCountsReporter.reportLatestTimeIncrementalStats(5001L); assertEquals(5001L, dataCountsReporter.incrementalStats().getLatestRecordTimeStamp().getTime()); } public void testReportRecordsWritten() { - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, job, new DataCounts(job.getId()), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, new DataCounts(job.getId()), jobDataCountsPersister); dataCountsReporter.setAnalysedFieldsPerRecord(3); dataCountsReporter.reportRecordWritten(5, 2000); @@ -258,8 +247,7 @@ public void testReportRecordsWritten_Given2_000_000Records() { public void testFinishReporting() { - DataCountsReporter dataCountsReporter = new DataCountsReporter(settings, job, new 
DataCounts(job.getId()), - jobDataCountsPersister); + DataCountsReporter dataCountsReporter = new DataCountsReporter(job, new DataCounts(job.getId()), jobDataCountsPersister); dataCountsReporter.setAnalysedFieldsPerRecord(3); Date now = new Date(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DummyDataCountsReporter.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DummyDataCountsReporter.java index 98ab4025bffdb..3967e83f5ec56 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DummyDataCountsReporter.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/DummyDataCountsReporter.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; @@ -27,8 +26,7 @@ class DummyDataCountsReporter extends DataCountsReporter { int logStatusCallCount = 0; DummyDataCountsReporter() { - super(Settings.EMPTY, createJob(), new DataCounts("DummyJobId"), - mock(JobDataCountsPersister.class)); + super(createJob(), new DataCounts("DummyJobId"), mock(JobDataCountsPersister.class)); } /** diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java index 6d5adeb3fdbf1..18ee9434f0dab 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; @@ -55,7 +54,7 @@ public void testProcessStartTime() throws Exception { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, mock(OutputStream.class), mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, null, - new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { + new AutodetectResultsParser(), mock(Runnable.class))) { process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); ZonedDateTime startTime = process.getProcessStartTime(); @@ -75,7 +74,7 @@ public void testWriteRecord() throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), - new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { + new AutodetectResultsParser(), mock(Runnable.class))) { process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); process.writeRecord(record); @@ -107,7 +106,7 @@ public void testFlush() throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream(ControlMsgToProcessWriter.FLUSH_SPACES_LENGTH + 1024); try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", 
logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), - new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { + new AutodetectResultsParser(), mock(Runnable.class))) { process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); FlushJobParams params = FlushJobParams.builder().build(); @@ -137,7 +136,7 @@ public void testWriteMessage(CheckedConsumer<NativeAutodetectProcess, IOException> writeFunct ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), - new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { + new AutodetectResultsParser(), mock(Runnable.class))) { process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); writeFunction.accept(process); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java index d2356a79677c3..8a725b95abf9c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultsParserTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.job.process.autodetect.output; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; @@ -238,7 +237,7 @@ public class AutodetectResultsParserTests extends ESTestCase { public void testParser() throws IOException { InputStream inputStream = new ByteArrayInputStream(METRIC_OUTPUT_SAMPLE.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); List<AutodetectResult> results = new ArrayList<>(); parser.parseResults(inputStream).forEachRemaining(results::add); List<Bucket> buckets = results.stream().map(AutodetectResult::getBucket) @@ -331,7 +330,7 @@ public void testParser() throws IOException { @AwaitsFix(bugUrl = "rewrite this test so it doesn't use ~200 lines of json") public void testPopulationParser() throws IOException { InputStream inputStream = new ByteArrayInputStream(POPULATION_OUTPUT_SAMPLE.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); List<AutodetectResult> results = new ArrayList<>(); parser.parseResults(inputStream).forEachRemaining(results::add); List<Bucket> buckets = results.stream().map(AutodetectResult::getBucket) @@ -357,7 +356,7 @@ public void testPopulationParser() throws IOException { public void testParse_GivenEmptyArray() throws ElasticsearchParseException, IOException { String json = "[]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); assertFalse(parser.parseResults(inputStream).hasNext()); } @@ -365,7 
+364,7 @@ public void testParse_GivenModelSizeStats() throws ElasticsearchParseException, String json = "[{\"model_size_stats\": {\"job_id\": \"foo\", \"model_bytes\":300}}]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); List<AutodetectResult> results = new ArrayList<>(); parser.parseResults(inputStream).forEachRemaining(results::add); @@ -376,7 +375,7 @@ public void testParse_GivenModelSizeStats() throws ElasticsearchParseException, public void testParse_GivenCategoryDefinition() throws IOException { String json = "[{\"category_definition\": {\"job_id\":\"foo\", \"category_id\":18}}]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); List<AutodetectResult> results = new ArrayList<>(); parser.parseResults(inputStream).forEachRemaining(results::add); @@ -387,7 +386,7 @@ public void testParse_GivenCategoryDefinition() throws IOException { public void testParse_GivenUnknownObject() throws ElasticsearchParseException, IOException { String json = "[{\"unknown\":{\"id\": 18}}]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); XContentParseException e = expectThrows(XContentParseException.class, () -> parser.parseResults(inputStream).forEachRemaining(a -> {})); assertEquals("[1:3] [autodetect_result] unknown field [unknown], parser not found", e.getMessage()); @@ -396,7 +395,7 @@ public void testParse_GivenUnknownObject() throws ElasticsearchParseException, I public void testParse_GivenArrayContainsAnotherArray() throws ElasticsearchParseException, IOException { String json = "[[]]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> parser.parseResults(inputStream).forEachRemaining(a -> {})); assertEquals("unexpected token [START_ARRAY]", e.getMessage()); @@ -411,7 +410,7 @@ public void testParsingExceptionNaN() { + "\"by_field_name\":\"airline\",\"by_field_value\":\"JZA\", \"typical\":[1020.08],\"actual\":[0]," + "\"field_name\":\"responsetime\",\"function\":\"max\",\"partition_field_name\":\"\",\"partition_field_value\":\"\"}]}}]"; InputStream inputStream = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); - AutodetectResultsParser parser = new AutodetectResultsParser(Settings.EMPTY); + AutodetectResultsParser parser = new AutodetectResultsParser(); expectThrows(XContentParseException.class, () -> parser.parseResults(inputStream).forEachRemaining(a -> {})); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index d490d58c3ab52..8c69875a54dbc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.process.logging; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESTestCase; @@ -205,7 +206,7 @@ public void testParseFatalError() throws IOException, IllegalAccessException { private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId) throws IOException { - Logger cppMessageLogger = Loggers.getLogger(CppLogMessageHandler.class); + Logger cppMessageLogger = LogManager.getLogger(CppLogMessageHandler.class); Loggers.addAppender(cppMessageLogger, mockAppender); Level oldLevel = cppMessageLogger.getLevel(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 31f403918fab6..2913a0ec9c0df 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -39,8 +39,7 @@ import org.elasticsearch.xpack.monitoring.action.TransportMonitoringBulkAction; import org.elasticsearch.xpack.monitoring.cleaner.CleanerService; import org.elasticsearch.xpack.monitoring.collector.Collector; -import org.elasticsearch.xpack.monitoring.collector.ccr.CcrAutoFollowStatsCollector; -import org.elasticsearch.xpack.monitoring.collector.ccr.FollowStatsCollector; +import org.elasticsearch.xpack.monitoring.collector.ccr.StatsCollector; import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStatsCollector; import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryCollector; import org.elasticsearch.xpack.monitoring.collector.indices.IndexStatsCollector; @@ -138,14 +137,13 @@ public Collection createComponents(Client client, ClusterService cluster threadPool.getThreadContext()); Set collectors = new HashSet<>(); - collectors.add(new IndexStatsCollector(settings, clusterService, getLicenseState(), client)); + collectors.add(new IndexStatsCollector(clusterService, getLicenseState(), client)); collectors.add(new ClusterStatsCollector(settings, clusterService, getLicenseState(), client, getLicenseService())); - collectors.add(new ShardsCollector(settings, clusterService, getLicenseState())); - collectors.add(new NodeStatsCollector(settings, clusterService, getLicenseState(), client)); - collectors.add(new IndexRecoveryCollector(settings, clusterService, getLicenseState(), client)); + collectors.add(new ShardsCollector(clusterService, getLicenseState())); + collectors.add(new NodeStatsCollector(clusterService, getLicenseState(), client)); + collectors.add(new IndexRecoveryCollector(clusterService, getLicenseState(), client)); collectors.add(new JobStatsCollector(settings, clusterService, getLicenseState(), client)); - collectors.add(new FollowStatsCollector(settings, clusterService, getLicenseState(), client)); - collectors.add(new CcrAutoFollowStatsCollector(settings, clusterService, getLicenseState(), client)); + collectors.add(new StatsCollector(settings, clusterService, getLicenseState(), client)); final MonitoringService monitoringService = new MonitoringService(settings, clusterService, threadPool, collectors, exporters); @@ -184,8 +182,7 @@ public List> 
getSettings() { settings.add(IndexRecoveryCollector.INDEX_RECOVERY_ACTIVE_ONLY); settings.add(IndexStatsCollector.INDEX_STATS_TIMEOUT); settings.add(JobStatsCollector.JOB_STATS_TIMEOUT); - settings.add(FollowStatsCollector.CCR_STATS_TIMEOUT); - settings.add(CcrAutoFollowStatsCollector.CCR_AUTO_FOLLOW_STATS_TIMEOUT); + settings.add(StatsCollector.CCR_STATS_TIMEOUT); settings.add(NodeStatsCollector.NODE_STATS_TIMEOUT); settings.addAll(Exporters.getSettings()); return Collections.unmodifiableList(settings); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkAction.java index c48c33797c41d..06bdf6481e690 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -41,10 +40,10 @@ public class TransportMonitoringBulkAction extends HandledTransportAction timeoutSetting, final XPackLicenseState licenseState) { - super(settings); this.name = name; this.clusterService = clusterService; this.collectionTimeoutSetting = timeoutSetting; diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollector.java deleted file mode 100644 index e179c20441644..0000000000000 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrAutoFollowStatsCollector.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.monitoring.collector.ccr; - -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.ccr.action.AutoFollowStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Collection; -import java.util.Collections; - -public final class CcrAutoFollowStatsCollector extends AbstractCcrCollector { - - public static final Setting CCR_AUTO_FOLLOW_STATS_TIMEOUT = collectionTimeoutSetting("ccr.auto_follow.stats.timeout"); - - public CcrAutoFollowStatsCollector( - final Settings settings, - final ClusterService clusterService, - final XPackLicenseState licenseState, - final Client client) { - super(settings, clusterService, CCR_AUTO_FOLLOW_STATS_TIMEOUT, licenseState, new XPackClient(client).ccr(), - client.threadPool().getThreadContext()); - } - - CcrAutoFollowStatsCollector( - final Settings settings, - final ClusterService clusterService, - final XPackLicenseState licenseState, - final CcrClient ccrClient, - final ThreadContext threadContext) { - super(settings, clusterService, CCR_AUTO_FOLLOW_STATS_TIMEOUT, licenseState, ccrClient, threadContext); - } - - @Override - Collection innerDoCollect( - long timestamp, - String clusterUuid, - long interval, - MonitoringDoc.Node node) throws Exception { - - final AutoFollowStatsAction.Request request = new AutoFollowStatsAction.Request(); - final AutoFollowStatsAction.Response response = ccrClient.autoFollowStats(request).actionGet(getCollectionTimeout()); - - final AutoFollowStatsMonitoringDoc doc = - new AutoFollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, response.getStats()); - return Collections.singletonList(doc); - } - -} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollector.java deleted file mode 100644 index 3255032e78552..0000000000000 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsCollector.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.monitoring.collector.ccr; - -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; - -import java.util.Collection; -import java.util.stream.Collectors; - -public final class FollowStatsCollector extends AbstractCcrCollector { - - public static final Setting CCR_STATS_TIMEOUT = collectionTimeoutSetting("ccr.stats.timeout"); - - public FollowStatsCollector( - final Settings settings, - final ClusterService clusterService, - final XPackLicenseState licenseState, - final Client client) { - super(settings, clusterService, CCR_STATS_TIMEOUT, licenseState, new XPackClient(client).ccr(), - client.threadPool().getThreadContext()); - } - - FollowStatsCollector( - final Settings settings, - final ClusterService clusterService, - final XPackLicenseState licenseState, - final CcrClient ccrClient, - final ThreadContext threadContext) { - super(settings, clusterService, CCR_STATS_TIMEOUT, licenseState, ccrClient, threadContext); - } - - @Override - Collection innerDoCollect( - long timestamp, - String clusterUuid, - long interval, - MonitoringDoc.Node node) throws Exception { - - - final FollowStatsAction.StatsRequest request = new FollowStatsAction.StatsRequest(); - request.setIndices(getCollectionIndices()); - final FollowStatsAction.StatsResponses responses = ccrClient.stats(request).actionGet(getCollectionTimeout()); - - return responses - .getStatsResponses() - .stream() - .map(stats -> new FollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, stats.status())) - .collect(Collectors.toList()); - } - -} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java similarity index 54% rename from x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollector.java rename to x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java index e3db3d3366746..bdccb5604a361 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/AbstractCcrCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.monitoring.collector.ccr; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; @@ -13,30 +14,48 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import 
org.elasticsearch.xpack.core.ccr.client.CcrClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; +import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.monitoring.collector.ccr.FollowStatsMonitoringDoc.TYPE; -public abstract class AbstractCcrCollector extends Collector { +public final class StatsCollector extends Collector { + public static final Setting CCR_STATS_TIMEOUT = collectionTimeoutSetting("ccr.stats.timeout"); + + private final Settings settings; private final ThreadContext threadContext; - final CcrClient ccrClient; + private final CcrClient ccrClient; + + public StatsCollector( + final Settings settings, + final ClusterService clusterService, + final XPackLicenseState licenseState, + final Client client) { + this(settings, clusterService, licenseState, new XPackClient(client).ccr(), client.threadPool().getThreadContext()); + } - AbstractCcrCollector( + StatsCollector( final Settings settings, final ClusterService clusterService, - final Setting timeoutSetting, final XPackLicenseState licenseState, final CcrClient ccrClient, final ThreadContext threadContext) { - super(settings, TYPE, clusterService, timeoutSetting, licenseState); + super(TYPE, clusterService, CCR_STATS_TIMEOUT, licenseState); + this.settings = settings; this.ccrClient = ccrClient; this.threadContext = threadContext; } @@ -59,13 +78,23 @@ protected Collection doCollect( try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) { final long timestamp = timestamp(); final String clusterUuid = clusterUuid(clusterState); - return innerDoCollect(timestamp, clusterUuid, interval, node); + + final CcrStatsAction.Request request = new CcrStatsAction.Request(); + final CcrStatsAction.Response response = ccrClient.stats(request).actionGet(getCollectionTimeout()); + + final AutoFollowStatsMonitoringDoc autoFollowStatsDoc = + new AutoFollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, response.getAutoFollowStats()); + + Set collectionIndices = new HashSet<>(Arrays.asList(getCollectionIndices())); + List docs = response + .getFollowStats() + .getStatsResponses() + .stream() + .filter(statsResponse -> collectionIndices.isEmpty() || collectionIndices.contains(statsResponse.status().followerIndex())) + .map(stats -> new FollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, stats.status())) + .collect(Collectors.toList()); + docs.add(autoFollowStatsDoc); + return docs; } } - - abstract Collection innerDoCollect( - long timestamp, - String clusterUuid, - long interval, - MonitoringDoc.Node node) throws Exception; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java index 23fe4d465435e..ac699759482a8 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java @@ -55,6 +55,7 @@ public class 
ClusterStatsCollector extends Collector { */ public static final Setting CLUSTER_STATS_TIMEOUT = collectionTimeoutSetting("cluster.stats.timeout"); + private final Settings settings; private final IndexNameExpressionResolver indexNameExpressionResolver; private final LicenseService licenseService; private final Client client; @@ -64,7 +65,7 @@ public ClusterStatsCollector(final Settings settings, final XPackLicenseState licenseState, final Client client, final LicenseService licenseService) { - this(settings, clusterService, licenseState, client, licenseService, new IndexNameExpressionResolver(Settings.EMPTY)); + this(settings, clusterService, licenseState, client, licenseService, new IndexNameExpressionResolver()); } ClusterStatsCollector(final Settings settings, @@ -73,8 +74,8 @@ public ClusterStatsCollector(final Settings settings, final Client client, final LicenseService licenseService, final IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ClusterStatsMonitoringDoc.TYPE, clusterService, CLUSTER_STATS_TIMEOUT, licenseState); - + super(ClusterStatsMonitoringDoc.TYPE, clusterService, CLUSTER_STATS_TIMEOUT, licenseState); + this.settings = settings; this.client = client; this.licenseService = licenseService; this.indexNameExpressionResolver = Objects.requireNonNull(indexNameExpressionResolver); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java index 75c0ba6b81c7d..5f64d5a7a0e7b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.license.License; @@ -47,7 +46,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc { ClusterState.Metric.NODES)); public static final String TYPE = "cluster_stats"; - protected static final String SETTING_CLUSTER_METADATA = "cluster.metadata"; + protected static final String SETTING_DISPLAY_NAME = "cluster.metadata.display_name"; private final String clusterName; private final String version; @@ -121,12 +120,12 @@ boolean getClusterNeedsTLSEnabled() { return clusterNeedsTLSEnabled; } - Settings getClusterMetaDataSettings() { + String getClusterDisplayName() { MetaData metaData = this.clusterState.getMetaData(); if (metaData == null) { - return Settings.EMPTY; + return null; } - return metaData.settings().getAsSettings(SETTING_CLUSTER_METADATA); + return metaData.settings().get(SETTING_DISPLAY_NAME); } @Override @@ -167,21 +166,19 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO builder.endObject(); } - Settings clusterMetaDataSettings = getClusterMetaDataSettings(); - if (clusterMetaDataSettings != null) { + String displayName = getClusterDisplayName(); + if (displayName != null) { builder.startObject("cluster_settings"); { - if (clusterMetaDataSettings.size() > 0) { - builder.startObject("cluster"); + 
builder.startObject("cluster"); + { + builder.startObject("metadata"); { - builder.startObject("metadata"); - { - clusterMetaDataSettings.toXContent(builder, params); - } - builder.endObject(); + builder.field("display_name", displayName); } builder.endObject(); } + builder.endObject(); } builder.endObject(); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollector.java index ea8b5065387f3..82d2176d415e0 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollector.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -46,12 +45,10 @@ public class IndexRecoveryCollector extends Collector { private final Client client; - public IndexRecoveryCollector(final Settings settings, - final ClusterService clusterService, + public IndexRecoveryCollector(final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - - super(settings, IndexRecoveryMonitoringDoc.TYPE, clusterService, INDEX_RECOVERY_TIMEOUT, licenseState); + super(IndexRecoveryMonitoringDoc.TYPE, clusterService, INDEX_RECOVERY_TIMEOUT, licenseState); this.client = Objects.requireNonNull(client); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollector.java index b6f50ed9e5cfc..7630aa4794b88 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollector.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -40,11 +39,10 @@ public class IndexStatsCollector extends Collector { private final Client client; - public IndexStatsCollector(final Settings settings, - final ClusterService clusterService, + public IndexStatsCollector(final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - super(settings, "index-stats", clusterService, INDEX_STATS_TIMEOUT, licenseState); + super("index-stats", clusterService, INDEX_STATS_TIMEOUT, licenseState); this.client = client; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java index 
cfbf9b7e0a4b8..8742e0b645f8e 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java @@ -42,6 +42,7 @@ public class JobStatsCollector extends Collector { */ public static final Setting JOB_STATS_TIMEOUT = collectionTimeoutSetting("ml.job.stats.timeout"); + private final Settings settings; private final ThreadContext threadContext; private final MachineLearningClient client; @@ -52,7 +53,8 @@ public JobStatsCollector(final Settings settings, final ClusterService clusterSe JobStatsCollector(final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, final MachineLearningClient client, final ThreadContext threadContext) { - super(settings, JobStatsMonitoringDoc.TYPE, clusterService, JOB_STATS_TIMEOUT, licenseState); + super(JobStatsMonitoringDoc.TYPE, clusterService, JOB_STATS_TIMEOUT, licenseState); + this.settings = settings; this.client = client; this.threadContext = threadContext; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java index c990485e6a536..bc816cb9d9a05 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollector.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -49,12 +48,10 @@ public class NodeStatsCollector extends Collector { private final Client client; - public NodeStatsCollector(final Settings settings, - final ClusterService clusterService, + public NodeStatsCollector(final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - - super(settings, NodeStatsMonitoringDoc.TYPE, clusterService, NODE_STATS_TIMEOUT, licenseState); + super(NodeStatsMonitoringDoc.TYPE, clusterService, NODE_STATS_TIMEOUT, licenseState); this.client = Objects.requireNonNull(client); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java index b5a3a2920e2f7..eda782913adb5 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -30,11 +29,9 @@ */ public 
class ShardsCollector extends Collector { - public ShardsCollector(final Settings settings, - final ClusterService clusterService, + public ShardsCollector(final ClusterService clusterService, final XPackLicenseState licenseState) { - - super(settings, ShardMonitoringDoc.TYPE, clusterService, null, licenseState); + super(ShardMonitoringDoc.TYPE, clusterService, null, licenseState); } @Override diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index 40c95384d5349..fab40bf0944f1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -37,6 +37,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable { + private final Settings settings; private final Map factories; private final AtomicReference> exporters; private final ClusterService clusterService; @@ -47,7 +48,7 @@ public Exporters(Settings settings, Map factories, ClusterService clusterService, XPackLicenseState licenseState, ThreadContext threadContext) { super(settings); - + this.settings = settings; this.factories = factories; this.exporters = new AtomicReference<>(emptyMap()); this.threadContext = Objects.requireNonNull(threadContext); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java index 97677c3c1c6d5..e035668841f59 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java @@ -11,12 +11,12 @@ import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -33,7 +33,7 @@ */ public class ClusterAlertHttpResource extends PublishableHttpResource { - private static final Logger logger = Loggers.getLogger(ClusterAlertHttpResource.class); + private static final Logger logger = LogManager.getLogger(ClusterAlertHttpResource.class); /** * Use this to retrieve the version of Cluster Alert in the Watch's JSON response from a request. 
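Most of the monitoring hunks above follow one refactoring: the `Collector` base constructor drops its `Settings` argument (the `- super(settings);` change), so stateless collectors lose the parameter entirely while collectors that still read node settings (StatsCollector, ClusterStatsCollector, JobStatsCollector, Exporters) capture their own `settings` field. A minimal sketch of the resulting subclass shape, assuming the x-pack classes above are on the classpath; `ExampleCollector`, `EXAMPLE_STATS_TIMEOUT`, and the `"example-stats"` type are stand-ins, not names from this change:

```java
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.monitoring.collector.Collector;

// Sketch only: an illustrative collector after the Settings removal.
public final class ExampleCollector extends Collector {

    // Per-collector timeout settings keep working exactly as before.
    public static final Setting<TimeValue> EXAMPLE_STATS_TIMEOUT = collectionTimeoutSetting("example.stats.timeout");

    // Collectors that still need node settings now hold them themselves...
    private final Settings settings;

    public ExampleCollector(final Settings settings, final ClusterService clusterService,
                            final XPackLicenseState licenseState) {
        // ...because the base constructor no longer accepts a Settings argument.
        super("example-stats", clusterService, EXAMPLE_STATS_TIMEOUT, licenseState);
        this.settings = settings;
    }
}
```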
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java
index ded3064a2a66f..c00448c903a47 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java
@@ -7,6 +7,7 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.nio.entity.NByteArrayEntity;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
@@ -18,7 +19,6 @@
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -39,7 +39,7 @@
  */
 class HttpExportBulk extends ExportBulk {

-    private static final Logger logger = Loggers.getLogger(HttpExportBulk.class);
+    private static final Logger logger = LogManager.getLogger(HttpExportBulk.class);

     /**
      * The {@link RestClient} managed by the {@link HttpExporter}.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulkResponseListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulkResponseListener.java
index 09c1f3522b44a..de14e4eeb8134 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulkResponseListener.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulkResponseListener.java
@@ -5,10 +5,10 @@
  */
 package org.elasticsearch.xpack.monitoring.exporter.http;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseListener;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContent;
@@ -23,7 +23,7 @@
  */
 class HttpExportBulkResponseListener implements ResponseListener {

-    private static final Logger logger = Loggers.getLogger(HttpExportBulkResponseListener.class);
+    private static final Logger logger = LogManager.getLogger(HttpExportBulkResponseListener.class);

     /**
      * Singleton instance.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java
index 54447c1646370..c7a19635bbbb5 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java
@@ -13,6 +13,7 @@
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.Version;
 import org.elasticsearch.client.RestClient;
@@ -24,7 +25,6 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -69,7 +69,7 @@
  */
 public class HttpExporter extends Exporter {

-    private static final Logger logger = Loggers.getLogger(HttpExporter.class);
+    private static final Logger logger = LogManager.getLogger(HttpExporter.class);

     public static final String TYPE = "http";
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java
index 15747a6f5dbd1..12e2f3f3f1a96 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.xpack.monitoring.exporter.http;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.common.logging.Loggers;

 import java.util.Collections;
 import java.util.List;
@@ -22,7 +22,7 @@
  */
 public class MultiHttpResource extends HttpResource {

-    private static final Logger logger = Loggers.getLogger(MultiHttpResource.class);
+    private static final Logger logger = LogManager.getLogger(MultiHttpResource.class);

     /**
      * Sub-resources that are grouped to simplify notification.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java
index aa8d2da070eb5..9b597b25c5cc4 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java
@@ -6,13 +6,13 @@
 package org.elasticsearch.xpack.monitoring.exporter.http;

 import org.apache.http.HttpHost;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.client.Node;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.sniff.Sniffer;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.Loggers;

 /**
  * {@code NodeFailureListener} logs warnings for any node failure, but it can also notify a {@link Sniffer} and/or {@link HttpResource}
@@ -23,7 +23,7 @@
  */
 class NodeFailureListener extends RestClient.FailureListener {

-    private static final Logger logger = Loggers.getLogger(NodeFailureListener.class);
+    private static final Logger logger = LogManager.getLogger(NodeFailureListener.class);

     /**
      * The optional {@link Sniffer} associated with the {@link RestClient}.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java
index 77f9de409d3bb..78ca43ff9bbf9 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java
@@ -8,10 +8,10 @@
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils;
@@ -27,7 +27,7 @@
  */
 public class PipelineHttpResource extends PublishableHttpResource {

-    private static final Logger logger = Loggers.getLogger(PipelineHttpResource.class);
+    private static final Logger logger = LogManager.getLogger(PipelineHttpResource.class);

     /**
      * The name of the pipeline that is sent to the remote cluster.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java
index 21f208a77bf5e..5e6b056959c9d 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java
@@ -8,10 +8,10 @@
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils;
@@ -28,7 +28,7 @@
  */
 public class TemplateHttpResource extends PublishableHttpResource {

-    private static final Logger logger = Loggers.getLogger(TemplateHttpResource.class);
+    private static final Logger logger = LogManager.getLogger(TemplateHttpResource.class);

     /**
      * The name of the template that is sent to the remote cluster.
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/VersionHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/VersionHttpResource.java
index eec9162e7edb0..252d9072685b6 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/VersionHttpResource.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/VersionHttpResource.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.monitoring.exporter.http;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
@@ -12,7 +13,6 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -25,7 +25,7 @@
  */
 public class VersionHttpResource extends HttpResource {

-    private static final Logger logger = Loggers.getLogger(VersionHttpResource.class);
+    private static final Logger logger = LogManager.getLogger(VersionHttpResource.class);

     /**
      * The minimum supported version of Elasticsearch.
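The HTTP resource files above, and WatcherExistsHttpResource and LocalExporter below, all repeat the same mechanical logger migration away from Elasticsearch's Settings-aware `Loggers` helper. A minimal sketch of the pattern; `SomeResource` is a stand-in class name, not one from this change:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Sketch of the migration applied across these files.
public class SomeResource {

    // Before: the Elasticsearch helper, which these hunks remove along with its import:
    //     private static final Logger logger = Loggers.getLogger(SomeResource.class);

    // After: the plain Log4j 2 factory, with no Elasticsearch dependency needed.
    private static final Logger logger = LogManager.getLogger(SomeResource.class);

    public void run() {
        logger.debug("resource [{}] checked", "example");
    }
}
```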
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java index 3ea5b92445fdb..93eed61f4c5ff 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.exporter.http; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -12,7 +13,6 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentHelper; @@ -31,7 +31,7 @@ */ public class WatcherExistsHttpResource extends PublishableHttpResource { - private static final Logger logger = Loggers.getLogger(WatcherExistsHttpResource.class); + private static final Logger logger = LogManager.getLogger(WatcherExistsHttpResource.class); /** * Use this to avoid getting any JSON response from a request. */ diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java index 6962d91c9be62..e1475669f2dee 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.monitoring.exporter.local; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -26,7 +27,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -80,7 +80,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, CleanerService.Listener { - private static final Logger logger = Loggers.getLogger(LocalExporter.class); + private static final Logger logger = LogManager.getLogger(LocalExporter.class); public static final String TYPE = "local"; diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkActionTests.java index 55393b1860be3..bf6c77d30ea70 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkActionTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringBulkActionTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -114,7 +113,7 @@ public void testExecuteWithGlobalBlock() throws Exception { final ClusterBlocks.Builder clusterBlock = ClusterBlocks.builder().addGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ALL); when(clusterService.state()).thenReturn(ClusterState.builder(ClusterName.DEFAULT).blocks(clusterBlock).build()); - final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(Settings.EMPTY, threadPool, clusterService, + final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(threadPool, clusterService, transportService, filters, exporters, monitoringService); @@ -128,7 +127,7 @@ public void testExecuteIgnoresRequestWhenCollectionIsDisabled() throws Exception when(clusterService.state()).thenReturn(ClusterState.builder(ClusterName.DEFAULT).build()); when(monitoringService.isMonitoringActive()).thenReturn(false); - final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(Settings.EMPTY, threadPool, clusterService, + final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(threadPool, clusterService, transportService, filters, exporters, monitoringService); @@ -150,7 +149,7 @@ public void testExecuteEmptyRequest() { // it validates the request before it tries to execute it when(monitoringService.isMonitoringActive()).thenReturn(randomBoolean()); - final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(Settings.EMPTY, threadPool, clusterService, + final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(threadPool, clusterService, transportService, filters, exporters, monitoringService); @@ -215,7 +214,7 @@ public void testExecuteRequest() { return Void.TYPE; }).when(exporters).export(any(Collection.class), any(ActionListener.class)); - final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(Settings.EMPTY, threadPool, clusterService, + final TransportMonitoringBulkAction action = new TransportMonitoringBulkAction(threadPool, clusterService, transportService, filters, exporters, monitoringService); ActionTestUtils.executeBlocking(action, request); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java index f4484aa5ed755..4e251711298a1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryCollectorTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.indices.recovery.RecoveryState; @@ -55,7 +54,7 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { final boolean isElectedMaster = randomBoolean(); whenLocalNodeElectedMaster(isElectedMaster); - final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexRecoveryCollector collector = new IndexRecoveryCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(isElectedMaster), is(false)); if (isElectedMaster) { @@ -65,14 +64,14 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { public void testShouldCollectReturnsFalseIfNotMaster() { when(licenseState.isMonitoringAllowed()).thenReturn(true); - final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexRecoveryCollector collector = new IndexRecoveryCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(false), is(false)); } public void testShouldCollectReturnsTrue() { when(licenseState.isMonitoringAllowed()).thenReturn(true); - final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexRecoveryCollector collector = new IndexRecoveryCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(true), is(true)); verify(licenseState).isMonitoringAllowed(); @@ -138,7 +137,7 @@ public void testDoCollect() throws Exception { final Client client = mock(Client.class); when(client.admin()).thenReturn(adminClient); - final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexRecoveryCollector collector = new IndexRecoveryCollector(clusterService, licenseState, client); assertEquals(timeout, collector.getCollectionTimeout()); assertEquals(recoveryOnly, collector.getActiveRecoveriesOnly()); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollectorTests.java index ef96726aaee01..c41ce1ec91388 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsCollectorTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -50,7 +49,7 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { final boolean isElectedMaster = randomBoolean(); whenLocalNodeElectedMaster(isElectedMaster); - final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexStatsCollector collector = new IndexStatsCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(isElectedMaster), is(false)); if (isElectedMaster) { @@ -60,14 +59,14 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { 
public void testShouldCollectReturnsFalseIfNotMaster() { when(licenseState.isMonitoringAllowed()).thenReturn(true); - final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexStatsCollector collector = new IndexStatsCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(false), is(false)); } public void testShouldCollectReturnsTrue() { when(licenseState.isMonitoringAllowed()).thenReturn(true); - final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexStatsCollector collector = new IndexStatsCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(true), is(true)); verify(licenseState).isMonitoringAllowed(); @@ -144,7 +143,7 @@ public void testDoCollect() throws Exception { final Client client = mock(Client.class); when(client.admin()).thenReturn(adminClient); - final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final IndexStatsCollector collector = new IndexStatsCollector(clusterService, licenseState, client); assertEquals(timeout, collector.getCollectionTimeout()); final long interval = randomNonNegativeLong(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollectorTests.java index 03692cc9d5382..4aca7fcdc60a4 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsCollectorTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -43,7 +42,7 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { final boolean isElectedMaster = randomBoolean(); whenLocalNodeElectedMaster(isElectedMaster); - final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final NodeStatsCollector collector = new NodeStatsCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(isElectedMaster), is(false)); if (isElectedMaster) { @@ -55,7 +54,7 @@ public void testShouldCollectReturnsTrue() { when(licenseState.isMonitoringAllowed()).thenReturn(true); final boolean isElectedMaster = true; - final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final NodeStatsCollector collector = new NodeStatsCollector(clusterService, licenseState, client); assertThat(collector.shouldCollect(isElectedMaster), is(true)); verify(licenseState).isMonitoringAllowed(); @@ -76,7 +75,7 @@ public void testDoCollectWithFailures() throws Exception { final Client client = mock(Client.class); thenReturnNodeStats(client, timeout, nodesStatsResponse); - final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final NodeStatsCollector collector = new 
NodeStatsCollector(clusterService, licenseState, client); assertEquals(timeout, collector.getCollectionTimeout()); final FailedNodeException e = expectThrows(FailedNodeException.class, () -> @@ -110,7 +109,7 @@ public void testDoCollect() throws Exception { final Client client = mock(Client.class); thenReturnNodeStats(client, timeout, nodesStatsResponse); - final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client); + final NodeStatsCollector collector = new NodeStatsCollector(clusterService, licenseState, client); assertEquals(timeout, collector.getCollectionTimeout()); final long interval = randomNonNegativeLong(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollectorTests.java index 4affc3a164397..83c098002149a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollectorTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -48,7 +47,7 @@ public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { final boolean isElectedMaster = randomBoolean(); whenLocalNodeElectedMaster(isElectedMaster); - final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState); + final ShardsCollector collector = new ShardsCollector(clusterService, licenseState); assertThat(collector.shouldCollect(isElectedMaster), is(false)); if (isElectedMaster) { @@ -61,7 +60,7 @@ public void testShouldCollectReturnsFalseIfNotMaster() { // this controls the blockage whenLocalNodeElectedMaster(false); - final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState); + final ShardsCollector collector = new ShardsCollector(clusterService, licenseState); assertThat(collector.shouldCollect(false), is(false)); } @@ -70,14 +69,14 @@ public void testShouldCollectReturnsTrue() { when(licenseState.isMonitoringAllowed()).thenReturn(true); whenLocalNodeElectedMaster(true); - final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState); + final ShardsCollector collector = new ShardsCollector(clusterService, licenseState); assertThat(collector.shouldCollect(true), is(true)); verify(licenseState).isMonitoringAllowed(); } public void testDoCollectWhenNoClusterState() throws Exception { - final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState); + final ShardsCollector collector = new ShardsCollector(clusterService, licenseState); final Collection results = collector.doCollect(randomMonitoringNode(random()), randomNonNegativeLong(), null); assertThat(results, notNullValue()); @@ -105,7 +104,7 @@ public void testDoCollect() throws Exception { when(nodes.get(eq("_current"))).thenReturn(localNode); when(clusterState.getNodes()).thenReturn(nodes); - final ShardsCollector collector = new 
+        final ShardsCollector collector = new ShardsCollector(clusterService, licenseState);
         assertNull(collector.getCollectionTimeout());
         assertArrayEquals(indices, collector.getCollectionIndices());
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
index 308da6c900cac..158f6a812626e 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
@@ -206,6 +206,11 @@ public void testMonitoringService() throws Exception {
                 .status(),
             is(RestStatus.CREATED));

+        final Settings settings = Settings.builder()
+            .put("cluster.metadata.display_name", "my cluster")
+            .build();
+        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
         whenExportersAreReady(() -> {
             final AtomicReference<SearchResponse> searchResponse = new AtomicReference<>();
@@ -375,6 +380,11 @@ private void assertClusterStatsMonitoringDoc(final Map<String, Object> document,
         assertThat(clusterState.remove("master_node"), notNullValue());
         assertThat(clusterState.remove("nodes"), notNullValue());
         assertThat(clusterState.keySet(), empty());
+
+        final Map<String, Object> clusterSettings = (Map<String, Object>) source.get("cluster_settings");
+        assertThat(clusterSettings, notNullValue());
+        assertThat(clusterSettings.remove("cluster"), notNullValue());
+        assertThat(clusterSettings.keySet(), empty());
     }

     /**
@@ -614,6 +624,7 @@ public void disableMonitoring() throws Exception {
         final Settings settings = Settings.builder()
             .putNull("xpack.monitoring.collection.enabled")
             .putNull("xpack.monitoring.exporters._local.enabled")
+            .putNull("cluster.metadata.display_name")
             .build();
         assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
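MonitoringIT above now sets `cluster.metadata.display_name` before collecting and clears it again in `disableMonitoring()`. A minimal sketch of that transient-setting round trip, assuming an `ESIntegTestCase`-style `client()` helper exactly as the test uses:

    // Set the transient display name (as testMonitoringService does).
    Settings set = Settings.builder()
            .put("cluster.metadata.display_name", "my cluster")
            .build();
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(set));

    // Clear it again with putNull (as disableMonitoring does).
    Settings clear = Settings.builder()
            .putNull("cluster.metadata.display_name")
            .build();
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clear));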
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java
index 09b2ccd079a76..62bb18ee03331 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java
@@ -195,7 +195,7 @@ public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterServic
     }
         SchedulerEngine schedulerEngine = new SchedulerEngine(settings, getClock());
-        return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(settings, client, schedulerEngine, threadPool));
+        return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(client, schedulerEngine, threadPool));
     }

     // overridable by tests
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
index 41edf220e667e..e900d76c84913 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java
@@ -5,13 +5,13 @@
  */
 package org.elasticsearch.xpack.rollup;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.search.MultiSearchResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.common.TriFunction;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchHits;
@@ -55,7 +55,7 @@
  */
 public class RollupResponseTranslator {

-    private static final Logger logger = Loggers.getLogger(RollupResponseTranslator.class);
+    private static final Logger logger = LogManager.getLogger(RollupResponseTranslator.class);

     /**
      * Verifies a live-only search response. Essentially just checks for failure then returns
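The logger change above recurs in many files below: the deprecated `Loggers.getLogger(Class)` helper from `org.elasticsearch.common.logging` is replaced by Log4j's own `LogManager`. A self-contained sketch of the resulting shape (the class name is illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LoggingSketch {
        // Before this diff: Loggers.getLogger(LoggingSketch.class).
        private static final Logger logger = LogManager.getLogger(LoggingSketch.class);
    }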
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java
index 5cdc40df4d699..411ccd15058b5 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
 import org.elasticsearch.tasks.Task;
@@ -35,9 +34,8 @@ public class TransportDeleteRollupJobAction extends TransportTasksAction {

     @Inject
-    public TransportDeleteRollupJobAction(Settings settings, TransportService transportService,
-                                          ActionFilters actionFilters, ClusterService clusterService) {
-        super(settings, DeleteRollupJobAction.NAME, clusterService, transportService, actionFilters,
+    public TransportDeleteRollupJobAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService) {
+        super(DeleteRollupJobAction.NAME, clusterService, transportService, actionFilters,
             DeleteRollupJobAction.Request::new, DeleteRollupJobAction.Response::new, ThreadPool.Names.SAME);
     }
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java
index af36d7bc6718c..4a1ee1d1471cc 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java
@@ -16,7 +16,6 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.rollup.RollupField;
@@ -37,9 +36,8 @@ public class TransportGetRollupCapsAction extends HandledTransportAction) GetRollupCapsAction.Request::new);
         this.clusterService = clusterService;
     }
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java
index 518fe7ec29f0f..dd25dff489840 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupIndexCapsAction.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction;
@@ -35,9 +34,9 @@ public class TransportGetRollupIndexCapsAction extends HandledTransportAction) GetRollupIndexCapsAction.Request::new);
         this.clusterService = clusterService;
     }
@@ -46,7 +45,7 @@ public TransportGetRollupIndexCapsAction(Settings settings, TransportService tra
     protected void doExecute(Task task, GetRollupIndexCapsAction.Request request,
                              ActionListener listener) {
-        IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(clusterService.getSettings());
+        IndexNameExpressionResolver resolver = new IndexNameExpressionResolver();
         String[] indices = resolver.concreteIndexNames(clusterService.state(), request.indicesOptions(), request.indices());
         Map allCaps = getCapsByRollupIndex(Arrays.asList(indices),
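`IndexNameExpressionResolver` above loses its `Settings` constructor argument; wildcard and alias resolution only needs the cluster state plus the request's indices options. A sketch of the call pattern, with `clusterService` and `request` assumed from the surrounding transport action:

    // No Settings required any more; resolution happens against cluster state.
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver();
    String[] concrete = resolver.concreteIndexNames(clusterService.state(),
            request.indicesOptions(), request.indices());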
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java
index 0f684de9ea268..d15a7e2b062a0 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
 import org.elasticsearch.tasks.Task;
@@ -38,9 +37,8 @@ public class TransportGetRollupJobAction extends TransportTasksAction {

     @Inject
-    public TransportGetRollupJobAction(Settings settings, TransportService transportService,
-                                       ActionFilters actionFilters, ClusterService clusterService) {
-        super(settings, GetRollupJobsAction.NAME, clusterService, transportService, actionFilters,
+    public TransportGetRollupJobAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService) {
+        super(GetRollupJobsAction.NAME, clusterService, transportService, actionFilters,
             GetRollupJobsAction.Request::new, GetRollupJobsAction.Response::new, ThreadPool.Names.SAME);
     }
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
index f0600d80f82a6..cb04f5554b437 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.license.LicenseUtils;
@@ -60,11 +59,11 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction
                              listener) {
-        RollupSearchContext rollupSearchContext = separateIndices(request.indices(),
-            clusterService.state().getMetaData().indices());
+        IndexNameExpressionResolver resolver = new IndexNameExpressionResolver();
+        String[] indices = resolver.concreteIndexNames(clusterService.state(), request.indicesOptions(), request.indices());
+        RollupSearchContext rollupSearchContext = separateIndices(indices, clusterService.state().getMetaData().indices());

         MultiSearchRequest msearch = createMSearchRequest(request, registry, rollupSearchContext);
@@ -401,9 +402,10 @@ static RollupSearchContext separateIndices(String[] indices, ImmutableOpenMap 0;
         if (rollup.size() > 1) {
-            throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup index at a time.");
+            throw new IllegalArgumentException("RollupSearch currently only supports searching one rollup index at a time. " +
+                "Found the following rollup indices: " + rollup);
         }
-        return new RollupSearchContext(normal.toArray(new String[normal.size()]), rollup.toArray(new String[rollup.size()]), jobCaps);
+        return new RollupSearchContext(normal.toArray(new String[0]), rollup.toArray(new String[0]), jobCaps);
     }

     class TransportHandler implements TransportRequestHandler {
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportStartRollupAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportStartRollupAction.java
index 9d2e8ffa9a300..83753da078bfe 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportStartRollupAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportStartRollupAction.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.LicenseUtils;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.tasks.Task;
@@ -34,10 +33,10 @@ public class TransportStartRollupAction extends TransportTasksAction
             TransportRollupSearchAction.separateIndices(indices, metaMap.build()));
-        assertThat(e.getMessage(), equalTo("RollupSearch currently only supports searching one rollup index at a time."));
" + + "Found the following rollup indices: [foo, bar]")); } public void testEmptyMsearch() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java index fc5962d705c94..b3a653c044de4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java @@ -9,6 +9,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; @@ -16,6 +17,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.security.SecurityField.setting; @@ -35,8 +37,10 @@ class PkiRealmBootstrapCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { final Settings settings = context.settings; - final boolean pkiRealmEnabled = settings.getGroups(RealmSettings.PREFIX).values().stream() - .filter(s -> PkiRealmSettings.TYPE.equals(s.get("type"))) + final Map realms = RealmSettings.getRealmSettings(settings); + final boolean pkiRealmEnabled = realms.entrySet().stream() + .filter(e -> PkiRealmSettings.TYPE.equals(e.getKey().getType())) + .map(Map.Entry::getValue) .anyMatch(s -> s.getAsBoolean("enabled", true)); if (pkiRealmEnabled) { for (String contextName : getSslContextNames(settings)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 46324c5c04621..ae0b34dde8cdc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; @@ -28,7 +29,6 @@ import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; @@ -105,16 +105,16 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; -import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction; import 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
index 46324c5c04621..ae0b34dde8cdc 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.Version;
@@ -28,7 +29,6 @@
 import org.elasticsearch.common.inject.util.Providers;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.regex.Regex;
@@ -105,16 +105,16 @@
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction;
 import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction;
 import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction;
+import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.user.GetUsersAction;
 import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction;
-import org.elasticsearch.xpack.core.security.action.user.GetUserPrivilegesAction;
 import org.elasticsearch.xpack.core.security.action.user.PutUserAction;
 import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField;
 import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler;
+import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings;
 import org.elasticsearch.xpack.core.security.authc.Realm;
-import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
@@ -122,6 +122,7 @@
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache;
 import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
+import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult;
 import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField;
 import org.elasticsearch.xpack.core.security.support.Automatons;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
@@ -161,9 +162,9 @@
 import org.elasticsearch.xpack.security.action.user.TransportAuthenticateAction;
 import org.elasticsearch.xpack.security.action.user.TransportChangePasswordAction;
 import org.elasticsearch.xpack.security.action.user.TransportDeleteUserAction;
+import org.elasticsearch.xpack.security.action.user.TransportGetUserPrivilegesAction;
 import org.elasticsearch.xpack.security.action.user.TransportGetUsersAction;
 import org.elasticsearch.xpack.security.action.user.TransportHasPrivilegesAction;
-import org.elasticsearch.xpack.security.action.user.TransportGetUserPrivilegesAction;
 import org.elasticsearch.xpack.security.action.user.TransportPutUserAction;
 import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction;
 import org.elasticsearch.xpack.security.audit.AuditTrail;
@@ -185,7 +186,6 @@
 import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
 import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore;
 import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
-import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult;
 import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor;
 import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
 import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
@@ -256,7 +256,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin,
         DiscoveryPlugin, MapperPlugin, ExtensiblePlugin {

-    private static final Logger logger = Loggers.getLogger(Security.class);
+    private static final Logger logger = LogManager.getLogger(Security.class);

     static final Setting<List<String>> AUDIT_OUTPUTS_SETTING =
         Setting.listSetting(SecurityField.setting("audit.outputs"),
@@ -344,7 +344,7 @@ public Collection<Module> createGuiceModules() {
             b.bind(CompositeRolesStore.class).toProvider(Providers.of(null)); // for SecurityFeatureSet
             b.bind(NativeRoleMappingStore.class).toProvider(Providers.of(null)); // for SecurityFeatureSet
             b.bind(AuditTrailService.class)
-                .toInstance(new AuditTrailService(settings, Collections.emptyList(), getLicenseState()));
+                .toInstance(new AuditTrailService(Collections.emptyList(), getLicenseState()));
         });
         return modules;
     }
@@ -415,7 +415,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste
             }
         }
         final AuditTrailService auditTrailService =
-            new AuditTrailService(settings, new ArrayList<>(auditTrails), getLicenseState());
+            new AuditTrailService(new ArrayList<>(auditTrails), getLicenseState());
         components.add(auditTrailService);
         this.auditTrailService.set(auditTrailService);
@@ -487,16 +487,16 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste
         final Set<RequestInterceptor> requestInterceptors;
         if (XPackSettings.DLS_FLS_ENABLED.get(settings)) {
             requestInterceptors = Collections.unmodifiableSet(Sets.newHashSet(
-                new SearchRequestInterceptor(settings, threadPool, getLicenseState()),
-                new UpdateRequestInterceptor(settings, threadPool, getLicenseState()),
-                new BulkShardRequestInterceptor(settings, threadPool, getLicenseState()),
-                new ResizeRequestInterceptor(settings, threadPool, getLicenseState(), auditTrailService),
+                new SearchRequestInterceptor(threadPool, getLicenseState()),
+                new UpdateRequestInterceptor(threadPool, getLicenseState()),
+                new BulkShardRequestInterceptor(threadPool, getLicenseState()),
+                new ResizeRequestInterceptor(threadPool, getLicenseState(), auditTrailService),
                 new IndicesAliasesRequestInterceptor(threadPool.getThreadContext(), getLicenseState(), auditTrailService)));
         } else {
             requestInterceptors = Collections.emptySet();
         }

-        securityActionFilter.set(new SecurityActionFilter(settings, authcService.get(), authzService, getLicenseState(),
+        securityActionFilter.set(new SecurityActionFilter(authcService.get(), authzService, getLicenseState(),
             requestInterceptors, threadPool, securityContext.get(), destructiveOperations));

         return components;
@@ -607,7 +607,7 @@ public static List> getSettings(boolean transportClientMode, List getRestHeaders() {
     public List<String> getSettingsFilter() {
         List<String> asArray = settings.getAsList(SecurityField.setting("hide_settings"));
         ArrayList<String> settingsFilter = new ArrayList<>(asArray);
-        settingsFilter.addAll(RealmSettings.getSettingsFilter(securityExtensions));
         // hide settings where we don't define them - they are part of a group...
         settingsFilter.add("transport.profiles.*." + SecurityField.setting("*"));
         return settingsFilter;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java
index 3e1f9f97c2fea..a0ab370e6dba2 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java
@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.security.action.filter;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -18,8 +20,6 @@
 import org.elasticsearch.action.support.ActionFilterChain;
 import org.elasticsearch.action.support.ContextPreservingActionListener;
 import org.elasticsearch.action.support.DestructiveOperations;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.LicenseUtils;
 import org.elasticsearch.license.XPackLicenseState;
@@ -41,10 +41,11 @@
 import java.util.Set;
 import java.util.function.Predicate;

-public class SecurityActionFilter extends AbstractComponent implements ActionFilter {
+public class SecurityActionFilter implements ActionFilter {

     private static final Predicate<String> LICENSE_EXPIRATION_ACTION_MATCHER = HealthAndStatsPrivilege.INSTANCE.predicate();
     private static final Predicate<String> SECURITY_ACTION_MATCHER = Automatons.predicate("cluster:admin/xpack/security*");
+    private static final Logger logger = LogManager.getLogger(SecurityActionFilter.class);

     private final AuthenticationService authcService;
     private final AuthorizationService authzService;
@@ -55,10 +56,9 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
     private final SecurityContext securityContext;
     private final DestructiveOperations destructiveOperations;

-    public SecurityActionFilter(Settings settings, AuthenticationService authcService, AuthorizationService authzService,
+    public SecurityActionFilter(AuthenticationService authcService, AuthorizationService authzService,
                                 XPackLicenseState licenseState, Set<RequestInterceptor> requestInterceptors, ThreadPool threadPool,
                                 SecurityContext securityContext, DestructiveOperations destructiveOperations) {
-        super(settings);
         this.authcService = authcService;
         this.authzService = authzService;
         this.licenseState = licenseState;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java
index abdaba7cf29d3..c9eb571f3ae09 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java
@@ -5,12 +5,12 @@
  */
 package org.elasticsearch.xpack.security.action.interceptor;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.action.bulk.BulkItemRequest;
 import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestStatus;
@@ -24,13 +24,14 @@
 /**
  * Similar to {@link UpdateRequestInterceptor}, but checks if there are update requests embedded in a bulk request.
  */
-public class BulkShardRequestInterceptor extends AbstractComponent implements RequestInterceptor {
+public class BulkShardRequestInterceptor implements RequestInterceptor {
+
+    private static final Logger logger = LogManager.getLogger(BulkShardRequestInterceptor.class);

     private final ThreadContext threadContext;
     private final XPackLicenseState licenseState;

-    public BulkShardRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) {
-        super(settings);
+    public BulkShardRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState) {
         this.threadContext = threadPool.getThreadContext();
         this.licenseState = licenseState;
     }
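`AbstractComponent` existed mostly to hand subclasses a `Settings` field and a logger; with both gone, components declare exactly what they use. A hypothetical component in the post-change shape (the class name is illustrative, not from this diff):

    public class ExampleInterceptor {
        private static final Logger logger = LogManager.getLogger(ExampleInterceptor.class);

        private final ThreadContext threadContext;
        private final XPackLicenseState licenseState;

        // No Settings parameter and no super(settings) call any more.
        public ExampleInterceptor(ThreadPool threadPool, XPackLicenseState licenseState) {
            this.threadContext = threadPool.getThreadContext();
            this.licenseState = licenseState;
        }
    }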
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java
index 5f6f4d1643bef..b9bf11aca3a8d 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.xpack.security.action.interceptor;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.IndicesRequest;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
@@ -19,17 +19,17 @@
 /**
  * Base class for interceptors that disables features when field level security is configured for indices a request
  * is going to execute on.
  */
-abstract class FieldAndDocumentLevelSecurityRequestInterceptor extends AbstractComponent implements
-        RequestInterceptor {
+abstract class FieldAndDocumentLevelSecurityRequestInterceptor implements RequestInterceptor {

     private final ThreadContext threadContext;
     private final XPackLicenseState licenseState;
+    private final Logger logger;

-    FieldAndDocumentLevelSecurityRequestInterceptor(Settings settings, ThreadContext threadContext,
-                                                    XPackLicenseState licenseState) {
-        super(settings);
+    FieldAndDocumentLevelSecurityRequestInterceptor(ThreadContext threadContext, XPackLicenseState licenseState) {
         this.threadContext = threadContext;
         this.licenseState = licenseState;
+        this.logger = LogManager.getLogger(getClass());
     }

     @Override
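Unlike the static loggers elsewhere in this diff, the abstract interceptor above resolves its logger at construction time via `getClass()`, so each concrete subclass logs under its own class name. A minimal sketch of that idiom (class names are illustrative):

    abstract class BaseInterceptor {
        // Resolved per concrete subclass, not per declaring class.
        protected final Logger logger = LogManager.getLogger(getClass());
    }

    class SearchSketchInterceptor extends BaseInterceptor {
        // log lines are attributed to SearchSketchInterceptor, not BaseInterceptor
    }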
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java
index 255f46cb02c2e..4689d480c1762 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java
@@ -9,8 +9,6 @@
 import org.apache.lucene.util.automaton.Operations;
 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestStatus;
@@ -23,15 +21,14 @@
 import org.elasticsearch.xpack.core.security.support.Exceptions;
 import org.elasticsearch.xpack.security.audit.AuditTrailService;

-public final class ResizeRequestInterceptor extends AbstractComponent implements RequestInterceptor {
+public final class ResizeRequestInterceptor implements RequestInterceptor {

     private final ThreadContext threadContext;
     private final XPackLicenseState licenseState;
     private final AuditTrailService auditTrailService;

-    public ResizeRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState,
+    public ResizeRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState,
                                     AuditTrailService auditTrailService) {
-        super(settings);
         this.threadContext = threadPool.getThreadContext();
         this.licenseState = licenseState;
         this.auditTrailService = auditTrailService;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java
index 3ceaa02ee7286..5738d3eef5051 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java
@@ -7,7 +7,6 @@

 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -18,8 +17,8 @@
  */
 public class SearchRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor {

-    public SearchRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) {
-        super(settings, threadPool.getThreadContext(), licenseState);
+    public SearchRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState) {
+        super(threadPool.getThreadContext(), licenseState);
     }

     @Override
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java
index 40b63d943d818..db265333e6965 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java
@@ -7,7 +7,6 @@

 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -22,8 +21,8 @@
  */
 public class UpdateRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor {

-    public UpdateRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) {
-        super(settings, threadPool.getThreadContext(), licenseState);
+    public UpdateRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState) {
+        super(threadPool.getThreadContext(), licenseState);
     }

     @Override
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportDeletePrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportDeletePrivilegesAction.java
index da23ac50b6912..502caf1c94be9 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportDeletePrivilegesAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/privilege/TransportDeletePrivilegesAction.java
@@ -9,7 +9,6 @@
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.transport.TransportService;
@@ -29,10 +28,9 @@ public class TransportDeletePrivilegesAction extends HandledTransportAction) AuthenticateRequest::new);
+    public TransportAuthenticateAction(TransportService transportService, ActionFilters actionFilters, SecurityContext securityContext) {
+        super(AuthenticateAction.NAME, transportService, actionFilters, (Supplier) AuthenticateRequest::new);
         this.securityContext = securityContext;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java
index 5046beca1c837..96553f8f7bdeb 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java
@@ -24,12 +24,14 @@
 public class TransportChangePasswordAction extends HandledTransportAction {

+    private final Settings settings;
     private final NativeUsersStore nativeUsersStore;

     @Inject
     public TransportChangePasswordAction(Settings settings, TransportService transportService, ActionFilters actionFilters,
                                          NativeUsersStore nativeUsersStore) {
-        super(settings, ChangePasswordAction.NAME, transportService, actionFilters, ChangePasswordRequest::new);
+        super(ChangePasswordAction.NAME, transportService, actionFilters, ChangePasswordRequest::new);
+        this.settings = settings;
         this.nativeUsersStore = nativeUsersStore;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java
index 36efdf3bd1737..12e840b4f89f2 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java
@@ -25,13 +25,14 @@
 public class TransportDeleteUserAction extends HandledTransportAction {

+    private final Settings settings;
     private final NativeUsersStore usersStore;

     @Inject
     public TransportDeleteUserAction(Settings settings, ActionFilters actionFilters,
                                      NativeUsersStore usersStore, TransportService transportService) {
-        super(settings, DeleteUserAction.NAME, transportService, actionFilters,
-            (Supplier) DeleteUserRequest::new);
+        super(DeleteUserAction.NAME, transportService, actionFilters, (Supplier) DeleteUserRequest::new);
+        this.settings = settings;
         this.usersStore = usersStore;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java
index 7a86842eb5297..518c9cb25a01a 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesAction.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -50,9 +49,9 @@ public class TransportGetUserPrivilegesAction extends HandledTransportAction {

+    private final Settings settings;
     private final NativeUsersStore usersStore;
     private final ReservedRealm reservedRealm;

     @Inject
     public TransportGetUsersAction(Settings settings, ActionFilters actionFilters, NativeUsersStore usersStore,
                                    TransportService transportService, ReservedRealm reservedRealm) {
-        super(settings, GetUsersAction.NAME, transportService, actionFilters, GetUsersRequest::new);
+        super(GetUsersAction.NAME, transportService, actionFilters, GetUsersRequest::new);
+        this.settings = settings;
         this.usersStore = usersStore;
         this.reservedRealm = reservedRealm;
     }
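Where an action still reads node settings at request time (password hashing, anonymous-user resolution, and similar), the diff keeps `Settings` as an explicit field rather than the one previously inherited from the superclass. A hypothetical shape following TransportGetUsersAction above (class and parameters are illustrative):

    public class ExampleTransportAction {
        private final Settings settings;        // kept explicitly, no longer inherited
        private final NativeUsersStore usersStore;

        public ExampleTransportAction(Settings settings, NativeUsersStore usersStore) {
            this.settings = settings;
            this.usersStore = usersStore;
        }
    }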
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
index b49984b28da08..37cd3478aa533 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -56,10 +55,10 @@ public class TransportHasPrivilegesAction extends HandledTransportAction {

+    private final Settings settings;
     private final NativeUsersStore usersStore;

     @Inject
     public TransportPutUserAction(Settings settings, ActionFilters actionFilters,
                                   NativeUsersStore usersStore, TransportService transportService) {
-        super(settings, PutUserAction.NAME, transportService, actionFilters, PutUserRequest::new);
+        super(PutUserAction.NAME, transportService, actionFilters, PutUserRequest::new);
+        this.settings = settings;
         this.usersStore = usersStore;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java
index cbf505d9c6751..ee30168259bf0 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java
@@ -27,13 +27,15 @@
  */
 public class TransportSetEnabledAction extends HandledTransportAction {

+    private final Settings settings;
     private final ThreadPool threadPool;
     private final NativeUsersStore usersStore;

     @Inject
     public TransportSetEnabledAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                                      ActionFilters actionFilters, NativeUsersStore usersStore) {
-        super(settings, SetEnabledAction.NAME, transportService, actionFilters, SetEnabledRequest::new);
+        super(SetEnabledAction.NAME, transportService, actionFilters, SetEnabledRequest::new);
+        this.settings = settings;
         this.threadPool = threadPool;
         this.usersStore = usersStore;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java
index e36dee3d67c49..97b071ad26297 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java
@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.security.audit;

-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.transport.TransportMessage;
@@ -19,7 +17,7 @@
 import java.util.Collections;
 import java.util.List;

-public class AuditTrailService extends AbstractComponent implements AuditTrail {
+public class AuditTrailService implements AuditTrail {

     private final XPackLicenseState licenseState;
     private final List<AuditTrail> auditTrails;
@@ -29,8 +27,7 @@ public String name() {
         return "service";
     }

-    public AuditTrailService(Settings settings, List<AuditTrail> auditTrails, XPackLicenseState licenseState) {
-        super(settings);
+    public AuditTrailService(List<AuditTrail> auditTrails, XPackLicenseState licenseState) {
         this.auditTrails = Collections.unmodifiableList(auditTrails);
         this.licenseState = licenseState;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
index d8b4b4e4bc198..3cf63976e5eed 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security.audit.index;

+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
@@ -29,7 +30,6 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.component.LifecycleListener;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.settings.Setting;
@@ -113,7 +113,7 @@
 /**
  * Audit trail implementation that writes events into an index.
  */
-public class IndexAuditTrail extends AbstractComponent implements AuditTrail, ClusterStateListener {
+public class IndexAuditTrail implements AuditTrail, ClusterStateListener {

     public static final String NAME = "index";
     public static final String DOC_TYPE = "doc";
@@ -163,8 +163,10 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl
     private static final Setting<TimeValue> FLUSH_TIMEOUT_SETTING =
         Setting.timeSetting(setting("audit.index.flush_interval"), DEFAULT_FLUSH_INTERVAL, TimeValue.timeValueMillis(1L), Property.NodeScope);
+    private static final Logger logger = LogManager.getLogger(IndexAuditTrail.class);

     private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);
+    private final Settings settings;
     private final String nodeName;
     private final Client client;
     private final QueueConsumer queueConsumer;
@@ -185,7 +187,7 @@ public String name() {
     }

     public IndexAuditTrail(Settings settings, Client client, ThreadPool threadPool, ClusterService clusterService) {
-        super(settings);
+        this.settings = settings;
         this.threadPool = threadPool;
         this.clusterService = clusterService;
         this.nodeName = Node.NODE_NAME_SETTING.get(settings);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java
index 05665d2f6a012..0a3ae1224683e 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -74,7 +73,7 @@
 import static org.elasticsearch.xpack.security.audit.AuditLevel.parse;
 import static org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent;

-public class LoggingAuditTrail extends AbstractComponent implements AuditTrail, ClusterStateListener {
+public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {

     public static final String REST_ORIGIN_FIELD_VALUE = "rest";
     public static final String LOCAL_ORIGIN_FIELD_VALUE = "local_node";
@@ -168,7 +167,6 @@ public LoggingAuditTrail(Settings settings, ClusterService clusterService, Threa
     }

     LoggingAuditTrail(Settings settings, ClusterService clusterService, Logger logger, ThreadContext threadContext) {
-        super(settings);
         this.logger = logger;
         this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings));
         this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
index d5242fab45fac..0c3706d6f4ced 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.security.authc;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchSecurityException;
@@ -12,7 +14,6 @@
 import org.elasticsearch.action.support.ContextPreservingActionListener;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -49,7 +50,9 @@
  * This service also supports request level caching of authenticated users (i.e. once a user authenticated
  * successfully, it is set on the request context to avoid subsequent redundant authentication process)
  */
-public class AuthenticationService extends AbstractComponent {
+public class AuthenticationService {
+
+    private static final Logger logger = LogManager.getLogger(AuthenticationService.class);

     private final Realms realms;
     private final AuditTrail auditTrail;
@@ -64,7 +67,6 @@ public class AuthenticationService extends AbstractComponent {
     public AuthenticationService(Settings settings, Realms realms, AuditTrailService auditTrail,
                                  AuthenticationFailureHandler failureHandler, ThreadPool threadPool,
                                  AnonymousUser anonymousUser, TokenService tokenService) {
-        super(settings);
         this.nodeName = Node.NODE_NAME_SETTING.get(settings);
         this.realms = realms;
         this.auditTrail = auditTrail;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
index d568a052a5e15..7cd4fa96aa4a3 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java
@@ -33,12 +33,13 @@
 import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore;
 import org.elasticsearch.xpack.security.support.SecurityIndexManager;

-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
+import java.util.stream.Collectors;

 /**
  * Provides a single entry point into dealing with all standard XPack security {@link Realm realms}.
@@ -98,9 +99,9 @@ public static Map getFactories(ThreadPool threadPool, Res
             securityIndex.addIndexStateListener(nativeRealm::onSecurityIndexStateChange);
             return nativeRealm;
         });
-        map.put(LdapRealmSettings.AD_TYPE, config -> new LdapRealm(LdapRealmSettings.AD_TYPE, config, sslService,
+        map.put(LdapRealmSettings.AD_TYPE, config -> new LdapRealm(config, sslService,
             resourceWatcherService, nativeRoleMappingStore, threadPool));
-        map.put(LdapRealmSettings.LDAP_TYPE, config -> new LdapRealm(LdapRealmSettings.LDAP_TYPE, config,
+        map.put(LdapRealmSettings.LDAP_TYPE, config -> new LdapRealm(config,
             sslService, resourceWatcherService, nativeRoleMappingStore, threadPool));
         map.put(PkiRealmSettings.TYPE, config -> new PkiRealm(config, resourceWatcherService, nativeRoleMappingStore));
         map.put(SamlRealmSettings.TYPE, config -> SamlRealm.create(config, sslService, resourceWatcherService, nativeRoleMappingStore));
@@ -112,20 +113,14 @@ private InternalRealms() {
     }

     public static List<BootstrapCheck> getBootstrapChecks(final Settings globalSettings, final Environment env) {
-        final List<BootstrapCheck> checks = new ArrayList<>();
-        final Map settingsByRealm = RealmSettings.getRealmSettings(globalSettings);
-        settingsByRealm.forEach((name, settings) -> {
-            final RealmConfig realmConfig = new RealmConfig(name, settings, globalSettings, env, null);
-            switch (realmConfig.type()) {
-                case LdapRealmSettings.AD_TYPE:
-                case LdapRealmSettings.LDAP_TYPE:
-                case PkiRealmSettings.TYPE:
-                    final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(realmConfig);
-                    if (check != null) {
-                        checks.add(check);
-                    }
-            }
-        });
+        final Set<String> realmTypes = Sets.newHashSet(LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE);
+        final List<BootstrapCheck> checks = RealmSettings.getRealmSettings(globalSettings)
+            .keySet().stream()
+            .filter(id -> realmTypes.contains(id.getType()))
+            .map(id -> new RealmConfig(id, globalSettings, env, null))
+            .map(RoleMappingFileBootstrapCheck::create)
+            .filter(Objects::nonNull)
+            .collect(Collectors.toList());
         return checks;
     }
 }
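`InternalRealms.getBootstrapChecks` above is an early consumer of the new realm addressing: a realm is identified by a (type, name) pair, and `RealmConfig` is built from that identifier plus the node's global settings. A sketch, where the `"ldap1"` realm name is purely illustrative and `globalSettings`/`env` are assumed from the calling context (a null thread context is what `getBootstrapChecks` passes):

    // Hypothetical identifier; a (type, name) pair replaces the old bare realm name.
    RealmConfig.RealmIdentifier id =
            new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "ldap1");

    // Built from the identifier plus global settings, as in the stream above.
    RealmConfig config = new RealmConfig(id, globalSettings, env, null);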
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java
index ce45ee2bedf9b..925654fae8bbf 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java
@@ -20,9 +20,11 @@
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;

+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -33,16 +35,18 @@
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
-import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
-
+import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;

 /**
  * Serves as a realms registry (also responsible for ordering the realms appropriately)
  */
-public class Realms extends AbstractComponent implements Iterable<Realm> {
+public class Realms implements Iterable<Realm> {

+    private static final Logger logger = LogManager.getLogger(Realms.class);
+
+    private final Settings settings;
     private final Environment env;
     private final Map<String, Realm.Factory> factories;
     private final XPackLicenseState licenseState;
@@ -58,7 +62,7 @@ public class Realms extends AbstractComponent implements Iterable<Realm> {
     public Realms(Settings settings, Environment env, Map<String, Realm.Factory> factories, XPackLicenseState licenseState,
                   ThreadContext threadContext, ReservedRealm reservedRealm) throws Exception {
-        super(settings);
+        this.settings = settings;
         this.env = env;
         this.factories = factories;
         this.licenseState = licenseState;
@@ -151,41 +155,37 @@ public Realm.Factory realmFactory(String type) {
     }

     protected List<Realm> initRealms() throws Exception {
-        Settings realmsSettings = RealmSettings.get(settings);
+        Map<RealmConfig.RealmIdentifier, Settings> realmsSettings = RealmSettings.getRealmSettings(settings);
         Set<String> internalTypes = new HashSet<>();
         List<Realm> realms = new ArrayList<>();
         List<String> kerberosRealmNames = new ArrayList<>();
-        for (String name : realmsSettings.names()) {
-            Settings realmSettings = realmsSettings.getAsSettings(name);
-            String type = realmSettings.get("type");
-            if (type == null) {
-                throw new IllegalArgumentException("missing realm type for [" + name + "] realm");
-            }
-            Realm.Factory factory = factories.get(type);
+        for (RealmConfig.RealmIdentifier identifier: realmsSettings.keySet()) {
+            Realm.Factory factory = factories.get(identifier.getType());
             if (factory == null) {
-                throw new IllegalArgumentException("unknown realm type [" + type + "] set for realm [" + name + "]");
+                throw new IllegalArgumentException("unknown realm type [" + identifier.getType() + "] for realm [" + identifier + "]");
for realm [" + identifier + "]"); } - RealmConfig config = new RealmConfig(name, realmSettings, settings, env, threadContext); + RealmConfig config = new RealmConfig(identifier, settings, env, threadContext); if (!config.enabled()) { if (logger.isDebugEnabled()) { - logger.debug("realm [{}/{}] is disabled", type, name); + logger.debug("realm [{}] is disabled", identifier); } continue; } - if (FileRealmSettings.TYPE.equals(type) || NativeRealmSettings.TYPE.equals(type)) { + if (FileRealmSettings.TYPE.equals(identifier.getType()) || NativeRealmSettings.TYPE.equals(identifier.getType())) { // this is an internal realm factory, let's make sure we didn't already registered one // (there can only be one instance of an internal realm) - if (internalTypes.contains(type)) { - throw new IllegalArgumentException("multiple [" + type + "] realms are configured. [" + type + - "] is an internal realm and therefore there can only be one such realm configured"); + if (internalTypes.contains(identifier.getType())) { + throw new IllegalArgumentException("multiple [" + identifier.getType() + "] realms are configured. [" + + identifier.getType() + "] is an internal realm and therefore there can only be one such realm configured"); } - internalTypes.add(type); + internalTypes.add(identifier.getType()); } - if (KerberosRealmSettings.TYPE.equals(type)) { - kerberosRealmNames.add(name); + if (KerberosRealmSettings.TYPE.equals(identifier.getType())) { + kerberosRealmNames.add(identifier.getName()); if (kerberosRealmNames.size() > 1) { - throw new IllegalArgumentException("multiple realms " + kerberosRealmNames.toString() + " configured of type [" + type - + "], [" + type + "] can only have one such realm configured"); + throw new IllegalArgumentException("multiple realms " + kerberosRealmNames.toString() + " configured of type [" + + identifier.getType() + "], [" + identifier.getType() + "] can only have one such realm " + + "configured"); } } realms.add(factory.create(config)); @@ -268,13 +268,14 @@ public void usageStats(ActionListener> listener) { private void addNativeRealms(List realms) throws Exception { Realm.Factory fileRealm = factories.get(FileRealmSettings.TYPE); if (fileRealm != null) { - - realms.add(fileRealm.create(new RealmConfig("default_" + FileRealmSettings.TYPE, Settings.EMPTY, + realms.add(fileRealm.create(new RealmConfig( + new RealmConfig.RealmIdentifier(FileRealmSettings.TYPE, "default_" + FileRealmSettings.TYPE), settings, env, threadContext))); } Realm.Factory indexRealmFactory = factories.get(NativeRealmSettings.TYPE); if (indexRealmFactory != null) { - realms.add(indexRealmFactory.create(new RealmConfig("default_" + NativeRealmSettings.TYPE, Settings.EMPTY, + realms.add(indexRealmFactory.create(new RealmConfig( + new RealmConfig.RealmIdentifier(NativeRealmSettings.TYPE, "default_" + NativeRealmSettings.TYPE), settings, env, threadContext))); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 8814a62708750..be5b11aa666d1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.security.authc; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import 
org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -43,7 +45,6 @@ import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; @@ -126,7 +127,7 @@ * Service responsible for the creation, validation, and other management of {@link UserToken} * objects for authentication */ -public final class TokenService extends AbstractComponent { +public final class TokenService { /** * The parameters below are used to generate the cryptographic key that is used to encrypt the @@ -160,8 +161,10 @@ public final class TokenService extends AbstractComponent { static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; private static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); private static final int MAX_RETRY_ATTEMPTS = 5; + private static final Logger logger = LogManager.getLogger(TokenService.class); private final SecureRandom secureRandom = new SecureRandom(); + private final Settings settings; private final ClusterService clusterService; private final Clock clock; private final TimeValue expirationDelay; @@ -183,11 +186,11 @@ public final class TokenService extends AbstractComponent { */ public TokenService(Settings settings, Clock clock, Client client, SecurityIndexManager securityIndex, ClusterService clusterService) throws GeneralSecurityException { - super(settings); byte[] saltArr = new byte[SALT_BYTES]; secureRandom.nextBytes(saltArr); final SecureString tokenPassphrase = generateTokenKey(); + this.settings = settings; this.clock = clock.withZone(ZoneOffset.UTC); this.expirationDelay = TOKEN_EXPIRATION.get(settings); this.client = client; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java index a84b76beab8bb..53e171ff4f321 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -9,7 +9,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; @@ -28,7 +27,7 @@ public class NativeRealm extends CachingUsernamePasswordRealm { private final NativeUsersStore userStore; public NativeRealm(RealmConfig config, NativeUsersStore usersStore, ThreadPool threadPool) { - super(NativeRealmSettings.TYPE, config, threadPool); + super(config, threadPool); this.userStore = usersStore; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 35912de44126f..1fbb8d1eaa56e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.security.authc.esnative; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -25,7 +27,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -74,11 +75,14 @@ * No caching is done by this class, it is handled at a higher level and no polling for changes is done by this class. Modification * operations make a best effort attempt to clear the cache on all nodes for the user that was modified. */ -public class NativeUsersStore extends AbstractComponent { +public class NativeUsersStore { public static final String INDEX_TYPE = "doc"; static final String USER_DOC_TYPE = "user"; public static final String RESERVED_USER_TYPE = "reserved-user"; + private static final Logger logger = LogManager.getLogger(NativeUsersStore.class); + + private final Settings settings; private final Client client; private final ReservedUserInfo disabledDefaultUserInfo; private final ReservedUserInfo enabledDefaultUserInfo; @@ -86,7 +90,7 @@ public class NativeUsersStore extends AbstractComponent { private final SecurityIndexManager securityIndex; public NativeUsersStore(Settings settings, Client client, SecurityIndexManager securityIndex) { - super(settings); + this.settings = settings; this.client = client; this.securityIndex = securityIndex; final char[] emptyPasswordHash = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings)). 
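The hunks above and below repeat one migration in two halves: above, components stop extending AbstractComponent and instead hold their own static logger via LogManager.getLogger(...); below, raw config.settings() probes are replaced with typed lookups on RealmConfig (hasSetting, and getSetting with an optional fallback supplier). A minimal, self-contained sketch of that lookup shape follows — plain Java with invented names (SettingSketch, "timeout_ms", "scope"), not the Elasticsearch RealmConfig API itself:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    // Sketch of the typed settings lookup this patch converges on.
    // SettingSketch is a stand-in, not the Elasticsearch RealmConfig class.
    final class SettingSketch {
        private final Map<String, Object> values = new HashMap<>();

        void put(String key, Object value) {
            values.put(key, value);
        }

        // Mirrors config.hasSetting(SETTING): used below to pick between
        // user-search and user-template modes, or to detect legacy bind passwords.
        boolean hasSetting(String key) {
            return values.containsKey(key);
        }

        // Mirrors config.getSetting(SETTING, () -> fallback): the fallback is
        // evaluated lazily, so it can compute a default or throw for a required
        // setting (as base_dn does in SearchGroupsResolver below).
        @SuppressWarnings("unchecked")
        <T> T getSetting(String key, Supplier<T> fallback) {
            Object value = values.get(key);
            return value != null ? (T) value : fallback.get();
        }

        public static void main(String[] args) {
            SettingSketch config = new SettingSketch();
            config.put("timeout_ms", 5000L);
            long timeout = config.getSetting("timeout_ms", () -> 1000L); // configured value wins
            String scope = config.getSetting("scope", () -> "sub_tree"); // supplier provides default
            System.out.println(timeout + " " + scope);
        }
    }

Keeping the fallback in a supplier puts required-setting validation next to the lookup itself, which is why several constructors in the hunks below collapse multi-branch exists()/get() code into single getSetting calls.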
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 2cf548eb4e1a9..a2d172215921b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -68,7 +68,7 @@ public class ReservedRealm extends CachingUsernamePasswordRealm { public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore, AnonymousUser anonymousUser, SecurityIndexManager securityIndex, ThreadPool threadPool) { - super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env, threadPool.getThreadContext()), threadPool); + super(new RealmConfig(new RealmConfig.RealmIdentifier(TYPE, TYPE), settings, env, threadPool.getThreadContext()), threadPool); this.nativeUsersStore = nativeUsersStore; this.realmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); this.anonymousUser = anonymousUser; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java index e2586ea836dec..884cd5dbafb10 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -10,7 +10,6 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; @@ -28,7 +27,7 @@ public FileRealm(RealmConfig config, ResourceWatcherService watcherService, Thre // pkg private for testing FileRealm(RealmConfig config, FileUserPasswdStore userPasswdStore, FileUserRolesStore userRolesStore, ThreadPool threadPool) { - super(FileRealmSettings.TYPE, config, threadPool); + super(config, threadPool); this.userPasswdStore = userPasswdStore; userPasswdStore.addListener(this::expireAll); this.userRolesStore = userRolesStore; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index 0f47b6032f5ab..56c65574980db 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -79,22 +79,22 @@ public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nati KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nativeRoleMappingStore, final KerberosTicketValidator kerberosTicketValidator, final ThreadPool threadPool, final Cache userPrincipalNameToUserCache) { - super(KerberosRealmSettings.TYPE, config); + super(config); this.userRoleMapper = nativeRoleMappingStore; this.userRoleMapper.refreshRealmOnChange(this); - final TimeValue 
ttl = KerberosRealmSettings.CACHE_TTL_SETTING.get(config.settings()); + final TimeValue ttl = config.getSetting(KerberosRealmSettings.CACHE_TTL_SETTING); if (ttl.getNanos() > 0) { this.userPrincipalNameToUserCache = (userPrincipalNameToUserCache == null) ? CacheBuilder.builder() - .setExpireAfterWrite(KerberosRealmSettings.CACHE_TTL_SETTING.get(config.settings())) - .setMaximumWeight(KerberosRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())).build() + .setExpireAfterWrite(config.getSetting(KerberosRealmSettings.CACHE_TTL_SETTING)) + .setMaximumWeight(config.getSetting(KerberosRealmSettings.CACHE_MAX_USERS_SETTING)).build() : userPrincipalNameToUserCache; } else { this.userPrincipalNameToUserCache = null; } this.kerberosTicketValidator = kerberosTicketValidator; this.threadPool = threadPool; - this.keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); + this.keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH)); if (Files.exists(keytabPath) == false) { throw new IllegalArgumentException("configured service key tab file [" + keytabPath + "] does not exist"); @@ -105,8 +105,9 @@ public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nati if (Files.isReadable(keytabPath) == false) { throw new IllegalArgumentException("configured service key tab file [" + keytabPath + "] must have read permission"); } - this.enableKerberosDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); - this.removeRealmName = KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.get(config.settings()); + + this.enableKerberosDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE); + this.removeRealmName = config.getSetting(KerberosRealmSettings.SETTING_REMOVE_REALM_NAME); this.delegatedRealms = null; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java index 8e009154cad12..a8bf30fa71357 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java @@ -13,8 +13,10 @@ import com.unboundid.ldap.sdk.SearchScope; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; @@ -24,13 +26,12 @@ import java.util.List; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; +import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySIDUtil.convertToString; import static 
org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.buildDnFromDomain; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; -import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; -import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySIDUtil.convertToString; class ActiveDirectoryGroupsResolver implements GroupsResolver { @@ -39,10 +40,11 @@ class ActiveDirectoryGroupsResolver implements GroupsResolver { private final LdapSearchScope scope; private final boolean ignoreReferralErrors; - ActiveDirectoryGroupsResolver(Settings settings) { - this.baseDn = settings.get("group_search.base_dn", buildDnFromDomain(settings.get(AD_DOMAIN_NAME_SETTING))); - this.scope = LdapSearchScope.resolve(settings.get("group_search.scope"), LdapSearchScope.SUB_TREE); - this.ignoreReferralErrors = IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + ActiveDirectoryGroupsResolver(RealmConfig config) { + this.baseDn = config.getSetting(SearchGroupsResolverSettings.BASE_DN, + () -> buildDnFromDomain(config.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING))); + this.scope = config.getSetting(SearchGroupsResolverSettings.SCOPE); + this.ignoreReferralErrors = config.getSetting(IGNORE_REFERRAL_ERRORS_SETTING); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java index 8107d7488188b..da258a99d0cb4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.internal.io.IOUtils; @@ -64,30 +64,30 @@ class ActiveDirectorySessionFactory extends PoolingSessionFactory { final UpnADAuthenticator upnADAuthenticator; ActiveDirectorySessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) throws LDAPException { - super(config, sslService, new ActiveDirectoryGroupsResolver(config.settings()), + super(config, sslService, new ActiveDirectoryGroupsResolver(config), ActiveDirectorySessionFactorySettings.POOL_ENABLED, - PoolingSessionFactorySettings.BIND_DN.exists(config.settings())? getBindDN(config.settings()) : null, + config.hasSetting(PoolingSessionFactorySettings.BIND_DN) ? 
getBindDN(config) : null, () -> { - if (PoolingSessionFactorySettings.BIND_DN.exists(config.settings())) { - final String healthCheckDn = PoolingSessionFactorySettings.BIND_DN.get(config.settings()); + if (config.hasSetting(PoolingSessionFactorySettings.BIND_DN)) { + final String healthCheckDn = config.getSetting(PoolingSessionFactorySettings.BIND_DN); if (healthCheckDn.isEmpty() && healthCheckDn.indexOf('=') > 0) { return healthCheckDn; } } - return config.settings().get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, - config.settings().get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING)); + return config.getSetting(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, + () -> config.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING)); }, threadPool); - Settings settings = config.settings(); - String domainName = settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + String domainName = config.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); if (domainName == null) { - throw new IllegalArgumentException("missing [" + ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING + throw new IllegalArgumentException("missing [" + + RealmSettings.getFullSettingKey(config, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING) + "] setting for active directory"); } String domainDN = buildDnFromDomain(domainName); - final int ldapPort = ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.get(settings); - final int ldapsPort = ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.get(settings); - final int gcLdapPort = ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.get(settings); - final int gcLdapsPort = ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.get(settings); + final int ldapPort = config.getSetting(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING); + final int ldapsPort = config.getSetting(ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING); + final int gcLdapPort = config.getSetting(ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING); + final int gcLdapsPort = config.getSetting(ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING); defaultADAuthenticator = new DefaultADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver, metaDataResolver, domainDN, threadPool); @@ -99,9 +99,9 @@ class ActiveDirectorySessionFactory extends PoolingSessionFactory { } @Override - protected List getDefaultLdapUrls(Settings settings) { - return Collections.singletonList("ldap://" + settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING) + - ":" + ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.get(settings)); + protected List getDefaultLdapUrls(RealmConfig config) { + return Collections.singletonList("ldap://" + config.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING) + + ":" + config.getSetting(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING)); } @Override @@ -143,7 +143,7 @@ void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String @Override void getUnauthenticatedSessionWithoutPool(String user, ActionListener listener) { - if (PoolingSessionFactorySettings.BIND_DN.exists(config.settings()) == false) { + if (config.hasSetting(PoolingSessionFactorySettings.BIND_DN) == false) { listener.onResponse(null); return; } @@ -188,10 +188,10 @@ static String buildDnFromDomain(String domain) { return 
"DC=" + domain.replace(".", ",DC="); } - static String getBindDN(Settings settings) { - String bindDN = PoolingSessionFactorySettings.BIND_DN.get(settings); + static String getBindDN(RealmConfig config) { + String bindDN = config.getSetting(PoolingSessionFactorySettings.BIND_DN); if (bindDN.isEmpty() == false && bindDN.indexOf('\\') < 0 && bindDN.indexOf('@') < 0 && bindDN.indexOf('=') < 0) { - bindDN = bindDN + "@" + settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + bindDN = bindDN + "@" + config.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); } return bindDN; } @@ -226,22 +226,22 @@ abstract static class ADAuthenticator { final ThreadPool threadPool; ADAuthenticator(RealmConfig realm, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, GroupsResolver groupsResolver, - LdapMetaDataResolver metaDataResolver, String domainDN, String userSearchFilterSetting, String defaultUserSearchFilter, - ThreadPool threadPool) { + LdapMetaDataResolver metaDataResolver, String domainDN, Setting.AffixSetting userSearchFilterSetting, + String defaultUserSearchFilter, ThreadPool threadPool) { this.realm = realm; this.timeout = timeout; this.ignoreReferralErrors = ignoreReferralErrors; this.logger = logger; this.groupsResolver = groupsResolver; this.metaDataResolver = metaDataResolver; - final Settings settings = realm.settings(); - this.bindDN = getBindDN(settings); - this.bindPassword = PoolingSessionFactorySettings.SECURE_BIND_PASSWORD.get(settings); + this.bindDN = getBindDN(realm); + this.bindPassword = realm.getSetting(PoolingSessionFactorySettings.SECURE_BIND_PASSWORD, + () -> realm.getSetting(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD)); this.threadPool = threadPool; - userSearchDN = settings.get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, domainDN); - userSearchScope = LdapSearchScope.resolve(settings.get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), + userSearchDN = realm.getSetting(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, () -> domainDN); + userSearchScope = LdapSearchScope.resolve(realm.getSetting(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), LdapSearchScope.SUB_TREE); - userSearchFilter = settings.get(userSearchFilterSetting, defaultUserSearchFilter); + userSearchFilter = realm.getSetting(userSearchFilterSetting, () -> defaultUserSearchFilter); } final void authenticate(LDAPConnection connection, String username, SecureString password, ActionListener listener) { @@ -324,7 +324,8 @@ static class DefaultADAuthenticator extends ADAuthenticator { final String domainName; DefaultADAuthenticator(RealmConfig realm, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, - GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, ThreadPool threadPool) { + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, + ThreadPool threadPool) { super(realm, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, "(&(objectClass=user)(|(sAMAccountName={0})(userPrincipalName={0}@" + domainName(realm) + ")))", threadPool); @@ -332,7 +333,7 @@ static class DefaultADAuthenticator extends ADAuthenticator { } private static String domainName(RealmConfig realm) { - return realm.settings().get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + return 
realm.getSetting(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); } @Override @@ -363,7 +364,6 @@ static class DownLevelADAuthenticator extends ADAuthenticator { Cache domainNameCache = CacheBuilder.builder().setMaximumWeight(100).build(); final String domainDN; - final Settings settings; final SSLService sslService; final RealmConfig config; private final int ldapPort; @@ -372,12 +372,12 @@ static class DownLevelADAuthenticator extends ADAuthenticator { private final int gcLdapsPort; DownLevelADAuthenticator(RealmConfig config, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, - GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, SSLService sslService, - ThreadPool threadPool, int ldapPort, int ldapsPort, int gcLdapPort, int gcLdapsPort) { + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, + SSLService sslService, ThreadPool threadPool, + int ldapPort, int ldapsPort, int gcLdapPort, int gcLdapsPort) { super(config, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, ActiveDirectorySessionFactorySettings.AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, DOWN_LEVEL_FILTER, threadPool); this.domainDN = domainDN; - this.settings = config.settings(); this.sslService = sslService; this.config = config; this.ldapPort = ldapPort; @@ -457,7 +457,7 @@ protected void doRun() throws Exception { public void onFailure(Exception e) { IOUtils.closeWhileHandlingException(searchConnection); listener.onFailure(e); - }; + } }); } } catch (LDAPException e) { @@ -519,7 +519,7 @@ static class UpnADAuthenticator extends ADAuthenticator { static final String UPN_USER_FILTER = "(&(objectClass=user)(userPrincipalName={1}))"; UpnADAuthenticator(RealmConfig config, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, - GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, ThreadPool threadPool) { + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, ThreadPool threadPool) { super(config, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING, UPN_USER_FILTER, threadPool); if (userSearchFilter.contains("{0}")) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index 193b33b7d8fe9..d9245e4e22813 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -56,43 +57,44 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { private final UserRoleMapper roleMapper; private final ThreadPool threadPool; private final TimeValue executionTimeout; + private DelegatedAuthorizationSupport 
delegatedRealms; - public LdapRealm(String type, RealmConfig config, SSLService sslService, + public LdapRealm(RealmConfig config, SSLService sslService, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore, ThreadPool threadPool) throws LDAPException { - this(type, config, sessionFactory(config, sslService, threadPool, type), - new CompositeRoleMapper(type, config, watcherService, nativeRoleMappingStore), + this(config, sessionFactory(config, sslService, threadPool), + new CompositeRoleMapper(config, watcherService, nativeRoleMappingStore), threadPool); } // pkg private for testing - LdapRealm(String type, RealmConfig config, SessionFactory sessionFactory, + LdapRealm(RealmConfig config, SessionFactory sessionFactory, UserRoleMapper roleMapper, ThreadPool threadPool) { - super(type, config, threadPool); + super(config, threadPool); this.sessionFactory = sessionFactory; this.roleMapper = roleMapper; this.threadPool = threadPool; - this.executionTimeout = LdapRealmSettings.EXECUTION_TIMEOUT.get(config.settings()); + this.executionTimeout = config.getSetting(LdapRealmSettings.EXECUTION_TIMEOUT); roleMapper.refreshRealmOnChange(this); } - static SessionFactory sessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool, String type) + static SessionFactory sessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) throws LDAPException { final SessionFactory sessionFactory; - if (LdapRealmSettings.AD_TYPE.equals(type)) { + if (LdapRealmSettings.AD_TYPE.equals(config.type())) { sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); } else { - assert LdapRealmSettings.LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. expected one of [" + assert LdapRealmSettings.LDAP_TYPE.equals(config.type()) : "type [" + config.type() + "] is unknown. expected one of [" + LdapRealmSettings.AD_TYPE + ", " + LdapRealmSettings.LDAP_TYPE + "]"; final boolean hasSearchSettings = LdapUserSearchSessionFactory.hasUserSearchSettings(config); - final boolean hasTemplates = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.exists(config.settings()); + final boolean hasTemplates = config.hasSetting(LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING); if (hasSearchSettings == false) { if (hasTemplates == false) { throw new IllegalArgumentException("settings were not found for either user search [" + - RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactory.SEARCH_PREFIX) + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN) + "] or user template [" + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + "] modes of operation. " + @@ -102,7 +104,7 @@ static SessionFactory sessionFactory(RealmConfig config, SSLService sslService, sessionFactory = new LdapSessionFactory(config, sslService, threadPool); } else if (hasTemplates) { throw new IllegalArgumentException("settings were found for both user search [" + - RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactory.SEARCH_PREFIX) + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN) + "] and user template [" + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + "] modes of operation. 
" + @@ -175,7 +177,7 @@ public void initialize(Iterable realms, XPackLicenseState licenseState) { public void usageStats(ActionListener> listener) { super.usageStats(ActionListener.wrap(usage -> { usage.put("size", getCacheSize()); - usage.put("load_balance_type", LdapLoadBalancing.resolve(config.settings()).toString()); + usage.put("load_balance_type", LdapLoadBalancing.resolve(config).toString()); usage.put("ssl", sessionFactory.isSslUsed()); usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); listener.onResponse(usage); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java index 464a0cfc61cf8..df9a0a8326133 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java @@ -8,12 +8,11 @@ import com.unboundid.ldap.sdk.LDAPConnection; import com.unboundid.ldap.sdk.LDAPException; import com.unboundid.ldap.sdk.SimpleBindRequest; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; @@ -46,15 +45,14 @@ public class LdapSessionFactory extends SessionFactory { public LdapSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { super(config, sslService, threadPool); - Settings settings = config.settings(); - userDnTemplates = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); + userDnTemplates = config.getSetting(LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING).toArray(Strings.EMPTY_ARRAY); if (userDnTemplates.length == 0) { throw new IllegalArgumentException("missing required LDAP setting [" + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + "]"); } logger.info("Realm [{}] is in user-dn-template mode: [{}]", config.name(), userDnTemplates); - groupResolver = groupResolver(settings); - metaDataResolver = new LdapMetaDataResolver(settings, ignoreReferralErrors); + groupResolver = groupResolver(config); + metaDataResolver = new LdapMetaDataResolver(config, ignoreReferralErrors); } /** @@ -123,11 +121,11 @@ String buildDnFromTemplate(String username, String template) { return new MessageFormat(template, Locale.ROOT).format(new Object[] { escapedUsername }, new StringBuffer(), null).toString(); } - static GroupsResolver groupResolver(Settings settings) { - if (SearchGroupsResolverSettings.BASE_DN.exists(settings)) { - return new SearchGroupsResolver(settings); + static GroupsResolver groupResolver(RealmConfig realmConfig) { + if (realmConfig.hasSetting(SearchGroupsResolverSettings.BASE_DN)) { + return new SearchGroupsResolver(realmConfig); } - return new UserAttributeGroupsResolver(settings); + return new UserAttributeGroupsResolver(realmConfig); } } diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java index a3541ec2759b3..64f3be516fa5d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -30,6 +29,8 @@ import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; +import java.util.stream.Stream; + import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.BIND_DN; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.attributesToSearchFor; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter; @@ -44,30 +45,30 @@ class LdapUserSearchSessionFactory extends PoolingSessionFactory { private final String searchFilter; LdapUserSearchSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) throws LDAPException { - super(config, sslService, groupResolver(config.settings()), LdapUserSearchSessionFactorySettings.POOL_ENABLED, - BIND_DN.exists(config.settings()) ? 
BIND_DN.get(config.settings()) : null, + super(config, sslService, groupResolver(config), LdapUserSearchSessionFactorySettings.POOL_ENABLED, + config.getSetting(BIND_DN, () -> null), + () -> config.getSetting(BIND_DN, () -> config.getSetting(LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN)), + threadPool); + userSearchBaseDn = config.getSetting(LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN, () -> { - if (BIND_DN.exists(config.settings())) { - return BIND_DN.get(config.settings()); - } else { - return LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.get(config.settings()); - } - }, threadPool); - Settings settings = config.settings(); - if (LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.exists(settings)) { - userSearchBaseDn = LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.get(settings); - } else { - throw new IllegalArgumentException("[" + RealmSettings.getFullSettingKey(config, - LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN) + "] must be specified"); - } - scope = LdapUserSearchSessionFactorySettings.SEARCH_SCOPE.get(settings); + throw new IllegalArgumentException("[" + RealmSettings.getFullSettingKey(config, + LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN) + "] must be specified"); + } + ); + scope = config.getSetting(LdapUserSearchSessionFactorySettings.SEARCH_SCOPE); searchFilter = getSearchFilter(config); logger.info("Realm [{}] is in user-search mode - base_dn=[{}], search filter=[{}]", config.name(), userSearchBaseDn, searchFilter); } static boolean hasUserSearchSettings(RealmConfig config) { - return config.settings().getByPrefix("user_search.").isEmpty() == false; + return Stream.of( + LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN, + LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE, + LdapUserSearchSessionFactorySettings.SEARCH_SCOPE, + LdapUserSearchSessionFactorySettings.SEARCH_FILTER, + LdapUserSearchSessionFactorySettings.POOL_ENABLED + ).anyMatch(config::hasSetting); } /** @@ -96,13 +97,13 @@ protected void doRun() throws Exception { /** * Sets up a LDAPSession using the following process: *
<ol>
 - *     <li>Opening a new connection to the LDAP server</li>
 - *     <li>Executes a bind request using the bind user</li>
 - *     <li>Executes a search to find the DN of the user</li>
 - *     <li>Closes the opened connection</li>
 - *     <li>Opens a new connection to the LDAP server</li>
 - *     <li>Executes a bind request using the found DN and provided password</li>
 - *     <li>Creates a new LDAPSession with the bound connection</li>
 + *     <li>Opening a new connection to the LDAP server</li>
 + *     <li>Executes a bind request using the bind user</li>
 + *     <li>Executes a search to find the DN of the user</li>
 + *     <li>Closes the opened connection</li>
 + *     <li>Opens a new connection to the LDAP server</li>
 + *     <li>Executes a bind request using the found DN and provided password</li>
 + *     <li>Creates a new LDAPSession with the bound connection</li>
 *     </ol>
      */ @Override @@ -151,6 +152,7 @@ public void onFailure(Exception e) { listener.onFailure(e); })); } + @Override public void onFailure(Exception e) { IOUtils.closeWhileHandlingException(connection); @@ -227,17 +229,16 @@ private void findUser(String user, LDAPInterface ldapInterface, ActionListener poolingEnabled, @Nullable String bindDn, Supplier healthCheckDNSupplier, + Setting.AffixSetting poolingEnabled, @Nullable String bindDn, Supplier healthCheckDNSupplier, ThreadPool threadPool) throws LDAPException { super(config, sslService, threadPool); this.groupResolver = groupResolver; - this.metaDataResolver = new LdapMetaDataResolver(config.settings(), ignoreReferralErrors); + this.metaDataResolver = new LdapMetaDataResolver(config, ignoreReferralErrors); final byte[] bindPassword; - if (LEGACY_BIND_PASSWORD.exists(config.settings())) { - if (SECURE_BIND_PASSWORD.exists(config.settings())) { + if (config.hasSetting(LEGACY_BIND_PASSWORD)) { + if (config.hasSetting(SECURE_BIND_PASSWORD)) { throw new IllegalArgumentException("You cannot specify both [" + RealmSettings.getFullSettingKey(config, LEGACY_BIND_PASSWORD) + "] and [" + RealmSettings.getFullSettingKey(config, SECURE_BIND_PASSWORD) + "]"); } else { - bindPassword = CharArrays.toUtf8Bytes(LEGACY_BIND_PASSWORD.get(config.settings()).getChars()); + bindPassword = CharArrays.toUtf8Bytes(config.getSetting(LEGACY_BIND_PASSWORD).getChars()); } - } else if (SECURE_BIND_PASSWORD.exists(config.settings())) { - bindPassword = CharArrays.toUtf8Bytes(SECURE_BIND_PASSWORD.get(config.settings()).getChars()); + } else if (config.hasSetting(SECURE_BIND_PASSWORD)) { + bindPassword = CharArrays.toUtf8Bytes(config.getSetting(SECURE_BIND_PASSWORD).getChars()); } else { bindPassword = null; } @@ -87,7 +86,7 @@ abstract class PoolingSessionFactory extends SessionFactory implements Releasabl bindCredentials = new SimpleBindRequest(bindDn, bindPassword); } - this.useConnectionPool = poolingEnabled.get(config.settings()); + this.useConnectionPool = config.getSetting(poolingEnabled); if (useConnectionPool) { this.connectionPool = createConnectionPool(config, serverSet, timeout, logger, bindCredentials, healthCheckDNSupplier); } else { @@ -142,17 +141,16 @@ abstract void getSessionWithPool(LDAPConnectionPool connectionPool, String user, static LDAPConnectionPool createConnectionPool(RealmConfig config, ServerSet serverSet, TimeValue timeout, Logger logger, BindRequest bindRequest, Supplier healthCheckDnSupplier) throws LDAPException { - Settings settings = config.settings(); - final int initialSize = PoolingSessionFactorySettings.POOL_INITIAL_SIZE.get(settings); - final int size = PoolingSessionFactorySettings.POOL_SIZE.get(settings); + final int initialSize = config.getSetting(PoolingSessionFactorySettings.POOL_INITIAL_SIZE); + final int size = config.getSetting(PoolingSessionFactorySettings.POOL_SIZE); LDAPConnectionPool pool = null; boolean success = false; try { pool = LdapUtils.privilegedConnect(() -> new LDAPConnectionPool(serverSet, bindRequest, initialSize, size)); pool.setRetryFailedOperationsDueToInvalidConnections(true); - if (PoolingSessionFactorySettings.HEALTH_CHECK_ENABLED.get(settings)) { - String entryDn = PoolingSessionFactorySettings.HEALTH_CHECK_DN.get(settings).orElseGet(healthCheckDnSupplier); - final long healthCheckInterval = PoolingSessionFactorySettings.HEALTH_CHECK_INTERVAL.get(settings).millis(); + if (config.getSetting(PoolingSessionFactorySettings.HEALTH_CHECK_ENABLED)) { + String entryDn = 
config.getSetting(PoolingSessionFactorySettings.HEALTH_CHECK_DN).orElseGet(healthCheckDnSupplier); + final long healthCheckInterval = config.getSetting(PoolingSessionFactorySettings.HEALTH_CHECK_INTERVAL).millis(); if (entryDn != null) { // Checks the status of the LDAP connection at a specified interval in the background. We do not check on // create as the LDAP server may require authentication to get an entry and a bind request has not been executed diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java index c641be947d8dd..7e7fcf319a083 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java @@ -14,8 +14,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; @@ -26,11 +26,11 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.Strings.isNullOrEmpty; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; -import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; /** * Resolves the groups for a user by executing a search with a filter usually that contains a group @@ -44,16 +44,14 @@ class SearchGroupsResolver implements GroupsResolver { private final LdapSearchScope scope; private final boolean ignoreReferralErrors; - SearchGroupsResolver(Settings settings) { - if (SearchGroupsResolverSettings.BASE_DN.exists(settings)) { - baseDn = SearchGroupsResolverSettings.BASE_DN.get(settings); - } else { + SearchGroupsResolver(RealmConfig config) { + baseDn = config.getSetting(SearchGroupsResolverSettings.BASE_DN, () -> { throw new IllegalArgumentException("base_dn must be specified"); - } - filter = SearchGroupsResolverSettings.FILTER.get(settings); - userAttribute = SearchGroupsResolverSettings.USER_ATTRIBUTE.get(settings); - scope = SearchGroupsResolverSettings.SCOPE.get(settings); - this.ignoreReferralErrors = IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + }); + filter = config.getSetting(SearchGroupsResolverSettings.FILTER); + userAttribute = config.getSetting(SearchGroupsResolverSettings.USER_ATTRIBUTE); + scope = config.getSetting(SearchGroupsResolverSettings.SCOPE); + ignoreReferralErrors = config.getSetting(IGNORE_REFERRAL_ERRORS_SETTING); } @Override diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java index 6f5393d591e02..d2b2a346a9588 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java @@ -10,8 +10,8 @@ import com.unboundid.ldap.sdk.SearchScope; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.UserAttributeGroupsResolverSettings; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; @@ -22,9 +22,9 @@ import java.util.Objects; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; -import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; /** * Resolves the groups of a user based on the value of a attribute of the user's ldap entry @@ -34,8 +34,8 @@ class UserAttributeGroupsResolver implements GroupsResolver { private final String attribute; private final boolean ignoreReferralErrors; - UserAttributeGroupsResolver(Settings settings) { - this(UserAttributeGroupsResolverSettings.ATTRIBUTE.get(settings), IGNORE_REFERRAL_ERRORS_SETTING.get(settings)); + UserAttributeGroupsResolver(RealmConfig realmConfig) { + this(realmConfig.getSetting(UserAttributeGroupsResolverSettings.ATTRIBUTE), realmConfig.getSetting(IGNORE_REFERRAL_ERRORS_SETTING)); } private UserAttributeGroupsResolver(String attribute, boolean ignoreReferralErrors) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java index 3ac40eb374aef..287b67b21d3a1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java @@ -11,9 +11,11 @@ import com.unboundid.ldap.sdk.RoundRobinServerSet; import com.unboundid.ldap.sdk.ServerSet; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.InetAddresses; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; import javax.net.SocketFactory; @@ -26,7 +28,7 @@ public enum LdapLoadBalancing { FAILOVER() { @Override - ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + 
ServerSet buildServerSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable SocketFactory socketFactory, @Nullable LDAPConnectionOptions options) { FailoverServerSet serverSet = new FailoverServerSet(addresses, ports, socketFactory, options); serverSet.setReOrderOnFailover(true); @@ -36,7 +38,7 @@ ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nu ROUND_ROBIN() { @Override - ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + ServerSet buildServerSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable SocketFactory socketFactory, @Nullable LDAPConnectionOptions options) { return new RoundRobinServerSet(addresses, ports, socketFactory, options); } @@ -44,7 +46,7 @@ ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nu DNS_ROUND_ROBIN() { @Override - ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + ServerSet buildServerSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable SocketFactory socketFactory, @Nullable LDAPConnectionOptions options) { if (addresses.length != 1) { throw new IllegalArgumentException(toString() + " can only be used with a single url"); @@ -52,7 +54,7 @@ ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nu if (InetAddresses.isInetAddress(addresses[0])) { throw new IllegalArgumentException(toString() + " can only be used with a DNS name"); } - TimeValue dnsTtl = settings.getAsTime(LdapLoadBalancingSettings.CACHE_TTL_SETTING, CACHE_TTL_DEFAULT); + TimeValue dnsTtl = realmConfig.getSetting(LdapLoadBalancingSettings.CACHE_TTL_SETTING); return new RoundRobinDNSServerSet(addresses[0], ports[0], RoundRobinDNSServerSet.AddressSelectionMode.ROUND_ROBIN, dnsTtl.millis(), null, socketFactory, options); } @@ -60,7 +62,7 @@ ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nu DNS_FAILOVER() { @Override - ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + ServerSet buildServerSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable SocketFactory socketFactory, @Nullable LDAPConnectionOptions options) { if (addresses.length != 1) { throw new IllegalArgumentException(toString() + " can only be used with a single url"); @@ -68,16 +70,15 @@ ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nu if (InetAddresses.isInetAddress(addresses[0])) { throw new IllegalArgumentException(toString() + " can only be used with a DNS name"); } - TimeValue dnsTtl = settings.getAsTime(LdapLoadBalancingSettings.CACHE_TTL_SETTING, CACHE_TTL_DEFAULT); + TimeValue dnsTtl = realmConfig.getSetting(LdapLoadBalancingSettings.CACHE_TTL_SETTING); return new RoundRobinDNSServerSet(addresses[0], ports[0], RoundRobinDNSServerSet.AddressSelectionMode.FAILOVER, dnsTtl.millis(), null, socketFactory, options); } }; - public static final String LOAD_BALANCE_TYPE_DEFAULT = LdapLoadBalancing.FAILOVER.toString(); - public static final TimeValue CACHE_TTL_DEFAULT = TimeValue.timeValueHours(1L); + public static final LdapLoadBalancing LOAD_BALANCE_TYPE_DEFAULT = LdapLoadBalancing.FAILOVER; - abstract ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + abstract ServerSet buildServerSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable 
SocketFactory socketFactory, @Nullable LDAPConnectionOptions options); @Override @@ -85,21 +86,24 @@ public String toString() { return name().toLowerCase(Locale.ROOT); } - public static LdapLoadBalancing resolve(Settings settings) { - Settings loadBalanceSettings = settings.getAsSettings(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS); - String type = loadBalanceSettings.get(LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, LOAD_BALANCE_TYPE_DEFAULT); + public static LdapLoadBalancing resolve(RealmConfig realmConfig) { + String type = realmConfig.getSetting(LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING); + if (Strings.isNullOrEmpty(type)) { + return LOAD_BALANCE_TYPE_DEFAULT; + } try { return valueOf(type.toUpperCase(Locale.ROOT)); } catch (IllegalArgumentException ilae) { - throw new IllegalArgumentException("unknown load balance type [" + type + "]", ilae); + throw new IllegalArgumentException("unknown load balance type [" + type + "] in setting [" + + RealmSettings.getFullSettingKey(realmConfig, LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING) + + "]", ilae); } } - public static ServerSet serverSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + public static ServerSet serverSet(String[] addresses, int[] ports, RealmConfig realmConfig, @Nullable SocketFactory socketFactory, @Nullable LDAPConnectionOptions options) { - LdapLoadBalancing loadBalancing = resolve(settings); - Settings loadBalanceSettings = settings.getAsSettings(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS); - return loadBalancing.buildServerSet(addresses, ports, loadBalanceSettings, socketFactory, options); + LdapLoadBalancing loadBalancing = resolve(realmConfig); + return loadBalancing.buildServerSet(addresses, ports, realmConfig, socketFactory, options); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java index e957c29fe2ba2..ad5aa7927b68c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java @@ -5,24 +5,24 @@ */ package org.elasticsearch.xpack.security.authc.ldap.support; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.function.Function; -import java.util.stream.Collectors; - import com.unboundid.ldap.sdk.Attribute; import com.unboundid.ldap.sdk.LDAPInterface; import com.unboundid.ldap.sdk.SearchResultEntry; import com.unboundid.ldap.sdk.SearchScope; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; import static 
org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; @@ -31,8 +31,8 @@ public class LdapMetaDataResolver { private final String[] attributeNames; private final boolean ignoreReferralErrors; - public LdapMetaDataResolver(Settings settings, boolean ignoreReferralErrors) { - this(LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING.get(settings), ignoreReferralErrors); + public LdapMetaDataResolver(RealmConfig realmConfig, boolean ignoreReferralErrors) { + this(realmConfig.getSetting(LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), ignoreReferralErrors); } LdapMetaDataResolver(Collection attributeNames, boolean ignoreReferralErrors) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java index d2d87db683ca3..0d6a94037ab18 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java @@ -26,7 +26,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; import org.elasticsearch.action.ActionListener; @@ -35,11 +34,11 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.support.Exceptions; import javax.naming.ldap.Rdn; - import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java index 78f8c68f12459..193254c7a3963 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; @@ -64,8 +63,8 @@ public abstract class SessionFactory { protected SessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { this.config = config; this.logger = LogManager.getLogger(getClass()); - final Settings settings = config.settings(); - TimeValue searchTimeout = settings.getAsTime(SessionFactorySettings.TIMEOUT_LDAP_SETTING, SessionFactorySettings.TIMEOUT_DEFAULT); + TimeValue searchTimeout = config.getSetting(SessionFactorySettings.TIMEOUT_LDAP_SETTING, + () -> SessionFactorySettings.TIMEOUT_DEFAULT); if (searchTimeout.millis() < 1000L) { logger.warn("ldap_search timeout [{}] 
is less than the minimum supported search " + "timeout of 1s. using 1s", @@ -75,10 +74,10 @@ protected SessionFactory(RealmConfig config, SSLService sslService, ThreadPool t this.timeout = searchTimeout; this.sslService = sslService; this.threadPool = threadPool; - LDAPServers ldapServers = ldapServers(settings); + LDAPServers ldapServers = ldapServers(config); this.serverSet = serverSet(config, sslService, ldapServers); this.sslUsed = ldapServers.ssl; - this.ignoreReferralErrors = SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + this.ignoreReferralErrors = config.getSetting(SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING); } /** @@ -116,32 +115,22 @@ public void unauthenticatedSession(String username, ActionListener protected static LDAPConnectionOptions connectionOptions(RealmConfig config, SSLService sslService, Logger logger) { - Settings realmSettings = config.settings(); LDAPConnectionOptions options = new LDAPConnectionOptions(); - options.setConnectTimeoutMillis(Math.toIntExact( - realmSettings.getAsTime(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, - SessionFactorySettings.TIMEOUT_DEFAULT).millis() - )); - options.setFollowReferrals(realmSettings.getAsBoolean(SessionFactorySettings.FOLLOW_REFERRALS_SETTING, true)); - options.setResponseTimeoutMillis( - realmSettings.getAsTime(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, SessionFactorySettings.TIMEOUT_DEFAULT).millis() - ); + options.setConnectTimeoutMillis(Math.toIntExact(config.getSetting(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING).millis())); + options.setFollowReferrals(config.getSetting(SessionFactorySettings.FOLLOW_REFERRALS_SETTING)); + options.setResponseTimeoutMillis(config.getSetting(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING).millis()); options.setAllowConcurrentSocketFactoryUse(true); - final SSLConfigurationSettings sslConfigurationSettings = - SSLConfigurationSettings.withoutPrefix(); - final Settings realmSSLSettings = realmSettings.getByPrefix("ssl."); - final boolean verificationModeExists = - sslConfigurationSettings.verificationMode.exists(realmSSLSettings); - final boolean hostnameVerificationExists = - realmSettings.get(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, null) != null; + final boolean verificationModeExists = config.hasSetting(SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM); + final boolean hostnameVerificationExists = config.hasSetting(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING); if (verificationModeExists && hostnameVerificationExists) { - throw new IllegalArgumentException("[" + SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING + "] and [" + - sslConfigurationSettings.verificationMode.getKey() + + throw new IllegalArgumentException("[" + + RealmSettings.getFullSettingKey(config, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING) + "] and [" + + RealmSettings.getFullSettingKey(config, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM) + "] may not be used at the same time"); } else if (verificationModeExists) { - final String sslKey = RealmSettings.getFullSettingKey(config, "ssl"); + final String sslKey = RealmSettings.realmSslPrefix(config.identifier()); final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration(sslKey); if (sslConfiguration == null) { throw new IllegalStateException("cannot find SSL configuration for " + sslKey); @@ -153,9 +142,8 @@ protected static LDAPConnectionOptions connectionOptions(RealmConfig config, new DeprecationLogger(logger).deprecated("the setting [{}] has 
been deprecated and " + "will be removed in a future version. use [{}] instead", RealmSettings.getFullSettingKey(config, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), - RealmSettings.getFullSettingKey(config, "ssl." + - sslConfigurationSettings.verificationMode.getKey())); - if (realmSettings.getAsBoolean(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, true)) { + RealmSettings.getFullSettingKey(config, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM)); + if (config.getSetting(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING)) { options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true)); } } else { @@ -164,34 +152,34 @@ protected static LDAPConnectionOptions connectionOptions(RealmConfig config, return options; } - private LDAPServers ldapServers(Settings settings) { + private LDAPServers ldapServers(RealmConfig config) { // Parse LDAP urls - List ldapUrls = settings.getAsList(SessionFactorySettings.URLS_SETTING, getDefaultLdapUrls(settings)); + List ldapUrls = config.getSetting(SessionFactorySettings.URLS_SETTING, () -> getDefaultLdapUrls(config)); if (ldapUrls == null || ldapUrls.isEmpty()) { - throw new IllegalArgumentException("missing required LDAP setting [" + SessionFactorySettings.URLS_SETTING + - "]"); + throw new IllegalArgumentException("missing required LDAP setting [" + + RealmSettings.getFullSettingKey(config, SessionFactorySettings.URLS_SETTING) + "]"); } return new LDAPServers(ldapUrls.toArray(new String[ldapUrls.size()])); } - protected List getDefaultLdapUrls(Settings settings) { + protected List getDefaultLdapUrls(RealmConfig config) { return null; } private ServerSet serverSet(RealmConfig realmConfig, SSLService clientSSLService, LDAPServers ldapServers) { - Settings settings = realmConfig.settings(); SocketFactory socketFactory = null; if (ldapServers.ssl()) { - SSLConfiguration ssl = clientSSLService.getSSLConfiguration(RealmSettings.getFullSettingKey(realmConfig, "ssl")); + final String sslKey = RealmSettings.realmSslPrefix(config.identifier()); + final SSLConfiguration ssl = clientSSLService.getSSLConfiguration(sslKey); socketFactory = clientSSLService.sslSocketFactory(ssl); - if (settings.getAsBoolean(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, true)) { + if (ssl.verificationMode().isHostnameVerificationEnabled()) { logger.debug("using encryption for LDAP connections with hostname verification"); } else { logger.debug("using encryption for LDAP connections without hostname verification"); } } - return LdapLoadBalancing.serverSet(ldapServers.addresses(), ldapServers.ports(), settings, + return LdapLoadBalancing.serverSet(ldapServers.addresses(), ldapServers.ports(), realmConfig, socketFactory, connectionOptions(realmConfig, sslService, logger)); } @@ -255,7 +243,7 @@ private boolean secureUrls(String[] ldapUrls) { //No mixing is allowed because we use the same socketfactory throw new IllegalArgumentException( "configured LDAP protocols are not all equal (ldaps://.. 
and ldap://..): [" - + Strings.arrayToCommaDelimitedString(ldapUrls) + "]"); + + Strings.arrayToCommaDelimitedString(ldapUrls) + "]"); } return allSecure; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 4d13f332ffe20..942e328824fc3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -79,19 +78,19 @@ public class PkiRealm extends Realm implements CachingRealm { private DelegatedAuthorizationSupport delegatedRealms; public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { - this(config, new CompositeRoleMapper(PkiRealmSettings.TYPE, config, watcherService, nativeRoleMappingStore)); + this(config, new CompositeRoleMapper(config, watcherService, nativeRoleMappingStore)); } // pkg private for testing PkiRealm(RealmConfig config, UserRoleMapper roleMapper) { - super(PkiRealmSettings.TYPE, config); + super(config); this.trustManager = trustManagers(config); - this.principalPattern = PkiRealmSettings.USERNAME_PATTERN_SETTING.get(config.settings()); + this.principalPattern = config.getSetting(PkiRealmSettings.USERNAME_PATTERN_SETTING); this.roleMapper = roleMapper; this.roleMapper.refreshRealmOnChange(this); this.cache = CacheBuilder.builder() - .setExpireAfterWrite(PkiRealmSettings.CACHE_TTL_SETTING.get(config.settings())) - .setMaximumWeight(PkiRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) + .setExpireAfterWrite(config.getSetting(PkiRealmSettings.CACHE_TTL_SETTING)) + .setMaximumWeight(config.getSetting(PkiRealmSettings.CACHE_MAX_USERS_SETTING)) .build(); this.delegatedRealms = null; } @@ -117,7 +116,7 @@ public X509AuthenticationToken token(ThreadContext context) { @Override public void authenticate(AuthenticationToken authToken, ActionListener listener) { assert delegatedRealms != null : "Realm has not been initialized correctly"; - X509AuthenticationToken token = (X509AuthenticationToken)authToken; + X509AuthenticationToken token = (X509AuthenticationToken) authToken; try { final BytesKey fingerprint = computeFingerprint(token.credentials()[0]); User user = cache.get(fingerprint); @@ -215,36 +214,43 @@ static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509Auth return true; } - static X509TrustManager trustManagers(RealmConfig realmConfig) { - final Settings settings = realmConfig.settings(); - final Environment env = realmConfig.env(); - List certificateAuthorities = settings.getAsList(PkiRealmSettings.SSL_SETTINGS.caPaths.getKey(), null); - String truststorePath = PkiRealmSettings.SSL_SETTINGS.truststorePath.get(settings).orElse(null); + X509TrustManager trustManagers(RealmConfig realmConfig) { + final List certificateAuthorities = realmConfig.hasSetting(PkiRealmSettings.CAPATH_SETTING) ? 
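The SessionFactory changes above show the central pattern of this refactoring: realm settings are no longer pulled out of a raw Settings object with string keys, but resolved through RealmConfig.getSetting(...) against typed affix settings. A minimal sketch of how such an affix lookup resolves a concrete per-realm key; the prefix, suffix, and default value here are illustrative stand-ins, not the real SessionFactorySettings definitions:
----
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class AffixSettingSketch {
    // One logical setting, many concrete keys: one per realm name in the namespace.
    static final Setting.AffixSetting<TimeValue> SEARCH_TIMEOUT = Setting.affixKeySetting(
            "xpack.security.authc.realms.ldap.", "timeout.ldap_search",
            key -> Setting.timeSetting(key, TimeValue.timeValueSeconds(5), Setting.Property.NodeScope));

    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("xpack.security.authc.realms.ldap.ldap1.timeout.ldap_search", "10s")
                .build();
        // Resolving the concrete setting for realm "ldap1" and reading it is,
        // in essence, what config.getSetting(...) does on behalf of a realm.
        TimeValue timeout = SEARCH_TIMEOUT.getConcreteSettingForNamespace("ldap1").get(settings);
        System.out.println(timeout); // -> 10s
    }
}
----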
+ realmConfig.getSetting(PkiRealmSettings.CAPATH_SETTING) : null; + String truststorePath = realmConfig.getSetting(PkiRealmSettings.TRUST_STORE_PATH).orElse(null); if (truststorePath == null && certificateAuthorities == null) { return null; } else if (truststorePath != null && certificateAuthorities != null) { - final String pathKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.truststorePath); - final String caKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.caPaths); + final String pathKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.TRUST_STORE_PATH); + final String caKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.CAPATH_SETTING); throw new IllegalArgumentException("[" + pathKey + "] and [" + caKey + "] cannot be used at the same time"); } else if (truststorePath != null) { - return trustManagersFromTruststore(truststorePath, realmConfig); + final X509TrustManager trustManager = trustManagersFromTruststore(truststorePath, realmConfig); + if (trustManager.getAcceptedIssuers().length == 0) { + logger.warn("PKI Realm {} uses truststore {} which has no accepted certificate issuers", this, truststorePath); + } + return trustManager; + } + final X509TrustManager trustManager = trustManagersFromCAs(certificateAuthorities, realmConfig.env()); + if (trustManager.getAcceptedIssuers().length == 0) { + logger.warn("PKI Realm {} uses CAs {} with no accepted certificate issuers", this, certificateAuthorities); } - return trustManagersFromCAs(settings, env); + return trustManager; } private static X509TrustManager trustManagersFromTruststore(String truststorePath, RealmConfig realmConfig) { - final Settings settings = realmConfig.settings(); - if (PkiRealmSettings.SSL_SETTINGS.truststorePassword.exists(settings) == false - && PkiRealmSettings.SSL_SETTINGS.legacyTruststorePassword.exists(settings) == false) { + if (realmConfig.hasSetting(PkiRealmSettings.TRUST_STORE_PASSWORD) == false + && realmConfig.hasSetting(PkiRealmSettings.LEGACY_TRUST_STORE_PASSWORD) == false) { throw new IllegalArgumentException("Neither [" + - RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.truststorePassword) + "] or [" + - RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.legacyTruststorePassword) + "] is configured" - ); + RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.TRUST_STORE_PASSWORD) + "] or [" + + RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.LEGACY_TRUST_STORE_PASSWORD) + + "] is configured"); } - try (SecureString password = PkiRealmSettings.SSL_SETTINGS.truststorePassword.get(settings)) { - String trustStoreAlgorithm = PkiRealmSettings.SSL_SETTINGS.truststoreAlgorithm.get(settings); - String trustStoreType = SSLConfigurationSettings.getKeyStoreType(PkiRealmSettings.SSL_SETTINGS.truststoreType, - settings, truststorePath); + try (SecureString password = realmConfig.getSetting(PkiRealmSettings.TRUST_STORE_PASSWORD)) { + String trustStoreAlgorithm = realmConfig.getSetting(PkiRealmSettings.TRUST_STORE_ALGORITHM); + String trustStoreType = SSLConfigurationSettings.getKeyStoreType( + realmConfig.getConcreteSetting(PkiRealmSettings.TRUST_STORE_TYPE), realmConfig.globalSettings(), + truststorePath); try { return CertParsingUtils.trustManager(truststorePath, trustStoreType, password.getChars(), trustStoreAlgorithm, realmConfig .env()); @@ -254,8 +260,7 @@ private static X509TrustManager trustManagersFromTruststore(String truststorePat } } 
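The PkiRealm hunks above also add a warning when the configured trust material yields no accepted certificate issuers, since such a trust manager can never validate a client certificate. A JDK-only sketch of that check; the file name and password are placeholders:
----
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyStore;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;

public class TruststoreIssuerCheck {
    public static void main(String[] args) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("jks");
        try (InputStream in = Files.newInputStream(Paths.get("truststore.jks"))) {
            keyStore.load(in, "changeit".toCharArray());
        }
        TrustManagerFactory factory =
                TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        factory.init(keyStore);
        for (TrustManager tm : factory.getTrustManagers()) {
            // A trust manager with no accepted issuers rejects every chain,
            // which is why the realm now logs a warning for this case.
            if (tm instanceof X509TrustManager && ((X509TrustManager) tm).getAcceptedIssuers().length == 0) {
                System.err.println("truststore has no accepted certificate issuers");
            }
        }
    }
}
----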
- private static X509TrustManager trustManagersFromCAs(Settings settings, Environment env) { - List certificateAuthorities = settings.getAsList(PkiRealmSettings.SSL_SETTINGS.caPaths.getKey(), null); + private static X509TrustManager trustManagersFromCAs(List certificateAuthorities, Environment env) { assert certificateAuthorities != null; try { Certificate[] certificates = CertParsingUtils.readCertificates(certificateAuthorities, env); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java index bdc8da0dfca12..af83d26ea8fa2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java @@ -11,9 +11,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.Loggers; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.opensaml.saml.saml2.core.Issuer; @@ -32,7 +32,7 @@ public abstract class SamlMessageBuilder { protected final EntityDescriptor identityProvider; public SamlMessageBuilder(EntityDescriptor identityProvider, SpConfiguration serviceProvider, Clock clock) { - this.logger = Loggers.getLogger(getClass()); + this.logger = LogManager.getLogger(getClass()); this.identityProvider = identityProvider; this.serviceProvider = serviceProvider; this.clock = clock; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index 9aad0272f4801..f5935b4477c76 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -30,7 +30,8 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.SuppressForbidden; @@ -64,8 +66,6 @@ import org.w3c.dom.Element; import org.xml.sax.SAXException; -import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getRealmType; - /** * CLI tool to generate SAML Metadata for a Service Provider (realm) */ @@ -141,9 +141,9 @@ public void close() throws IOException { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { // OpenSAML prints a lot of _stuff_ at info level, that really isn't needed in a command line tool.
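The findRealm changes that follow rework realm lookup around RealmConfig.RealmIdentifier, so realm settings are keyed by the (type, name) pair rather than by name alone. A small sketch of that keying, using a hypothetical RealmId class in place of the real identifier:
----
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class RealmIdSketch {
    static final class RealmId {
        final String type;
        final String name;
        RealmId(String type, String name) { this.type = type; this.name = name; }
        @Override public boolean equals(Object o) {
            return o instanceof RealmId && ((RealmId) o).type.equals(type) && ((RealmId) o).name.equals(name);
        }
        @Override public int hashCode() { return Objects.hash(type, name); }
        @Override public String toString() { return type + "/" + name; }
    }

    public static void main(String[] args) {
        Map<RealmId, String> realms = new HashMap<>();
        realms.put(new RealmId("saml", "corp"), "saml realm settings");
        // The same name under a different type is a different realm entry.
        realms.put(new RealmId("ldap", "corp"), "ldap realm settings");
        System.out.println(realms.get(new RealmId("saml", "corp"))); // -> saml realm settings
    }
}
----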
- Loggers.setLevel(Loggers.getLogger("org.opensaml"), Level.WARN); + Loggers.setLevel(LogManager.getLogger("org.opensaml"), Level.WARN); - final Logger logger = Loggers.getLogger(getClass()); + final Logger logger = LogManager.getLogger(getClass()); SamlUtils.initialize(logger); final EntityDescriptor descriptor = buildEntityDescriptor(terminal, options, env); @@ -158,8 +158,9 @@ EntityDescriptor buildEntityDescriptor(Terminal terminal, OptionSet options, Env final boolean batch = options.has(batchSpec); final RealmConfig realm = findRealm(terminal, options, env); + final Settings realmSettings = realm.globalSettings().getByPrefix(RealmSettings.realmSettingPrefix(realm.identifier())); terminal.println(Terminal.Verbosity.VERBOSE, - "Using realm configuration\n=====\n" + realm.settings().toDelimitedString('\n') + "====="); + "Using realm configuration\n=====\n" + realmSettings.toDelimitedString('\n') + "====="); final Locale locale = findLocale(options); terminal.println(Terminal.Verbosity.VERBOSE, "Using locale: " + locale.toLanguageTag()); @@ -170,7 +171,7 @@ EntityDescriptor buildEntityDescriptor(Terminal terminal, OptionSet options, Env .encryptionCredentials(spConfig.getEncryptionCredentials()) .signingCredential(spConfig.getSigningConfiguration().getCredential()) .authnRequestsSigned(spConfig.getSigningConfiguration().shouldSign(AuthnRequest.DEFAULT_ELEMENT_LOCAL_NAME)) - .nameIdFormat(SamlRealmSettings.NAMEID_FORMAT.get(realm.settings())) + .nameIdFormat(realm.getSetting(SamlRealmSettings.NAMEID_FORMAT)) .serviceName(option(serviceNameSpec, options, env.settings().get("cluster.name"))); Map attributes = getAttributeNames(options, realm); @@ -397,7 +398,8 @@ private Map getAttributeNames(OptionSet options, RealmConfig rea for (String a : attributeSpec.values(options)) { attributes.put(a, null); } - final Settings attributeSettings = realm.settings().getByPrefix(SamlRealmSettings.AttributeSetting.ATTRIBUTES_PREFIX); + final String prefix = RealmSettings.realmSettingPrefix(realm.identifier()) + SamlRealmSettings.AttributeSetting.ATTRIBUTES_PREFIX; + final Settings attributeSettings = realm.globalSettings().getByPrefix(prefix); for (String key : sorted(attributeSettings.keySet())) { final String attr = attributeSettings.get(key); attributes.put(attr, key); @@ -410,6 +412,9 @@ private SortedSet sorted(Set strings) { return new TreeSet<>(strings); } + /** + * @TODO REALM-SETTINGS[TIM] This can be redone a lot now the realm settings are keyed by type + */ private RealmConfig findRealm(Terminal terminal, OptionSet options, Environment env) throws UserException, IOException, Exception { keyStoreWrapper = keyStoreFunction.apply(env); @@ -428,36 +433,37 @@ private RealmConfig findRealm(Terminal terminal, OptionSet options, Environment settings = env.settings(); } - final Map realms = RealmSettings.getRealmSettings(settings); + final Map realms = RealmSettings.getRealmSettings(settings); if (options.has(realmSpec)) { final String name = realmSpec.value(options); - final Settings realmSettings = realms.get(name); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier(SamlRealmSettings.TYPE, name); + final Settings realmSettings = realms.get(identifier); if (realmSettings == null) { throw new UserException(ExitCodes.CONFIG, "No such realm '" + name + "' defined in " + env.configFile()); } - final String realmType = getRealmType(realmSettings); - if (isSamlRealm(realmType)) { - return buildRealm(name, realmSettings, env); + if (isSamlRealm(identifier)) { + return 
buildRealm(identifier, env, settings); } else { - throw new UserException(ExitCodes.CONFIG, "Realm '" + name + "' is not a SAML realm (is '" + realmType + "')"); + throw new UserException(ExitCodes.CONFIG, "Realm '" + name + "' is not a SAML realm (is '" + identifier.getType() + "')"); } } else { - final List<Map.Entry<String, Settings>> saml = realms.entrySet().stream() - .filter(entry -> isSamlRealm(getRealmType(entry.getValue()))) + final List<Map.Entry<RealmConfig.RealmIdentifier, Settings>> saml = realms.entrySet().stream() + .filter(entry -> isSamlRealm(entry.getKey())) .collect(Collectors.toList()); if (saml.isEmpty()) { throw new UserException(ExitCodes.CONFIG, "There is no SAML realm configured in " + env.configFile()); } if (saml.size() > 1) { terminal.println("Using configuration in " + env.configFile()); - terminal.println("Found multiple SAML realms: " + saml.stream().map(Map.Entry::getKey).collect(Collectors.joining(", "))); + terminal.println("Found multiple SAML realms: " + + saml.stream().map(Map.Entry::getKey).map(Object::toString).collect(Collectors.joining(", "))); terminal.println("Use the -" + optionName(realmSpec) + " option to specify an explicit realm"); throw new UserException(ExitCodes.CONFIG, "Found multiple SAML realms, please specify one with '-" + optionName(realmSpec) + "'"); } - final Map.Entry<String, Settings> entry = saml.get(0); + final Map.Entry<RealmConfig.RealmIdentifier, Settings> entry = saml.get(0); terminal.println("Building metadata for SAML realm " + entry.getKey()); - return buildRealm(entry.getKey(), entry.getValue(), env); + return buildRealm(entry.getKey(), env, settings); } } @@ -465,12 +471,12 @@ private String optionName(OptionSpec spec) { return spec.options().get(0); } - private RealmConfig buildRealm(String name, Settings settings, Environment env) { - return new RealmConfig(name, settings, env.settings(), env, new ThreadContext(env.settings())); + private RealmConfig buildRealm(RealmConfig.RealmIdentifier identifier, Environment env, Settings globalSettings ) { + return new RealmConfig(identifier, globalSettings, env, new ThreadContext(globalSettings)); } - private boolean isSamlRealm(String realmType) { - return SamlRealmSettings.TYPE.equals(realmType); + private boolean isSamlRealm(RealmConfig.RealmIdentifier realmIdentifier) { + return SamlRealmSettings.TYPE.equals(realmIdentifier.getType()); } private Locale findLocale(OptionSet options) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index 7c982e6b1b396..b2a1001aef9c6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; @@ -48,7 +47,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; -import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; import
org.elasticsearch.xpack.security.authc.Realms; @@ -107,7 +105,7 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.CLOCK_SKEW; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.DN_ATTRIBUTE; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.ENCRYPTION_KEY_ALIAS; -import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.ENCRYPTION_SETTINGS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.ENCRYPTION_SETTING_KEY; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.FORCE_AUTHN; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.GROUPS_ATTRIBUTE; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.IDP_ENTITY_ID; @@ -124,11 +122,10 @@ import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.REQUESTED_AUTHN_CONTEXT_CLASS_REF; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_KEY_ALIAS; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_MESSAGE_TYPES; -import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_SETTINGS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_SETTING_KEY; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ACS; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ENTITY_ID; import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_LOGOUT; -import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.TYPE; /** * This class is {@link Releasable} because it uses a library that thinks timers and timer tasks @@ -196,7 +193,7 @@ public static SamlRealm create(RealmConfig config, SSLService sslService, Resour final Clock clock = Clock.systemUTC(); final IdpConfiguration idpConfiguration = getIdpConfiguration(config, metadataResolver, idpDescriptor); - final TimeValue maxSkew = CLOCK_SKEW.get(config.settings()); + final TimeValue maxSkew = config.getSetting(CLOCK_SKEW); final SamlAuthenticator authenticator = new SamlAuthenticator(clock, idpConfiguration, serviceProvider, maxSkew); final SamlLogoutRequestHandler logoutHandler = new SamlLogoutRequestHandler(clock, idpConfiguration, serviceProvider, maxSkew); @@ -212,7 +209,7 @@ public static SamlRealm create(RealmConfig config, SSLService sslService, Resour // For testing SamlRealm(RealmConfig config, UserRoleMapper roleMapper, SamlAuthenticator authenticator, SamlLogoutRequestHandler logoutHandler, Supplier idpDescriptor, SpConfiguration spConfiguration) throws Exception { - super(TYPE, config); + super(config); this.roleMapper = roleMapper; this.authenticator = authenticator; @@ -222,10 +219,10 @@ public static SamlRealm create(RealmConfig config, SSLService sslService, Resour this.serviceProvider = spConfiguration; this.nameIdPolicy = new SamlAuthnRequestBuilder.NameIDPolicySettings(require(config, NAMEID_FORMAT), - NAMEID_ALLOW_CREATE.get(config.settings()), NAMEID_SP_QUALIFIER.get(config.settings())); - this.forceAuthn = FORCE_AUTHN.exists(config.settings()) ? 
FORCE_AUTHN.get(config.settings()) : null; - this.useSingleLogout = IDP_SINGLE_LOGOUT.get(config.settings()); - this.populateUserMetadata = POPULATE_USER_METADATA.get(config.settings()); + config.getSetting(NAMEID_ALLOW_CREATE), config.getSetting(NAMEID_SP_QUALIFIER)); + this.forceAuthn = config.getSetting(FORCE_AUTHN, () -> null); + this.useSingleLogout = config.getSetting(IDP_SINGLE_LOGOUT); + this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA); this.principalAttribute = AttributeParser.forSetting(logger, PRINCIPAL_ATTRIBUTE, config, true); this.groupsAttribute = AttributeParser.forSetting(logger, GROUPS_ATTRIBUTE, config, false); @@ -244,8 +241,8 @@ public void initialize(Iterable realms, XPackLicenseState licenseState) { delegatedRealms = new DelegatedAuthorizationSupport(realms, config, licenseState); } - static String require(RealmConfig config, Setting setting) { - final String value = setting.get(config.settings()); + static String require(RealmConfig config, Setting.AffixSetting setting) { + final String value = config.getSetting(setting); if (value.isEmpty()) { throw new IllegalArgumentException("The configuration setting [" + RealmSettings.getFullSettingKey(config, setting) + "] is required"); @@ -287,8 +284,8 @@ private static IdpConfiguration getIdpConfiguration(RealmConfig config, Metadata static SpConfiguration getSpConfiguration(RealmConfig config) throws IOException, GeneralSecurityException { final String serviceProviderId = require(config, SP_ENTITY_ID); final String assertionConsumerServiceURL = require(config, SP_ACS); - final String logoutUrl = SP_LOGOUT.get(config.settings()); - final List reqAuthnCtxClassRef = REQUESTED_AUTHN_CONTEXT_CLASS_REF.get(config.settings()); + final String logoutUrl = config.getSetting(SP_LOGOUT); + final List reqAuthnCtxClassRef = config.getSetting(REQUESTED_AUTHN_CONTEXT_CLASS_REF); return new SpConfiguration(serviceProviderId, assertionConsumerServiceURL, logoutUrl, buildSigningConfiguration(config), buildEncryptionCredential(config), reqAuthnCtxClassRef); } @@ -296,35 +293,37 @@ static SpConfiguration getSpConfiguration(RealmConfig config) throws IOException // Package-private for testing static List buildEncryptionCredential(RealmConfig config) throws IOException, GeneralSecurityException { - return buildCredential(config, ENCRYPTION_SETTINGS, ENCRYPTION_KEY_ALIAS, true); + return buildCredential(config, + RealmSettings.realmSettingPrefix(config.identifier()) + ENCRYPTION_SETTING_KEY, + ENCRYPTION_KEY_ALIAS, true); } static SigningConfiguration buildSigningConfiguration(RealmConfig config) throws IOException, GeneralSecurityException { - final List credentials = buildCredential(config, SIGNING_SETTINGS, SIGNING_KEY_ALIAS, false); - + final List credentials = buildCredential(config, + RealmSettings.realmSettingPrefix(config.identifier()) + SIGNING_SETTING_KEY, SIGNING_KEY_ALIAS, false); if (credentials == null || credentials.isEmpty()) { - if (SIGNING_MESSAGE_TYPES.exists(config.settings())) { + if (config.hasSetting(SIGNING_MESSAGE_TYPES)) { throw new IllegalArgumentException("The setting [" + RealmSettings.getFullSettingKey(config, SIGNING_MESSAGE_TYPES) + "] cannot be specified if there are no signing credentials"); } else { return new SigningConfiguration(Collections.emptySet(), null); } } else { - final List types = SIGNING_MESSAGE_TYPES.get(config.settings()); + final List types = config.getSetting(SIGNING_MESSAGE_TYPES); return new SigningConfiguration(Sets.newHashSet(types), credentials.get(0)); } } - private 
static List buildCredential(RealmConfig config, X509KeyPairSettings keyPairSettings, - Setting aliasSetting, final boolean allowMultiple) { - final X509KeyManager keyManager = CertParsingUtils.getKeyManager(keyPairSettings, config.settings(), null, config.env()); - + private static List buildCredential(RealmConfig config, String prefix, Setting.AffixSetting aliasSetting, + boolean allowMultiple) { + final X509KeyPairSettings keyPairSettings = X509KeyPairSettings.withPrefix(prefix, false); + final X509KeyManager keyManager = CertParsingUtils.getKeyManager(keyPairSettings, config.globalSettings(), null, config.env()); if (keyManager == null) { return null; } final Set aliases = new HashSet<>(); - final String configuredAlias = aliasSetting.get(config.settings()); + final String configuredAlias = config.getSetting(aliasSetting); if (Strings.isNullOrEmpty(configuredAlias)) { final String[] serverAliases = keyManager.getServerAliases("RSA", null); @@ -334,11 +333,11 @@ private static List buildCredential(RealmConfig config, X509KeyP if (aliases.isEmpty()) { throw new IllegalArgumentException( - "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + "The configured key store for " + prefix + " does not contain any RSA key pairs"); } else if (allowMultiple == false && aliases.size() > 1) { throw new IllegalArgumentException( - "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + "The configured key store for " + prefix + " has multiple keys but no alias has been specified (from setting " + RealmSettings.getFullSettingKey(config, aliasSetting) + ")"); } @@ -350,7 +349,7 @@ private static List buildCredential(RealmConfig config, X509KeyP for (String alias : aliases) { if (keyManager.getPrivateKey(alias) == null) { throw new IllegalArgumentException( - "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + "The configured key store for " + prefix + " does not have a key associated with alias [" + alias + "] " + ((Strings.isNullOrEmpty(configuredAlias) == false) ? 
"(from setting " + RealmSettings.getFullSettingKey(config, aliasSetting) + ")" @@ -416,7 +415,7 @@ public void authenticate(AuthenticationToken authenticationToken, ActionListener } private void buildUser(SamlAttributes attributes, ActionListener baseListener) { - final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name()); + final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name(config)); if (Strings.isNullOrEmpty(principal)) { baseListener.onResponse(AuthenticationResult.unsuccessful( principalAttribute + " not found in " + attributes.attributes(), null)); @@ -455,9 +454,9 @@ private void buildUser(SamlAttributes attributes, ActionListener groups = groupsAttribute.getAttribute(attributes); - final String dn = resolveSingleValueAttribute(attributes, dnAttribute, DN_ATTRIBUTE.name()); - final String name = resolveSingleValueAttribute(attributes, nameAttribute, NAME_ATTRIBUTE.name()); - final String mail = resolveSingleValueAttribute(attributes, mailAttribute, MAIL_ATTRIBUTE.name()); + final String dn = resolveSingleValueAttribute(attributes, dnAttribute, DN_ATTRIBUTE.name(config)); + final String name = resolveSingleValueAttribute(attributes, nameAttribute, NAME_ATTRIBUTE.name(config)); + final String mail = resolveSingleValueAttribute(attributes, mailAttribute, MAIL_ATTRIBUTE.name(config)); UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, dn, groups, userMeta, config); roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { final User user = new User(principal, roles.toArray(new String[roles.size()]), name, mail, userMeta, true); @@ -526,7 +525,7 @@ private static Tuple attributes.getAttributeValues(attributeName).stream().map(s -> { final Matcher matcher = regex.matcher(s); if (matcher.find() == false) { @@ -768,17 +767,19 @@ static AttributeParser forSetting(Logger logger, SamlRealmSettings.AttributeSett ); } else { return new AttributeParser( - "SAML Attribute [" + attributeName + "] for [" + setting.name() + "]", + "SAML Attribute [" + attributeName + "] for [" + setting.name(realmConfig) + "]", attributes -> attributes.getAttributeValues(attributeName)); } } else if (required) { - throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) - + " is required"); - } else if (setting.getPattern().exists(settings)) { - throw new SettingsException("Setting " + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) - + " cannot be set unless " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is also set"); + throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + + "] is required"); + } else if (realmConfig.hasSetting(setting.getPattern())) { + throw new SettingsException("Setting [" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + + "] cannot be set unless [" + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + + "] is also set"); } else { - return new AttributeParser("No SAML attribute for [" + setting.name() + "]", attributes -> Collections.emptyList()); + return new AttributeParser("No SAML attribute for [" + setting.name(realmConfig) + "]", + attributes -> Collections.emptyList()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index fdb2fd0f33db8..3924370fb33ec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -34,20 +34,20 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm private final boolean authenticationEnabled; final Hasher cacheHasher; - protected CachingUsernamePasswordRealm(String type, RealmConfig config, ThreadPool threadPool) { - super(type, config); - cacheHasher = Hasher.resolve(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.get(config.settings())); + protected CachingUsernamePasswordRealm(RealmConfig config, ThreadPool threadPool) { + super(config); + cacheHasher = Hasher.resolve(this.config.getSetting(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING)); this.threadPool = threadPool; - final TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); + final TimeValue ttl = this.config.getSetting(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING); if (ttl.getNanos() > 0) { cache = CacheBuilder.<String, ListenableFuture<UserWithHash>>builder() - .setExpireAfterWrite(ttl) - .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) - .build(); + .setExpireAfterWrite(ttl) + .setMaximumWeight(this.config.getSetting(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING)) + .build(); } else { cache = null; } - this.authenticationEnabled = CachingUsernamePasswordRealmSettings.AUTHC_ENABLED_SETTING.get(config.settings()); + this.authenticationEnabled = config.getSetting(CachingUsernamePasswordRealmSettings.AUTHC_ENABLED_SETTING); } @Override @@ -86,7 +86,7 @@ public boolean supports(AuthenticationToken token) { * This method will respond with {@link AuthenticationResult#notHandled()} if * {@link CachingUsernamePasswordRealmSettings#AUTHC_ENABLED_SETTING authentication is not enabled}.
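The constructor above keeps the existing behaviour of only building a cache when the configured TTL is positive; with a zero or negative TTL the realm simply runs uncached. A condensed sketch of that guard, using the same CacheBuilder calls with simplified type parameters:
----
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.unit.TimeValue;

public class TtlGatedCache {
    // Returns null when caching is disabled, mirroring the realm constructor.
    static Cache<String, String> build(TimeValue ttl, long maxUsers) {
        if (ttl.getNanos() > 0) {
            return CacheBuilder.<String, String>builder()
                    .setExpireAfterWrite(ttl)
                    .setMaximumWeight(maxUsers)
                    .build();
        }
        return null;
    }
}
----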
* @param authToken The authentication token - * @param listener to be called at completion + * @param listener to be called at completion */ @Override public final void authenticate(AuthenticationToken authToken, ActionListener listener) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java index ff6fc6042e7e8..8ce2805d23059 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupport.java @@ -6,10 +6,11 @@ package org.elasticsearch.xpack.security.authc.support; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.LicenseUtils; @@ -17,15 +18,14 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.user.User; import java.util.ArrayList; import java.util.List; -import java.util.Map; import static org.elasticsearch.common.Strings.collectionToDelimitedString; +import static org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings.AUTHZ_REALMS; /** * Utility class for supporting "delegated authorization" (aka "authorization_realms", aka "lookup realms"). 
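The DelegatedAuthorizationSupport hunks that follow replace the per-realm Settings scan with a concrete affix-setting lookup when guarding against delegation chains: a realm that itself delegates authorization may not be used as an authorization realm. A sketch of that guard, with an assumed prefix standing in for the real DelegatedAuthorizationSettings.AUTHZ_REALMS definition:
----
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class AuthzChainGuard {
    // Affix setting factory keyed by realm type; the prefix is an assumption.
    static final Function<String, Setting.AffixSetting<List<String>>> AUTHZ_REALMS = type ->
            Setting.affixKeySetting("xpack.security.authc.realms." + type + ".", "authorization_realms",
                    key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(),
                            Setting.Property.NodeScope));

    static void checkNotChained(String type, String name, Settings settings) {
        Setting<List<String>> setting = AUTHZ_REALMS.apply(type).getConcreteSettingForNamespace(name);
        if (setting.exists(settings)) {
            throw new IllegalArgumentException("realm [" + name
                    + "] is already delegating authorization to " + setting.get(settings));
        }
    }
}
----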
@@ -46,7 +46,7 @@ public class DelegatedAuthorizationSupport { * {@link #DelegatedAuthorizationSupport(Iterable, List, Settings, ThreadContext, XPackLicenseState)} */ public DelegatedAuthorizationSupport(Iterable allRealms, RealmConfig config, XPackLicenseState licenseState) { - this(allRealms, DelegatedAuthorizationSettings.AUTHZ_REALMS.get(config.settings()), config.globalSettings(), config.threadContext(), + this(allRealms, config.getSetting(AUTHZ_REALMS), config.globalSettings(), config.threadContext(), licenseState); } @@ -60,7 +60,7 @@ protected DelegatedAuthorizationSupport(Iterable allRealms, Lis final List resolvedLookupRealms = resolveRealms(allRealms, lookupRealms); checkForRealmChains(resolvedLookupRealms, settings); this.lookup = new RealmUserLookup(resolvedLookupRealms, threadContext); - this.logger = Loggers.getLogger(getClass()); + this.logger = LogManager.getLogger(getClass()); this.licenseState = licenseState; } @@ -82,14 +82,14 @@ public boolean hasDelegation() { public void resolve(String username, ActionListener resultListener) { if (licenseState.isAuthorizationRealmAllowed() == false) { resultListener.onResponse(AuthenticationResult.unsuccessful( - DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + " are not permitted", - LicenseUtils.newComplianceException(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey()) + DelegatedAuthorizationSettings.AUTHZ_REALMS_SUFFIX + " are not permitted", + LicenseUtils.newComplianceException(DelegatedAuthorizationSettings.AUTHZ_REALMS_SUFFIX) )); return; } if (hasDelegation() == false) { resultListener.onResponse(AuthenticationResult.unsuccessful( - "No [" + DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey() + "] have been configured", null)); + "No [" + DelegatedAuthorizationSettings.AUTHZ_REALMS_SUFFIX + "] have been configured", null)); return; } ActionListener<Tuple<User, Realm>> userListener = ActionListener.wrap(tuple -> { @@ -123,13 +123,12 @@ private List resolveRealms(Iterable allRealms, List delegatedRealms, Settings globalSettings) { - final Map settingsByRealm = RealmSettings.getRealmSettings(globalSettings); for (Realm realm : delegatedRealms) { - final Settings realmSettings = settingsByRealm.get(realm.name()); - if (realmSettings != null && DelegatedAuthorizationSettings.AUTHZ_REALMS.exists(realmSettings)) { - throw new IllegalArgumentException("cannot use realm [" + realm + - "] as an authorization realm - it is already delegating authorization to [" + - DelegatedAuthorizationSettings.AUTHZ_REALMS.get(realmSettings) + "]"); + Setting<List<String>> realmAuthzSetting = AUTHZ_REALMS.apply(realm.type()).getConcreteSettingForNamespace(realm.name()); + if (realmAuthzSetting.exists(globalSettings)) { + throw new IllegalArgumentException("cannot use realm [" + realm + + "] as an authorization realm - it is already delegating authorization to [" + realmAuthzSetting.get(globalSettings) + + "]"); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java index bdabc690f76ff..78094f518c53c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; -import
org.elasticsearch.env.Environment; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; import org.elasticsearch.watcher.ResourceWatcherService; @@ -59,8 +58,8 @@ public class DnRoleMapper implements UserRoleMapper { public DnRoleMapper(RealmConfig config, ResourceWatcherService watcherService) { this.config = config; - useUnmappedGroupsAsRoles = DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.get(config.settings()); - file = resolveFile(config.settings(), config.env()); + useUnmappedGroupsAsRoles = config.getSetting(DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING); + file = resolveFile(config); dnRoles = parseFileLenient(file, logger, config.type(), config.name()); FileWatcher watcher = new FileWatcher(file.getParent()); watcher.addListener(new FileListener()); @@ -80,9 +79,9 @@ synchronized void addListener(Runnable listener) { listeners.add(Objects.requireNonNull(listener, "listener cannot be null")); } - public static Path resolveFile(Settings settings, Environment env) { - String location = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.get(settings); - return XPackPlugin.resolveConfigFile(env, location); + public static Path resolveFile(RealmConfig realmConfig) { + String location = realmConfig.getSetting(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING); + return XPackPlugin.resolveConfigFile(realmConfig.env(), location); } /** diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java index 81b0dc6ea4876..6a7609238b9d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java @@ -43,8 +43,8 @@ public boolean alwaysEnforce() { } public static BootstrapCheck create(RealmConfig realmConfig) { - if (realmConfig.enabled() && DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.exists(realmConfig.settings())) { - Path file = DnRoleMapper.resolveFile(realmConfig.settings(), realmConfig.env()); + if (realmConfig.enabled() && realmConfig.hasSetting(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING)) { + Path file = DnRoleMapper.resolveFile(realmConfig); return new RoleMappingFileBootstrapCheck(realmConfig, file); } return null; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java index 2c728fa002c3c..e55530bb5def0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java @@ -8,12 +8,12 @@ import com.unboundid.ldap.sdk.DN; import com.unboundid.ldap.sdk.LDAPException; import com.unboundid.util.LDAPSDKUsageException; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; @@ -159,7 +159,7 @@ public RealmConfig getRealm() { * */ class DistinguishedNamePredicate implements Predicate { - private static final Logger LOGGER = Loggers.getLogger(DistinguishedNamePredicate.class); + private static final Logger LOGGER = LogManager.getLogger(DistinguishedNamePredicate.class); private final String string; private final DN dn; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java index b50cba349dc56..380515d010268 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java @@ -13,8 +13,8 @@ abstract class UsernamePasswordRealm extends Realm { - UsernamePasswordRealm(String type, RealmConfig config) { - super(type, config); + UsernamePasswordRealm(RealmConfig config) { + super(config); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java index 956060a65789c..1723473df05d7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java @@ -29,7 +29,7 @@ public class CompositeRoleMapper implements UserRoleMapper { private List delegates; - public CompositeRoleMapper(String realmType, RealmConfig realmConfig, + public CompositeRoleMapper(RealmConfig realmConfig, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { this(new DnRoleMapper(realmConfig, watcherService), nativeRoleMappingStore); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index e41bcfbfe17a1..3181c14fc272d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.security.authc.support.mapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteResponse; @@ -14,7 +16,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; 
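The NativeRoleMappingStore hunk below repeats a cleanup that runs through the rest of this patch: classes stop extending AbstractComponent (whose constructor wanted a Settings instance) and instead declare their own static Log4j logger, keeping a Settings field only if they still need one. The shape of the pattern:
----
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ExampleStore {
    // Explicit static logger instead of one inherited from AbstractComponent.
    private static final Logger logger = LogManager.getLogger(ExampleStore.class);

    public ExampleStore() {
        logger.debug("created without AbstractComponent");
    }
}
----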
@@ -74,8 +75,9 @@ * is done by this class. Modification operations make a best effort attempt to clear the cache * on all nodes for the user that was modified. */ -public class NativeRoleMappingStore extends AbstractComponent implements UserRoleMapper { +public class NativeRoleMappingStore implements UserRoleMapper { + private static final Logger logger = LogManager.getLogger(NativeRoleMappingStore.class); static final String DOC_TYPE_FIELD = "doc_type"; static final String DOC_TYPE_ROLE_MAPPING = "role-mapping"; @@ -95,12 +97,13 @@ public void onFailure(Exception e) { } }; + private final Settings settings; private final Client client; private final SecurityIndexManager securityIndex; private final List realmsToRefresh = new CopyOnWriteArrayList<>(); public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex) { - super(settings); + this.settings = settings; this.client = client; this.securityIndex = securityIndex; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 8db69ff4e47c4..e1375f1d3c18b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.security.authz; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.CompositeIndicesRequest; @@ -30,7 +32,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -79,7 +80,7 @@ import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; -public class AuthorizationService extends AbstractComponent { +public class AuthorizationService { public static final Setting ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING = Setting.boolSetting(setting("authc.anonymous.authz_exception"), true, Property.NodeScope); public static final String ORIGINATING_ACTION_KEY = "_originating_action_name"; @@ -93,6 +94,7 @@ public class AuthorizationService extends AbstractComponent { private static final String INDEX_SUB_REQUEST_REPLICA = IndexAction.NAME + "[r]"; private static final String DELETE_SUB_REQUEST_PRIMARY = DeleteAction.NAME + "[p]"; private static final String DELETE_SUB_REQUEST_REPLICA = DeleteAction.NAME + "[r]"; + private static final Logger logger = LogManager.getLogger(AuthorizationService.class); private final ClusterService clusterService; private final CompositeRolesStore rolesStore; @@ -108,7 +110,6 @@ public class AuthorizationService extends AbstractComponent { public AuthorizationService(Settings settings, CompositeRolesStore rolesStore, ClusterService clusterService, AuditTrailService auditTrail, AuthenticationFailureHandler authcFailureHandler, ThreadPool threadPool, AnonymousUser anonymousUser) { - super(settings); 
this.rolesStore = rolesStore; this.clusterService = clusterService; this.auditTrail = auditTrail; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 5d9176b18976e..02679a1dfc0dc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -24,6 +24,7 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.PERSISTENT_TASK_ORIGIN; @@ -111,6 +112,7 @@ public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadC case DEPRECATION_ORIGIN: case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: + case INDEX_LIFECYCLE_ORIGIN: securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT); break; default: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 26fa8405ccf00..cbd0d7ca184cc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -54,7 +54,7 @@ class IndicesAndAliasesResolver { private final RemoteClusterResolver remoteClusterResolver; IndicesAndAliasesResolver(Settings settings, ClusterService clusterService) { - this.nameExpressionResolver = new IndexNameExpressionResolver(settings); + this.nameExpressionResolver = new IndexNameExpressionResolver(); this.remoteClusterResolver = new RemoteClusterResolver(settings, clusterService.getClusterSettings()); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index a7a73397d50ab..6168192d4077f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.security.authz.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; @@ -14,7 +16,6 @@ import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -64,7 +65,14 @@ * A composite roles store that combines built in roles, file-based roles, and 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
index 26fa8405ccf00..cbd0d7ca184cc 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
@@ -54,7 +54,7 @@ class IndicesAndAliasesResolver {
     private final RemoteClusterResolver remoteClusterResolver;
 
     IndicesAndAliasesResolver(Settings settings, ClusterService clusterService) {
-        this.nameExpressionResolver = new IndexNameExpressionResolver(settings);
+        this.nameExpressionResolver = new IndexNameExpressionResolver();
         this.remoteClusterResolver = new RemoteClusterResolver(settings, clusterService.getClusterSettings());
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
index a7a73397d50ab..6168192d4077f 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.security.authz.store;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ContextPreservingActionListener;
@@ -14,7 +16,6 @@
 import org.elasticsearch.common.cache.Cache;
 import org.elasticsearch.common.cache.CacheBuilder;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -64,7 +65,13 @@
  * A composite roles store that combines built in roles, file-based roles, and index-based roles. Checks the built in roles first, then the
  * file roles, and finally the index roles.
  */
-public class CompositeRolesStore extends AbstractComponent {
+public class CompositeRolesStore {
+
+    private static final Setting<Integer> CACHE_SIZE_SETTING =
+        Setting.intSetting("xpack.security.authz.store.roles.cache.max_size", 10000, Property.NodeScope);
+    private static final Setting<Integer> NEGATIVE_LOOKUP_CACHE_SIZE_SETTING =
+        Setting.intSetting("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 10000, Property.NodeScope);
+    private static final Logger logger = LogManager.getLogger(CompositeRolesStore.class);
 
     // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using
     // the iterator but when not iterating we can modify the cache without external locking. When making normal modifications to the cache
@@ -79,11 +87,6 @@ public class CompositeRolesStore extends AbstractComponent {
         writeLock = new ReleasableLock(iterationLock.writeLock());
     }
 
-    private static final Setting<Integer> CACHE_SIZE_SETTING =
-        Setting.intSetting("xpack.security.authz.store.roles.cache.max_size", 10000, Property.NodeScope);
-    private static final Setting<Integer> NEGATIVE_LOOKUP_CACHE_SIZE_SETTING =
-        Setting.intSetting("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 10000, Property.NodeScope);
-
     private final FileRolesStore fileRolesStore;
     private final NativeRolesStore nativeRolesStore;
     private final NativePrivilegeStore privilegeStore;
@@ -99,7 +102,6 @@ public CompositeRolesStore(Settings settings, FileRolesStore fileRolesStore, Nat
                                ReservedRolesStore reservedRolesStore, NativePrivilegeStore privilegeStore,
                                List<BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>>> rolesProviders,
                                ThreadContext threadContext, XPackLicenseState licenseState) {
-        super(settings);
         this.fileRolesStore = fileRolesStore;
         fileRolesStore.addListener(this::invalidate);
         this.nativeRolesStore = nativeRolesStore;
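The CompositeRolesStore change above is one instance of a pattern repeated throughout this patch: classes stop extending AbstractComponent and declare their own static Log4j 2 logger instead of inheriting one initialized from the injected Settings. A minimal sketch of the resulting shape (MyStore is a hypothetical class, not from the patch):

---------------------------------------------------------------------------
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Previously: public class MyStore extends AbstractComponent { ... },
// with the logger provided by the base class via super(settings).
public class MyStore {
    private static final Logger logger = LogManager.getLogger(MyStore.class);

    public void invalidate(String role) {
        logger.debug("invalidating role [{}]", role);  // parameterized logging, as before
    }
}
---------------------------------------------------------------------------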
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java
index ccc4f1fe3ea13..a2be72dc6d631 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java
@@ -5,13 +5,13 @@
  */
 package org.elasticsearch.xpack.security.authz.store;
 
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
@@ -52,11 +52,13 @@
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.unmodifiableMap;
 
-public class FileRolesStore extends AbstractComponent implements BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>> {
+public class FileRolesStore implements BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>> {
 
     private static final Pattern IN_SEGMENT_LINE = Pattern.compile("^\\s+.+");
     private static final Pattern SKIP_LINE = Pattern.compile("(^#.*|^\\s*)");
 
+    private static final Logger logger = LogManager.getLogger(FileRolesStore.class);
+    private final Settings settings;
     private final Path file;
     private final XPackLicenseState licenseState;
     private final List<Consumer<Set<String>>> listeners = new ArrayList<>();
@@ -70,7 +72,7 @@ public FileRolesStore(Settings settings, Environment env, ResourceWatcherService
 
     FileRolesStore(Settings settings, Environment env, ResourceWatcherService watcherService, Consumer<Set<String>> listener,
                    XPackLicenseState licenseState) throws IOException {
-        super(settings);
+        this.settings = settings;
         this.file = resolveFile(env);
         if (listener != null) {
             listeners.add(listener);
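FileRolesStore above also shows the companion half of the refactoring: where a class still needs the node settings after super(settings) is gone, it keeps them in a final field assigned in the constructor. A sketch under the same assumptions (MyComponent is hypothetical):

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class MyComponent {
    // Replaces the Settings reference previously held by AbstractComponent.
    private final Settings settings;

    public MyComponent(Settings settings) {
        this.settings = settings;   // previously: super(settings);
    }

    Settings settings() {
        return settings;
    }
}
---------------------------------------------------------------------------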
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java
index a1905db9599d8..63b3ba2c9bba7 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java
@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.security.authz.store;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
@@ -21,7 +23,6 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.CollectionUtils;
@@ -66,7 +67,7 @@
  * {@code NativePrivilegeStore} is a store that reads/writes {@link ApplicationPrivilegeDescriptor} objects,
  * from an Elasticsearch index.
  */
-public class NativePrivilegeStore extends AbstractComponent {
+public class NativePrivilegeStore {
 
     private static final Collector<Tuple<String, ApplicationPrivilegeDescriptor>, ?, Map<String, List<ApplicationPrivilegeDescriptor>>> TUPLES_TO_MAP = Collectors.toMap(
             Tuple::v1,
@@ -74,13 +75,15 @@ public class NativePrivilegeStore extends AbstractComponent {
             a.addAll(b);
             return a;
         });
+    private static final Logger logger = LogManager.getLogger(NativePrivilegeStore.class);
 
+    private final Settings settings;
     private final Client client;
     private final SecurityClient securityClient;
     private final SecurityIndexManager securityIndexManager;
 
     public NativePrivilegeStore(Settings settings, Client client, SecurityIndexManager securityIndexManager) {
-        super(settings);
+        this.settings = settings;
         this.client = client;
         this.securityClient = new SecurityClient(client);
         this.securityIndexManager = securityIndexManager;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java
index 91244a1ad36b9..7895aced4647a 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security.authz.store;
 
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
@@ -13,6 +14,9 @@
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.MultiSearchResponse;
@@ -22,7 +26,6 @@
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -49,7 +52,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -78,7 +80,7 @@
  *
 * No caching is done by this class, it is handled at a higher level
 */
-public class NativeRolesStore extends AbstractComponent implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> {
+public class NativeRolesStore implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> {
 
     // these are no longer used, but leave them around for users upgrading
     private static final Setting<Integer> CACHE_SIZE_SETTING =
@@ -86,7 +88,9 @@ public class NativeRolesStore extends AbstractComponent implements BiConsumer<S
     private static final Setting<TimeValue> CACHE_TTL_SETTING =
         Setting.timeSetting(setting("authz.store.roles.index.cache.ttl"), TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated);
     private static final String ROLE_DOC_TYPE = "doc";
+    private static final Logger logger = LogManager.getLogger(NativeRolesStore.class);
 
+    private final Settings settings;
     private final Client client;
     private final XPackLicenseState licenseState;
@@ -94,7 +98,7 @@ public class NativeRolesStore extends AbstractComponent implements BiConsumer<S
     public void getRoleDescriptors(Set<String> names, final ActionListener<RoleRetrievalResult> listener) {
-        QueryBuilder query;
-        if (names == null || names.isEmpty()) {
-            query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE);
-        } else {
-            final String[] roleNames = names.stream().map(NativeRolesStore::getIdForUser).toArray(String[]::new);
-            query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(ROLE_DOC_TYPE).addIds(roleNames));
-        }
+            QueryBuilder query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE);
             final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
             try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) {
                 SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME)
-                        .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings))
-                        .setQuery(query)
-                        .setSize(1000)
-                        .setFetchSource(true)
-                        .request();
+                    .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings))
+                    .setQuery(query)
+                    .setSize(1000)
+                    .setFetchSource(true)
+                    .request();
                 request.indicesOptions().ignoreUnavailable();
-                final ActionListener<Collection<RoleDescriptor>> descriptorsListener = ActionListener.wrap(
-                        roleDescriptors -> listener.onResponse(RoleRetrievalResult.success(new HashSet<>(roleDescriptors))),
-                        e -> listener.onResponse(RoleRetrievalResult.failure(e)));
-                ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, descriptorsListener),
-                        (hit) -> transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState));
+                ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier,
+                    ActionListener.wrap(roles -> listener.onResponse(RoleRetrievalResult.success(new HashSet<>(roles))),
+                        e -> listener.onResponse(RoleRetrievalResult.failure(e)))),
+                    (hit) -> transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState));
             }
         });
+        } else if (names.size() == 1) {
+            getRoleDescriptor(Objects.requireNonNull(names.iterator().next()), listener);
+        } else {
+            securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> {
+                final String[] roleIds = names.stream().map(NativeRolesStore::getIdForRole).toArray(String[]::new);
+                MultiGetRequest multiGetRequest = client.prepareMultiGet().add(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, roleIds).request();
+                executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, multiGetRequest,
+                    ActionListener.wrap(mGetResponse -> {
+                        final MultiGetItemResponse[] responses = mGetResponse.getResponses();
+                        Set<RoleDescriptor> descriptors = new HashSet<>();
+                        for (int i = 0; i < responses.length; i++) {
+                            MultiGetItemResponse item = responses[i];
+                            if (item.isFailed()) {
+                                final Exception failure = item.getFailure().getFailure();
+                                for (int j = i + 1; j < responses.length; j++) {
+                                    item = responses[j];
+                                    if (item.isFailed()) {
+                                        failure.addSuppressed(item.getFailure().getFailure());
+                                    }
+                                }
+                                listener.onResponse(RoleRetrievalResult.failure(failure));
+                                return;
+                            } else if (item.getResponse().isExists()) {
+                                descriptors.add(transformRole(item.getResponse()));
+                            }
+                        }
+                        listener.onResponse(RoleRetrievalResult.success(descriptors));
+                    },
+                    e -> listener.onResponse(RoleRetrievalResult.failure(e))), client::multiGet);
+            });
         }
     }
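The multi-get branch above reports the first per-item failure as the primary exception and attaches any later ones as suppressed exceptions, so no cause is silently dropped. The same aggregation pattern in isolation, as a runnable sketch (the class and method names are hypothetical):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

final class SuppressedAggregationSketch {
    // Fold a list of failures into one primary exception carrying the rest.
    static Exception aggregate(List<Exception> failures) {
        Exception primary = failures.get(0);
        for (int i = 1; i < failures.size(); i++) {
            // Note: never primary.addSuppressed(primary); Throwable rejects self-suppression.
            primary.addSuppressed(failures.get(i));
        }
        return primary;
    }

    public static void main(String[] args) {
        Exception e = aggregate(Arrays.asList(
            new IllegalStateException("role doc 1 fetch failed"),
            new IllegalStateException("role doc 2 fetch failed")));
        System.out.println(e.getMessage() + ", suppressed=" + e.getSuppressed().length); // suppressed=1
    }
}
---------------------------------------------------------------------------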
@@ -152,7 +177,7 @@ public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionLi
         } else {
             securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> {
                 DeleteRequest request = client.prepareDelete(SecurityIndexManager.SECURITY_INDEX_NAME,
-                        ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request();
+                        ROLE_DOC_TYPE, getIdForRole(deleteRoleRequest.name())).request();
                 request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
                 executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
                         new ActionListener<DeleteResponse>() {
@@ -192,7 +217,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final
             listener.onFailure(e);
             return;
         }
-        final IndexRequest indexRequest = client.prepareIndex(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName()))
+        final IndexRequest indexRequest = client.prepareIndex(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForRole(role.getName()))
             .setSource(xContentBuilder)
             .setRefreshPolicy(request.getRefreshPolicy())
             .request();
@@ -308,7 +333,7 @@ private void executeGetRoleRequest(String role, ActionListener<GetResponse> list
         securityIndex.checkIndexVersionThenExecute(listener::onFailure,
             () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
                 client.prepareGet(SECURITY_INDEX_NAME,
-                    ROLE_DOC_TYPE, getIdForUser(role)).request(),
+                    ROLE_DOC_TYPE, getIdForRole(role)).request(),
                 listener, client::get));
     }
@@ -388,7 +413,7 @@ public static void addSettings(List<Setting<?>> settings) {
     /**
      * Gets the document's id field for the given role name.
      */
-    private static String getIdForUser(final String roleName) {
+    private static String getIdForRole(final String roleName) {
         return ROLE_TYPE + "-" + roleName;
     }
 }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
index dd1e387b98989..c2066996f9ce4 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
@@ -24,6 +24,7 @@
  */
 public abstract class SecurityBaseRestHandler extends BaseRestHandler {
 
+    private final Settings settings;
     protected final XPackLicenseState licenseState;
 
     /**
@@ -32,6 +33,7 @@ public abstract class SecurityBaseRestHandler extends BaseRestHandler {
      */
     protected SecurityBaseRestHandler(Settings settings, XPackLicenseState licenseState) {
         super(settings);
+        this.settings = settings;
         this.licenseState = licenseState;
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java
index 1b64b3ce2baee..9907e3345232e 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java
@@ -34,12 +34,13 @@
 public class RestChangePasswordAction extends SecurityBaseRestHandler implements RestRequestFilter {
 
     private final SecurityContext securityContext;
-    private final Hasher passwordHasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings));
+    private final Hasher passwordHasher;
 
     public RestChangePasswordAction(Settings settings, RestController controller, SecurityContext securityContext,
                                     XPackLicenseState licenseState) {
         super(settings, licenseState);
         this.securityContext =
securityContext; + passwordHasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings)); controller.registerHandler(POST, "/_xpack/security/user/{username}/_password", this); controller.registerHandler(PUT, "/_xpack/security/user/{username}/_password", this); controller.registerHandler(POST, "/_xpack/security/user/_password", this); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java index 521ab76c96b1e..486ac3fa35929 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java @@ -36,10 +36,11 @@ */ public class RestPutUserAction extends SecurityBaseRestHandler implements RestRequestFilter { - private final Hasher passwordHasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings)); + private final Hasher passwordHasher; public RestPutUserAction(Settings settings, RestController controller, XPackLicenseState licenseState) { super(settings, licenseState); + passwordHasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(settings)); controller.registerHandler(POST, "/_xpack/security/user/{username}", this); controller.registerHandler(PUT, "/_xpack/security/user/{username}", this); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 14081e136d33a..b924d378f9a2a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.security.transport; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -49,7 +49,7 @@ import static org.elasticsearch.xpack.core.security.SecurityField.setting; -public class SecurityServerTransportInterceptor extends AbstractComponent implements TransportInterceptor { +public class SecurityServerTransportInterceptor implements TransportInterceptor { private static final Function> TRANSPORT_TYPE_SETTING_TEMPLATE = key -> new Setting<>(key, "node", v -> { if (v.equals("node") || v.equals("client")) { @@ -58,6 +58,7 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem throw new IllegalArgumentException("type must be one of [client, node]"); }, Setting.Property.NodeScope); private static final String TRANSPORT_TYPE_SETTING_KEY = "xpack.security.type"; + private static final Logger logger = LogManager.getLogger(SecurityServerTransportInterceptor.class); public static final 
Setting.AffixSetting<String> TRANSPORT_TYPE_PROFILE_SETTING = Setting.affixKeySetting("transport.profiles.",
         TRANSPORT_TYPE_SETTING_KEY, TRANSPORT_TYPE_SETTING_TEMPLATE);
@@ -83,7 +84,6 @@ public SecurityServerTransportInterceptor(Settings settings,
                                               SecurityContext securityContext,
                                               DestructiveOperations destructiveOperations,
                                               ClusterService clusterService) {
-        super(settings);
         this.settings = settings;
         this.threadPool = threadPool;
         this.authcService = authcService;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
index d59064619fc6d..40ad10b8acb88 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security.transport;
 
+import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -13,7 +14,6 @@
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
 import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
 import org.elasticsearch.action.support.DestructiveOperations;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.transport.TaskTransportChannel;
 import org.elasticsearch.transport.TcpChannel;
@@ -57,7 +57,7 @@ void inbound(String action, TransportRequest request, TransportChannel transport
  * request is properly authenticated and authorized
  */
 class NodeProfile implements ServerTransportFilter {
-    private static final Logger logger = Loggers.getLogger(NodeProfile.class);
+    private static final Logger logger = LogManager.getLogger(NodeProfile.class);
 
     private final AuthenticationService authcService;
     private final AuthorizationService authzService;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java
index 2238c12cfb446..df36527d7acac 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.settings.SecureString;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.test.SecurityIntegTestCase;
@@ -60,7 +59,7 @@ protected String configRoles() {
 
     public void testDateMathExpressionsCanBeAuthorized() throws Exception {
         final String expression = "";
-        final String expectedIndexName = new IndexNameExpressionResolver(Settings.EMPTY).resolveDateMathExpression(expression);
+        final String expectedIndexName = new IndexNameExpressionResolver().resolveDateMathExpression(expression);
         final boolean refeshOnOperation = randomBoolean();
         Client client = client().filterWithHeader(Collections.singletonMap("Authorization",
             basicAuthHeaderValue("user1", USERS_PASSWD)));
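The test updates that follow all encode the same realm-configuration change: the realm type moves out of a `type` attribute and into the setting key itself, as `xpack.security.authc.realms.<type>.<name>.*`. A hedged before/after sketch (the key names are taken from the diffs below; the helper class is hypothetical):

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Settings;

final class RealmSettingsLayoutSketch {
    // Old layout: the realm name is the namespace and the type is an attribute.
    static Settings oldStyle() {
        return Settings.builder()
            .put("xpack.security.authc.realms.ldap1.type", "ldap")
            .put("xpack.security.authc.realms.ldap1.order", 0)
            .build();
    }

    // New layout: the type is part of the key, so no 'type' attribute is needed.
    static Settings newStyle() {
        return Settings.builder()
            .put("xpack.security.authc.realms.ldap.ldap1.order", 0)
            .build();
    }
}
---------------------------------------------------------------------------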
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
index 6098562ec3a89..fa1cfbcba9993 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java
@@ -135,10 +135,8 @@ public Settings nodeSettings(int nodeOrdinal) {
                 .put(LoggingAuditTrail.EMIT_HOST_NAME_SETTING.getKey(), randomBoolean())
                 .put(LoggingAuditTrail.EMIT_NODE_NAME_SETTING.getKey(), randomBoolean())
                 .put(LoggingAuditTrail.EMIT_NODE_ID_SETTING.getKey(), randomBoolean())
-                .put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE)
-                .put("xpack.security.authc.realms.file.order", 0)
-                .put("xpack.security.authc.realms.index.type", NativeRealmSettings.TYPE)
-                .put("xpack.security.authc.realms.index.order", "1");
+                .put("xpack.security.authc.realms." + FileRealmSettings.TYPE + ".file.order", 0)
+                .put("xpack.security.authc.realms." + NativeRealmSettings.TYPE + ".index.order", "1");
         addNodeSSLSettings(builder);
         return builder.build();
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
index 3bf3bb4dc8641..b4584200a4a33 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings;
 import org.elasticsearch.xpack.security.LocalStateSecurity;
 import org.hamcrest.Matcher;
@@ -39,37 +40,35 @@ public class SettingsFilterTests extends ESTestCase {
     public void testFiltering() throws Exception {
         final boolean useLegacyLdapBindPassword = randomBoolean();
 
-        configureUnfilteredSetting("xpack.security.authc.realms.file.type", "file");
+        configureUnfilteredSetting("xpack.security.authc.realms.file.file1.enabled", "true");
 
         // ldap realm filtering
-        configureUnfilteredSetting("xpack.security.authc.realms.ldap1.type", "ldap");
-        configureUnfilteredSetting("xpack.security.authc.realms.ldap1.enabled", "false");
-        configureUnfilteredSetting("xpack.security.authc.realms.ldap1.url", "ldap://host.domain");
-        configureFilteredSetting("xpack.security.authc.realms.ldap1.hostname_verification", Boolean.toString(randomBoolean()));
-        configureFilteredSetting("xpack.security.authc.realms.ldap1.bind_dn", randomAlphaOfLength(5));
+        configureUnfilteredSetting("xpack.security.authc.realms.ldap.ldap1.enabled", "false");
+        configureUnfilteredSetting("xpack.security.authc.realms.ldap.ldap1.url", "ldap://host.domain");
+        configureFilteredSetting("xpack.security.authc.realms.ldap.ldap1.hostname_verification", Boolean.toString(randomBoolean()));
+        configureFilteredSetting("xpack.security.authc.realms.ldap.ldap1.bind_dn", randomAlphaOfLength(5));
         if (useLegacyLdapBindPassword) {
-            configureFilteredSetting("xpack.security.authc.realms.ldap1.bind_password", randomAlphaOfLength(5));
+            configureFilteredSetting("xpack.security.authc.realms.ldap.ldap1.bind_password", randomAlphaOfLength(5));
         } else {
configureSecureSetting("xpack.security.authc.realms.ldap1.secure_bind_password", randomAlphaOfLengthBetween(3, 8)); + configureSecureSetting("xpack.security.authc.realms.ldap.ldap1.secure_bind_password", randomAlphaOfLengthBetween(3, 8)); } // active directory filtering - configureUnfilteredSetting("xpack.security.authc.realms.ad1.type", "active_directory"); - configureUnfilteredSetting("xpack.security.authc.realms.ad1.enabled", "false"); - configureUnfilteredSetting("xpack.security.authc.realms.ad1.url", "ldap://host.domain"); - configureFilteredSetting("xpack.security.authc.realms.ad1.hostname_verification", Boolean.toString(randomBoolean())); + configureUnfilteredSetting("xpack.security.authc.realms.active_directory.ad1.enabled", "false"); + configureUnfilteredSetting("xpack.security.authc.realms.active_directory.ad1.url", "ldap://host.domain"); + configureFilteredSetting("xpack.security.authc.realms.active_directory.ad1.hostname_verification", + Boolean.toString(randomBoolean())); // pki filtering - configureUnfilteredSetting("xpack.security.authc.realms.pki1.type", "pki"); - configureUnfilteredSetting("xpack.security.authc.realms.pki1.order", "0"); + configureUnfilteredSetting("xpack.security.authc.realms.pki.pki1.order", "0"); if (inFipsJvm() == false) { - configureFilteredSetting("xpack.security.authc.realms.pki1.truststore.path", + configureFilteredSetting("xpack.security.authc.realms.pki.pki1.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks").toString()); configureFilteredSetting("xpack.ssl.keystore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks").toString()); } - configureSecureSetting("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only"); - configureFilteredSetting("xpack.security.authc.realms.pki1.truststore.algorithm", "SunX509"); + configureSecureSetting("xpack.security.authc.realms.pki.pki1.truststore.secure_password", "truststore-testnode-only"); + configureFilteredSetting("xpack.security.authc.realms.pki.pki1.truststore.algorithm", "SunX509"); configureFilteredSetting("xpack.ssl.cipher_suites", @@ -134,7 +133,9 @@ public void testFiltering() throws Exception { } if (useLegacyLdapBindPassword) { - assertSettingDeprecationsAndWarnings(new Setting[]{PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD}); + assertSettingDeprecationsAndWarnings(new Setting[]{PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD + .apply(LdapRealmSettings.LDAP_TYPE) + .getConcreteSettingForNamespace("ldap1")}); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java index 6966b7edf67d8..fdaa82c602194 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.hamcrest.Matchers; @@ -26,7 +25,7 @@ public void testPkiRealmBootstrapDefault() throws Exception { public void testBootstrapCheckWithPkiRealm() throws Exception { Settings settings = 
Settings.builder() - .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) + .put("xpack.security.authc.realms.pki.test_pki.order", 0) .put("path.home", createTempDir()) .build(); Environment env = TestEnvironment.newEnvironment(settings); @@ -88,8 +87,7 @@ private BootstrapCheck.BootstrapCheckResult runCheck(Settings settings, Environm public void testBootstrapCheckWithDisabledRealm() throws Exception { Settings settings = Settings.builder() - .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) - .put("xpack.security.authc.realms.test_pki.enabled", false) + .put("xpack.security.authc.realms.pki.test_pki.enabled", false) .put("xpack.ssl.client_authentication", "none") .put("path.home", createTempDir()) .build(); @@ -102,7 +100,7 @@ public void testBootstrapCheckWithClosedSecuredSetting() throws Exception { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("xpack.security.http.ssl.secure_key_passphrase", "testnode"); Settings settings = Settings.builder() - .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) + .put("xpack.security.authc.realms.pki.test_pki.order", 0) .put("xpack.security.http.ssl.enabled", true) .put("xpack.security.http.ssl.client_authentication", expectFail ? "none" : "optional") .put("xpack.security.http.ssl.key", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 857b1694ac87a..ec0a20faf58c4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; -import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; @@ -46,6 +45,7 @@ import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; import org.elasticsearch.xpack.security.authc.Realms; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -61,11 +61,13 @@ import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.mockito.Mockito.mock; @@ -79,9 +81,11 @@ public class SecurityTests extends ESTestCase { public static class DummyExtension implements SecurityExtension { private String realmType; + DummyExtension(String realmType) { this.realmType = realmType; } + @Override public Map 
getRealms(ResourceWatcherService resourceWatcherService) { return Collections.singletonMap(realmType, config -> null); @@ -153,7 +157,7 @@ public void testCustomRealmExtension() throws Exception { public void testCustomRealmExtensionConflict() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> createComponents(Settings.EMPTY, new DummyExtension(FileRealmSettings.TYPE))); + () -> createComponents(Settings.EMPTY, new DummyExtension(FileRealmSettings.TYPE))); assertEquals("Realm type [" + FileRealmSettings.TYPE + "] is already registered", e.getMessage()); } @@ -175,8 +179,8 @@ public void testDisabledByDefault() throws Exception { public void testIndexAuditTrail() throws Exception { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); Collection components = createComponents(settings); AuditTrailService service = findComponent(AuditTrailService.class, components); assertNotNull(service); @@ -186,8 +190,8 @@ public void testIndexAuditTrail() throws Exception { public void testIndexAndLoggingAuditTrail() throws Exception { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); Collection components = createComponents(settings); AuditTrailService service = findComponent(AuditTrailService.class, components); assertNotNull(service); @@ -198,8 +202,8 @@ public void testIndexAndLoggingAuditTrail() throws Exception { public void testUnknownOutput() { Settings settings = Settings.builder() - .put(XPackSettings.AUDIT_ENABLED.getKey(), true) - .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createComponents(settings)); assertEquals("Unknown audit trail output [foo]", e.getMessage()); } @@ -212,9 +216,9 @@ public void testHttpSettingDefaults() throws Exception { public void testTransportSettingNetty4Both() { Settings both4 = Security.additionalSettings(Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) - .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) - .build(), true, false); + .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) + .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) + .build(), true, false); assertFalse(NetworkModule.TRANSPORT_TYPE_SETTING.exists(both4)); assertFalse(NetworkModule.HTTP_TYPE_SETTING.exists(both4)); } @@ -237,12 +241,28 @@ public void testTransportSettingValidation() { public void testSettingFilter() throws Exception { createComponents(Settings.EMPTY); final List filter = security.getSettingsFilter(); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.bind_dn"))); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.bind_password"))); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*." 
+ SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING))); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.password"))); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.path"))); - assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.algorithm"))); + assertThat(filter, hasItem("transport.profiles.*.xpack.security.*")); + } + + public void testFilteredSettings() throws Exception { + createComponents(Settings.EMPTY); + final List> realmSettings = security.getSettings().stream() + .filter(s -> s.getKey().startsWith("xpack.security.authc.realms")) + .collect(Collectors.toList()); + + Arrays.asList( + "bind_dn", "bind_password", + "hostname_verification", + "truststore.password", "truststore.path", "truststore.algorithm", + "keystore.key_password").forEach(suffix -> { + + final List> matching = realmSettings.stream() + .filter(s -> s.getKey().endsWith("." + suffix)) + .collect(Collectors.toList()); + assertThat("For suffix " + suffix, matching, Matchers.not(empty())); + matching.forEach(setting -> assertThat("For setting " + setting, + setting.getProperties(), Matchers.hasItem(Setting.Property.Filtered))); + }); } public void testJoinValidatorOnDisabledSecurity() throws Exception { @@ -258,7 +278,7 @@ public void testTLSJoinValidator() throws Exception { assertNotNull(joinValidator); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); joinValidator.accept(node, ClusterState.builder(ClusterName.DEFAULT).build()); - int numIters = randomIntBetween(1,10); + int numIters = randomIntBetween(1, 10); for (int i = 0; i < numIters; i++) { boolean tlsOn = randomBoolean(); String discoveryType = randomFrom("single-node", "zen", randomAlphaOfLength(4)); @@ -318,18 +338,18 @@ public void testIndexJoinValidator_Old_And_Rolling() throws Exception { assertNotNull(joinValidator); DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT - 1)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT - 1)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> joinValidator.accept(node, clusterState)); + () -> joinValidator.accept(node, clusterState)); assertThat(e.getMessage(), equalTo("Security index is not on the current version [6] - " + - "The Upgrade API must be run for 7.x nodes to join the cluster")); + "The Upgrade API must be run for 7.x nodes to join the cluster")); } public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { @@ -339,14 +359,14 @@ public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { DiscoveryNode node = new 
DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); int indexFormat = randomBoolean() ? INTERNAL_INDEX_FORMAT : INTERNAL_INDEX_FORMAT - 1; IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); joinValidator.accept(node, clusterState); } @@ -357,14 +377,14 @@ public void testIndexUpgradeValidatorWithUpToDateIndex() throws Exception { Version version = randomBoolean() ? Version.CURRENT : Version.V_6_1_0; DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) - .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT)) - .numberOfShards(1).numberOfReplicas(0) - .build(); + .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT)) + .numberOfShards(1).numberOfReplicas(0) + .build(); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); joinValidator.accept(node, clusterState); } @@ -375,8 +395,8 @@ public void testIndexUpgradeValidatorWithMissingIndex() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes).build(); joinValidator.accept(node, clusterState); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java index 95d821cb256e9..f6e5552ddbc53 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.TemplateUpgradeService; import 
org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.SecurityIntegTestCase; @@ -55,7 +54,7 @@ public void testTemplatesWorkAsExpected() throws Exception { assertAcked(putIndexTemplateResponse); assertTemplates("removed-template", "added-template"); - TemplateUpgradeService templateUpgradeService = new TemplateUpgradeService(Settings.EMPTY, client, clusterService, threadPool, + TemplateUpgradeService templateUpgradeService = new TemplateUpgradeService(client, clusterService, threadPool, Collections.singleton(indexTemplateMetaDataUpgraders)); // ensure the cluster listener gets triggered diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java index 93df605a74fc0..b14ed2e4848b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -83,7 +83,7 @@ public void init() throws Exception { when(state.nodes()).thenReturn(nodes); SecurityContext securityContext = new SecurityContext(settings, threadContext); - filter = new SecurityActionFilter(Settings.EMPTY, authcService, authzService, + filter = new SecurityActionFilter(authcService, authzService, licenseState, new HashSet<>(), threadPool, securityContext, destructiveOperations); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java index a5798be9746ed..bf7ab102c2a21 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java @@ -40,7 +40,7 @@ public void testInterceptorThrowsWhenFLSDLSEnabled() { when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), new RealmRef(null, null, null)); final FieldPermissions fieldPermissions; @@ -87,7 +87,7 @@ public void testInterceptorThrowsWhenTargetHasGreaterPermissions() throws Except when(licenseState.isAuditingAllowed()).thenReturn(true); when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); Authentication authentication = new 
Authentication(new User("john", "role"), new RealmRef(null, null, null), new RealmRef(null, null, null)); Role role = Role.builder() diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java index 008928794db46..e956ad5e03151 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java @@ -44,7 +44,7 @@ public void testResizeRequestInterceptorThrowsWhenFLSDLSEnabled() { ThreadPool threadPool = mock(ThreadPool.class); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); when(threadPool.getThreadContext()).thenReturn(threadContext); - AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); final Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), null); final FieldPermissions fieldPermissions; final boolean useFls = randomBoolean(); @@ -67,7 +67,7 @@ public void testResizeRequestInterceptorThrowsWhenFLSDLSEnabled() { threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); ResizeRequestInterceptor resizeRequestInterceptor = - new ResizeRequestInterceptor(Settings.EMPTY, threadPool, licenseState, auditTrailService); + new ResizeRequestInterceptor(threadPool, licenseState, auditTrailService); ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, () -> resizeRequestInterceptor.intercept(new ResizeRequest("bar", "foo"), authentication, role, action)); @@ -84,7 +84,7 @@ public void testResizeRequestInterceptorThrowsWhenTargetHasGreaterPermissions() ThreadPool threadPool = mock(ThreadPool.class); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); when(threadPool.getThreadContext()).thenReturn(threadContext); - AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); final Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), null); Role role = Role.builder() .add(IndexPrivilege.ALL, "target") @@ -94,7 +94,7 @@ public void testResizeRequestInterceptorThrowsWhenTargetHasGreaterPermissions() IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.emptyMap()); threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); ResizeRequestInterceptor resizeRequestInterceptor = - new ResizeRequestInterceptor(Settings.EMPTY, threadPool, licenseState, auditTrailService); + new ResizeRequestInterceptor(threadPool, licenseState, auditTrailService); ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, () -> resizeRequestInterceptor.intercept(new ResizeRequest("target", "source"), authentication, role, action)); assertEquals("Resizing an index is not allowed when the target index has more permissions than the source index", diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
index 612a0ea83c0f8..93ece479ddcbd 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
@@ -45,7 +45,7 @@ public void testReservedRole() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
-        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportDeleteRoleAction action = new TransportDeleteRoleAction(mock(ActionFilters.class),
 rolesStore, transportService);
 DeleteRoleRequest request = new DeleteRoleRequest();
@@ -76,7 +76,7 @@ public void testValidRole() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
-        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportDeleteRoleAction action = new TransportDeleteRoleAction(mock(ActionFilters.class),
 rolesStore, transportService);
 DeleteRoleRequest request = new DeleteRoleRequest();
@@ -120,8 +120,7 @@ public void testException() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
-        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class),
-            rolesStore, transportService);
+        TransportDeleteRoleAction action = new TransportDeleteRoleAction(mock(ActionFilters.class), rolesStore, transportService);
 DeleteRoleRequest request = new DeleteRoleRequest();
 request.name(roleName);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
index 9fffba3195515..133584ed59327 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
@@ -48,7 +48,7 @@ public void testReservedRoles() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportGetRolesAction action = new TransportGetRolesAction(mock(ActionFilters.class),
 rolesStore, transportService, new ReservedRolesStore());
 final int size = randomIntBetween(1, ReservedRolesStore.names().size());
@@ -94,7 +94,7 @@ public void testStoreRoles() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportGetRolesAction action = new TransportGetRolesAction(mock(ActionFilters.class),
 rolesStore, transportService, new ReservedRolesStore());
 GetRolesRequest request = new GetRolesRequest();
@@ -146,7 +146,7 @@ public void testGetAllOrMix() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportGetRolesAction action = new TransportGetRolesAction(mock(ActionFilters.class),
 rolesStore, transportService, new ReservedRolesStore());
 final List<String> expectedNames = new ArrayList<>();
@@ -209,7 +209,7 @@ public void testException() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class),
+        TransportGetRolesAction action = new TransportGetRolesAction(mock(ActionFilters.class),
 rolesStore, transportService, new ReservedRolesStore());
 GetRolesRequest request = new GetRolesRequest();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
index 1634462b27dee..3cbb7782688e6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
@@ -46,7 +46,7 @@ public void testReservedRole() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);
+        TransportPutRoleAction action = new TransportPutRoleAction(mock(ActionFilters.class), rolesStore, transportService);
 PutRoleRequest request = new PutRoleRequest();
 request.name(roleName);
@@ -76,7 +76,7 @@ public void testValidRole() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);
+        TransportPutRoleAction action = new TransportPutRoleAction(mock(ActionFilters.class), rolesStore, transportService);
 final boolean created = randomBoolean();
 PutRoleRequest request = new PutRoleRequest();
@@ -119,7 +119,7 @@ public void testException() {
 NativeRolesStore rolesStore = mock(NativeRolesStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);
+        TransportPutRoleAction action = new TransportPutRoleAction(mock(ActionFilters.class), rolesStore, transportService);
 PutRoleRequest request = new PutRoleRequest();
 request.name(roleName);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
index 67df9013e752e..12be8cadcbe25 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
@@ -47,7 +47,7 @@ public void setupMocks() {
 store = mock(NativeRoleMappingStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        action = new TransportGetRoleMappingsAction(Settings.EMPTY, mock(ActionFilters.class), transportService, store);
+        action = new TransportGetRoleMappingsAction(mock(ActionFilters.class), transportService, store);
 namesRef = new AtomicReference<>(null);
 result = Collections.emptyList();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
index d3a0cd2e9c715..91222a5af5845 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
@@ -43,7 +43,7 @@ public void setupMocks() {
 store = mock(NativeRoleMappingStore.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        action = new TransportPutRoleMappingAction(Settings.EMPTY, mock(ActionFilters.class), transportService, store);
+        action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store);
 requestRef = new AtomicReference<>(null);
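Every hunk above makes the same mechanical change: the `Settings` argument is dropped from the transport action constructors, because those actions no longer consume node settings. A minimal, self-contained sketch of the before/after shape of such a constructor; all class names here are illustrative stand-ins, not the real Elasticsearch classes, and the action name string is only an example:

```java
// Stand-in for org.elasticsearch.common.settings.Settings, for illustration only.
final class Settings {
    static final Settings EMPTY = new Settings();
    private Settings() {}
}

abstract class HandledTransportActionSketch {
    protected final String actionName;

    // Before this refactoring the signature would have been
    // (Settings settings, String actionName); the Settings parameter is gone.
    protected HandledTransportActionSketch(String actionName) {
        this.actionName = actionName;
    }
}

final class TransportDeleteRoleActionSketch extends HandledTransportActionSketch {
    TransportDeleteRoleActionSketch() {
        // was: super(settings, "cluster:admin/xpack/security/role/delete")
        super("cluster:admin/xpack/security/role/delete");
    }
}
```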
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
index b0ca953b69c1f..ea5bb6e9f97a7 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
@@ -54,6 +54,7 @@
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig.RealmIdentifier;
 import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
 import org.elasticsearch.xpack.core.security.user.User;
@@ -84,6 +85,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
@@ -177,20 +179,24 @@ void doExecute(Action action, Request request, ActionListener
 null, null, Collections.emptySet());
 final Realms realms = mock(Realms.class);
-        action = new TransportSamlInvalidateSessionAction(settings, transportService, mock(ActionFilters.class),tokenService, realms);
+        action = new TransportSamlInvalidateSessionAction(transportService, mock(ActionFilters.class),tokenService, realms);
 final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI());
 final Environment env = TestEnvironment.newEnvironment(settings);
+        final RealmIdentifier identifier = new RealmIdentifier("saml", "saml1");
 final Settings realmSettings = Settings.builder()
-            .put(SamlRealmSettings.IDP_METADATA_PATH.getKey(), metadata.toString())
-            .put(SamlRealmSettings.IDP_ENTITY_ID.getKey(), SamlRealmTests.TEST_IDP_ENTITY_ID)
-            .put(SamlRealmSettings.SP_ENTITY_ID.getKey(), SamlRealmTestHelper.SP_ENTITY_ID)
-            .put(SamlRealmSettings.SP_ACS.getKey(), SamlRealmTestHelper.SP_ACS_URL)
-            .put(SamlRealmSettings.SP_LOGOUT.getKey(), SamlRealmTestHelper.SP_LOGOUT_URL)
-            .put("attributes.principal", "uid")
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.IDP_METADATA_PATH), metadata.toString())
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.IDP_ENTITY_ID), SamlRealmTests.TEST_IDP_ENTITY_ID)
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.SP_ENTITY_ID), SamlRealmTestHelper.SP_ENTITY_ID)
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.SP_ACS), SamlRealmTestHelper.SP_ACS_URL)
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.SP_LOGOUT), SamlRealmTestHelper.SP_LOGOUT_URL)
+            .put(getFullSettingKey(identifier.getName(), SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid")
 .build();
-        final RealmConfig realmConfig = new RealmConfig("saml1", realmSettings, settings, env, threadContext);
+        final RealmConfig realmConfig = new RealmConfig(
+            identifier,
+            mergeSettings(realmSettings, settings),
+            env, threadContext);
 samlRealm = SamlRealmTestHelper.buildRealm(realmConfig, null);
 when(realms.realm(realmConfig.name())).thenReturn(samlRealm);
 when(realms.stream()).thenAnswer(i -> Stream.of(samlRealm));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
index f2f94176edc15..bb57b9a8583f1 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
@@ -45,6 +45,7 @@
 import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig.RealmIdentifier;
 import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
 import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.core.ssl.SSLService;
@@ -69,6 +70,7 @@
 import java.util.Map;
 import java.util.function.Consumer;
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.elasticsearch.xpack.security.authc.TokenServiceTests.mockGetTokenFromId;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.startsWith;
@@ -190,19 +192,22 @@ public void setup() throws Exception {
 final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
 final Realms realms = mock(Realms.class);
-        action = new TransportSamlLogoutAction(settings, transportService, mock(ActionFilters.class), realms, tokenService);
+        action = new TransportSamlLogoutAction(transportService, mock(ActionFilters.class), realms, tokenService);
 final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI());
 final Environment env = TestEnvironment.newEnvironment(settings);
+
+        final RealmIdentifier realmIdentifier = new RealmIdentifier("saml", "saml1");
 final Settings realmSettings = Settings.builder()
-            .put(SamlRealmSettings.IDP_METADATA_PATH.getKey(), metadata.toString())
-            .put(SamlRealmSettings.IDP_ENTITY_ID.getKey(), SamlRealmTests.TEST_IDP_ENTITY_ID)
-            .put(SamlRealmSettings.SP_ENTITY_ID.getKey(), SP_URL)
-            .put(SamlRealmSettings.SP_ACS.getKey(), SP_URL)
-            .put("attributes.principal", "uid")
+            .put(getFullSettingKey("saml1", SamlRealmSettings.IDP_METADATA_PATH), metadata.toString())
+            .put(getFullSettingKey("saml1", SamlRealmSettings.IDP_ENTITY_ID), SamlRealmTests.TEST_IDP_ENTITY_ID)
+            .put(getFullSettingKey("saml1", SamlRealmSettings.SP_ENTITY_ID), SP_URL)
+            .put(getFullSettingKey("saml1", SamlRealmSettings.SP_ACS), SP_URL)
+            .put(getFullSettingKey("saml1", SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid")
 .build();
-        final RealmConfig realmConfig = new RealmConfig("saml1", realmSettings, settings, env, threadContext);
+        final RealmConfig realmConfig = new RealmConfig(realmIdentifier, mergeSettings(realmSettings, settings),
+            env, threadContext);
 samlRealm = SamlRealm.create(realmConfig, mock(SSLService.class), mock(ResourceWatcherService.class), mock(UserRoleMapper.class));
 when(realms.realm(realmConfig.name())).thenReturn(samlRealm);
 }
@@ -219,7 +224,7 @@ public void testLogoutInvalidatesToken() throws Exception {
 .put(SamlRealm.USER_METADATA_NAMEID_FORMAT, NameID.TRANSIENT)
 .put(SamlRealm.USER_METADATA_NAMEID_VALUE, nameId)
 .map();
-        final User user = new User("punisher", new String[] { "superuser" }, null, null, userMetaData, true);
+        final User user = new User("punisher", new String[]{"superuser"}, null, null, userMetaData, true);
 final Authentication.RealmRef realmRef = new Authentication.RealmRef(samlRealm.name(), SamlRealmSettings.TYPE, "node01");
 final Authentication authentication = new Authentication(user, realmRef, null);
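In both SAML test files the realm settings are no longer written as bare relative keys; they are built with `getFullSettingKey(...)` so that they live under the fully qualified realm namespace, and then merged into the node settings via `mergeSettings`. A minimal sketch of the key shape this produces; the prefix/type/name layout matches the `realmPrefix(type, name)` helper in RealmSettingsTests further down, but the class and method names here are stand-ins, not the real `RealmSettings` API, and the `sp.entity_id` suffix is an assumed example:

```java
// Illustrative only: shows the fully-qualified key layout used by the new scheme.
final class RealmKeySketch {
    static final String PREFIX = "xpack.security.authc.realms.";

    static String fullSettingKey(String realmType, String realmName, String suffix) {
        return PREFIX + realmType + "." + realmName + "." + suffix;
    }

    public static void main(String[] args) {
        // Prints: xpack.security.authc.realms.saml.saml1.sp.entity_id
        System.out.println(fullSettingKey("saml", "saml1", "sp.entity_id"));
    }
}
```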
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java
index a6b92d79f1589..d7640264cc079 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java
@@ -148,7 +148,7 @@ public void testClientCredentialsCreatesWithoutRefreshToken() throws Exception {
 Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null);
 authentication.writeToContext(threadPool.getThreadContext());
-        final TransportCreateTokenAction action = new TransportCreateTokenAction(SETTINGS, threadPool,
+        final TransportCreateTokenAction action = new TransportCreateTokenAction(threadPool,
 mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService, authenticationService);
 final CreateTokenRequest createTokenRequest = new CreateTokenRequest();
@@ -172,7 +172,7 @@ public void testPasswordGrantTypeCreatesWithRefreshToken() throws Exception {
 Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null);
 authentication.writeToContext(threadPool.getThreadContext());
-        final TransportCreateTokenAction action = new TransportCreateTokenAction(SETTINGS, threadPool,
+        final TransportCreateTokenAction action = new TransportCreateTokenAction(threadPool,
 mock(TransportService.class), new ActionFilters(Collections.emptySet()), tokenService, authenticationService);
 final CreateTokenRequest createTokenRequest = new CreateTokenRequest();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
index a8e2464805825..5ba8a18dacf3f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
@@ -40,7 +40,7 @@ public void testInternalUser() {
 when(securityContext.getUser()).thenReturn(randomFrom(SystemUser.INSTANCE, XPackUser.INSTANCE));
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
+        TransportAuthenticateAction action = new TransportAuthenticateAction(transportService,
 mock(ActionFilters.class), securityContext);
 final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
@@ -66,7 +66,7 @@ public void testNullUser() {
 SecurityContext securityContext = mock(SecurityContext.class);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
+        TransportAuthenticateAction action = new TransportAuthenticateAction(transportService,
 mock(ActionFilters.class), securityContext);
 final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
@@ -94,7 +94,7 @@ public void testValidUser() {
 when(securityContext.getUser()).thenReturn(user);
 TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
 TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
-        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
+        TransportAuthenticateAction action = new TransportAuthenticateAction(transportService,
 mock(ActionFilters.class), securityContext);
 final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java
index 81616341a882e..8ff5378cbfc22 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUserPrivilegesActionTests.java
@@ -8,7 +8,6 @@
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -49,8 +48,8 @@ public void testBuildResponseObject() {
 .runAs(new Privilege(Sets.newHashSet("user01", "user02"), "user01", "user02"))
 .build();
-        final TransportGetUserPrivilegesAction action = new TransportGetUserPrivilegesAction(Settings.EMPTY,
-            mock(ThreadPool.class), mock(TransportService.class), mock(ActionFilters.class), mock(AuthorizationService.class));
+        final TransportGetUserPrivilegesAction action = new TransportGetUserPrivilegesAction(mock(ThreadPool.class),
+            mock(TransportService.class), mock(ActionFilters.class), mock(AuthorizationService.class));
 final GetUserPrivilegesResponse response = action.buildResponseObject(role);
 assertThat(response.getClusterPrivileges(), containsInAnyOrder("monitor", "manage_watcher"));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
index a2e283e1b36ff..804412339e8a4 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
@@ -72,7 +72,6 @@ public class TransportHasPrivilegesActionTests extends ESTestCase {
 @Before
 public void setup() {
-        final Settings settings = Settings.builder().build();
 user = new User(randomAlphaOfLengthBetween(4, 12));
 final ThreadPool threadPool = mock(ThreadPool.class);
 final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
@@ -103,7 +102,7 @@ public void setup() {
 return null;
 }).when(privilegeStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class));
-        action = new TransportHasPrivilegesAction(settings, threadPool, transportService, mock(ActionFilters.class), authorizationService,
+        action = new TransportHasPrivilegesAction(threadPool, transportService, mock(ActionFilters.class), authorizationService,
 privilegeStore);
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
index 13a7e5c3cf71a..6ce5ad0ccfab2 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.security.audit;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.test.ESTestCase;
@@ -46,7 +45,7 @@ public void init() throws Exception {
 }
 auditTrails = unmodifiableList(auditTrailsBuilder);
 licenseState = mock(XPackLicenseState.class);
-        service = new AuditTrailService(Settings.EMPTY, auditTrails, licenseState);
+        service = new AuditTrailService(auditTrails, licenseState);
 isAuditingAllowed = randomBoolean();
 when(licenseState.isAuditingAllowed()).thenReturn(isAuditingAllowed);
 token = mock(AuthenticationToken.class);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
index 2e91c40677ed6..7551517e23241 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java
@@ -46,12 +46,12 @@ public void testNativeRealmRegistersIndexHealthChangeListener() throws Exception
 verifyZeroInteractions(securityIndex);
 Settings settings = Settings.builder().put("path.home", createTempDir()).build();
-        factories.get(NativeRealmSettings.TYPE).create(new RealmConfig("test", Settings.EMPTY, settings,
-            TestEnvironment.newEnvironment(settings), new ThreadContext(settings)));
+        factories.get(NativeRealmSettings.TYPE).create(new RealmConfig(new RealmConfig.RealmIdentifier(NativeRealmSettings.TYPE, "test"),
+            Settings.EMPTY, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings)));
 verify(securityIndex).addIndexStateListener(isA(BiConsumer.class));
-        factories.get(NativeRealmSettings.TYPE).create(new RealmConfig("test", Settings.EMPTY, settings,
-            TestEnvironment.newEnvironment(settings), new ThreadContext(settings)));
+        factories.get(NativeRealmSettings.TYPE).create(new RealmConfig(new RealmConfig.RealmIdentifier(NativeRealmSettings.TYPE, "test"),
+            Settings.EMPTY, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings)));
 verify(securityIndex, times(2)).addIndexStateListener(isA(BiConsumer.class));
 }
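The `RealmConfig` constructor now takes a `RealmConfig.RealmIdentifier` rather than a bare name string, so a realm is addressed by its (type, name) pair throughout. A minimal sketch of what such an identifier amounts to; only `getName()` is taken from the test code above, the rest is a simplified stand-in:

```java
import java.util.Objects;

// Illustrative stand-in for RealmConfig.RealmIdentifier: a value type keyed on
// (type, name), suitable for use as a map key or for building setting prefixes.
final class RealmIdentifierSketch {
    private final String type;
    private final String name;

    RealmIdentifierSketch(String type, String name) {
        this.type = Objects.requireNonNull(type);
        this.name = Objects.requireNonNull(name);
    }

    String getType() { return type; }
    String getName() { return name; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if ((o instanceof RealmIdentifierSketch) == false) return false;
        RealmIdentifierSketch other = (RealmIdentifierSketch) o;
        return type.equals(other.type) && name.equals(other.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(type, name);
    }
}
```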
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java
index b177d17793f89..7d7fd135349b1 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.security.authc;
+import org.elasticsearch.common.settings.AbstractScopedSettings;
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -12,45 +13,26 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.SecuritySettingsSource;
 import org.elasticsearch.xpack.core.XPackSettings;
-import org.elasticsearch.xpack.core.security.SecurityExtension;
+import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings;
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.stream.Collectors;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.notNullValue;
 public class RealmSettingsTests extends ESTestCase {
 private static final List<String> CACHE_HASHING_ALGOS = Arrays.stream(Hasher.values()).map(Hasher::name).collect(Collectors.toList());
-    public void testRealmWithoutTypeDoesNotValidate() throws Exception {
-        final Settings.Builder builder = baseSettings("x", false);
-        builder.remove("type");
-        assertErrorWithMessage("empty1", "missing realm type", realm("empty1", builder).build());
-    }
-
 public void testRealmWithBlankTypeDoesNotValidate() throws Exception {
-        final Settings.Builder builder = baseSettings("", false);
-        assertErrorWithMessage("empty2", "missing realm type", realm("empty2", builder).build());
-    }
-
-    /**
-     * This test exists because (in 5.x), we want to be backwards compatible and accept custom realms that
-     * have not been updated to explicitly declare their settings.
-     *
-     * @see org.elasticsearch.xpack.core.security.SecurityExtension#getRealmSettings()
-     */
-    public void testRealmWithUnknownTypeAcceptsAllSettings() throws Exception {
-        final Settings.Builder settings = baseSettings("tam", true)
-            .put("ip", "8.6.75.309")
-            .put(randomAlphaOfLengthBetween(4, 8), randomTimeValue());
-        assertSuccess(realm("tam", settings));
+        final Settings.Builder builder = baseSettings(false);
+        assertErrorWithMessage("", "empty", "unknown setting [" + realmPrefix("", "empty"), realm("", "empty", builder).build());
 }
 public void testFileRealmWithAllSettingsValidatesSuccessfully() throws Exception {
@@ -58,8 +40,8 @@ public void testFileRealmWithAllSettingsValidatesSuccessfully() throws Exception
 }
 public void testFileRealmWithUnknownConfigurationDoesNotValidate() throws Exception {
-        final Settings.Builder builder = realm("file2", fileSettings().put("not-valid", randomInt()));
-        assertErrorWithCause("file2", "unknown setting [not-valid]", builder.build());
+        final Settings.Builder builder = realm("file", "file2", fileSettings().put("not-valid", randomInt()));
+        assertErrorWithMessage("file", "file2", "unknown setting [" + realmPrefix("file", "file2") + "not-valid]", builder.build());
 }
 public void testNativeRealmWithAllSettingsValidatesSuccessfully() throws Exception {
@@ -67,8 +49,8 @@ public void testNativeRealmWithAllSettingsValidatesSuccessfully() throws Excepti
 }
 public void testNativeRealmWithUnknownConfigurationDoesNotValidate() throws Exception {
-        final Settings.Builder builder = realm("native2", nativeSettings().put("not-valid", randomAlphaOfLength(10)));
-        assertErrorWithCause("native2", "unknown setting [not-valid]", builder.build());
+        final Settings.Builder builder = realm("native", "native2", nativeSettings().put("not-valid", randomAlphaOfLength(10)));
+        assertErrorWithMessage("native", "native2", "unknown setting [" + realmPrefix("native", "native2") + "not-valid]", builder.build());
 }
 public void testLdapRealmWithUserTemplatesAndGroupAttributesValidatesSuccessfully() throws Exception {
@@ -92,15 +74,8 @@ public void testPkiRealmWithTrustStoreValidatesSuccessfully() throws Exception {
 }
 public void testPkiRealmWithFullSslSettingsDoesNotValidate() throws Exception {
-        final Settings.Builder realm = realm("pki3", configureSsl("", pkiSettings(true), true, true));
-        assertError("pki3", realm.build());
-    }
-
-    public void testPkiRealmWithClosedSecurePasswordValidatesSuccessfully() throws Exception {
-        final Settings.Builder builder = pkiRealm("pki4", true);
-        builder.getSecureSettings().close();
-        final Settings settings = builder.build();
-        assertSuccess(settings);
+        final Settings.Builder realm = realm("pki", "pki3", configureSsl("", pkiSettings(true), true, true));
+        assertError("pki", "pki3", realm.build());
 }
 public void testSettingsWithMultipleRealmsValidatesSuccessfully() throws Exception {
@@ -115,23 +90,23 @@ public void testSettingsWithMultipleRealmsValidatesSuccessfully() throws Excepti
 }
 private Settings.Builder nativeRealm(String name) {
-        return realm(name, nativeSettings());
+        return realm("native", name, nativeSettings());
 }
 private Settings.Builder nativeSettings() {
-        return baseSettings("native", true);
+        return baseSettings(true);
 }
 private Settings.Builder fileRealm(String name) {
-        return realm(name, fileSettings());
+        return realm("file", name, fileSettings());
 }
 private Settings.Builder fileSettings() {
-        return baseSettings("file", true);
+        return baseSettings(true);
 }
 private Settings.Builder ldapRealm(String name, boolean userSearch, boolean groupSearch) {
-        return realm(name, ldapSettings(userSearch, groupSearch));
+        return realm("ldap", name, ldapSettings(userSearch, groupSearch));
 }
 private Settings.Builder ldapSettings(boolean userSearch, boolean groupSearch) {
@@ -140,7 +115,7 @@ private Settings.Builder ldapSettings(boolean userSearch, boolean groupSearch) {
 .put("follow_referrals", randomBoolean());
 SecuritySettingsSource.addSecureSettings(builder, secureSettings -> {
-            secureSettings.setString("bind_password", "t0p_s3cr3t");
+            secureSettings.setString("secure_bind_password", "t0p_s3cr3t");
 });
 if (userSearch) {
@@ -171,7 +146,7 @@ private Settings.Builder ldapSettings(boolean userSearch, boolean groupSearch) {
 }
 private Settings.Builder activeDirectoryRealm(String name, boolean configureSSL) {
-        return realm(name, activeDirectorySettings(configureSSL));
+        return realm("active_directory", name, activeDirectorySettings(configureSSL));
 }
 private Settings.Builder activeDirectorySettings(boolean configureSSL) {
@@ -186,7 +161,7 @@ private Settings.Builder activeDirectorySettings(boolean configureSSL) {
 }
 private Settings.Builder commonLdapSettings(String type, boolean configureSSL) {
-        final Settings.Builder builder = baseSettings(type, true)
+        final Settings.Builder builder = baseSettings(true)
 .putList("url", "ldap://dir1.internal:9876", "ldap://dir2.internal:9876", "ldap://dir3.internal:9876")
 .put("load_balance.type", "round_robin")
 .put("load_balance.cache_ttl", randomTimeValue())
@@ -202,11 +177,11 @@ private Settings.Builder commonLdapSettings(String type, boolean configureSSL) {
 }
 private Settings.Builder pkiRealm(String name, boolean useTrustStore) {
-        return realm(name, pkiSettings(useTrustStore));
+        return realm("pki", name, pkiSettings(useTrustStore));
 }
 private Settings.Builder pkiSettings(boolean useTrustStore) {
-        final Settings.Builder builder = baseSettings("pki", false)
+        final Settings.Builder builder = baseSettings(false)
 .put("username_pattern", "CN=\\D(\\d+)(?:,\\|$)")
 .put("files.role_mapping", "x-pack/" + randomAlphaOfLength(8) + ".yml");
@@ -232,7 +207,7 @@ private Settings.Builder configureSsl(String prefix, Settings.Builder builder, b
 } else {
 builder.put(prefix + "key", "x-pack/ssl/" + randomAlphaOfLength(5) + ".key");
 SecuritySettingsSource.addSecureSettings(builder, secureSettings ->
-                secureSettings.setString(prefix + "secure_key_passphrase", randomAlphaOfLength(32)));
+                    secureSettings.setString(prefix + "secure_key_passphrase", randomAlphaOfLength(32)));
 builder.put(prefix + "certificate", "ssl/" + randomAlphaOfLength(5) + ".cert");
 }
@@ -240,7 +215,7 @@ private Settings.Builder configureSsl(String prefix, Settings.Builder builder, b
 if (useTrustStore) {
 builder.put(prefix + "truststore.path", "x-pack/ssl/" + randomAlphaOfLength(5) + ".jts");
 SecuritySettingsSource.addSecureSettings(builder, secureSettings ->
-                secureSettings.setString(prefix + "truststore.secure_password", randomAlphaOfLength(8)));
+                    secureSettings.setString(prefix + "truststore.secure_password", randomAlphaOfLength(8)));
 } else {
 builder.put(prefix + "certificate_authorities", "ssl/" + randomAlphaOfLength(8) + ".ca");
 }
@@ -252,9 +227,8 @@ private Settings.Builder configureSsl(String prefix, Settings.Builder builder, b
 return builder;
 }
-    private Settings.Builder baseSettings(String type, boolean withCacheSettings) {
+    private Settings.Builder baseSettings(boolean withCacheSettings) {
 final Settings.Builder builder = Settings.builder()
-            .put("type", type)
 .put("order", randomInt())
 .put("enabled", true);
 if (withCacheSettings) {
@@ -265,8 +239,8 @@ private Settings.Builder baseSettings(String type, boolean withCacheSettings) {
 return builder;
 }
-    private Settings.Builder realm(String name, Settings.Builder settings) {
-        final String prefix = realmPrefix(name);
+    private Settings.Builder realm(String type, String name, Settings.Builder settings) {
+        final String prefix = realmPrefix(type, name);
 final MockSecureSettings secureSettings = normaliseSecureSettingPrefix(prefix, settings.getSecureSettings());
 final Settings.Builder builder = Settings.builder().put(settings.normalizePrefix(prefix).build(), false);
 if (secureSettings != null) {
@@ -291,8 +265,8 @@ private MockSecureSettings normaliseSecureSettingPrefix(String prefix, SecureSet
 }
 }
-    private String realmPrefix(String name) {
-        return RealmSettings.PREFIX + name + ".";
+    private String realmPrefix(String type, String name) {
+        return RealmSettings.PREFIX + type + "." + name + ".";
 }
 private void assertSuccess(Settings.Builder builder) {
@@ -300,33 +274,37 @@ private void assertSuccess(Settings.Builder builder) {
 }
 private void assertSuccess(Settings settings) {
-        assertThat(group().get(settings), notNullValue());
+        try {
+            validate(settings);
+        } catch (RuntimeException e) {
+            fail("Settings do not validate: " + e);
+        }
 }
-    private void assertErrorWithCause(String realmName, String message, Settings settings) {
-        final IllegalArgumentException exception = assertError(realmName, settings);
+    private void assertErrorWithCause(String realmType, String realmName, String message, Settings settings) {
+        final IllegalArgumentException exception = assertError(realmType, realmName, settings);
 assertThat(exception.getCause(), notNullValue());
 assertThat(exception.getCause().getMessage(), containsString(message));
 }
-    private void assertErrorWithMessage(String realmName, String message, Settings settings) {
-        final IllegalArgumentException exception = assertError(realmName, settings);
+    private void assertErrorWithMessage(String realmType, String realmName, String message, Settings settings) {
+        final IllegalArgumentException exception = assertError(realmType, realmName, settings);
 assertThat(exception.getMessage(), containsString(message));
 }
-    private IllegalArgumentException assertError(String realmName, Settings settings) {
+    private IllegalArgumentException assertError(String realmType, String realmName, Settings settings) {
 final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
-            () -> group().get(settings)
+            () -> validate(settings)
 );
-        assertThat(exception.getMessage(), containsString(realmPrefix(realmName)));
+        assertThat(exception.getMessage(), containsString(realmPrefix(realmType, realmName)));
 return exception;
 }
-    private Setting<?> group() {
-        final List<Setting<?>> list = new ArrayList<>();
-        final List<SecurityExtension> noExtensions = Collections.emptyList();
-        RealmSettings.addSettings(list, noExtensions);
-        assertThat(list, hasSize(1));
-        return list.get(0);
+    private void validate(Settings settings) {
+        final Set<Setting<?>> settingsSet = new HashSet<>(InternalRealmsSettings.getSettings());
+        final AbstractScopedSettings validator = new AbstractScopedSettings(settings, settingsSet, Collections.emptySet(),
+            Setting.Property.NodeScope) {
+        };
+        validator.validate(settings, false);
 }
 }
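The rewritten test no longer fishes a single grouped setting out of `RealmSettings`; it validates the whole `Settings` object against the set of registered internal realm settings. Condensed from the new `validate(...)` helper above, assuming the Elasticsearch test classpath (`Settings`, `Setting`, `AbstractScopedSettings`, `InternalRealmsSettings`):

```java
Set<Setting<?>> registered = new HashSet<>(InternalRealmsSettings.getSettings());
AbstractScopedSettings validator =
    new AbstractScopedSettings(settings, registered, Collections.emptySet(), Setting.Property.NodeScope) {
    };
// Throws IllegalArgumentException for any unknown xpack.security.authc.realms.* key.
validator.validate(settings, false);
```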
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
index c5fbb39fee627..c37d6913d1fd2 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
@@ -83,8 +83,7 @@ public void testWithSettings() throws Exception {
 Collections.shuffle(orders, random());
 Map orderToIndex = new HashMap<>();
 for (int i = 0; i < randomRealmTypesCount; i++) {
-            builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i);
-            builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i));
+            builder.put("xpack.security.authc.realms.type_" + i + ".realm_" + i + ".order", orders.get(i));
 orderToIndex.put(orders.get(i), i);
 }
 Settings settings = builder.build();
@@ -119,11 +118,10 @@ public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception
 TreeMap nameToRealmId = new TreeMap<>();
 for (int i = 0; i < randomRealmTypesCount; i++) {
 int randomizedRealmId = randomSeq.get(i);
-            String randomizedRealmName = randomAlphaOfLengthBetween(12,32);
+            String randomizedRealmName = randomAlphaOfLengthBetween(12, 32);
 nameToRealmId.put("realm_" + randomizedRealmName, randomizedRealmId);
-            builder.put("xpack.security.authc.realms.realm_" + randomizedRealmName + ".type", "type_" + randomizedRealmId);
 // set same order for all realms
-            builder.put("xpack.security.authc.realms.realm_" + randomizedRealmName + ".order", 1);
+            builder.put("xpack.security.authc.realms.type_" + randomizedRealmId + ".realm_" + randomizedRealmName + ".order", 1);
 }
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
@@ -148,10 +146,8 @@ public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception
 public void testWithSettingsWithMultipleInternalRealmsOfSameType() throws Exception {
 Settings settings = Settings.builder()
-            .put("xpack.security.authc.realms.realm_1.type", FileRealmSettings.TYPE)
-            .put("xpack.security.authc.realms.realm_1.order", 0)
-            .put("xpack.security.authc.realms.realm_2.type", FileRealmSettings.TYPE)
-            .put("xpack.security.authc.realms.realm_2.order", 1)
+            .put("xpack.security.authc.realms.file.realm_1.order", 0)
+            .put("xpack.security.authc.realms.file.realm_2.order", 1)
 .put("path.home", createTempDir())
 .build();
 Environment env = TestEnvironment.newEnvironment(settings);
@@ -191,8 +187,7 @@ public void testUnlicensedWithOnlyCustomRealms() throws Exception {
 Collections.shuffle(orders, random());
 Map orderToIndex = new HashMap<>();
 for (int i = 0; i < randomRealmTypesCount; i++) {
-            builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i);
-            builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i));
+            builder.put("xpack.security.authc.realms.type_" + i + ".realm_" + i + ".order", orders.get(i));
 orderToIndex.put(orders.get(i), i);
 }
 Settings settings = builder.build();
@@ -252,13 +247,11 @@ public void testUnlicensedWithInternalRealms() throws Exception {
 assertThat(factories.get("type_0"), notNullValue());
 Settings.Builder builder = Settings.builder()
 .put("path.home", createTempDir())
-            .put("xpack.security.authc.realms.foo.type", "ldap")
-            .put("xpack.security.authc.realms.foo.order", "0")
-            .put("xpack.security.authc.realms.custom.type", "type_0")
-            .put("xpack.security.authc.realms.custom.order", "1");
+            .put("xpack.security.authc.realms.ldap.foo.order", "0")
+            .put("xpack.security.authc.realms.type_0.custom.order", "1");
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
-        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm );
+        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
 Iterator iter = realms.iterator();
 assertThat(iter.hasNext(), is(true));
 Realm realm = iter.next();
@@ -282,7 +275,7 @@ public void testUnlicensedWithInternalRealms() throws Exception {
 i = 0;
 while (iter.hasNext()) {
 realm = iter.next();
-            assertThat(realm.getType(), is("ldap"));
+            assertThat(realm.type(), is("ldap"));
 i++;
 }
 assertThat(i, is(1));
@@ -303,15 +296,13 @@ public void testUnlicensedWithInternalRealms() throws Exception {
 assertThat(iter.hasNext(), is(false));
 }
-    public void testUnlicensedWithNativeRealmSettingss() throws Exception {
+    public void testUnlicensedWithNativeRealmSettings() throws Exception {
 factories.put(LdapRealmSettings.LDAP_TYPE, config -> new DummyRealm(LdapRealmSettings.LDAP_TYPE, config));
 final String type = randomFrom(FileRealmSettings.TYPE, NativeRealmSettings.TYPE);
 Settings.Builder builder = Settings.builder()
 .put("path.home", createTempDir())
-            .put("xpack.security.authc.realms.foo.type", "ldap")
-            .put("xpack.security.authc.realms.foo.order", "0")
-            .put("xpack.security.authc.realms.native.type", type)
-            .put("xpack.security.authc.realms.native.order", "1");
+            .put("xpack.security.authc.realms.ldap.foo.order", "0")
+            .put("xpack.security.authc.realms." + type + ".native.order", "1");
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
 Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
@@ -343,8 +334,7 @@ public void testUnlicensedWithNonStandardRealms() throws Exception {
 factories.put(selectedRealmType, config -> new DummyRealm(selectedRealmType, config));
 Settings.Builder builder = Settings.builder()
 .put("path.home", createTempDir())
-            .put("xpack.security.authc.realms.foo.type", selectedRealmType)
-            .put("xpack.security.authc.realms.foo.order", "0");
+            .put("xpack.security.authc.realms." + selectedRealmType + ".foo.order", "0");
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
 Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
@@ -394,10 +384,9 @@ public void testDisabledRealmsAreNotAdded() throws Exception {
 Collections.shuffle(orders, random());
 Map orderToIndex = new HashMap<>();
 for (int i = 0; i < randomRealmTypesCount; i++) {
-            builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i);
-            builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i));
+            builder.put("xpack.security.authc.realms.type_" + i + ".realm_" + i + ".order", orders.get(i));
 boolean enabled = randomBoolean();
-            builder.put("xpack.security.authc.realms.realm_" + i + ".enabled", enabled);
+            builder.put("xpack.security.authc.realms.type_" + i + ".realm_" + i + ".enabled", enabled);
 if (enabled) {
 orderToIndex.put(orders.get(i), i);
 logger.error("put [{}] -> [{}]", orders.get(i), i);
@@ -405,7 +394,7 @@ public void testDisabledRealmsAreNotAdded() throws Exception {
 }
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
-        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm );
+        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
 Iterator iterator = realms.iterator();
 Realm realm = iterator.next();
 assertThat(realm, is(reservedRealm));
@@ -438,11 +427,10 @@ public void testDisabledRealmsAreNotAdded() throws Exception {
 public void testAuthcAuthzDisabled() throws Exception {
 Settings settings = Settings.builder()
 .put("path.home", createTempDir())
-            .put("xpack.security.authc.realms.realm_1.type", FileRealmSettings.TYPE)
-            .put("xpack.security.authc.realms.realm_1.order", 0)
+            .put("xpack.security.authc.realms." + FileRealmSettings.TYPE + ".realm_1.order", 0)
 .build();
 Environment env = TestEnvironment.newEnvironment(settings);
-        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm );
+        Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
 assertThat(realms.iterator().hasNext(), is(true));
@@ -454,10 +442,8 @@ public void testUsageStats() throws Exception {
 // test realms with duplicate values
 Settings.Builder builder = Settings.builder()
 .put("path.home", createTempDir())
-            .put("xpack.security.authc.realms.foo.type", "type_0")
-            .put("xpack.security.authc.realms.foo.order", "0")
-            .put("xpack.security.authc.realms.bar.type", "type_0")
-            .put("xpack.security.authc.realms.bar.order", "1");
+            .put("xpack.security.authc.realms.type_0.foo.order", "0")
+            .put("xpack.security.authc.realms.type_0.bar.order", "1");
 Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
 Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm);
@@ -525,10 +511,8 @@ public void testUsageStats() throws Exception {
 public void testInitRealmsFailsForMultipleKerberosRealms() throws IOException {
 final Settings.Builder builder = Settings.builder().put("path.home", createTempDir());
-        builder.put("xpack.security.authc.realms.realm_1.type", "kerberos");
-        builder.put("xpack.security.authc.realms.realm_1.order", 1);
-        builder.put("xpack.security.authc.realms.realm_2.type", "kerberos");
-        builder.put("xpack.security.authc.realms.realm_2.order", 2);
+        builder.put("xpack.security.authc.realms.kerberos.realm_1.order", 1);
+        builder.put("xpack.security.authc.realms.kerberos.realm_2.order", 2);
 final Settings settings = builder.build();
 Environment env = TestEnvironment.newEnvironment(settings);
 final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
@@ -540,7 +524,7 @@ public void testInitRealmsFailsForMultipleKerberosRealms() throws IOException {
 static class DummyRealm extends Realm {
 DummyRealm(String type, RealmConfig config) {
-            super(type, config);
+            super(config);
 }
 @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java
index 56131c87001d1..ff632b21995af 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java
@@ -32,8 +32,8 @@ public void testCacheClearOnIndexHealthChange() {
 final AtomicInteger numInvalidation = new AtomicInteger(0);
 int expectedInvalidation = 0;
 Settings settings = Settings.builder().put("path.home", createTempDir()).build();
-        RealmConfig config = new RealmConfig("native", Settings.EMPTY, settings, TestEnvironment.newEnvironment(settings),
-            new ThreadContext(settings));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("native", "native"), Settings.EMPTY,
+            settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings));
 final NativeRealm nativeRealm = new NativeRealm(config, mock(NativeUsersStore.class), threadPool) {
 @Override
 void clearCache() {
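The RealmsTests hunks above all perform the same key migration: the realm type moves out of a `type` setting and into the key itself, so each realm needs one fewer line of configuration. Side by side, using the test's own example values (assumes the Elasticsearch `Settings` API):

```java
// 6.x style: the realm type was itself a setting value.
Settings old = Settings.builder()
    .put("xpack.security.authc.realms.realm_1.type", "file")
    .put("xpack.security.authc.realms.realm_1.order", 0)
    .build();

// New style: the type is part of the key, so a "type" setting is no longer needed.
Settings current = Settings.builder()
    .put("xpack.security.authc.realms.file.realm_1.order", 0)
    .build();
```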
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java
index 1310980fc5f7c..ba6f918819aa0 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java
@@ -15,6 +15,7 @@
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.user.User;
@@ -42,6 +43,8 @@
 public class FileRealmTests extends ESTestCase {
+    private static final RealmConfig.RealmIdentifier REALM_IDENTIFIER = new RealmConfig.RealmIdentifier("file", "file-test");
+
 private static final Answer VERIFY_PASSWORD_ANSWER = inv -> {
 assertThat(inv.getArguments().length, is(3));
 Supplier supplier = (Supplier) inv.getArguments()[2];
@@ -69,8 +72,7 @@ public void testAuthenticate() throws Exception {
 when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class)))
 .thenAnswer(VERIFY_PASSWORD_ANSWER);
 when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" });
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore, threadPool);
 PlainActionFuture future = new PlainActionFuture<>();
 realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future);
@@ -84,12 +86,17 @@ public void testAuthenticate() throws Exception {
 assertThat(user.roles(), arrayContaining("role1", "role2"));
 }
+    private RealmConfig getRealmConfig(Settings settings) {
+        return new RealmConfig(REALM_IDENTIFIER,
+            mergeSettings(settings, globalSettings),
+            TestEnvironment.newEnvironment(globalSettings), threadContext);
+    }
+
 public void testAuthenticateCaching() throws Exception {
 Settings settings = Settings.builder()
-            .put("cache.hash_algo", Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT))
-            .build();
-        RealmConfig config = new RealmConfig("file-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+            .put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "cache.hash_algo",
+                Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)).build();
+        RealmConfig config = getRealmConfig(settings);
 when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class)))
 .thenAnswer(VERIFY_PASSWORD_ANSWER);
 when(userRolesStore.roles("user1")).thenReturn(new String[]{"role1", "role2"});
@@ -104,8 +111,7 @@ public void testAuthenticateCaching() throws Exception {
 }
 public void testAuthenticateCachingRefresh() throws Exception {
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 userPasswdStore = spy(new UserPasswdStore(config));
 userRolesStore = spy(new UserRolesStore(config));
 when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class)))
@@ -144,8 +150,7 @@ public void testAuthenticateCachingRefresh() throws Exception {
 }
 public void testToken() throws Exception {
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class)))
 .thenAnswer(VERIFY_PASSWORD_ANSWER);
 when(userRolesStore.roles("user1")).thenReturn(new String[]{"role1", "role2"});
@@ -164,8 +169,7 @@ public void testToken() throws Exception {
 public void testLookup() throws Exception {
 when(userPasswdStore.userExists("user1")).thenReturn(true);
 when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" });
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore, threadPool);
 PlainActionFuture future = new PlainActionFuture<>();
@@ -182,8 +186,7 @@ public void testLookup() throws Exception {
 public void testLookupCaching() throws Exception {
 when(userPasswdStore.userExists("user1")).thenReturn(true);
 when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" });
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore, threadPool);
 PlainActionFuture future = new PlainActionFuture<>();
@@ -198,8 +201,7 @@ public void testLookupCaching() throws Exception {
 }
 public void testLookupCachingWithRefresh() throws Exception {
-        RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(Settings.EMPTY);
 userPasswdStore = spy(new UserPasswdStore(config));
 userRolesStore = spy(new UserRolesStore(config));
 doReturn(true).when(userPasswdStore).userExists("user1");
@@ -243,17 +245,16 @@ public void testUsageStats() throws Exception {
 Settings.Builder settings = Settings.builder();
 int order = randomIntBetween(0, 10);
-        settings.put("order", order);
+        settings.put(RealmSettings.realmSettingPrefix(REALM_IDENTIFIER) + "order", order);
-        RealmConfig config = new RealmConfig("file-realm", settings.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            threadContext);
+        RealmConfig config = getRealmConfig(settings.build());
 FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore, threadPool);
 PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>();
 realm.usageStats(future);
 Map<String, Object> usage = future.get();
 assertThat(usage, is(notNullValue()));
-        assertThat(usage, hasEntry("name", "file-realm"));
+        assertThat(usage, hasEntry("name", REALM_IDENTIFIER.getName()));
 assertThat(usage, hasEntry("order", order));
 assertThat(usage, hasEntry("size", userCount));
 }
@@ -269,4 +270,9 @@ static class UserRolesStore extends FileUserRolesStore {
 super(config, mock(ResourceWatcherService.class));
 }
 }
+
+    private Settings mergeSettings(Settings local, Settings global) {
+        return Settings.builder().put(global).put(local).build();
+    }
+
 }
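The `mergeSettings` helper added at the end of FileRealmTests is worth calling out, since the SAML tests earlier rely on the same idea: under the new scheme a realm's settings live inside the node settings, so the tests layer the realm-specific keys over the global ones. Because the local values are applied last, they win on any key collision:

```java
// Mirrors the mergeSettings helper above (Elasticsearch Settings API assumed).
Settings merged = Settings.builder()
    .put(globalSettings)   // e.g. path.home, password-hashing defaults
    .put(realmSettings)    // fully-qualified xpack.security.authc.realms.* keys
    .build();
```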
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java
index 3acb4888d7834..29ee447ebae10 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java
@@ -76,7 +76,7 @@ public void testStore_ConfiguredWithUnreadableFile() throws Exception {
 Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16);
 Settings fileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("files.users", file.toAbsolutePath()).build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext());
+        RealmConfig config = getRealmConfig(fileSettings);
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 FileUserPasswdStore store = new FileUserPasswdStore(config, watcherService);
 assertThat(store.usersCount(), is(0));
@@ -90,7 +90,7 @@ public void testStore_AutoReload() throws Exception {
 Files.copy(users, file, StandardCopyOption.REPLACE_EXISTING);
 final Hasher hasher = Hasher.resolve(settings.get("xpack.security.authc.password_hashing.algorithm"));
 Settings fileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("files.users", file.toAbsolutePath()).build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext());
+        RealmConfig config = getRealmConfig(fileSettings);
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 final CountDownLatch latch = new CountDownLatch(1);
@@ -120,6 +120,11 @@ public void testStore_AutoReload() throws Exception {
 assertThat(result.getUser(), is(user));
 }
+    private RealmConfig getRealmConfig(Settings fileSettings) {
+        final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("file", "file-test");
+        return new RealmConfig(identifier, fileSettings, settings, env, threadPool.getThreadContext());
+    }
+
 public void testStore_AutoReload_WithParseFailures() throws Exception {
 Path users = getDataPath("users");
 Path xpackConf = env.configFile();
@@ -131,7 +136,7 @@ public void testStore_AutoReload_WithParseFailures() throws Exception {
 .put("files.users", testUsers.toAbsolutePath())
 .build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext());
+        RealmConfig config = getRealmConfig(fileSettings);
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 final CountDownLatch latch = new CountDownLatch(1);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java
index 8e4011b1159d5..f69416f5115ae 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java
@@ -78,7 +78,8 @@ public void testStore_ConfiguredWithUnreadableFile() throws Exception {
 .put("files.users_roles", file.toAbsolutePath())
 .build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("file", "file-test"), fileSettings, settings, env,
+            new ThreadContext(Settings.EMPTY));
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 FileUserRolesStore store = new FileUserRolesStore(config, watcherService);
 assertThat(store.entriesCount(), is(0));
@@ -93,7 +94,8 @@ public void testStoreAutoReload() throws Exception {
 .put("files.users_roles", tmp.toAbsolutePath())
 .build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("file", "file-test"), fileSettings, settings, env,
+            new ThreadContext(Settings.EMPTY));
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 final CountDownLatch latch = new CountDownLatch(1);
@@ -131,7 +133,8 @@ public void testStoreAutoReloadWithParseFailure() throws Exception {
 .put("files.users_roles", tmp.toAbsolutePath())
 .build();
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("file", "file-test"), fileSettings, settings, env,
+            new ThreadContext(Settings.EMPTY));
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 final CountDownLatch latch = new CountDownLatch(1);
@@ -224,7 +227,8 @@ public void testParseFileEmptyRolesDoesNotCauseNPE() throws Exception {
 .build();
 Environment env = TestEnvironment.newEnvironment(settings);
-        RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("file", "file-test"), fileSettings, settings, env,
+            new ThreadContext(Settings.EMPTY));
 ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool);
 FileUserRolesStore store = new FileUserRolesStore(config, watcherService);
 assertThat(store.roles("user"), equalTo(Strings.EMPTY_ARRAY));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java
index dcb087ff147c8..cccf590a8bd4c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java
@@ -18,10 +18,11 @@
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
-import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
 import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
 import org.ietf.jgss.GSSException;
+import javax.security.auth.login.LoginException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Path;
 import java.util.Collections;
@@ -29,8 +30,6 @@
 import java.util.List;
java.util.Map; -import javax.security.auth.login.LoginException; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -60,8 +59,8 @@ public void testAuthenticateDifferentFailureScenarios() throws LoginException, G final boolean throwExceptionForInvalidTicket = validTicket ? false : randomBoolean(); final boolean throwLoginException = randomBoolean(); final byte[] decodedTicket = randomByteArrayOfLength(5); - final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); - final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH)); + final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE); if (validTicket) { mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, outToken), null); } else { @@ -122,16 +121,16 @@ public void testAuthenticateDifferentFailureScenarios() throws LoginException, G public void testDelegatedAuthorizationFailedToResolve() throws Exception { final String username = randomPrincipalName(); - final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, - TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig(new RealmConfig.RealmIdentifier("mock", "other_realm"), + Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); final User lookupUser = new User(randomAlphaOfLength(5)); otherRealm.registerUser(lookupUser); settings = Settings.builder().put(settings).putList("authorization_realms", "other_realm").build(); final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username); final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8); - final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings())); - final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings()); + final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH)); + final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE); mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null); final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index 2bef16883bbbf..c3d6c5ae07e0e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; import org.ietf.jgss.GSSException; +import javax.security.auth.login.LoginException; import java.io.IOException; 
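NOTE (editor): the change repeated across every test file in this part of the patch is the same mechanical one: a realm is no longer identified by a bare name string, and realm-scoped settings are no longer read straight off a Settings object. A minimal sketch of the before/after shape, using only the constructor and accessor signatures visible in the hunks above (the "file"/"file-test" identifier and the fileSettings/settings/env fixtures mirror the test code; this is an illustration, not part of the patch):

---------------------------------------------------------------------------
// Old style: name-only identity, setting read directly from the realm's Settings:
//   RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadContext);
//   boolean debug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());

// New style: the identity is a (type, name) pair, and the RealmConfig resolves
// the realm-scoped setting itself.
RealmConfig.RealmIdentifier id = new RealmConfig.RealmIdentifier("file", "file-test");
RealmConfig config = new RealmConfig(id, fileSettings, settings, env, new ThreadContext(Settings.EMPTY));
boolean debug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
---------------------------------------------------------------------------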
 import java.nio.file.Path;
 import java.util.Arrays;
@@ -22,8 +23,6 @@
 import java.util.List;
 import java.util.Map;
 
-import javax.security.auth.login.LoginException;
-
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.sameInstance;
@@ -47,8 +46,8 @@ public void testAuthenticateWithCache() throws LoginException, GSSException {
         metadata.put(KerberosRealm.KRB_METADATA_UPN_KEY, username);
         final User expectedUser = new User(expectedUsername, roles.toArray(new String[roles.size()]), null, null, metadata, true);
         final byte[] decodedTicket = randomByteArrayOfLength(10);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, outToken), null);
         final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket);
@@ -73,8 +72,8 @@ public void testCacheInvalidationScenarios() throws LoginException, GSSException
         final String authNUsername = randomFrom(userNames);
         final byte[] decodedTicket = randomByteArrayOfLength(10);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(authNUsername, outToken), null);
         final String expectedUsername = maybeRemoveRealmName(authNUsername);
         final Map metadata = new HashMap<>();
@@ -110,9 +109,8 @@ public void testCacheInvalidationScenarios() throws LoginException, GSSException
     public void testAuthenticateWithValidTicketSucessAuthnWithUserDetailsWhenCacheDisabled() throws LoginException, GSSException,
             IOException {
         // if cache.ttl <= 0 then the cache is disabled
-        settings = buildKerberosRealmSettings(
-            writeKeyTab(dir.resolve("key.keytab"), randomAlphaOfLength(4)).toString(), 100, "0m", true,
-            randomBoolean());
+        settings = buildKerberosRealmSettings(REALM_NAME,
+            writeKeyTab(dir.resolve("key.keytab"), randomAlphaOfLength(4)).toString(), 100, "0m", true, randomBoolean());
         final String username = randomPrincipalName();
         final String outToken = randomAlphaOfLength(10);
         final KerberosRealm kerberosRealm = createKerberosRealm(username);
@@ -123,8 +121,8 @@ public void testAuthenticateWithValidTicketSucessAuthnWithUserDetailsWhenCacheDi
         metadata.put(KerberosRealm.KRB_METADATA_UPN_KEY, username);
         final User expectedUser = new User(expectedUsername, roles.toArray(new String[roles.size()]), null, null, metadata, true);
         final byte[] decodedTicket = randomByteArrayOfLength(10);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, outToken), null);
         final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmSettingsTests.java
index 55687d5188842..eb1e32037d5ef 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmSettingsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmSettingsTests.java
@@ -8,7 +8,11 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig.RealmIdentifier;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 
 import java.io.IOException;
@@ -32,15 +36,18 @@ public void testKerberosRealmSettings() throws IOException {
         final String cacheTTL = randomLongBetween(10L, 100L) + "m";
         final boolean enableDebugLogs = randomBoolean();
         final boolean removeRealmName = randomBoolean();
-        final Settings settings = KerberosRealmTestCase.buildKerberosRealmSettings(keytabPathConfig, maxUsers, cacheTTL, enableDebugLogs,
-            removeRealmName);
-
-        assertThat(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings), equalTo(keytabPathConfig));
-        assertThat(KerberosRealmSettings.CACHE_TTL_SETTING.get(settings),
+        final Settings settings = KerberosRealmTestCase.buildKerberosRealmSettings(KerberosRealmTestCase.REALM_NAME,
+            keytabPathConfig, maxUsers, cacheTTL, enableDebugLogs, removeRealmName);
+        final RealmIdentifier identifier = new RealmIdentifier(KerberosRealmSettings.TYPE, KerberosRealmTestCase.REALM_NAME);
+        final RealmConfig config = new RealmConfig(identifier,
+            settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings));
+
+        assertThat(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH), equalTo(keytabPathConfig));
+        assertThat(config.getSetting(KerberosRealmSettings.CACHE_TTL_SETTING),
             equalTo(TimeValue.parseTimeValue(cacheTTL, KerberosRealmSettings.CACHE_TTL_SETTING.getKey())));
-        assertThat(KerberosRealmSettings.CACHE_MAX_USERS_SETTING.get(settings), equalTo(maxUsers));
-        assertThat(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(settings), is(enableDebugLogs));
-        assertThat(KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.get(settings), is(removeRealmName));
+        assertThat(config.getSetting(KerberosRealmSettings.CACHE_MAX_USERS_SETTING), equalTo(maxUsers));
+        assertThat(config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE), is(enableDebugLogs));
+        assertThat(config.getSetting(KerberosRealmSettings.SETTING_REMOVE_REALM_NAME), is(removeRealmName));
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java
index 4c0b77e320abd..43e5fb216399d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
 import org.elasticsearch.xpack.core.security.authc.Realm;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.support.Exceptions;
 import org.elasticsearch.xpack.core.security.user.User;
@@ -57,6 +58,8 @@ public abstract class KerberosRealmTestCase extends ESTestCase {
+    protected static final String REALM_NAME = "test-kerb-realm";
+
     protected Path dir;
     protected ThreadPool threadPool;
     protected Settings globalSettings;
@@ -76,8 +79,8 @@ public void setup() throws Exception {
         resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool);
         dir = createTempDir();
         globalSettings = Settings.builder().put("path.home", dir).build();
-        settings = buildKerberosRealmSettings(writeKeyTab(dir.resolve("key.keytab"), "asa").toString(),
-            100, "10m", true, randomBoolean());
+        settings = buildKerberosRealmSettings(REALM_NAME,
+            writeKeyTab(dir.resolve("key.keytab"), "asa").toString(), 100, "10m", true, randomBoolean());
         licenseState = mock(XPackLicenseState.class);
         when(licenseState.isAuthorizationRealmAllowed()).thenReturn(true);
     }
@@ -89,7 +92,7 @@ public void shutdown() throws InterruptedException {
     }
 
     protected void mockKerberosTicketValidator(final byte[] decodedTicket, final Path keytabPath, final boolean krbDebug,
-            final Tuple value, final Exception e) {
+                                               final Tuple value, final Exception e) {
         assert value != null || e != null;
         doAnswer((i) -> {
             ActionListener<Tuple<String, String>> listener = (ActionListener<Tuple<String, String>>) i.getArguments()[3];
@@ -109,7 +112,7 @@ protected void assertSuccessAuthenticationResult(final User expectedUser, final
         final Map<String, List<String>> responseHeaders = threadPool.getThreadContext().getResponseHeaders();
         assertThat(responseHeaders, is(notNullValue()));
         assertThat(responseHeaders.get(KerberosAuthenticationToken.WWW_AUTHENTICATE).get(0),
-            is(equalTo(KerberosAuthenticationToken.NEGOTIATE_AUTH_HEADER_PREFIX + outToken)));
+                is(equalTo(KerberosAuthenticationToken.NEGOTIATE_AUTH_HEADER_PREFIX + outToken)));
     }
 
     protected KerberosRealm createKerberosRealm(final String... userForRoleMapping) {
@@ -117,17 +120,25 @@ protected KerberosRealm createKerberosRealm(final String... userForRoleMapping)
     }
 
     protected KerberosRealm createKerberosRealm(final List delegatedRealms, final String... userForRoleMapping) {
-        config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            new ThreadContext(globalSettings));
+        final RealmConfig.RealmIdentifier id = new RealmConfig.RealmIdentifier(KerberosRealmSettings.TYPE, REALM_NAME);
+        config = new RealmConfig(id, merge(id, settings, globalSettings),
+            TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
         mockNativeRoleMappingStore = roleMappingStore(Arrays.asList(userForRoleMapping));
         mockKerberosTicketValidator = mock(KerberosTicketValidator.class);
         final KerberosRealm kerberosRealm =
-            new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null);
+                new KerberosRealm(config, mockNativeRoleMappingStore, mockKerberosTicketValidator, threadPool, null);
         Collections.shuffle(delegatedRealms, random());
         kerberosRealm.initialize(delegatedRealms, licenseState);
         return kerberosRealm;
     }
 
+    private Settings merge(RealmConfig.RealmIdentifier identifier, Settings realmSettings, Settings globalSettings) {
+        return Settings.builder().put(realmSettings)
+            .normalizePrefix(RealmSettings.realmSettingPrefix(identifier))
+            .put(globalSettings)
+            .build();
+    }
+
     @SuppressWarnings("unchecked")
     protected NativeRoleMappingStore roleMappingStore(final List userNames) {
         final List expectedUserNames = userNames.stream().map(this::maybeRemoveRealmName).collect(Collectors.toList());
@@ -145,7 +156,7 @@ protected NativeRoleMappingStore roleMappingStore(final List userNames)
                 listener.onResponse(roles);
             } else {
                 listener.onFailure(
-                    Exceptions.authorizationError("Expected UPN '" + expectedUserNames + "' but was '" + userData.getUsername() + "'"));
+                        Exceptions.authorizationError("Expected UPN '" + expectedUserNames + "' but was '" + userData.getUsername() + "'"));
             }
             return null;
         }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class));
@@ -175,7 +186,11 @@ protected String randomPrincipalName() {
      * @return username after removal of realm
      */
     protected String maybeRemoveRealmName(final String principalName) {
-        if (KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.get(settings)) {
+        return maybeRemoveRealmName(REALM_NAME, principalName);
+    }
+
+    protected String maybeRemoveRealmName(String realmName, final String principalName) {
+        if (KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.getConcreteSettingForNamespace(realmName).get(settings)) {
             int foundAtIndex = principalName.indexOf('@');
             if (foundAtIndex > 0) {
                 return principalName.substring(0, foundAtIndex);
@@ -218,27 +233,39 @@ public static Path writeKeyTab(final Path keytabPath, final String content) thro
      * @param keytabPath key tab file path
      * @return {@link Settings} for kerberos realm
     */
-    public static Settings buildKerberosRealmSettings(final String keytabPath) {
-        return buildKerberosRealmSettings(keytabPath, 100, "10m", true, false);
+    public static Settings buildKerberosRealmSettings(final String realmName, final String keytabPath) {
+        return buildKerberosRealmSettings(realmName, keytabPath, 100, "10m", true, false);
+    }
+
+    public static Settings buildKerberosRealmSettings(String realmName, String keytabPath, int maxUsersInCache, String cacheTTL,
+                                                      boolean enableDebugging, boolean removeRealmName) {
+        final Settings global = Settings.builder().put("path.home", createTempDir()).build();
+        return buildKerberosRealmSettings(realmName, keytabPath, maxUsersInCache, cacheTTL, enableDebugging, removeRealmName, global);
     }
 
     /**
      * Build kerberos realm settings
      *
-     * @param keytabPath key tab file path
+     * @param realmName the name of the realm to configure
+     * @param keytabPath key tab file path
     * @param maxUsersInCache max users to be maintained in cache
-     * @param cacheTTL time to live for cached entries
+     * @param cacheTTL        time to live for cached entries
     * @param enableDebugging for krb5 logs
     * @param removeRealmName {@code true} if we want to remove realm name from the username of form 'user@REALM'
+     * @param globalSettings Any global settings to include
     * @return {@link Settings} for kerberos realm
     */
-    public static Settings buildKerberosRealmSettings(final String keytabPath, final int maxUsersInCache, final String cacheTTL,
-            final boolean enableDebugging, final boolean removeRealmName) {
-        final Settings.Builder builder = Settings.builder().put(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.getKey(), keytabPath)
-            .put(KerberosRealmSettings.CACHE_MAX_USERS_SETTING.getKey(), maxUsersInCache)
-            .put(KerberosRealmSettings.CACHE_TTL_SETTING.getKey(), cacheTTL)
-            .put(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.getKey(), enableDebugging)
-            .put(KerberosRealmSettings.SETTING_REMOVE_REALM_NAME.getKey(), removeRealmName);
+
+    public static Settings buildKerberosRealmSettings(String realmName, String keytabPath, int maxUsersInCache, String cacheTTL,
+                                                      boolean enableDebugging, boolean removeRealmName, Settings globalSettings) {
+        final Settings.Builder builder = Settings.builder()
+            .put(RealmSettings.getFullSettingKey(realmName, KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH), keytabPath)
+            .put(RealmSettings.getFullSettingKey(realmName, KerberosRealmSettings.CACHE_MAX_USERS_SETTING), maxUsersInCache)
+            .put(RealmSettings.getFullSettingKey(realmName, KerberosRealmSettings.CACHE_TTL_SETTING), cacheTTL)
+            .put(RealmSettings.getFullSettingKey(realmName, KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE), enableDebugging)
+            .put(RealmSettings.getFullSettingKey(realmName, KerberosRealmSettings.SETTING_REMOVE_REALM_NAME), removeRealmName)
+            .put(globalSettings);
         return builder.build();
     }
+
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java
index 3c7c3d3473f76..8d6869404bbaf 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java
@@ -15,16 +15,17 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.env.TestEnvironment;
-import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
+import org.elasticsearch.xpack.core.security.user.User;
 import org.elasticsearch.xpack.security.authc.support.MockLookupRealm;
 import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData;
 import org.ietf.jgss.GSSException;
 
+import javax.security.auth.login.LoginException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.SeekableByteChannel;
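NOTE (editor): the rewritten buildKerberosRealmSettings above no longer writes bare setting keys such as KerberosRealmSettings.CACHE_TTL_SETTING.getKey(); every key is namespaced to a realm through RealmSettings.getFullSettingKey(realmName, setting), and the realm name is now a required first argument. A hedged usage sketch — the literal values are illustrative, and the exact key prefix that getFullSettingKey produces is an implementation detail of RealmSettings not shown in this excerpt:

---------------------------------------------------------------------------
// Build settings for a hypothetical realm named "test-kerb-realm".
Settings settings = KerberosRealmTestCase.buildKerberosRealmSettings(
    "test-kerb-realm",     // realm name; used to namespace every key below
    "/tmp/key.keytab",     // keytab path (illustrative)
    100,                   // max users in cache
    "10m",                 // cache TTL
    true,                  // enable krb5 debug logs
    false);                // keep the realm suffix on usernames

// Each entry now lives under the realm-scoped key returned by
// RealmSettings.getFullSettingKey("test-kerb-realm", KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH),
// rather than under the bare KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.getKey().
---------------------------------------------------------------------------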
@@ -43,8 +44,7 @@
 import java.util.Map;
 import java.util.Set;
 
-import javax.security.auth.login.LoginException;
-
+import static org.elasticsearch.xpack.security.authc.kerberos.KerberosRealmTestCase.buildKerberosRealmSettings;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
@@ -78,8 +78,8 @@ public void testAuthenticateWithValidTicketSucessAuthnWithUserDetails() throws L
         metadata.put(KerberosRealm.KRB_METADATA_UPN_KEY, username);
         final User expectedUser = new User(expectedUsername, roles.toArray(new String[roles.size()]), null, null, metadata, true);
         final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null);
         final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket);
@@ -98,8 +98,8 @@ public void testFailedAuthorization() throws LoginException, GSSException {
         final String username = randomPrincipalName();
         final KerberosRealm kerberosRealm = createKerberosRealm(username);
         final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>("does-not-exist@REALM", "out-token"), null);
 
         final PlainActionFuture future = new PlainActionFuture<>();
@@ -160,9 +160,10 @@ public void testKerberosRealmThrowsErrorWhenKeytabFileHasNoReadPermissions() thr
     }
 
     private void assertKerberosRealmConstructorFails(final String keytabPath, final String expectedErrorMessage) {
-        settings = buildKerberosRealmSettings(keytabPath, 100, "10m", true, randomBoolean());
-        config = new RealmConfig("test-kerb-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            new ThreadContext(globalSettings));
+        final String realmName = "test-kerb-realm";
+        settings = buildKerberosRealmSettings(realmName, keytabPath, 100, "10m", true, randomBoolean(), globalSettings);
+        config = new RealmConfig(new RealmConfig.RealmIdentifier(KerberosRealmSettings.TYPE, realmName), settings,
+            TestEnvironment.newEnvironment(settings), new ThreadContext(settings));
         mockNativeRoleMappingStore = roleMappingStore(Arrays.asList("user"));
         mockKerberosTicketValidator = mock(KerberosTicketValidator.class);
         final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
@@ -173,8 +174,8 @@ private void assertKerberosRealmConstructorFails(final String keytabPath, final
     public void testDelegatedAuthorization() throws Exception {
         final String username = randomPrincipalName();
         final String expectedUsername = maybeRemoveRealmName(username);
-        final MockLookupRealm otherRealm = spy(new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings,
-            TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))));
+        final MockLookupRealm otherRealm = spy(new MockLookupRealm(new RealmConfig(new RealmConfig.RealmIdentifier("mock", "other_realm"),
+            globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))));
         final User lookupUser = new User(expectedUsername, new String[] { "admin-role" }, expectedUsername,
             expectedUsername + "@example.com", Collections.singletonMap("k1", "v1"), true);
         otherRealm.registerUser(lookupUser);
@@ -183,8 +184,8 @@ public void testDelegatedAuthorization() throws Exception {
         final KerberosRealm kerberosRealm = createKerberosRealm(Collections.singletonList(otherRealm), username);
         final User expectedUser = lookupUser;
         final byte[] decodedTicket = "base64encodedticket".getBytes(StandardCharsets.UTF_8);
-        final Path keytabPath = config.env().configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(config.settings()));
-        final boolean krbDebug = KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE.get(config.settings());
+        final Path keytabPath = config.env().configFile().resolve(config.getSetting(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH));
+        final boolean krbDebug = config.getSetting(KerberosRealmSettings.SETTING_KRB_DEBUG_ENABLE);
         mockKerberosTicketValidator(decodedTicket, keytabPath, krbDebug, new Tuple<>(username, "out-token"), null);
         final KerberosAuthenticationToken kerberosAuthenticationToken = new KerberosAuthenticationToken(decodedTicket);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java
index 2f5147ca2b17d..9fa731138b355 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java
@@ -33,11 +33,13 @@
 import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings;
+import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings;
 import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.core.ssl.VerificationMode;
 import org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.DownLevelADAuthenticator;
@@ -52,8 +54,10 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING;
 import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING;
 import static org.hamcrest.Matchers.arrayContaining;
@@ -85,7 +89,6 @@ public class ActiveDirectoryRealmTests extends ESTestCase {
 
     private static final String PASSWORD = "password";
-    private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey();
 
     static int numberOfLdapServers;
     InMemoryDirectoryServer[] directoryServers;
@@ -150,24 +153,24 @@ public boolean enableWarningsCheck() {
      * Creates a realm with the provided settings, rebuilds the SSL Service to be aware of the new realm, and then returns
      * the RealmConfig
      */
-    private RealmConfig setupRealm(String realmName, Settings settings) {
-        final Settings merged = Settings.builder()
-            .put(settings)
-            .normalizePrefix("xpack.security.authc.realms." + realmName + ".")
-            .put(globalSettings)
-            .build();
-
-        final Environment env = TestEnvironment.newEnvironment(merged);
-        this.sslService = new SSLService(merged, env);
-        return new RealmConfig(realmName, settings, merged, env, new ThreadContext(merged));
+    private RealmConfig setupRealm(RealmConfig.RealmIdentifier realmIdentifier, Settings localSettings) {
+        final Settings mergedSettings = Settings.builder().put(globalSettings).put(localSettings).build();
+        final Environment env = TestEnvironment.newEnvironment(mergedSettings);
+        this.sslService = new SSLService(mergedSettings, env);
+        return new RealmConfig(
+            realmIdentifier,
+            mergedSettings,
+            env, new ThreadContext(mergedSettings)
+        );
     }
 
     public void testAuthenticateUserPrincipleName() throws Exception {
-        Settings settings = settings();
-        RealmConfig config = setupRealm("testAuthenticateUserPrincipleName", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateUserPrincipleName");
+        Settings settings = settings(realmIdentifier);
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -180,11 +183,12 @@ public void testAuthenticateUserPrincipleName() throws Exception {
     }
 
     public void testAuthenticateSAMAccountName() throws Exception {
-        Settings settings = settings();
-        RealmConfig config = setupRealm("testAuthenticateSAMAccountName", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateSAMAccountName");
+        Settings settings = settings(realmIdentifier);
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         // Thor does not have a UPN of form CN=Thor@ad.test.elasticsearch.com
@@ -205,11 +209,12 @@ protected String[] ldapUrls() throws LDAPException {
     }
 
     public void testAuthenticateCachesSuccessfulAuthentications() throws Exception {
-        Settings settings = settings();
-        RealmConfig config = setupRealm("testAuthenticateCachesSuccesfulAuthentications", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateCachesSuccessfulAuthentications");
+        Settings settings = settings(realmIdentifier);
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool));
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         int count = randomIntBetween(2, 10);
@@ -224,11 +229,14 @@ public void testAuthenticateCachesSuccessfulAuthentications() throws Exception {
     }
 
     public void testAuthenticateCachingCanBeDisabled() throws Exception {
-        Settings settings = settings(Settings.builder().put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1).build());
-        RealmConfig config = setupRealm("testAuthenticateCachingCanBeDisabled", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateCachingCanBeDisabled");
+        final Settings settings = settings(realmIdentifier, Settings.builder()
+            .put(getFullSettingKey(realmIdentifier, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), -1)
+            .build());
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool));
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         int count = randomIntBetween(2, 10);
@@ -243,11 +251,12 @@ public void testAuthenticateCachingCanBeDisabled() throws Exception {
     }
 
     public void testAuthenticateCachingClearsCacheOnRoleMapperRefresh() throws Exception {
-        Settings settings = settings();
-        RealmConfig config = setupRealm("testAuthenticateCachingClearsCacheOnRoleMapperRefresh", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateCachingClearsCacheOnRoleMapperRefresh");
+        Settings settings = settings(realmIdentifier);
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool));
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         int count = randomIntBetween(2, 10);
@@ -281,22 +290,24 @@ public void testUnauthenticatedLookupWithoutConnectionPool() throws Exception {
     }
 
     private void doUnauthenticatedLookup(boolean pooled) throws Exception {
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testUnauthenticatedLookupWithConnectionPool");
+
         final Settings.Builder builder = Settings.builder()
-            .put(ActiveDirectorySessionFactorySettings.POOL_ENABLED.getKey(), pooled)
-            .put(PoolingSessionFactorySettings.BIND_DN.getKey(), "CN=ironman@ad.test.elasticsearch.com");
+            .put(getFullSettingKey(realmIdentifier.getName(), ActiveDirectorySessionFactorySettings.POOL_ENABLED), pooled)
+            .put(getFullSettingKey(realmIdentifier, PoolingSessionFactorySettings.BIND_DN), "CN=ironman@ad.test.elasticsearch.com");
         final boolean useLegacyBindPassword = randomBoolean();
         if (useLegacyBindPassword) {
-            builder.put(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD.getKey(), PASSWORD);
+            builder.put(getFullSettingKey(realmIdentifier, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), PASSWORD);
         } else {
             final MockSecureSettings secureSettings = new MockSecureSettings();
-            secureSettings.setString(PoolingSessionFactorySettings.SECURE_BIND_PASSWORD.getKey(), PASSWORD);
+            secureSettings.setString(getFullSettingKey(realmIdentifier, PoolingSessionFactorySettings.SECURE_BIND_PASSWORD), PASSWORD);
             builder.setSecureSettings(secureSettings);
         }
-        Settings settings = settings(builder.build());
-        RealmConfig config = setupRealm("testUnauthenticatedLookupWithConnectionPool", settings);
+        Settings settings = settings(realmIdentifier, builder.build());
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         try (ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool)) {
             DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-            LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+            LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
             realm.initialize(Collections.singleton(realm), licenseState);
 
             PlainActionFuture future = new PlainActionFuture<>();
@@ -308,13 +319,14 @@ private void doUnauthenticatedLookup(boolean pooled) throws Exception {
     }
 
     public void testRealmMapsGroupsToRoles() throws Exception {
-        Settings settings = settings(Settings.builder()
-            .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml"))
+        final RealmConfig.RealmIdentifier realmId = realmId("testRealmMapsGroupsToRoles");
+        Settings settings = settings(realmId, Settings.builder()
+            .put(getFullSettingKey(realmId, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), getDataPath("role_mapping.yml"))
             .build());
-        RealmConfig config = setupRealm("testRealmMapsGroupsToRoles", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -325,13 +337,14 @@ public void testRealmMapsGroupsToRoles() throws Exception {
     }
 
     public void testRealmMapsUsersToRoles() throws Exception {
-        Settings settings = settings(Settings.builder()
-            .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml"))
+        final RealmConfig.RealmIdentifier realmId = realmId("testRealmMapsUsersToRoles");
+        Settings settings = settings(realmId, Settings.builder()
+            .put(getFullSettingKey(realmId, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), getDataPath("role_mapping.yml"))
             .build());
-        RealmConfig config = setupRealm("testRealmMapsGroupsToRoles", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -342,15 +355,16 @@ public void testRealmMapsUsersToRoles() throws Exception {
     }
 
     public void testRealmUsageStats() throws Exception {
+        final RealmConfig.RealmIdentifier realmId = realmId("testRealmUsageStats");
         String loadBalanceType = randomFrom("failover", "round_robin");
-        Settings settings = settings(Settings.builder()
-            .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml"))
-            .put("load_balance.type", loadBalanceType)
+        Settings settings = settings(realmId, Settings.builder()
+            .put(getFullSettingKey(realmId, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), getDataPath("role_mapping.yml"))
+            .put(getFullSettingKey(realmId, LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING), loadBalanceType)
             .build());
-        RealmConfig config = setupRealm("testRealmUsageStats", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService);
-        LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool);
+        LdapRealm realm = new LdapRealm(config, sessionFactory, roleMapper, threadPool);
         realm.initialize(Collections.singleton(realm), licenseState);
 
         PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>();
@@ -365,8 +379,9 @@ public void testRealmUsageStats() throws Exception {
     }
 
     public void testDefaultSearchFilters() throws Exception {
-        Settings settings = settings();
-        RealmConfig config = setupRealm("testDefaultSearchFilters", settings);
+        final RealmConfig.RealmIdentifier realmIdentifier = realmId("testDefaultSearchFilters");
+        Settings settings = settings(realmIdentifier);
+        RealmConfig config = setupRealm(realmIdentifier, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         assertEquals("(&(objectClass=user)(|(sAMAccountName={0})(userPrincipalName={0}@ad.test.elasticsearch.com)))",
             sessionFactory.defaultADAuthenticator.getUserSearchFilter());
@@ -375,43 +390,61 @@
     }
 
     public void testCustomSearchFilters() throws Exception {
-        Settings settings = settings(Settings.builder()
-            .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, "(objectClass=default)")
-            .put(ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING, "(objectClass=upn)")
-            .put(ActiveDirectorySessionFactorySettings.AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, "(objectClass=down level)")
+        final RealmConfig.RealmIdentifier realmId = realmId("testCustomSearchFilters");
+        Settings settings = settings(realmId, Settings.builder()
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING),
+                "(objectClass=default)")
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING),
+                "(objectClass=upn)")
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING),
+                "(objectClass=down level)")
             .build());
-        RealmConfig config = setupRealm("testDefaultSearchFilters", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         assertEquals("(objectClass=default)", sessionFactory.defaultADAuthenticator.getUserSearchFilter());
         assertEquals("(objectClass=upn)", sessionFactory.upnADAuthenticator.getUserSearchFilter());
         assertEquals("(objectClass=down level)", sessionFactory.downLevelADAuthenticator.getUserSearchFilter());
     }
 
+    public RealmConfig.RealmIdentifier realmId(String realmName) {
+        return new RealmConfig.RealmIdentifier(LdapRealmSettings.AD_TYPE, realmName.toLowerCase(Locale.ROOT));
+    }
+
+    private Settings settings(RealmConfig.RealmIdentifier realmIdentifier) throws Exception {
+        return settings(realmIdentifier, Settings.EMPTY);
+    }
+
     public void testBuildUrlFromDomainNameAndDefaultPort() throws Exception {
+        final RealmConfig.RealmIdentifier realmId = realmId("testBuildUrlFromDomainNameAndDefaultPort");
         Settings settings = Settings.builder()
-            .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, "ad.test.elasticsearch.com")
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING),
+                "ad.test.elasticsearch.com")
             .build();
-        RealmConfig config = setupRealm("testBuildUrlFromDomainNameAndDefaultPort", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         assertSingleLdapServer(sessionFactory, "ad.test.elasticsearch.com", 389);
     }
 
     public void testBuildUrlFromDomainNameAndCustomPort() throws Exception {
+        final RealmConfig.RealmIdentifier realmId = realmId("testBuildUrlFromDomainNameAndCustomPort");
         Settings settings = Settings.builder()
-            .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, "ad.test.elasticsearch.com")
-            .put(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), 10389)
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING),
+                "ad.test.elasticsearch.com")
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), 10389)
             .build();
-        RealmConfig config = setupRealm("testBuildUrlFromDomainNameAndCustomPort", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         assertSingleLdapServer(sessionFactory, "ad.test.elasticsearch.com", 10389);
     }
 
     public void testUrlConfiguredInSettings() throws Exception {
+        final RealmConfig.RealmIdentifier realmId = realmId("testUrlConfiguredInSettings");
         Settings settings = Settings.builder()
-            .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, "ad.test.elasticsearch.com")
-            .put(SessionFactorySettings.URLS_SETTING, "ldap://ad01.testing.elastic.co:20389/")
+            .put(getFullSettingKey(realmId.getName(), ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING),
                "ad.test.elasticsearch.com")
+            .put(getFullSettingKey(realmId, SessionFactorySettings.URLS_SETTING), "ldap://ad01.testing.elastic.co:20389/")
             .build();
-        RealmConfig config = setupRealm("testBuildUrlFromDomainNameAndCustomPort", settings);
+        RealmConfig config = setupRealm(realmId, settings);
         ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool);
         assertSingleLdapServer(sessionFactory, "ad01.testing.elastic.co", 20389);
     }
@@ -426,19 +459,17 @@ private void assertSingleLdapServer(ActiveDirectorySessionFactory sessionFactory
         assertThat(sss.getPort(), equalTo(port));
     }
 
-    private Settings settings() throws Exception {
-        return settings(Settings.EMPTY);
-    }
-
-    private Settings settings(Settings extraSettings) throws Exception {
+    private Settings settings(RealmConfig.RealmIdentifier realmIdentifier, Settings extraSettings) throws Exception {
         Settings.Builder builder = Settings.builder()
-            .putList(URLS_SETTING, ldapUrls())
-            .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, "ad.test.elasticsearch.com")
-            .put(DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(), true);
+            .putList(getFullSettingKey(realmIdentifier, URLS_SETTING), ldapUrls())
+            .put(getFullSettingKey(realmIdentifier.getName(), ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING),
+                "ad.test.elasticsearch.com")
+            .put(getFullSettingKey(realmIdentifier, DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING), true);
         if (randomBoolean()) {
-            builder.put("ssl.verification_mode", VerificationMode.CERTIFICATE);
+            builder.put(getFullSettingKey(realmIdentifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM),
                VerificationMode.CERTIFICATE);
         } else {
-            builder.put(HOSTNAME_VERIFICATION_SETTING, false);
+            builder.put(getFullSettingKey(realmIdentifier, HOSTNAME_VERIFICATION_SETTING), false);
         }
         return builder.put(extraSettings).build();
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java
index 23010e400a52b..a4557171fe48b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java
@@ -10,7 +10,11 @@
 import com.unboundid.ldap.sdk.LDAPInterface;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.After;
@@ -24,6 +28,13 @@ public abstract class GroupsResolverTestCase extends ESTestCase {
 
     LDAPConnection ldapConnection;
 
+    protected static RealmConfig config(RealmConfig.RealmIdentifier realmId, Settings settings) {
+        if (settings.hasValue("path.home") == false) {
+            settings = Settings.builder().put(settings).put("path.home", createTempDir()).build();
+        }
+        return new RealmConfig(realmId, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY));
+    }
+
     protected abstract String ldapUrl();
 protected abstract String bindDN();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
index fb20527575df9..3e63fe1f870f1 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
@@ -9,7 +9,9 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.env.Environment;
@@ -24,12 +26,16 @@
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings;
+import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings;
+import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings;
+import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
 import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings;
 import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.core.ssl.VerificationMode;
 import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase;
@@ -43,7 +49,9 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;
 
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING;
 import static org.hamcrest.Matchers.arrayContaining;
 import static org.hamcrest.Matchers.contains;
@@ -69,8 +77,6 @@ public class LdapRealmTests extends LdapTestCase {
     public static final String VALID_USERNAME = "Thomas Masterman Hardy";
     public static final String PASSWORD = "pass";
 
-    private static final String USER_DN_TEMPLATES_SETTING_KEY = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey();
-
     private ThreadPool threadPool;
     private ResourceWatcherService resourceWatcherService;
     private Settings defaultGlobalSettings;
@@ -97,10 +103,9 @@ public void testAuthenticateSubTreeGroupSearch() throws Exception {
         String groupSearchBase = "o=sevenSeas";
         String userTemplate = VALID_USER_TEMPLATE;
         Settings settings = buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE);
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
-            TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings);
         LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
-        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
+        LdapRealm ldap = new LdapRealm(config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService),
             threadPool);
         ldap.initialize(Collections.singleton(ldap), licenseState);
 
@@ -117,18 +122,23 @@
         assertThat((List) user.metadata().get("ldap_groups"), contains("cn=HMS Victory,ou=crews,ou=groups,o=sevenSeas"));
     }
 
+    private RealmConfig getRealmConfig(RealmConfig.RealmIdentifier identifier, Settings settings) {
+        final Settings globalSettings = mergeSettings(settings, defaultGlobalSettings);
+        final Environment env = TestEnvironment.newEnvironment(globalSettings);
+        return new RealmConfig(identifier, globalSettings, env, new ThreadContext(globalSettings));
+    }
+
     public void testAuthenticateOneLevelGroupSearch() throws Exception {
         String groupSearchBase = "ou=crews,ou=groups,o=sevenSeas";
         String userTemplate = VALID_USER_TEMPLATE;
         Settings settings = Settings.builder()
             .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL))
             .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
-            TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings);
         LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
         LdapRealm ldap =
-            new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
+            new LdapRealm(config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
         ldap.initialize(Collections.singleton(ldap), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -150,13 +160,12 @@ public void testAuthenticateCaching() throws Exception {
         Settings settings = Settings.builder()
             .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
             .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
-            TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings);
         LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
         ldapFactory = spy(ldapFactory);
         LdapRealm ldap =
-            new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
+            new LdapRealm(config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
         ldap.initialize(Collections.singleton(ldap), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -177,13 +186,12 @@ public void testAuthenticateCachingRefresh() throws Exception {
         Settings settings = Settings.builder()
             .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
             .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
-            TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings);
         LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
         DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService);
         ldapFactory = spy(ldapFactory);
-        LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool);
+        LdapRealm ldap = new LdapRealm(config, ldapFactory, roleMapper, threadPool);
         ldap.initialize(Collections.singleton(ldap), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -211,15 +219,14 @@ public void testAuthenticateNoncaching() throws Exception {
         String userTemplate = VALID_USER_TEMPLATE;
         Settings settings = Settings.builder()
             .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
-            .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1)
+            .put(getFullSettingKey(REALM_IDENTIFIER, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), -1)
             .build();
-        RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings,
-            TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings));
+        RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings);
         LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
         ldapFactory = spy(ldapFactory);
         LdapRealm ldap =
-            new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
+            new LdapRealm(config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool);
         ldap.initialize(Collections.singleton(ldap), licenseState);
 
         PlainActionFuture future = new PlainActionFuture<>();
@@ -238,23 +245,23 @@ public void testDelegatedAuthorization() throws Exception {
         String userTemplate = VALID_USER_TEMPLATE;
         final Settings.Builder builder = Settings.builder()
             .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE))
-            .putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), "mock_lookup");
+            .putList(getFullSettingKey(REALM_IDENTIFIER, DelegatedAuthorizationSettings.AUTHZ_REALMS), "mock_lookup");
         if (randomBoolean()) {
             // maybe disable caching
-            builder.put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1);
+            builder.put(getFullSettingKey(REALM_IDENTIFIER, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), -1);
         }
         final Settings realmSettings = builder.build();
         final Environment env = TestEnvironment.newEnvironment(defaultGlobalSettings);
-        RealmConfig config = new RealmConfig("test-ldap-realm", realmSettings, defaultGlobalSettings, env, threadPool.getThreadContext());
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER, realmSettings, env, threadPool.getThreadContext());
 
         final LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool);
         final DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService);
-        final LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool);
+        final LdapRealm ldap = new LdapRealm(config, ldapFactory, roleMapper, threadPool);
 
-        final MockLookupRealm mockLookup = new MockLookupRealm(new RealmConfig("mock_lookup", Settings.EMPTY, defaultGlobalSettings, env,
-            threadPool.getThreadContext()));
+        final MockLookupRealm mockLookup = new MockLookupRealm(new RealmConfig(new RealmConfig.RealmIdentifier("mock", "mock_lookup"),
+            defaultGlobalSettings, env, threadPool.getThreadContext()));
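NOTE (editor): getRealmConfig above layers the realm-local test settings and the global settings into one Settings object before handing it to RealmConfig; its mergeSettings(...) helper is defined outside this excerpt. The merge(...) helper in KerberosRealmTestCase earlier in this diff shows the mechanism explicitly, and is worth spelling out with comments, since normalizePrefix is what turns the unprefixed keys older tests used into properly namespaced realm settings:

---------------------------------------------------------------------------
// Mirrors KerberosRealmTestCase.merge(...) from earlier in this diff.
private Settings merge(RealmConfig.RealmIdentifier identifier, Settings realmSettings, Settings globalSettings) {
    return Settings.builder()
        .put(realmSettings)                                             // unprefixed realm-local keys
        .normalizePrefix(RealmSettings.realmSettingPrefix(identifier))  // re-key them under the realm's namespace
        .put(globalSettings)                                            // then layer the node-level settings on top
        .build();
}
---------------------------------------------------------------------------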
ldap.initialize(Arrays.asList(ldap, mockLookup), licenseState); mockLookup.initialize(Arrays.asList(ldap, mockLookup), licenseState); @@ -276,98 +283,81 @@ public void testDelegatedAuthorization() throws Exception { } public void testLdapRealmSelectsLdapSessionFactory() throws Exception { + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "test-ldap-realm"); String groupSearchBase = "o=sevenSeas"; String userTemplate = VALID_USER_TEMPLATE; Settings settings = Settings.builder() - .putList(URLS_SETTING, ldapUrls()) - .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate) - .put("group_search.base_dn", groupSearchBase) - .put("group_search.scope", LdapSearchScope.SUB_TREE) - .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .putList(getFullSettingKey(identifier, URLS_SETTING), ldapUrls()) + .putList(getFullSettingKey(identifier.getName(), LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING), userTemplate) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.BASE_DN), groupSearchBase) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(identifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) .build(); - - final String realmName = "test-ldap-realm"; - final Settings globalSettings = Settings.builder() - .put(settings) - .normalizePrefix(RealmSettings.PREFIX + realmName + ".") - .put(defaultGlobalSettings) - .build(); - - final Environment env = TestEnvironment.newEnvironment(globalSettings); - final RealmConfig config = new RealmConfig(realmName, settings, globalSettings, env, new ThreadContext(globalSettings)); - SessionFactory sessionFactory = LdapRealm.sessionFactory(config, new SSLService(globalSettings, env), threadPool, - LdapRealmSettings.LDAP_TYPE); + RealmConfig config = getRealmConfig(identifier, settings); + SessionFactory sessionFactory = LdapRealm.sessionFactory(config, new SSLService(settings, config.env()), threadPool); assertThat(sessionFactory, is(instanceOf(LdapSessionFactory.class))); } public void testLdapRealmSelectsLdapUserSearchSessionFactory() throws Exception { + final RealmConfig.RealmIdentifier identifier + = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "test-ldap-realm-user-search"); String groupSearchBase = "o=sevenSeas"; Settings settings = Settings.builder() - .putList(URLS_SETTING, ldapUrls()) - .put("user_search.base_dn", "") - .put("bind_dn", "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") - .setSecureSettings(secureSettings("secure_bind_password", PASSWORD)) - .put("group_search.base_dn", groupSearchBase) - .put("group_search.scope", LdapSearchScope.SUB_TREE) - .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .put(defaultGlobalSettings) + .putList(getFullSettingKey(identifier, URLS_SETTING), ldapUrls()) + .put(getFullSettingKey(identifier.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), "") + .put(getFullSettingKey(identifier, PoolingSessionFactorySettings.BIND_DN), + "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") + .setSecureSettings(secureSettings(PoolingSessionFactorySettings.SECURE_BIND_PASSWORD, identifier, PASSWORD)) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.BASE_DN), groupSearchBase) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(identifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), 
VerificationMode.CERTIFICATE) .build(); - final String realmName = "test-ldap-realm-user-search"; - final Settings globalSettings = Settings.builder() - .put(settings) - .normalizePrefix(RealmSettings.PREFIX + realmName + ".") - .put(defaultGlobalSettings) - .build(); - final Environment env = TestEnvironment.newEnvironment(globalSettings); - final RealmConfig config = new RealmConfig(realmName, settings, globalSettings, env, new ThreadContext(globalSettings)); - SessionFactory sessionFactory = LdapRealm.sessionFactory(config, new SSLService(globalSettings, env), threadPool, - LdapRealmSettings.LDAP_TYPE); + final RealmConfig config = getRealmConfig(identifier, settings); + SessionFactory sessionFactory = LdapRealm.sessionFactory(config, new SSLService(config.globalSettings(), config.env()), threadPool); try { assertThat(sessionFactory, is(instanceOf(LdapUserSearchSessionFactory.class))); } finally { - ((LdapUserSearchSessionFactory)sessionFactory).close(); + ((LdapUserSearchSessionFactory) sessionFactory).close(); } } - private MockSecureSettings secureSettings(String key, String value) { - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(key, value); - return secureSettings; - } - public void testLdapRealmThrowsExceptionForUserTemplateAndSearchSettings() throws Exception { + final RealmConfig.RealmIdentifier identifier + = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "test-ldap-realm-user-search"); Settings settings = Settings.builder() - .putList(URLS_SETTING, ldapUrls()) - .putList(USER_DN_TEMPLATES_SETTING_KEY, "cn=foo") - .put("user_search.base_dn", "cn=bar") - .put("group_search.base_dn", "") - .put("group_search.scope", LdapSearchScope.SUB_TREE) - .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .putList(getFullSettingKey(identifier, URLS_SETTING), ldapUrls()) + .putList(getFullSettingKey(identifier.getName(), LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING), "cn=foo") + .put(getFullSettingKey(identifier.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), "cn=bar") + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.BASE_DN), "") + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(identifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, - TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = getRealmConfig(identifier, settings); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); + () -> LdapRealm.sessionFactory(config, null, threadPool)); assertThat(e.getMessage(), containsString("settings were found for both" + - " user search [xpack.security.authc.realms.test-ldap-realm-user-search.user_search.] 
and" + - " user template [xpack.security.authc.realms.test-ldap-realm-user-search.user_dn_templates]")); + " user search [xpack.security.authc.realms.ldap.test-ldap-realm-user-search.user_search.base_dn] and" + + " user template [xpack.security.authc.realms.ldap.test-ldap-realm-user-search.user_dn_templates]")); } public void testLdapRealmThrowsExceptionWhenNeitherUserTemplateNorSearchSettingsProvided() throws Exception { + final RealmConfig.RealmIdentifier identifier + = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "test-ldap-realm-user-search"); Settings settings = Settings.builder() - .putList(URLS_SETTING, ldapUrls()) - .put("group_search.base_dn", "") - .put("group_search.scope", LdapSearchScope.SUB_TREE) - .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .putList(getFullSettingKey(identifier, URLS_SETTING), ldapUrls()) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.BASE_DN), "") + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(identifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, defaultGlobalSettings, - TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = getRealmConfig(identifier, settings); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); + () -> LdapRealm.sessionFactory(config, null, threadPool)); assertThat(e.getMessage(), containsString("settings were not found for either" + - " user search [xpack.security.authc.realms.test-ldap-realm-user-search.user_search.] 
or" + - " user template [xpack.security.authc.realms.test-ldap-realm-user-search.user_dn_templates]")); + " user search [xpack.security.authc.realms.ldap.test-ldap-realm-user-search.user_search.base_dn] or" + + " user template [xpack.security.authc.realms.ldap.test-ldap-realm-user-search.user_dn_templates]")); } public void testLdapRealmMapsUserDNToRole() throws Exception { @@ -375,14 +365,13 @@ public void testLdapRealmMapsUserDNToRole() throws Exception { String userTemplate = VALID_USER_TEMPLATE; Settings settings = Settings.builder() .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) - .put(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(), + .put(getFullSettingKey(REALM_IDENTIFIER, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), getDataPath("/org/elasticsearch/xpack/security/authc/support/role_mapping.yml")) .build(); - RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, defaultGlobalSettings, - TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); - LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, + LdapRealm ldap = new LdapRealm(config, ldapFactory, new DnRoleMapper(config, resourceWatcherService), threadPool); ldap.initialize(Collections.singleton(ldap), licenseState); @@ -407,10 +396,9 @@ public void testLdapConnectionFailureIsTreatedAsAuthenticationFailure() throws E String groupSearchBase = "o=sevenSeas"; String userTemplate = VALID_USER_TEMPLATE; Settings settings = buildLdapSettings(new String[] { url.toString() }, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE); - RealmConfig config = new RealmConfig("test-ldap-realm", settings, defaultGlobalSettings, - TestEnvironment.newEnvironment(defaultGlobalSettings), new ThreadContext(defaultGlobalSettings)); + RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings); LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); - LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), + LdapRealm ldap = new LdapRealm(config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); ldap.initialize(Collections.singleton(ldap), licenseState); @@ -425,47 +413,47 @@ public void testLdapConnectionFailureIsTreatedAsAuthenticationFailure() throws E } public void testUsageStats() throws Exception { + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "ldap-realm"); String groupSearchBase = "o=sevenSeas"; Settings.Builder settings = Settings.builder() - .putList(URLS_SETTING, ldapUrls()) - .put("bind_dn", "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") - .put("bind_password", PASSWORD) - .put("group_search.base_dn", groupSearchBase) - .put("group_search.scope", LdapSearchScope.SUB_TREE) - .put(LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey(), "--") - .put("ssl.verification_mode", VerificationMode.CERTIFICATE); + .putList(getFullSettingKey(identifier, URLS_SETTING), ldapUrls()) + .put(getFullSettingKey(identifier, PoolingSessionFactorySettings.BIND_DN), + "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") + .put(getFullSettingKey(identifier, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), PASSWORD) + .put(getFullSettingKey(identifier, 
SearchGroupsResolverSettings.BASE_DN), groupSearchBase) + .put(getFullSettingKey(identifier, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(identifier.getName(), LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING), "--") + .put(getFullSettingKey(identifier, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE); int order = randomIntBetween(0, 10); - settings.put("order", order); + settings.put(getFullSettingKey(identifier, RealmSettings.ORDER_SETTING), order); boolean userSearch = randomBoolean(); if (userSearch) { - settings.put("user_search.base_dn", ""); + settings.put(getFullSettingKey(identifier.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), ""); } - final Settings realmSettings = settings.build(); - final String realmName = "ldap-realm"; - final Settings globalSettings = Settings.builder() - .put(realmSettings) - .normalizePrefix(RealmSettings.PREFIX + realmName + ".") - .put(defaultGlobalSettings) - .build(); - final Environment env = TestEnvironment.newEnvironment(globalSettings); - final RealmConfig config = new RealmConfig(realmName, realmSettings, globalSettings, env, new ThreadContext(globalSettings)); + RealmConfig config = getRealmConfig(identifier, settings.build()); - LdapSessionFactory ldapFactory = new LdapSessionFactory(config, new SSLService(globalSettings, env), threadPool); - LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, - new DnRoleMapper(config, resourceWatcherService), threadPool); + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, new SSLService(config.globalSettings(), config.env()), threadPool); + LdapRealm realm = new LdapRealm(config, ldapFactory, new DnRoleMapper(config, resourceWatcherService), threadPool); realm.initialize(Collections.singleton(realm), licenseState); PlainActionFuture> future = new PlainActionFuture<>(); realm.usageStats(future); Map stats = future.get(); assertThat(stats, is(notNullValue())); - assertThat(stats, hasEntry("name", realmName)); + assertThat(stats, hasEntry("name", identifier.getName())); assertThat(stats, hasEntry("order", realm.order())); assertThat(stats, hasEntry("size", 0)); assertThat(stats, hasEntry("ssl", false)); assertThat(stats, hasEntry("user_search", userSearch)); } + + private SecureSettings secureSettings(Function> settingFactory, + RealmConfig.RealmIdentifier identifier, String value) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(getFullSettingKey(identifier, settingFactory), value); + return secureSettings; + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java index a22cc9fba1779..e484af1b272a7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java @@ -9,7 +9,6 @@ import com.unboundid.ldap.sdk.LDAPException; import com.unboundid.ldap.sdk.LDAPURL; import com.unboundid.ldap.sdk.SimpleBindRequest; - import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -18,6 +17,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import 
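The recurring pattern in the LdapRealmTests changes above is that realm settings are no longer written relative to the realm (for example `group_search.base_dn`) but as fully qualified keys derived from a `RealmConfig.RealmIdentifier`, which pairs a realm type with a realm name. A minimal sketch of the key layout this produces; the `fullSettingKey` helper below is illustrative only and not the actual `RealmSettings.getFullSettingKey` implementation:

----
// Illustrative sketch of the new realm setting key layout. The real logic
// lives in RealmSettings.getFullSettingKey; only the prefix and the
// type/name composition shown here are taken from this diff.
public final class RealmKeyExample {

    private static final String PREFIX = "xpack.security.authc.realms.";

    static String fullSettingKey(String realmType, String realmName, String suffix) {
        // Old layout: xpack.security.authc.realms.test-ldap-realm.group_search.base_dn
        // New layout: xpack.security.authc.realms.ldap.test-ldap-realm.group_search.base_dn
        return PREFIX + realmType + "." + realmName + "." + suffix;
    }

    public static void main(String[] args) {
        System.out.println(fullSettingKey("ldap", "test-ldap-realm", "group_search.base_dn"));
    }
}
----

Note the two overloads used in the hunks: settings that are generic across realm types are resolved with the full identifier, while affix settings that are already bound to the `ldap` type (such as `USER_DN_TEMPLATES_SETTING`) are resolved with `identifier.getName()` alone.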
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
index a22cc9fba1779..e484af1b272a7 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java
@@ -9,7 +9,6 @@
 import com.unboundid.ldap.sdk.LDAPException;
 import com.unboundid.ldap.sdk.LDAPURL;
 import com.unboundid.ldap.sdk.SimpleBindRequest;
-
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -18,6 +17,7 @@
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
+import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
@@ -59,11 +59,11 @@ public void testBindWithReadTimeout() throws Exception {
 
         Settings settings = Settings.builder()
                 .put(buildLdapSettings(ldapUrl, userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "1ms") //1 millisecond
+                .put(RealmSettings.getFullSettingKey(REALM_IDENTIFIER, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "1ms")
                 .put("path.home", createTempDir())
                 .build();
-        RealmConfig config = new RealmConfig("ldap_realm", settings, globalSettings,
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER, mergeSettings(settings, globalSettings),
                 TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
         LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool);
         String user = "Horatio Hornblower";
@@ -83,14 +83,14 @@ public void testBindWithReadTimeout() throws Exception {
 
     public void testBindWithTemplates() throws Exception {
         String groupSearchBase = "o=sevenSeas";
-        String[] userTemplates = new String[] {
+        String[] userTemplates = new String[]{
                 "cn={0},ou=something,ou=obviously,ou=incorrect,o=sevenSeas",
                 "wrongname={0},ou=people,o=sevenSeas",
                 "cn={0},ou=people,o=sevenSeas", //this last one should work
         };
-        RealmConfig config = new RealmConfig("ldap_realm",
-                buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE),
-                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
 
         LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool);
 
@@ -107,14 +107,14 @@ public void testBindWithTemplates() throws Exception {
 
     public void testBindWithBogusTemplates() throws Exception {
         String groupSearchBase = "o=sevenSeas";
-        String[] userTemplates = new String[] {
+        String[] userTemplates = new String[]{
                 "cn={0},ou=something,ou=obviously,ou=incorrect,o=sevenSeas",
                 "wrongname={0},ou=people,o=sevenSeas",
                 "asdf={0},ou=people,o=sevenSeas", //none of these should work
         };
-        RealmConfig config = new RealmConfig("ldap_realm",
-                buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE),
-                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
 
         LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);
 
@@ -131,9 +131,9 @@ public void testBindWithBogusTemplates() throws Exception {
     public void testGroupLookupSubtree() throws Exception {
         String groupSearchBase = "o=sevenSeas";
         String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm",
-                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE),
-                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
 
         LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);
 
@@ -151,9 +151,9 @@ public void testGroupLookupSubtree() throws Exception {
     public void testGroupLookupOneLevel() throws Exception {
         String groupSearchBase = "ou=crews,ou=groups,o=sevenSeas";
         String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm",
-                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL),
-                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
 
         LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);
 
@@ -170,9 +170,9 @@ public void testGroupLookupOneLevel() throws Exception {
     public void testGroupLookupBase() throws Exception {
         String groupSearchBase = "cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas";
         String userTemplate = "cn={0},ou=people,o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm",
-                buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.BASE),
-                globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.BASE), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
 
         LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool);
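Every `RealmConfig` in the file above now receives the merge of the realm-scoped settings and the test's global settings. The `mergeSettings` helper itself is not part of this diff and presumably lives in the shared `LdapTestCase` base class; a plausible sketch under that assumption, with the override order also assumed:

----
import org.elasticsearch.common.settings.Settings;

// Hypothetical stand-in for the mergeSettings(...) calls above; the real
// helper is expected to come from the test base class and may differ.
public final class MergeSettingsSketch {
    static Settings mergeSettings(Settings local, Settings global) {
        return Settings.builder()
                .put(global)
                .put(local) // assumption: realm-scoped keys override global defaults
                .build();
    }
}
----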
secureSettings.setString("xpack.security.authc.realms.ldap.bar.ssl.truststore.secure_password", "changeit"); } else { // fake realms so ssl will get loaded - builder.put("xpack.security.authc.realms.foo.ssl.truststore.path", truststore); - builder.put("xpack.security.authc.realms.foo.ssl.verification_mode", VerificationMode.FULL); - builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); - builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); - secureSettings.setString("xpack.security.authc.realms.foo.ssl.truststore.secure_password", "changeit"); - secureSettings.setString("xpack.security.authc.realms.bar.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.ldap.foo.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.ldap.foo.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.ldap.bar.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.ldap.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + secureSettings.setString("xpack.security.authc.realms.ldap.foo.ssl.truststore.secure_password", "changeit"); + secureSettings.setString("xpack.security.authc.realms.ldap.bar.ssl.truststore.secure_password", "changeit"); } Settings settings = builder.build(); Environment env = TestEnvironment.newEnvironment(settings); @@ -64,7 +64,7 @@ public static LDAPConnection openConnection(String url, String bindDN, String bi if (useGlobalSSL) { sslConfiguration = sslService.getSSLConfiguration("xpack.ssl"); } else { - sslConfiguration = sslService.getSSLConfiguration("xpack.security.authc.realms.foo.ssl"); + sslConfiguration = sslService.getSSLConfiguration("xpack.security.authc.realms.ldap.foo.ssl"); } return LdapUtils.privilegedConnect(() -> new LDAPConnection(sslService.sslSocketFactory(sslConfiguration), options, ldapurl.getHost(), ldapurl.getPort(), bindDN, bindPassword)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java index 19b0d4e71bb8a..73e5a3f2b6656 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java @@ -41,6 +41,7 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isEmptyString; @@ -85,18 +86,18 @@ public void testSupportsUnauthenticatedSessions() throws Exception { final boolean useAttribute = randomBoolean(); Settings.Builder builder = Settings.builder() .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, "", LdapSearchScope.SUB_TREE)) - .put("user_search.base_dn", "") - .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") - .put("user_search.pool.enabled", randomBoolean()); + .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), "") + .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN), + "cn=Horatio Hornblower,ou=people,o=sevenSeas") + 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java
index 19b0d4e71bb8a..73e5a3f2b6656 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java
@@ -41,6 +41,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.isEmptyString;
@@ -85,18 +86,18 @@ public void testSupportsUnauthenticatedSessions() throws Exception {
         final boolean useAttribute = randomBoolean();
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, "", LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", "")
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), "")
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
         try {
@@ -105,7 +106,13 @@ public void testSupportsUnauthenticatedSessions() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
+    }
+
+    private RealmConfig getRealmConfig(Settings.Builder builder) {
+        return new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(builder.build(), globalSettings),
+                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
     }
 
     public void testUserSearchSubTree() throws Exception {
@@ -115,17 +122,17 @@ public void testUserSearchSubTree() throws Exception {
         final boolean useAttribute = randomBoolean();
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -150,7 +157,7 @@ public void testUserSearchSubTree() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchBaseScopeFailsWithWrongBaseDN() throws Exception {
@@ -160,18 +167,18 @@ public void testUserSearchBaseScopeFailsWithWrongBaseDN() throws Exception {
         final boolean useAttribute = randomBoolean();
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.scope", LdapSearchScope.BASE)
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_SCOPE), LdapSearchScope.BASE)
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -185,7 +192,7 @@ public void testUserSearchBaseScopeFailsWithWrongBaseDN() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchBaseScopePassesWithCorrectBaseDN() throws Exception {
@@ -194,19 +201,19 @@ public void testUserSearchBaseScopePassesWithCorrectBaseDN() throws Exception {
 
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.scope", LdapSearchScope.BASE)
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_SCOPE), LdapSearchScope.BASE)
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         final boolean useAttribute = randomBoolean();
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -231,7 +238,7 @@ public void testUserSearchBaseScopePassesWithCorrectBaseDN() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchOneLevelScopeFailsWithWrongBaseDN() throws Exception {
@@ -240,19 +247,20 @@ public void testUserSearchOneLevelScopeFailsWithWrongBaseDN() throws Exception {
 
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.scope", LdapSearchScope.ONE_LEVEL)
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_SCOPE),
+                        LdapSearchScope.ONE_LEVEL)
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         final boolean useAttribute = randomBoolean();
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -266,7 +274,7 @@ public void testUserSearchOneLevelScopeFailsWithWrongBaseDN() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchOneLevelScopePassesWithCorrectBaseDN() throws Exception {
@@ -275,19 +283,20 @@ public void testUserSearchOneLevelScopePassesWithCorrectBaseDN() throws Exceptio
 
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.scope", LdapSearchScope.ONE_LEVEL)
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_SCOPE),
+                        LdapSearchScope.ONE_LEVEL)
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         final boolean useAttribute = randomBoolean();
         if (useAttribute) {
-            builder.put("user_search.attribute", "cn");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn");
         } else {
-            builder.put("user_search.filter", "(cn={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(cn={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -312,7 +321,7 @@ public void testUserSearchOneLevelScopePassesWithCorrectBaseDN() throws Exceptio
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchWithBadAttributeFails() throws Exception {
@@ -321,18 +330,18 @@ public void testUserSearchWithBadAttributeFails() throws Exception {
 
         Settings.Builder builder = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(builder);
         final boolean useAttribute = randomBoolean();
         if (useAttribute) {
-            builder.put("user_search.attribute", "uid1");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "uid1");
         } else {
-            builder.put("user_search.filter", "(uid1={0})");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_FILTER), "(uid1={0})");
         }
-        RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings),
-                new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(builder);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -346,7 +355,7 @@ public void testUserSearchWithBadAttributeFails() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(useAttribute, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword);
     }
 
     public void testUserSearchWithoutAttributePasses() throws Exception {
@@ -355,12 +364,12 @@ public void testUserSearchWithoutAttributePasses() throws Exception {
 
         final Settings.Builder realmSettings = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(realmSettings);
-        RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings,
-                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(realmSettings);
 
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
@@ -385,7 +394,7 @@ public void testUserSearchWithoutAttributePasses() throws Exception {
             sessionFactory.close();
         }
 
-        assertDeprecationWarnings(false, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), false, useLegacyBindPassword);
     }
 
     public void testConnectionPoolDefaultSettings() throws Exception {
@@ -393,11 +402,11 @@ public void testConnectionPoolDefaultSettings() throws Exception {
         String userSearchBase = "o=sevenSeas";
         final Settings.Builder realmSettings = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas");
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas");
         configureBindPassword(realmSettings);
-        RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings,
-                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(realmSettings);
 
         LDAPConnectionPool connectionPool = LdapUserSearchSessionFactory.createConnectionPool(config,
                 new SingleServerSet("localhost", randomFrom(ldapServers).getListenPort()), TimeValue.timeValueSeconds(5), NoOpLogger.INSTANCE,
@@ -422,14 +431,14 @@ public void testConnectionPoolSettings() throws Exception {
         String userSearchBase = "o=sevenSeas";
         final Settings.Builder realmSettings = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas")
-                .put("user_search.pool.initial_size", 10)
-                .put("user_search.pool.size", 12)
-                .put("user_search.pool.health_check.enabled", false);
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN),
+                        "cn=Horatio Hornblower,ou=people,o=sevenSeas")
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.POOL_INITIAL_SIZE), 10)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.POOL_SIZE), 12)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.HEALTH_CHECK_ENABLED), false);
         configureBindPassword(realmSettings);
-        RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings,
-                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(realmSettings);
 
         LDAPConnectionPool connectionPool = LdapUserSearchSessionFactory.createConnectionPool(config,
                 new SingleServerSet("localhost", randomFrom(ldapServers).getListenPort()), TimeValue.timeValueSeconds(5), NoOpLogger.INSTANCE,
@@ -448,11 +457,10 @@ public void testConnectionPoolSettings() throws Exception {
     public void testThatEmptyBindDNWithHealthCheckEnabledDoesNotThrow() throws Exception {
         String groupSearchBase = "o=sevenSeas";
         String userSearchBase = "o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm", Settings.builder()
+        RealmConfig config = getRealmConfig(Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_password", "pass")
-                .build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), "pass"));
 
         LdapUserSearchSessionFactory searchSessionFactory = null;
         try {
@@ -463,18 +471,17 @@ public void testThatEmptyBindDNWithHealthCheckEnabledDoesNotThrow() throws Excep
             }
         }
 
-        assertDeprecationWarnings(false, true);
+        assertDeprecationWarnings(config.identifier(), false, true);
     }
 
     public void testThatEmptyBindDNAndDisabledPoolingDoesNotThrow() throws Exception {
         String groupSearchBase = "o=sevenSeas";
         String userSearchBase = "o=sevenSeas";
-        RealmConfig config = new RealmConfig("ldap_realm", Settings.builder()
+        RealmConfig config = getRealmConfig(Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("user_search.pool.enabled", false)
-                .put("bind_password", "pass")
-                .build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), false)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), "pass"));
 
         LdapUserSearchSessionFactory searchSessionFactory = null;
         try {
@@ -488,7 +495,7 @@ public void testThatEmptyBindDNAndDisabledPoolingDoesNotThrow() throws Exception
             }
         }
 
-        assertDeprecationWarnings(false, true);
+        assertDeprecationWarnings(config.identifier(), false, true);
     }
 
     public void testEmptyBindDNReturnsAnonymousBindRequest() throws LDAPException {
@@ -496,15 +503,16 @@ public void testEmptyBindDNReturnsAnonymousBindRequest() throws LDAPException {
         String userSearchBase = "o=sevenSeas";
         final Settings.Builder realmSettings = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase);
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase);
         final boolean useLegacyBindPassword = configureBindPassword(realmSettings);
-        RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings,
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(realmSettings.build(), globalSettings),
                 TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
         try (LdapUserSearchSessionFactory searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool)) {
             assertThat(searchSessionFactory.bindCredentials, notNullValue());
             assertThat(searchSessionFactory.bindCredentials.getBindDN(), isEmptyString());
         }
-        assertDeprecationWarnings(false, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), false, useLegacyBindPassword);
     }
 
     public void testThatBindRequestReturnsSimpleBindRequest() throws LDAPException {
@@ -512,16 +520,17 @@ public void testThatBindRequestReturnsSimpleBindRequest() throws LDAPException {
         String userSearchBase = "o=sevenSeas";
         final Settings.Builder realmSettings = Settings.builder()
                 .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("bind_dn", "cn=ironman")
-                .put("user_search.base_dn", userSearchBase);
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN), "cn=ironman")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase);
         final boolean useLegacyBindPassword = configureBindPassword(realmSettings);
-        RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings,
+        RealmConfig config = new RealmConfig(REALM_IDENTIFIER,
+                mergeSettings(realmSettings.build(), globalSettings),
                 TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
         try (LdapUserSearchSessionFactory searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool)) {
             assertThat(searchSessionFactory.bindCredentials, notNullValue());
             assertThat(searchSessionFactory.bindCredentials.getBindDN(), is("cn=ironman"));
         }
-        assertDeprecationWarnings(false, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), false, useLegacyBindPassword);
     }
 
     public void testThatConnectErrorIsNotThrownOnConstruction() throws Exception {
@@ -536,17 +545,16 @@ public void testThatConnectErrorIsNotThrownOnConstruction() throws Exception {
         final Settings.Builder ldapSettingsBuilder = Settings.builder()
                 .put(LdapTestCase.buildLdapSettings(new String[]{ldapUrl}, Strings.EMPTY_ARRAY,
                         groupSearchBase, LdapSearchScope.SUB_TREE))
-                .put("user_search.base_dn", userSearchBase)
-                .put("bind_dn", "ironman@ad.test.elasticsearch.com")
-                .put("user_search.attribute", "cn")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase)
+                .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN), "ironman@ad.test.elasticsearch.com")
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE), "cn")
                 .put("timeout.tcp_connect", "500ms")
                 .put("type", "ldap")
                 .put("user_search.pool.health_check.enabled", false)
-                .put("user_search.pool.enabled", randomBoolean());
+                .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean());
         final boolean useLegacyBindPassword = configureBindPassword(ldapSettingsBuilder);
-        RealmConfig config = new RealmConfig("ldap_realm", ldapSettingsBuilder.build(), globalSettings,
-                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
+        RealmConfig config = getRealmConfig(ldapSettingsBuilder);
         LdapUserSearchSessionFactory searchSessionFactory = null;
         try {
             searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
@@ -556,16 +564,20 @@ public void testThatConnectErrorIsNotThrownOnConstruction() throws Exception {
             }
         }
 
-        assertDeprecationWarnings(true, useLegacyBindPassword);
+        assertDeprecationWarnings(config.identifier(), true, useLegacyBindPassword);
     }
 
-    private void assertDeprecationWarnings(boolean useAttribute, boolean legacyBindPassword) {
+    private void assertDeprecationWarnings(RealmConfig.RealmIdentifier realmIdentifier, boolean useAttribute, boolean legacyBindPassword) {
         List<Setting<?>> deprecatedSettings = new ArrayList<>();
         if (useAttribute) {
-            deprecatedSettings.add(LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE);
+            deprecatedSettings.add(LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE
+                    .getConcreteSettingForNamespace(realmIdentifier.getName())
+            );
         }
         if (legacyBindPassword) {
-            deprecatedSettings.add(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD);
+            deprecatedSettings.add(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD
+                    .apply(realmIdentifier.getType())
+                    .getConcreteSettingForNamespace(realmIdentifier.getName()));
         }
         if (deprecatedSettings.size() > 0) {
             assertSettingDeprecationsAndWarnings(deprecatedSettings.toArray(new Setting[deprecatedSettings.size()]));
@@ -575,9 +587,10 @@ private void assertDeprecationWarnings(boolean useAttribute, boolean legacyBindP
     private boolean configureBindPassword(Settings.Builder builder) {
         final boolean useLegacyBindPassword = randomBoolean();
         if (useLegacyBindPassword) {
-            builder.put("bind_password", "pass");
+            builder.put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), "pass");
         } else {
-            builder.setSecureSettings(newSecureSettings("secure_bind_password", "pass"));
+            final String secureKey = getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.SECURE_BIND_PASSWORD);
+            builder.setSecureSettings(newSecureSettings(secureKey, "pass"));
         }
         return useLegacyBindPassword;
     }
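The reworked `assertDeprecationWarnings` above can no longer hand the setting definitions straight to the deprecation check: under the per-type namespace, `SEARCH_ATTRIBUTE` is an affix setting and `LEGACY_BIND_PASSWORD` comes from a per-type factory, so each must first be resolved to the concrete setting for the realm under test. A sketch of the affix-setting mechanics; the declaration is illustrative and not the actual `LdapUserSearchSessionFactorySettings` definition:

----
import org.elasticsearch.common.settings.Setting;

public final class AffixSettingExample {
    // Illustrative affix setting: the realm name is a namespace "hole" in the key.
    static final Setting.AffixSetting<String> SEARCH_ATTRIBUTE_SKETCH = Setting.affixKeySetting(
            "xpack.security.authc.realms.ldap.", "user_search.attribute",
            key -> Setting.simpleString(key, Setting.Property.NodeScope, Setting.Property.Deprecated));

    public static void main(String[] args) {
        // Fill the namespace with a concrete realm name to get a real, checkable setting.
        Setting<String> concrete = SEARCH_ATTRIBUTE_SKETCH.getConcreteSettingForNamespace("ldap_realm");
        // Prints: xpack.security.authc.realms.ldap.ldap_realm.user_search.attribute
        System.out.println(concrete.getKey());
    }
}
----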
org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; import org.junit.After; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -32,6 +38,7 @@ public class SearchGroupsResolverInMemoryTests extends LdapTestCase { private static final String WILLIAM_BUSH = "cn=William Bush,ou=people,o=sevenSeas"; + public static final RealmConfig.RealmIdentifier REALM_IDENTIFIER = new RealmConfig.RealmIdentifier("ldap", "ldap1"); private LDAPConnection connection; @After @@ -54,10 +61,10 @@ public void testSearchTimeoutIsFailure() throws Exception { connect(options); final Settings settings = Settings.builder() - .put("group_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.BASE_DN), "ou=groups,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) .build(); - final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final SearchGroupsResolver resolver = new SearchGroupsResolver(getConfig(settings)); final PlainActionFuture> future = new PlainActionFuture<>(); resolver.resolve(connection, WILLIAM_BUSH, TimeValue.timeValueSeconds(30), logger, null, future); @@ -74,8 +81,8 @@ public void testResolveWithDefaultUserAttribute() throws Exception { connect(new LDAPConnectionOptions()); Settings settings = Settings.builder() - .put("group_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.BASE_DN), "ou=groups,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) .build(); final List groups = resolveGroups(settings, WILLIAM_BUSH); @@ -90,8 +97,8 @@ public void testResolveWithExplicitDnAttribute() throws Exception { connect(new LDAPConnectionOptions()); Settings settings = Settings.builder() - .put("group_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.user_attribute", "dn") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.BASE_DN), "ou=groups,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "dn") .build(); final List groups = resolveGroups(settings, WILLIAM_BUSH); @@ -106,8 +113,8 @@ public void testResolveWithMissingAttribute() throws Exception { connect(new LDAPConnectionOptions()); Settings settings = Settings.builder() - .put("group_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.user_attribute", "no-such-attribute") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.BASE_DN), "ou=groups,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "no-such-attribute") .build(); final List groups = resolveGroups(settings, WILLIAM_BUSH); @@ -122,13 +129,13 @@ public void testSearchWithConnectionPoolForOneResult() throws Exception { new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"), 0, 20))) { final Settings 
settings = Settings.builder() - .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") - .put("bind_password", "pass") - .put("user_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.base_dn", "ou=groups,o=sevenSeas") - .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.BIND_DN), + "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), "pass") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.BASE_DN), "ou=groups,o=sevenSeas") + .put(getFullSettingKey(REALM_IDENTIFIER, SearchGroupsResolverSettings.SCOPE), LdapSearchScope.SUB_TREE) .build(); - final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final SearchGroupsResolver resolver = new SearchGroupsResolver(getConfig(settings)); final PlainActionFuture<List<String>> future = new PlainActionFuture<>(); resolver.resolve(pool, "cn=Moultrie Crystal,ou=people,o=sevenSeas", @@ -150,10 +157,17 @@ private void connect(LDAPConnectionOptions options) throws LDAPException { } private List<String> resolveGroups(Settings settings, String userDn) { - final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final SearchGroupsResolver resolver = new SearchGroupsResolver(getConfig(settings)); final PlainActionFuture<List<String>> future = new PlainActionFuture<>(); resolver.resolve(connection, userDn, TimeValue.timeValueSeconds(30), logger, null, future); return future.actionGet(); } + private RealmConfig getConfig(Settings settings) { + if (settings.hasValue("path.home") == false) { + settings = Settings.builder().put(settings).put("path.home", createTempDir()).build(); + } + return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings)); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java index 00e111fa9d8f6..f0420339dc75c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java @@ -10,61 +10,72 @@ import com.unboundid.ldap.sdk.RoundRobinServerSet; import com.unboundid.ldap.sdk.ServerSet; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class LdapLoadBalancingTests extends ESTestCase { + private static final RealmConfig.RealmIdentifier REALM_IDENTIFIER = new RealmConfig.RealmIdentifier("ldap", "ldap1"); + public void testBadTypeThrowsException() { String badType = randomAlphaOfLengthBetween(3, 12); - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "."
+ - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, badType).build(); + Settings settings = getSettings(badType); try { - LdapLoadBalancing.serverSet(null, null, settings, null, null); + LdapLoadBalancing.serverSet(null, null, getConfig(settings), null, null); fail("using type [" + badType + "] should have thrown an exception"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("unknown load balance type")); } } + public Settings getSettings(String loadBalancerType) { + return Settings.builder() + .put(getFullSettingKey(REALM_IDENTIFIER, LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING), loadBalancerType) + .put("path.home", createTempDir()) + .build(); + } + public void testFailoverServerSet() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "failover").build(); - String[] address = new String[] { "localhost" }; - int[] ports = new int[] { 26000 }; - ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + Settings settings = getSettings("failover"); + String[] address = new String[]{"localhost"}; + int[] ports = new int[]{26000}; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, getConfig(settings), null, null); assertThat(serverSet, instanceOf(FailoverServerSet.class)); - assertThat(((FailoverServerSet)serverSet).reOrderOnFailover(), is(true)); + assertThat(((FailoverServerSet) serverSet).reOrderOnFailover(), is(true)); } public void testDnsFailover() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_failover").build(); - String[] address = new String[] { "foo.bar" }; - int[] ports = new int[] { 26000 }; - ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + Settings settings = getSettings("dns_failover"); + String[] address = new String[]{"foo.bar"}; + int[] ports = new int[]{26000}; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, getConfig(settings), null, null); assertThat(serverSet, instanceOf(RoundRobinDNSServerSet.class)); - assertThat(((RoundRobinDNSServerSet)serverSet).getAddressSelectionMode(), is(RoundRobinDNSServerSet.AddressSelectionMode.FAILOVER)); + assertThat(((RoundRobinDNSServerSet) serverSet).getAddressSelectionMode(), + is(RoundRobinDNSServerSet.AddressSelectionMode.FAILOVER)); } public void testDnsFailoverBadArgs() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." 
+ - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_failover").build(); - String[] addresses = new String[] { "foo.bar", "localhost" }; - int[] ports = new int[] { 26000, 389 }; + final Settings settings = getSettings("dns_failover"); + final RealmConfig config = getConfig(settings); + String[] addresses = new String[]{"foo.bar", "localhost"}; + int[] ports = new int[]{26000, 389}; try { - LdapLoadBalancing.serverSet(addresses, ports, settings, null, null); + LdapLoadBalancing.serverSet(addresses, ports, config, null, null); fail("dns server sets only support a single URL"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("single url")); } try { - LdapLoadBalancing.serverSet(new String[] { "127.0.0.1" }, new int[] { 389 }, settings, null, null); + LdapLoadBalancing.serverSet(new String[]{"127.0.0.1"}, new int[]{389}, config, null, null); fail("dns server sets only support DNS names"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("DNS name")); @@ -72,42 +83,44 @@ public void testDnsFailoverBadArgs() { } public void testRoundRobin() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "round_robin").build(); - String[] address = new String[] { "localhost", "foo.bar" }; - int[] ports = new int[] { 389, 389 }; - ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + Settings settings = getSettings("round_robin"); + String[] address = new String[]{"localhost", "foo.bar"}; + int[] ports = new int[]{389, 389}; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, getConfig(settings), null, null); assertThat(serverSet, instanceOf(RoundRobinServerSet.class)); } public void testDnsRoundRobin() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_round_robin").build(); - String[] address = new String[] { "foo.bar" }; - int[] ports = new int[] { 26000 }; - ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + Settings settings = getSettings("dns_round_robin"); + String[] address = new String[]{"foo.bar"}; + int[] ports = new int[]{26000}; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, getConfig(settings), null, null); assertThat(serverSet, instanceOf(RoundRobinDNSServerSet.class)); - assertThat(((RoundRobinDNSServerSet)serverSet).getAddressSelectionMode(), + assertThat(((RoundRobinDNSServerSet) serverSet).getAddressSelectionMode(), is(RoundRobinDNSServerSet.AddressSelectionMode.ROUND_ROBIN)); } public void testDnsRoundRobinBadArgs() { - Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." 
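// [Editorial aside, not part of the patch] These tests keep the pre-existing try/fail/catch style;
// later in this patch (PkiRealmTests) the same pattern is rewritten with ESTestCase.expectThrows.
// A sketch of the equivalent form for the dns_failover case above, under the same assertions:
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
        () -> LdapLoadBalancing.serverSet(addresses, ports, config, null, null));
assertThat(e.getMessage(), containsString("single url"));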
+ - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_round_robin").build(); - String[] addresses = new String[] { "foo.bar", "localhost" }; - int[] ports = new int[] { 26000, 389 }; + final Settings settings = getSettings("dns_round_robin"); + final RealmConfig config = getConfig(settings); + String[] addresses = new String[]{"foo.bar", "localhost"}; + int[] ports = new int[]{26000, 389}; try { - LdapLoadBalancing.serverSet(addresses, ports, settings, null, null); + LdapLoadBalancing.serverSet(addresses, ports, config, null, null); fail("dns server sets only support a single URL"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("single url")); } try { - LdapLoadBalancing.serverSet(new String[] { "127.0.0.1" }, new int[] { 389 }, settings, null, null); + LdapLoadBalancing.serverSet(new String[]{"127.0.0.1"}, new int[]{389}, config, null, null); fail("dns server sets only support DNS names"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("DNS name")); } } + + public RealmConfig getConfig(Settings settings) { + return new RealmConfig(REALM_IDENTIFIER, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings)); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java index 74502b4e2b33b..bb54d6972bfe6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java @@ -5,17 +5,23 @@ */ package org.elasticsearch.xpack.security.authc.ldap.support; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; - import com.unboundid.ldap.sdk.Attribute; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; @@ -30,7 +36,15 @@ public class LdapMetaDataResolverTests extends ESTestCase { private LdapMetaDataResolver resolver; public void testParseSettings() throws Exception { - resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "cn", "uid").build(), false); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "my_ldap"); + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .putList(RealmSettings.getFullSettingKey(realmId.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), + "cn", "uid") + .build(); + RealmConfig config = new 
RealmConfig(realmId, + settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings)); + resolver = new LdapMetaDataResolver(config, false); assertThat(resolver.attributeNames(), arrayContaining("cn", "uid")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java index 38f5c7871dc2b..cf3840fb4ce99 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java @@ -13,7 +13,6 @@ import com.unboundid.ldap.sdk.LDAPInterface; import com.unboundid.ldap.sdk.LDAPURL; import com.unboundid.ldap.sdk.SimpleBindRequest; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; @@ -23,13 +22,16 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import org.junit.After; @@ -43,12 +45,13 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING; import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; public abstract class LdapTestCase extends ESTestCase { - private static final String USER_DN_TEMPLATES_SETTING_KEY = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey(); + protected static final RealmConfig.RealmIdentifier REALM_IDENTIFIER = new RealmConfig.RealmIdentifier("ldap", "ldap1"); static int numberOfLdapServers; protected InMemoryDirectoryServer[] ldapServers; @@ -93,11 +96,11 @@ protected String[] ldapUrls() throws LDAPException { } public static Settings buildLdapSettings(String ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { - return buildLdapSettings(new String[] { ldapUrl }, new String[] { userTemplate }, groupSearchBase, scope); + return buildLdapSettings(new String[]{ldapUrl}, new String[]{userTemplate}, groupSearchBase, scope); } public static Settings buildLdapSettings(String[] ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { - return buildLdapSettings(ldapUrl, new String[] { userTemplate }, groupSearchBase, scope); + return buildLdapSettings(ldapUrl, new 
String[]{userTemplate}, groupSearchBase, scope); } public static Settings buildLdapSettings(String[] ldapUrl, String[] userTemplate, String groupSearchBase, LdapSearchScope scope) { @@ -115,39 +118,45 @@ public static Settings buildLdapSettings(String[] ldapUrl, String[] userTemplate String groupSearchBase, LdapSearchScope scope, LdapLoadBalancing serverSetType, boolean ignoreReferralErrors) { + return buildLdapSettings(REALM_IDENTIFIER, ldapUrl, userTemplate, groupSearchBase, scope, serverSetType, ignoreReferralErrors); + } + + public static Settings buildLdapSettings(RealmConfig.RealmIdentifier realmId, String[] ldapUrl, String[] userTemplate, + String groupSearchBase, LdapSearchScope scope, LdapLoadBalancing serverSetType, + boolean ignoreReferralErrors) { Settings.Builder builder = Settings.builder() - .putList(URLS_SETTING, ldapUrl) - .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate) - .put(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, TimeValue.timeValueSeconds(1L)) - .put(SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING.getKey(), ignoreReferralErrors) - .put("group_search.base_dn", groupSearchBase) - .put("group_search.scope", scope); + .putList(getFullSettingKey(realmId, URLS_SETTING), ldapUrl) + .putList(getFullSettingKey(realmId.getName(), LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING), userTemplate) + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING), TimeValue.timeValueSeconds(1L)) + .put(getFullSettingKey(realmId, SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING), ignoreReferralErrors) + .put(getFullSettingKey(realmId, SearchGroupsResolverSettings.BASE_DN), groupSearchBase) + .put(getFullSettingKey(realmId, SearchGroupsResolverSettings.SCOPE), scope); if (serverSetType != null) { - builder.put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + - LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, serverSetType.toString()); + builder.put(getFullSettingKey(realmId, LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING), serverSetType.toString()); } return builder.build(); } public static Settings buildLdapSettings(String[] ldapUrl, String userTemplate, boolean hostnameVerification) { Settings.Builder builder = Settings.builder() - .putList(URLS_SETTING, ldapUrl) - .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate); + .putList(getFullSettingKey(REALM_IDENTIFIER, URLS_SETTING), ldapUrl) + .putList(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING), userTemplate); if (randomBoolean()) { - builder.put("ssl.verification_mode", hostnameVerification ? VerificationMode.FULL : VerificationMode.CERTIFICATE); + builder.put(getFullSettingKey(REALM_IDENTIFIER, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), + hostnameVerification ? 
VerificationMode.FULL : VerificationMode.CERTIFICATE); } else { - builder.put(HOSTNAME_VERIFICATION_SETTING, hostnameVerification); + builder.put(getFullSettingKey(REALM_IDENTIFIER, HOSTNAME_VERIFICATION_SETTING), hostnameVerification); } return builder.build(); } protected DnRoleMapper buildGroupAsRoleMapper(ResourceWatcherService resourceWatcherService) { Settings settings = Settings.builder() - .put(DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(), true) + .put(getFullSettingKey(REALM_IDENTIFIER, DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING), true) + .put("path.home", createTempDir()) .build(); - Settings global = Settings.builder().put("path.home", createTempDir()).build(); - RealmConfig config = new RealmConfig("ldap1", settings, global, TestEnvironment.newEnvironment(global), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(REALM_IDENTIFIER, settings, + TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY)); return new DnRoleMapper(config, resourceWatcherService); } @@ -179,12 +188,12 @@ public Void run() { if (conn instanceof LDAPConnection) { assertTrue(((LDAPConnection) conn).isConnected()); assertEquals(bindRequest.getBindDN(), - ((SimpleBindRequest)((LDAPConnection) conn).getLastBindRequest()).getBindDN()); + ((SimpleBindRequest) ((LDAPConnection) conn).getLastBindRequest()).getBindDN()); ((LDAPConnection) conn).reconnect(); } else if (conn instanceof LDAPConnectionPool) { try (LDAPConnection c = ((LDAPConnectionPool) conn).getConnection()) { assertTrue(c.isConnected()); - assertEquals(bindRequest.getBindDN(), ((SimpleBindRequest)c.getLastBindRequest()).getBindDN()); + assertEquals(bindRequest.getBindDN(), ((SimpleBindRequest) c.getLastBindRequest()).getBindDN()); c.reconnect(); } } @@ -196,4 +205,15 @@ public Void run() { } }); } + + protected Settings mergeSettings(Settings local, Settings global) { + final Settings.Builder builder = Settings.builder() + .put(global, true) + .put(local, false); + final Settings.Builder tmpLocal = Settings.builder().put(local, true); + SecuritySettingsSource.addSecureSettings(builder, + mainSecure -> SecuritySettingsSource.addSecureSettings(tmpLocal, localSecure -> mainSecure.merge(localSecure)) + ); + return builder.build(); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index f8bfa241736b9..11d1e4889b823 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -237,8 +237,8 @@ private TestSessionFactory createSessionFactory(LdapLoadBalancing loadBalancing) String userTemplate = "cn={0},ou=people,o=sevenSeas"; Settings settings = buildLdapSettings(ldapUrls(), new String[] { userTemplate }, groupSearchBase, LdapSearchScope.SUB_TREE, loadBalancing); - Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); - RealmConfig config = new RealmConfig("test-session-factory", settings, globalSettings, + Settings globalSettings = Settings.builder().put("path.home", createTempDir()).put(settings).build(); + RealmConfig config = new RealmConfig(REALM_IDENTIFIER, globalSettings, 
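// [Editorial aside, not part of the patch] The recurring change in these files is the RealmConfig
// constructor: the old five-argument form taking separate realm-local and global Settings is
// replaced by a four-argument form taking one node-level Settings object (which must contain
// path.home) plus a RealmConfig.RealmIdentifier. A minimal sketch of the new shape, as used
// throughout this patch; the identifier and temp dir here are illustrative only:
Settings settings = Settings.builder().put("path.home", createTempDir()).build();
RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", "ldap1"),
        settings, TestEnvironment.newEnvironment(settings), new ThreadContext(settings));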
TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); return new TestSessionFactory(config, new SSLService(Settings.EMPTY, TestEnvironment.newEnvironment(config.globalSettings())), threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java index 6540be6a5eb14..eb91fe04e057b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java @@ -19,15 +19,16 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.junit.After; import org.junit.Before; -import java.util.function.Function; +import java.nio.file.Path; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -49,8 +50,8 @@ public void shutdown() throws InterruptedException { public void testConnectionFactoryReturnsCorrectLDAPConnectionOptionsWithDefaultSettings() throws Exception { final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); - RealmConfig realmConfig = new RealmConfig("conn settings", Settings.EMPTY, environment.settings(), environment, - new ThreadContext(Settings.EMPTY)); + RealmConfig realmConfig = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", "conn_settings"), + environment.settings(), environment, new ThreadContext(Settings.EMPTY)); LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), logger); assertThat(options.followReferrals(), is(equalTo(true))); @@ -61,49 +62,52 @@ public void testConnectionFactoryReturnsCorrectLDAPConnectionOptionsWithDefaultS } public void testConnectionFactoryReturnsCorrectLDAPConnectionOptions() throws Exception { + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "conn_settings"); + final Path pathHome = createTempDir(); Settings settings = Settings.builder() - .put(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, "10ms") - .put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, "false") - .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "20ms") - .put(SessionFactorySettings.FOLLOW_REFERRALS_SETTING, "false") + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING), "10ms") + .put(getFullSettingKey(realmId, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), "false") + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "20ms") + .put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), "false") + .put("path.home", pathHome) .build(); - final String realmName = 
"conn_settings"; - final Function globalSettings = realmSettings -> Settings.builder() - .put(realmSettings) - .normalizePrefix(RealmSettings.PREFIX + realmName + ".") - .put("path.home", createTempDir()) - .build(); - final Environment environment = TestEnvironment.newEnvironment(globalSettings.apply(settings)); - final Function sslService = realmSettings -> new SSLService(globalSettings.apply(realmSettings), environment); - - final ThreadContext threadContext = new ThreadContext(environment.settings()); - RealmConfig realmConfig = new RealmConfig(realmName, settings, environment.settings(), environment, threadContext); - LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, sslService.apply(settings), logger); + final Environment environment = TestEnvironment.newEnvironment(settings); + RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); assertThat(options.followReferrals(), is(equalTo(false))); assertThat(options.allowConcurrentSocketFactoryUse(), is(equalTo(true))); assertThat(options.getConnectTimeoutMillis(), is(equalTo(10))); assertThat(options.getResponseTimeoutMillis(), is(equalTo(20L))); assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); - assertWarnings("the setting [xpack.security.authc.realms." + realmName + ".hostname_verification] has been deprecated" + - " and will be removed in a future version. use [xpack.security.authc.realms." + realmName + ".ssl.verification_mode] instead"); + assertWarnings("the setting [xpack.security.authc.realms.ldap.conn_settings.hostname_verification] has been deprecated and will be " + + "removed in a future version. 
use [xpack.security.authc.realms.ldap.conn_settings.ssl.verification_mode] instead"); - settings = Settings.builder().put("ssl.verification_mode", VerificationMode.CERTIFICATE).build(); - realmConfig = new RealmConfig(realmName, settings, globalSettings.apply(settings), environment, threadContext); - options = SessionFactory.connectionOptions(realmConfig, sslService.apply(settings), logger); + settings = Settings.builder() + .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) + .put("path.home", pathHome) + .build(); + realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); // Can't run in FIPS with verification_mode none, disable this check instead of duplicating the test case if (inFipsJvm() == false) { - settings = Settings.builder().put("ssl.verification_mode", VerificationMode.NONE).build(); - realmConfig = new RealmConfig(realmName, settings, environment.settings(), environment, threadContext); - options = SessionFactory.connectionOptions(realmConfig, sslService.apply(settings), logger); + settings = Settings.builder() + .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.NONE) + .put("path.home", pathHome) + .build(); + realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); } - settings = Settings.builder().put("ssl.verification_mode", VerificationMode.FULL).build(); - realmConfig = new RealmConfig(realmName, settings, environment.settings(), environment, threadContext); - options = SessionFactory.connectionOptions(realmConfig, sslService.apply(settings), logger); + settings = Settings.builder() + .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.FULL) + .put("path.home", pathHome) + .build(); + realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger); assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class))); } @@ -119,8 +123,12 @@ public void testUnauthenticatedSessionThrowsUnsupportedOperationException() thro private SessionFactory createSessionFactory() { Settings global = Settings.builder().put("path.home", createTempDir()).build(); - final RealmConfig realmConfig = new RealmConfig("_name", Settings.builder().put("url", "ldap://localhost:389").build(), - global, TestEnvironment.newEnvironment(global), new ThreadContext(Settings.EMPTY)); + final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "_name"); + final RealmConfig realmConfig = new RealmConfig(realmIdentifier, mergeSettings( + Settings.builder() + .put(getFullSettingKey(realmIdentifier, SessionFactorySettings.URLS_SETTING), "ldap://localhost:389") + .build(), global), + TestEnvironment.newEnvironment(global), new ThreadContext(Settings.EMPTY)); return new SessionFactory(realmConfig, null, threadPool) { @Override @@ -129,4 +137,8 @@ public void session(String user, 
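// [Editorial aside, not part of the patch] The rewritten deprecation message above also documents
// the key migration this patch performs: realm settings move from
// "xpack.security.authc.realms.<name>.*" to "xpack.security.authc.realms.<type>.<name>.*", so for
// this test realm (type "ldap", name "conn_settings") the deprecated key is
// "xpack.security.authc.realms.ldap.conn_settings.hostname_verification" and the preferred key is
// "xpack.security.authc.realms.ldap.conn_settings.ssl.verification_mode", both taken from the
// assertWarnings call above. The preferred key is set via the realm SSL setting, as in this hunk:
Settings preferred = Settings.builder()
        .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE)
        .build();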
SecureString password, ActionListener - secureSettings.setString("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only")); + secureSettings.setString("xpack.security.authc.realms.pki.pki1.truststore.secure_password", "truststore-testnode-only")); return builder.build(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java index 4fb94c7494971..6e1a2480d2bcb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java @@ -52,19 +52,17 @@ protected Settings nodeSettings() { .put(super.nodeSettings()) .put("xpack.security.http.ssl.enabled", true) .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) - .put("xpack.security.authc.realms.file.type", "file") - .put("xpack.security.authc.realms.file.order", "0") - .put("xpack.security.authc.realms.pki1.type", "pki") - .put("xpack.security.authc.realms.pki1.order", "1") - .put("xpack.security.authc.realms.pki1.truststore.path", + .put("xpack.security.authc.realms.file.file.order", "0") + .put("xpack.security.authc.realms.pki.pki1.order", "1") + .put("xpack.security.authc.realms.pki.pki1.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) - .put("xpack.security.authc.realms.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) .put("transport.profiles.want_client_auth.port", randomClientPortRange) .put("transport.profiles.want_client_auth.bind_host", "localhost") .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); SecuritySettingsSource.addSecureSettings(builder, secureSettings -> - secureSettings.setString("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only")); + secureSettings.setString("xpack.security.authc.realms.pki.pki1.truststore.secure_password", "truststore-testnode-only")); return builder.build(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java index 45ccaf6a14725..4027c50a4feae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -18,21 +18,19 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; -import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import 
org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.junit.Before; import org.mockito.Mockito; import javax.security.auth.x500.X500Principal; - import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -61,6 +59,7 @@ public class PkiRealmTests extends ESTestCase { + public static final String REALM_NAME = "my_pki"; private Settings globalSettings; private XPackLicenseState licenseState; @@ -74,7 +73,7 @@ public void setup() throws Exception { } public void testTokenSupport() { - RealmConfig config = new RealmConfig("", Settings.EMPTY, globalSettings, + RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); PkiRealm realm = new PkiRealm(config, mock(UserRoleMapper.class)); @@ -86,9 +85,9 @@ public void testTokenSupport() { public void testExtractToken() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); - PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[]{certificate}); + PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), globalSettings, + TestEnvironment.newEnvironment(globalSettings), threadContext), mock(UserRoleMapper.class)); X509AuthenticationToken token = realm.token(threadContext); assertThat(token, is(notNullValue())); @@ -161,8 +160,10 @@ private UserRoleMapper buildRoleMapper(Set<String> roles, String dn) { } private PkiRealm buildRealm(UserRoleMapper roleMapper, Settings realmSettings, Realm...
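// [Editorial aside, not part of the patch] buildRealm, continued below, layers the realm-specific
// test settings over the node-level globals with a private mergeSettings helper (defined at the
// end of this file's diff as Settings.builder().put(global).put(local).build()) so that a single
// Settings object can feed the new RealmConfig constructor. A sketch of the idiom as this patch
// uses it:
final Settings merged = mergeSettings(realmSettings, globalSettings);
final RealmConfig pkiConfig = new RealmConfig(new RealmConfig.RealmIdentifier("pki", REALM_NAME),
        merged, TestEnvironment.newEnvironment(merged), new ThreadContext(merged));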
otherRealms) { - PkiRealm realm = new PkiRealm(new RealmConfig("", realmSettings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), roleMapper); + final Settings settings = mergeSettings(realmSettings, globalSettings); + final RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("pki", REALM_NAME), settings, + TestEnvironment.newEnvironment(settings), new ThreadContext(settings)); + PkiRealm realm = new PkiRealm(config, roleMapper); List<Realm> allRealms = CollectionUtils.arrayAsArrayList(otherRealms); allRealms.add(realm); Collections.shuffle(allRealms, random()); @@ -185,8 +186,12 @@ public void testCustomUsernamePattern() throws Exception { ThreadContext threadContext = new ThreadContext(globalSettings); X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = mock(UserRoleMapper.class); - PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings, - TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); + final Settings realmSettings = Settings.builder() + .put("xpack.security.authc.realms.pki.my_pki.username_pattern", "OU=(.*?),") + .build(); + PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), + mergeSettings(realmSettings, globalSettings), + TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1]; @@ -210,14 +215,16 @@ public void testVerificationUsingATruststore() throws Exception { UserRoleMapper roleMapper = mock(UserRoleMapper.class); MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("truststore.secure_password", "testnode"); + secureSettings.setString("xpack.security.authc.realms.pki.my_pki.truststore.secure_password", "testnode"); Settings settings = Settings.builder() - .put("truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put("xpack.security.authc.realms.pki.my_pki.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) .setSecureSettings(secureSettings) .build(); ThreadContext threadContext = new ThreadContext(globalSettings); - PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - threadContext), roleMapper); + PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "my_pki"), + mergeSettings(settings, globalSettings), + TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener<Set<String>> listener = (ActionListener<Set<String>>) invocation.getArguments()[1]; @@ -241,15 +248,16 @@ public void testVerificationFailsUsingADifferentTruststore() throws Exception { X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); UserRoleMapper roleMapper = mock(UserRoleMapper.class); MockSecureSettings secureSettings = new MockSecureSettings(); -
secureSettings.setString("xpack.security.authc.realms.pki.mypki.truststore.secure_password", "testnode-client-profile"); Settings settings = Settings.builder() - .put("truststore.path", + .put("xpack.security.authc.realms.pki.mypki.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) .setSecureSettings(secureSettings) .build(); final ThreadContext threadContext = new ThreadContext(globalSettings); - PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - threadContext), roleMapper); + PkiRealm realm = new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "mypki"), + mergeSettings(settings, globalSettings), + TestEnvironment.newEnvironment(globalSettings), threadContext), roleMapper); realm.initialize(Collections.emptyList(), licenseState); Mockito.doAnswer(invocation -> { ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; @@ -268,28 +276,30 @@ public void testVerificationFailsUsingADifferentTruststore() throws Exception { public void testTruststorePathWithoutPasswordThrowsException() throws Exception { Settings settings = Settings.builder() - .put("truststore.path", + .put("xpack.security.authc.realms.pki.mypki.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) .build(); - try { - new PkiRealm(new RealmConfig("mypki", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); - fail("exception should have been thrown"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Neither [xpack.security.authc.realms.mypki.truststore.secure_password] or [" + - "xpack.security.authc.realms.mypki.truststore.password] is configured")); - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "mypki"), + mergeSettings(settings, globalSettings), + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)), mock(UserRoleMapper.class)) + ); + assertThat(e.getMessage(), containsString("Neither [xpack.security.authc.realms.pki.mypki.truststore.secure_password] or [" + + "xpack.security.authc.realms.pki.mypki.truststore.password] is configured")); } public void testTruststorePathWithLegacyPasswordDoesNotThrow() throws Exception { Settings settings = Settings.builder() - .put("truststore.path", + .put("xpack.security.authc.realms.pki.mypki.truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) - .put("truststore.password", "testnode-client-profile") + .put("xpack.security.authc.realms.pki.mypki.truststore.password", "testnode-client-profile") .build(); - new PkiRealm(new RealmConfig("mypki", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); - assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword }); + new PkiRealm(new RealmConfig(new RealmConfig.RealmIdentifier("pki", "mypki"), + mergeSettings(settings, globalSettings), + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); + assertSettingDeprecationsAndWarnings(new Setting[]{ + 
PkiRealmSettings.LEGACY_TRUST_STORE_PASSWORD.getConcreteSettingForNamespace("mypki") }); } public void testCertificateWithOnlyCnExtractsProperly() throws Exception { @@ -297,7 +307,7 @@ public void testCertificateWithOnlyCnExtractsProperly() throws Exception { X500Principal principal = new X500Principal("CN=PKI Client"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); assertThat(token, notNullValue()); assertThat(token.principal(), is("PKI Client")); @@ -309,7 +319,7 @@ public void testCertificateWithCnAndOuExtractsProperly() throws Exception { X500Principal principal = new X500Principal("CN=PKI Client, OU=Security"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); assertThat(token, notNullValue()); assertThat(token.principal(), is("PKI Client")); @@ -321,7 +331,7 @@ public void testCertificateWithCnInMiddle() throws Exception { X500Principal principal = new X500Principal("EMAILADDRESS=pki@elastic.co, CN=PKI Client, OU=Security"); when(certificate.getSubjectX500Principal()).thenReturn(principal); - X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[]{certificate}, Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); assertThat(token, notNullValue()); assertThat(token.principal(), is("PKI Client")); @@ -330,28 +340,30 @@ public void testCertificateWithCnInMiddle() throws Exception { public void testPKIRealmSettingsPassValidation() throws Exception { Settings settings = Settings.builder() - .put("xpack.security.authc.realms.pki1.type", "pki") - .put("xpack.security.authc.realms.pki1.truststore.path", "/foo/bar") - .put("xpack.security.authc.realms.pki1.truststore.password", "supersecret") + .put("xpack.security.authc.realms.pki.pki1.order", "1") + .put("xpack.security.authc.realms.pki.pki1.truststore.path", "/foo/bar") + .put("xpack.security.authc.realms.pki.pki1.truststore.password", "supersecret") .build(); List<Setting<?>> settingList = new ArrayList<>(); - RealmSettings.addSettings(settingList, Collections.emptyList()); + settingList.addAll(InternalRealmsSettings.getSettings()); ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(settingList)); clusterSettings.validate(settings, false); - assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword }); + assertSettingDeprecationsAndWarnings(new Setting[]{ + PkiRealmSettings.LEGACY_TRUST_STORE_PASSWORD.getConcreteSettingForNamespace("pki1") + }); } public void testDelegatedAuthorization() throws Exception { final X509AuthenticationToken token = buildToken(); - final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig("other_realm", Settings.EMPTY, globalSettings, - TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + final MockLookupRealm otherRealm = new MockLookupRealm(new RealmConfig(new RealmConfig.RealmIdentifier("mock", "other_realm"), +
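// [Editorial aside, not part of the patch] LEGACY_TRUST_STORE_PASSWORD is now consumed as an affix
// setting, so the tests above materialise the expected deprecated Setting for one concrete realm
// name via getConcreteSettingForNamespace. A sketch; the resolved key shown matches the legacy key
// named in the error message asserted earlier in this file:
Setting<?> expected = PkiRealmSettings.LEGACY_TRUST_STORE_PASSWORD.getConcreteSettingForNamespace("mypki");
// expected.getKey() == "xpack.security.authc.realms.pki.mypki.truststore.password"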
globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); final User lookupUser = new User(token.principal()); otherRealm.registerUser(lookupUser); final Settings realmSettings = Settings.builder() - .putList("authorization_realms", "other_realm") + .putList("xpack.security.authc.realms.pki." + REALM_NAME + ".authorization_realms", "other_realm") .build(); final UserRoleMapper roleMapper = buildRoleMapper(Collections.emptySet(), token.dn()); final PkiRealm pkiRealm = buildRealm(roleMapper, realmSettings, otherRealm); @@ -375,4 +387,9 @@ static X509Certificate readCert(Path path) throws Exception { return (X509Certificate) factory.generateCertificate(in); } } + + private static Settings mergeSettings(Settings local, Settings global) { + return Settings.builder().put(global).put(local).build(); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index 22071d6010d08..8d10f3ffb6946 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authc.saml; +import org.apache.logging.log4j.LogManager; import org.apache.xml.security.Init; import org.apache.xml.security.encryption.EncryptedData; import org.apache.xml.security.encryption.EncryptedKey; @@ -16,7 +17,6 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.hamcrest.Matchers; @@ -57,7 +57,6 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; - import java.io.IOException; import java.io.StringReader; import java.nio.charset.StandardCharsets; @@ -125,7 +124,7 @@ public class SamlAuthenticatorTests extends SamlTestCase { public static void init() throws Exception { assumeFalse("Can't run in a FIPS JVM, there is no DOM XMLSignature Factory so we can't sign XML documents", inFipsJvm()); // TODO: Refactor the signing to use org.opensaml.xmlsec.signature.support.Signer so that we can run the tests - SamlUtils.initialize(Loggers.getLogger(SamlAuthenticatorTests.class)); + SamlUtils.initialize(LogManager.getLogger(SamlAuthenticatorTests.class)); // Initialise Apache XML security so that the signDoc methods work correctly. 
Init.init(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java index 542bbbbdf3dc7..ad707bc028e54 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java @@ -5,15 +5,6 @@ */ package org.elasticsearch.xpack.security.authc.saml; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; -import java.time.Clock; -import java.util.Arrays; -import java.util.Collections; - import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; @@ -28,6 +19,15 @@ import org.opensaml.security.x509.X509Credential; import org.opensaml.xmlsec.signature.support.SignatureConstants; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.time.Clock; +import java.util.Arrays; +import java.util.Collections; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -208,10 +208,10 @@ private SamlLogoutRequestHandler buildHandler() throws Exception { final SpConfiguration sp = new SpConfiguration("https://sp.test/", "https://sp.test/saml/asc", LOGOUT_URL, signingConfiguration, Arrays.asList(spCredential), Collections.emptyList()); return new SamlLogoutRequestHandler( - clock, - idp, - sp, - TimeValue.timeValueSeconds(1) + clock, + idp, + sp, + TimeValue.timeValueSeconds(1) ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java index f2c91437c3e02..367921ad7635a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.authc.saml; import joptsimple.OptionSet; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.UserException; @@ -79,16 +78,15 @@ public void testDefaultOptions() throws Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") - .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + .put(RealmSettings.PREFIX + "saml.my_saml.order", 
1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout") + .put(RealmSettings.PREFIX + "saml.my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -151,12 +149,12 @@ public void testDefaultOptions() throws Exception { public void testFailIfMultipleRealmsExist() throws Exception { final Settings settings = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "saml_a.type", "saml") - .put(RealmSettings.PREFIX + "saml_a.sp.entity_id", "https://saml.a/") - .put(RealmSettings.PREFIX + "saml_a.sp.acs", "https://saml.a/") - .put(RealmSettings.PREFIX + "saml_b.type", "saml") - .put(RealmSettings.PREFIX + "saml_b.sp.entity_id", "https://saml.b/") - .put(RealmSettings.PREFIX + "saml_b.sp.acs", "https://saml.b/") + .put(RealmSettings.PREFIX + "saml.saml_a.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml_a.sp.entity_id", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml.saml_a.sp.acs", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml.saml_b.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml_b.sp.entity_id", "https://saml.b/") + .put(RealmSettings.PREFIX + "saml.saml_b.sp.acs", "https://saml.b/") .build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -175,12 +173,12 @@ public void testFailIfMultipleRealmsExist() throws Exception { public void testSpecifyRealmNameAsParameter() throws Exception { final Settings settings = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "saml_a.type", "saml") - .put(RealmSettings.PREFIX + "saml_a.sp.entity_id", "https://saml.a/") - .put(RealmSettings.PREFIX + "saml_a.sp.acs", "https://saml.a/acs") - .put(RealmSettings.PREFIX + "saml_b.type", "saml") - .put(RealmSettings.PREFIX + "saml_b.sp.entity_id", "https://saml.b/") - .put(RealmSettings.PREFIX + "saml_b.sp.acs", "https://saml.b/acs") + .put(RealmSettings.PREFIX + "saml.saml_a.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml_a.sp.entity_id", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml.saml_a.sp.acs", "https://saml.a/acs") + .put(RealmSettings.PREFIX + "saml.saml_b.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml_b.sp.entity_id", "https://saml.b/") + .put(RealmSettings.PREFIX + "saml.saml_b.sp.acs", "https://saml.b/acs") .build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -206,11 +204,11 @@ public void testSpecifyRealmNameAsParameter() throws Exception { public void testHandleAttributes() throws Exception { final Settings settings = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "saml1.type", "saml") - .put(RealmSettings.PREFIX + "saml1.sp.entity_id", "https://saml.example.com/") - .put(RealmSettings.PREFIX + 
"saml1.sp.acs", "https://saml.example.com/") - .put(RealmSettings.PREFIX + "saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") - .put(RealmSettings.PREFIX + "saml1.attributes.name", "displayName") + .put(RealmSettings.PREFIX + "saml.saml1.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml1.sp.entity_id", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml.saml1.sp.acs", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml.saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") + .put(RealmSettings.PREFIX + "saml.saml1.attributes.name", "displayName") .build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -260,10 +258,10 @@ public void testHandleAttributes() throws Exception { public void testHandleAttributesInBatchMode() throws Exception { final Settings settings = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "saml1.type", "saml") - .put(RealmSettings.PREFIX + "saml1.sp.entity_id", "https://saml.example.com/") - .put(RealmSettings.PREFIX + "saml1.sp.acs", "https://saml.example.com/") - .put(RealmSettings.PREFIX + "saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") + .put(RealmSettings.PREFIX + "saml.saml1.type", "saml") + .put(RealmSettings.PREFIX + "saml.saml1.sp.entity_id", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml.saml1.sp.acs", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml.saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") .build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -307,16 +305,16 @@ public void testSigningMetadataWithPfx() throws Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") - .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout") + .put(RealmSettings.PREFIX + "saml.my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -368,15 +366,15 @@ public void testSigningMetadataWithPasswordProtectedPfx() throws 
Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -406,15 +404,15 @@ public void testErrorSigningMetadataWithWrongPassword() throws Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -442,15 +440,15 @@ public void testSigningMetadataWithPem() throws Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + 
"my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -483,15 +481,15 @@ public void testSigningMetadataWithPasswordProtectedPem() throws Exception { final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -523,15 +521,15 @@ public void testSigningMetadataWithPasswordProtectedPemInTerminal() throws Excep final boolean useSigningCredentials = randomBoolean(); final Settings.Builder settingsBuilder = Settings.builder() .put("path.home", createTempDir()) - .put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1) - .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", 
"https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) - .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "saml.my_saml.signing.key", keyPath.toString()); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); @@ -575,30 +573,32 @@ public void testDefaultOptionsWithSigningAndMultipleEncryptionKeys() throws Exce } final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(RealmSettings.PREFIX + "my_saml.signing.keystore.secure_password", "ks-password"); - secureSettings.setString(RealmSettings.PREFIX + "my_saml.signing.keystore.secure_key_password", "key-password"); - secureSettings.setString(RealmSettings.PREFIX + "my_saml.encryption.keystore.secure_password", "ks-password"); - secureSettings.setString(RealmSettings.PREFIX + "my_saml.encryption.keystore.secure_key_password", "key-password"); + secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.secure_password", "ks-password"); + secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.secure_key_password", "key-password"); + secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.secure_password", "ks-password"); + secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.secure_key_password", "key-password"); final SamlMetadataCommand command = new SamlMetadataCommand((e) -> keyStore); final OptionSet options = command.getParser().parse(new String[0]); final boolean useSigningCredentials = randomBoolean(); final boolean useEncryptionCredentials = randomBoolean(); - final Settings.Builder settingsBuilder = Settings.builder().put("path.home", dir).put(RealmSettings.PREFIX + "my_saml.type", "saml") - .put(RealmSettings.PREFIX + "my_saml.order", 1).put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") - .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") - .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") - .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + final Settings.Builder settingsBuilder = Settings.builder().put("path.home", dir) + .put(RealmSettings.PREFIX + "saml.my_saml.type", "saml") + .put(RealmSettings.PREFIX + "saml.my_saml.order", 1) + .put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login") + 
.put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout") + .put(RealmSettings.PREFIX + "saml.my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); settingsBuilder.setSecureSettings(secureSettings); if (useSigningCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.keystore.path", ksSigningFile.toString()); - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.keystore.type", "PKCS12"); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.path", ksSigningFile.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.type", "PKCS12"); } if (useEncryptionCredentials) { - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.encryption.keystore.path", ksEncryptionFile.toString()); - settingsBuilder.put(RealmSettings.PREFIX + "my_saml.encryption.keystore.type", "PKCS12"); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.path", ksEncryptionFile.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.type", "PKCS12"); } final Settings settings = settingsBuilder.build(); final Environment env = TestEnvironment.newEnvironment(settings); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index 2ecfdb50230bf..817cda5b0f0f5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authc.saml; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.collect.Tuple; @@ -64,6 +65,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -88,7 +90,7 @@ public class SamlRealmTests extends SamlTestCase { private static final int METADATA_REFRESH = 3000; private static final String REALM_NAME = "my-saml"; - private static final String REALM_SETTINGS_PREFIX = "xpack.security.authc.realms." + REALM_NAME; + private static final String REALM_SETTINGS_PREFIX = "xpack.security.authc.realms.saml." + REALM_NAME; private Settings globalSettings; private Environment env; @@ -138,6 +140,7 @@ public void testReadIdpMetadataFromHttps() throws Exception { assertEquals(0, proxyServer.requests().size()); Tuple config = buildConfig("https://localhost:" + proxyServer.getPort()); + logger.info("Settings\n{}", config.v1().globalSettings().toDelimitedString('\n')); final ResourceWatcherService watcherService = mock(ResourceWatcherService.class); Tuple> tuple = SamlRealm.initializeResolver(logger, config.v1(), config.v2(), watcherService); @@ -216,30 +219,31 @@ private AuthenticationResult performAuthentication(UserRoleMapper roleMapper, bo final String uidValue = principalIsEmailAddress ? 
"cbarton@shield.gov" : "cbarton"; final MockLookupRealm lookupRealm = new MockLookupRealm( - new RealmConfig("mock_lookup", Settings.EMPTY,globalSettings, env, threadContext)); + new RealmConfig(new RealmConfig.RealmIdentifier("mock","mock_lookup"), globalSettings, env, threadContext)); final Settings.Builder settingsBuilder = Settings.builder() - .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), useNameId ? "nameid" : "uid") - .put(SamlRealmSettings.GROUPS_ATTRIBUTE.name(), "groups") - .put(SamlRealmSettings.MAIL_ATTRIBUTE.name(), "mail"); + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), useNameId ? "nameid" : "uid") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.GROUPS_ATTRIBUTE.getAttribute()), "groups") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.MAIL_ATTRIBUTE.getAttribute()), "mail"); if (principalIsEmailAddress) { final boolean anchoredMatch = randomBoolean(); - settingsBuilder.put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern().getKey(), + settingsBuilder.put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern()), anchoredMatch ? "^([^@]+)@shield.gov$" : "^([^@]+)@"); } if (populateUserMetadata != null) { - settingsBuilder.put(SamlRealmSettings.POPULATE_USER_METADATA.getKey(), populateUserMetadata.booleanValue()); + settingsBuilder.put(getFullSettingKey(REALM_NAME, SamlRealmSettings.POPULATE_USER_METADATA), + populateUserMetadata.booleanValue()); } if (useAuthorizingRealm) { - settingsBuilder.putList(DelegatedAuthorizationSettings.AUTHZ_REALMS.getKey(), lookupRealm.name()); + settingsBuilder.putList(getFullSettingKey(new RealmConfig.RealmIdentifier("saml", REALM_NAME), + DelegatedAuthorizationSettings.AUTHZ_REALMS), lookupRealm.name()); lookupRealm.registerUser(new User(userPrincipal, new String[]{ "lookup_user_role" }, "Clinton Barton", "cbarton@shield.gov", Collections.singletonMap("is_lookup", true), true)); } final Settings realmSettings = settingsBuilder.build(); - final RealmConfig config = realmConfigFromRealmSettings(realmSettings); - final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); - + final RealmConfig config = buildConfig(realmSettings); + final SamlRealm realm = buildRealm(config, roleMapper, authenticator, logoutHandler, idp, sp); initializeRealms(realm, lookupRealm); final SamlToken token = new SamlToken(new byte[0], Collections.singletonList("")); @@ -275,14 +279,24 @@ private void initializeRealms(Realm... realms) { } } + public SamlRealm buildRealm(RealmConfig config, UserRoleMapper roleMapper, SamlAuthenticator authenticator, + SamlLogoutRequestHandler logoutHandler, EntityDescriptor idp, SpConfiguration sp) throws Exception { + try { + return new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + } catch (SettingsException e) { + logger.info(new ParameterizedMessage("Settings are invalid:\n{}", config.globalSettings().toDelimitedString('\n')), e); + throw e; + } + } + public void testAttributeSelectionWithRegex() throws Exception { final boolean useFriendlyName = randomBoolean(); final Settings settings = Settings.builder() - .put("attributes.principal", useFriendlyName ? "mail" : "urn:oid:0.9.2342.19200300.100.1.3") - .put("attribute_patterns.principal", "^(.+)@\\w+.example.com$") + .put(REALM_SETTINGS_PREFIX + ".attributes.principal", useFriendlyName ? 
"mail" : "urn:oid:0.9.2342.19200300.100.1.3") + .put(REALM_SETTINGS_PREFIX + ".attribute_patterns.principal", "^(.+)@\\w+.example.com$") .build(); - final RealmConfig config = realmConfigFromRealmSettings(settings); + final RealmConfig config = buildConfig(settings); final SamlRealmSettings.AttributeSetting principalSetting = new SamlRealmSettings.AttributeSetting("principal"); final SamlRealm.AttributeParser parser = SamlRealm.AttributeParser.forSetting(logger, principalSetting, config, false); @@ -295,15 +309,15 @@ public void testAttributeSelectionWithRegex() throws Exception { ))); final List strings = parser.getAttribute(attributes); - assertThat(strings, contains("john.smith", "jsmith")); + assertThat("For attributes: " + strings, strings, contains("john.smith", "jsmith")); } public void testSettingPatternWithoutAttributeThrowsSettingsException() throws Exception { final Settings realmSettings = Settings.builder() - .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), "nameid") - .put(SamlRealmSettings.NAME_ATTRIBUTE.getPattern().getKey(), "^\\s*(\\S.*\\S)\\s*$") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "nameid") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.NAME_ATTRIBUTE.getPattern()), "^\\s*(\\S.*\\S)\\s*$") .build(); - final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + final RealmConfig config = buildConfig(realmSettings); final UserRoleMapper roleMapper = mock(UserRoleMapper.class); final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); @@ -312,14 +326,14 @@ public void testSettingPatternWithoutAttributeThrowsSettingsException() throws E final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null, Collections.emptyList()); final SettingsException settingsException = expectThrows(SettingsException.class, - () -> new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp)); + () -> buildRealm(config, roleMapper, authenticator, logoutHandler, idp, sp)); assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attribute_patterns.name")); assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attributes.name")); } public void testMissingPrincipalSettingThrowsSettingsException() throws Exception { final Settings realmSettings = Settings.EMPTY; - final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + final RealmConfig config = buildConfig(realmSettings); final UserRoleMapper roleMapper = mock(UserRoleMapper.class); final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); @@ -328,7 +342,7 @@ public void testMissingPrincipalSettingThrowsSettingsException() throws Exceptio final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null, Collections.emptyList()); final SettingsException settingsException = expectThrows(SettingsException.class, - () -> new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp)); + () -> buildRealm(config, roleMapper, authenticator, logoutHandler, idp, sp)); assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attributes.principal")); } @@ -340,13 +354,13 @@ public void testNonMatchingPrincipalPatternThrowsSamlException() throws Exceptio final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); final Settings realmSettings = Settings.builder() - .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute().getKey(), "mail") - 
.put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern().getKey(), "^([^@]+)@mycorp\\.example\\.com$") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "mail") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern()), "^([^@]+)@mycorp\\.example\\.com$") .build(); - final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + final RealmConfig config = buildConfig(realmSettings); - final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + final SamlRealm realm = buildRealm(config, roleMapper, authenticator, logoutHandler, idp, sp); final SamlToken token = new SamlToken(new byte[0], Collections.singletonList("")); for (String mail : Arrays.asList("john@your-corp.example.com", "john@mycorp.example.com.example.net", "john")) { @@ -531,7 +545,7 @@ public void testCreateSigningCredentialFromKeyStoreFailureScenarios() throws Exc final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); final String expectedErrorMessage = "The configured key store for " - + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." + " does not have a key associated with alias [" + unknownAlias + "] " + "(from setting " + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")"; assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); @@ -552,7 +566,7 @@ public void testCreateSigningCredentialFromKeyStoreFailureScenarios() throws Exc final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); final String expectedErrorMessage = "The configured key store for " - + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." + " does not contain any RSA key pairs"; assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); } else { @@ -560,7 +574,7 @@ public void testCreateSigningCredentialFromKeyStoreFailureScenarios() throws Exc final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); final String expectedErrorMessage = "The configured key store for " - + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." 
+ " has multiple keys but no alias has been specified (from setting " + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")"; assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); @@ -590,14 +604,14 @@ public void testBuildLogoutRequest() throws Exception { final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); final Settings.Builder realmSettings = Settings.builder() - .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute().getKey(), "uid"); + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid"); if (useSingleLogout != null) { - realmSettings.put(SamlRealmSettings.IDP_SINGLE_LOGOUT.getKey(), useSingleLogout.booleanValue()); + realmSettings.put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_SINGLE_LOGOUT), useSingleLogout.booleanValue()); } - final RealmConfig config = realmConfigFromRealmSettings(realmSettings.build()); + final RealmConfig config = buildConfig(realmSettings.build()); - final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + final SamlRealm realm = buildRealm(config, roleMapper, authenticator, logoutHandler, idp, sp); final NameID nameId = SamlUtils.buildObject(NameID.class, NameID.DEFAULT_ELEMENT_NAME); nameId.setFormat(NameID.TRANSIENT); @@ -622,8 +636,8 @@ private EntityDescriptor mockIdp() { return descriptor; } - private Tuple buildConfig(String path) throws Exception { - Settings globalSettings = buildSettings(path).build(); + private Tuple buildConfig(String idpMetaDataPath) throws Exception { + Settings globalSettings = buildSettings(idpMetaDataPath).build(); final Environment env = TestEnvironment.newEnvironment(globalSettings); final RealmConfig config = realmConfigFromGlobalSettings(globalSettings); final SSLService sslService = new SSLService(globalSettings, env); @@ -641,22 +655,24 @@ private Settings.Builder buildSettings(String idpMetaDataPath) { getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) .put(REALM_SETTINGS_PREFIX + ".ssl.certificate_authorities", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) - .put(REALM_SETTINGS_PREFIX + ".type", "saml") - .put(REALM_SETTINGS_PREFIX + "." + SamlRealmSettings.IDP_METADATA_PATH.getKey(), idpMetaDataPath) - .put(REALM_SETTINGS_PREFIX + "." + SamlRealmSettings.IDP_ENTITY_ID.getKey(), TEST_IDP_ENTITY_ID) - .put(REALM_SETTINGS_PREFIX + "." 
+ SamlRealmSettings.IDP_METADATA_HTTP_REFRESH.getKey(), METADATA_REFRESH + "ms") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_METADATA_PATH), idpMetaDataPath) + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_ENTITY_ID), TEST_IDP_ENTITY_ID) + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_METADATA_HTTP_REFRESH), METADATA_REFRESH + "ms") .put("path.home", createTempDir()) .setSecureSettings(secureSettings); } - private RealmConfig realmConfigFromRealmSettings(Settings realmSettings) { - return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, threadContext); + private RealmConfig buildConfig(Settings realmSettings) { + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(realmSettings).build(); + final Environment env = TestEnvironment.newEnvironment(settings); + return new RealmConfig(new RealmConfig.RealmIdentifier("saml", REALM_NAME), settings, env, threadContext); } private RealmConfig realmConfigFromGlobalSettings(Settings globalSettings) { - final Settings realmSettings = globalSettings.getByPrefix(REALM_SETTINGS_PREFIX + "."); final Environment env = TestEnvironment.newEnvironment(globalSettings); - return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, new ThreadContext(globalSettings)); + return new RealmConfig(new RealmConfig.RealmIdentifier("saml", REALM_NAME), globalSettings, env, new ThreadContext(globalSettings)); } private void assertIdp1MetadataParsedCorrectly(EntityDescriptor descriptor) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java index adaba34a73aa3..73982c5dfd01c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java @@ -5,12 +5,13 @@ */ package org.elasticsearch.xpack.security.authc.saml; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; @@ -37,7 +38,7 @@ public abstract class SamlTestCase extends ESTestCase { @BeforeClass public static void setupSaml() throws Exception { - Logger logger = Loggers.getLogger(SamlTestCase.class); + Logger logger = LogManager.getLogger(SamlTestCase.class); if (isTurkishLocale()) { // See: https://github.com/elastic/x-pack-elasticsearch/issues/2815 logger.warn("Attempting to run SAML test on turkish-like locale, but that breaks OpenSAML. 
Switching to English."); @@ -135,4 +136,8 @@ protected ElasticsearchSecurityException expectSamlException(ThrowingRunnable ru assertThat("Exception " + exception + " should be a SAML exception", SamlUtils.isSamlException(exception), is(true)); return exception; } + + protected Settings mergeSettings(Settings local, Settings global) { + return Settings.builder().put(global).put(local).build(); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java index 6230c637b89b0..d5ed8f0dd9b82 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; @@ -67,15 +68,17 @@ public void testCacheSettings() throws Exception { String cachingHashAlgo = Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT); int maxUsers = randomIntBetween(10, 100); TimeValue ttl = TimeValue.timeValueMinutes(randomIntBetween(10, 20)); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("caching", "test_realm"); Settings settings = Settings.builder() - .put(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.getKey(), cachingHashAlgo) - .put(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.getKey(), maxUsers) - .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) - .build(); + .put(globalSettings) + .put(RealmSettings.getFullSettingKey(identifier, CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING), cachingHashAlgo) + .put(RealmSettings.getFullSettingKey(identifier, CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING), maxUsers) + .put(RealmSettings.getFullSettingKey(identifier, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), ttl) + .build(); - RealmConfig config = new RealmConfig("test_realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); - CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config, threadPool) { + RealmConfig config = new RealmConfig(identifier, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm(config, threadPool) { @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { listener.onResponse(AuthenticationResult.success(new User("username", new String[]{"r1", "r2", "r3"}))); @@ -241,11 +244,13 @@ public void testCacheDisabledUser() { public void testCacheWithVeryLowTtlExpiresBetweenAuthenticateCalls() throws InterruptedException { TimeValue ttl = TimeValue.timeValueNanos(randomIntBetween(10, 100)); + 
final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("caching", "test_cache_ttl"); Settings settings = Settings.builder() - .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) + .put(globalSettings) + .put(RealmSettings.getFullSettingKey(identifier, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), ttl) .build(); - RealmConfig config = new RealmConfig("test_cache_ttl", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(identifier, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(config, threadPool); final UsernamePasswordToken authToken = new UsernamePasswordToken("the-user", new SecureString("the-password")); @@ -270,11 +275,13 @@ public void testCacheWithVeryLowTtlExpiresBetweenAuthenticateCalls() throws Inte public void testReadsDoNotPreventCacheExpiry() throws InterruptedException { TimeValue ttl = TimeValue.timeValueMillis(250); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("caching", "test_cache_ttl"); Settings settings = Settings.builder() - .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) + .put(globalSettings) + .put(RealmSettings.getFullSettingKey(identifier, CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING), ttl) .build(); - RealmConfig config = new RealmConfig("test_cache_ttl", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(identifier, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(config, threadPool); final UsernamePasswordToken authToken = new UsernamePasswordToken("the-user", new SecureString("the-password")); @@ -377,9 +384,9 @@ public void testSingleAuthPerUserLimit() throws Exception { final AtomicInteger authCounter = new AtomicInteger(0); final Hasher pwdHasher = Hasher.resolve(randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")); final String passwordHash = new String(pwdHasher.hash(password)); - RealmConfig config = new RealmConfig("test_realm", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); - final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config, threadPool) { + RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("caching", "test_realm"), Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm(config, threadPool) { @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { authCounter.incrementAndGet(); @@ -443,9 +450,9 @@ public void testCacheConcurrency() throws Exception { final SecureString randomPassword = new SecureString(randomAlphaOfLength(password.length()).toCharArray()); final Hasher localHasher = Hasher.resolve(randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")); final String passwordHash = new String(localHasher.hash(password)); - RealmConfig config = new RealmConfig("test_realm", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new 
ThreadContext(Settings.EMPTY)); - final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config, threadPool) { + RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("caching", "test_realm"), Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm(config, threadPool) { @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { // do something slow @@ -511,9 +518,9 @@ public void testUserLookupConcurrency() throws Exception { final String username = "username"; final AtomicInteger lookupCounter = new AtomicInteger(0); - RealmConfig config = new RealmConfig("test_realm", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); - final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config, threadPool) { + RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("caching", "test_realm"), Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm(config, threadPool) { @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { listener.onFailure(new UnsupportedOperationException("authenticate should not be called!")); @@ -568,12 +575,14 @@ protected void doLookupUser(String username, ActionListener listener) { } public void testAuthenticateDisabled() throws Exception { + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("caching", "test_authentication_disabled"); final Settings settings = Settings.builder() - .put(CachingUsernamePasswordRealmSettings.AUTHC_ENABLED_SETTING.getKey(), false) + .put(RealmSettings.getFullSettingKey(realmId, CachingUsernamePasswordRealmSettings.AUTHC_ENABLED_SETTING), false) + .put(globalSettings) .build(); - final Environment env = TestEnvironment.newEnvironment(globalSettings); - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final RealmConfig config = new RealmConfig("test_authentication_disabled", settings, globalSettings, env, threadContext); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + final RealmConfig config = new RealmConfig(realmId, settings, env, threadContext); final AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(config, threadPool); final UsernamePasswordToken token = new UsernamePasswordToken("phil", new SecureString("tahiti")); @@ -597,7 +606,8 @@ public void testAuthenticateDisabled() throws Exception { static class FailingAuthenticationRealm extends CachingUsernamePasswordRealm { FailingAuthenticationRealm(Settings settings, Settings global, ThreadPool threadPool) { - super("failing", new RealmConfig("failing-test", settings, global, TestEnvironment.newEnvironment(global), + super(new RealmConfig(new RealmConfig.RealmIdentifier("caching", "failing-test"), settings, global, + TestEnvironment.newEnvironment(global), threadPool.getThreadContext()), threadPool); } @@ -615,7 +625,8 @@ protected void doLookupUser(String username, ActionListener listener) { static class ThrowingAuthenticationRealm extends CachingUsernamePasswordRealm { ThrowingAuthenticationRealm(Settings settings, Settings 
globalSettings, ThreadPool threadPool) { - super("throwing", new RealmConfig("throwing-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + super(new RealmConfig(new RealmConfig.RealmIdentifier("caching", "throwing-test"), settings, globalSettings, + TestEnvironment.newEnvironment(globalSettings), threadPool.getThreadContext()), threadPool); } @@ -638,12 +649,13 @@ static class AlwaysAuthenticateCachingRealm extends CachingUsernamePasswordRealm private boolean usersEnabled = true; AlwaysAuthenticateCachingRealm(Settings globalSettings, ThreadPool threadPool) { - this(new RealmConfig("always-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + this(new RealmConfig(new RealmConfig.RealmIdentifier("caching", "always-test"), Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), threadPool.getThreadContext()), threadPool); } AlwaysAuthenticateCachingRealm(RealmConfig config, ThreadPool threadPool) { - super("always", config, threadPool); + super(config, threadPool); } void setUsersEnabled(boolean usersEnabled) { @@ -670,7 +682,7 @@ static class LookupNotSupportedRealm extends CachingUsernamePasswordRealm { public final AtomicInteger lookupInvocationCounter = new AtomicInteger(0); LookupNotSupportedRealm(Settings globalSettings, ThreadPool threadPool) { - super("lookup", new RealmConfig("lookup-notsupported-test", Settings.EMPTY, globalSettings, + super(new RealmConfig(new RealmConfig.RealmIdentifier("caching", "lookup-notsupported-test"), Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), threadPool.getThreadContext()), threadPool); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java index 8f0d360b75975..20a8d8b27c3d1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DelegatedAuthorizationSupportTests.java @@ -63,7 +63,12 @@ private List shuffle(List list) { } private RealmConfig buildRealmConfig(String name, Settings settings) { - return new RealmConfig(name, settings, globalSettings, env, threadContext); + return new RealmConfig(new RealmConfig.RealmIdentifier("test", name), + Settings.builder().put(settings) + .normalizePrefix("xpack.security.authc.realms.test." 
+ name + ".") + .put(globalSettings) + .build(), + env, threadContext); } public void testEmptyDelegationList() throws ExecutionException, InterruptedException { @@ -96,13 +101,13 @@ public void testDelegationChainsAreRejected() { .build(); globalSettings = Settings.builder() .put(globalSettings) - .putList("xpack.security.authc.realms.lookup-2.authorization_realms", "lookup-1") + .putList("xpack.security.authc.realms.test.lookup-2.authorization_realms", "lookup-1") .build(); final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new DelegatedAuthorizationSupport(realms, buildRealmConfig("realm1", settings), license) ); assertThat(ex.getMessage(), - equalTo("cannot use realm [mock/lookup-2] as an authorization realm - it is already delegating authorization to [[lookup-1]]")); + equalTo("cannot use realm [test/lookup-2] as an authorization realm - it is already delegating authorization to [[lookup-1]]")); } public void testMatchInDelegationList() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java index 83110c7a10a14..48465641eb4b2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java @@ -38,6 +38,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -50,11 +51,7 @@ public class DnRoleMapperTests extends ESTestCase { - private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(); - private static final String USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY = - DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(); - - private static final String[] STARK_GROUP_DNS = new String[] { + private static final String[] STARK_GROUP_DNS = new String[]{ //groups can be named by different attributes, depending on the directory, //we don't care what it is named by "cn=shield,ou=marvel,o=superheros", @@ -181,7 +178,7 @@ public void testMapperAutoReloadWithoutListener() throws Exception { assertBusy(() -> { Set resolvedRoles = mapper.resolveRoles("", - Collections.singletonList("cn=fantastic_four,ou=marvel,o=superheros")); + Collections.singletonList("cn=fantastic_four,ou=marvel,o=superheros")); assertThat(resolvedRoles, notNullValue()); assertThat(resolvedRoles.size(), is(1)); assertThat(resolvedRoles, contains("fantastic_four")); @@ -283,11 +280,12 @@ public void testParseFileLenient_WhenCannotReadFile() throws Exception { public void testYaml() throws Exception { Path file = getDataPath("role_mapping.yml"); + final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "ldap1"); Settings ldapSettings = Settings.builder() - .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .put(getFullSettingKey(realmIdentifier, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new 
ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmIdentifier, mergeSettings(ldapSettings, settings), + TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY)); DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); @@ -298,11 +296,13 @@ public void testYaml() throws Exception { } public void testRelativeDN() { + final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "ldap1"); Settings ldapSettings = Settings.builder() - .put(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY, true) + .put(getFullSettingKey(realmIdentifier, DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING), true) .build(); - RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmIdentifier, + mergeSettings(ldapSettings, settings), + TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY)); DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); @@ -311,25 +311,33 @@ public void testRelativeDN() { } public void testUserDNMapping() throws Exception { + final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "ldap-userdn-role"); Path file = getDataPath("role_mapping.yml"); Settings ldapSettings = Settings.builder() - .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) - .put(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY, false) + .put(getFullSettingKey(realmIdentifier, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), file.toAbsolutePath()) + .put(getFullSettingKey(realmIdentifier, DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING), false) .build(); - RealmConfig config = new RealmConfig("ldap-userdn-role", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmIdentifier, mergeSettings(ldapSettings, settings), + TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY)); DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); - Set roles = mapper.resolveRoles("cn=Horatio Hornblower,ou=people,o=sevenSeas", Collections.emptyList()); + Set roles = mapper.resolveRoles("cn=Horatio Hornblower,ou=people,o=sevenSeas", Collections.emptyList()); assertThat(roles, hasItem("avenger")); } protected DnRoleMapper createMapper(Path file, ResourceWatcherService watcherService) { - Settings realmSettings = Settings.builder() - .put("files.role_mapping", file.toAbsolutePath()) + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("ldap", "ad-group-mapper-test"); + Settings mergedSettings = Settings.builder() + .put(settings) + .put(getFullSettingKey(identifier, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING), file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("ad-group-mapper-test", realmSettings, settings, env, new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(identifier, mergedSettings, env, new ThreadContext(Settings.EMPTY)); return new DnRoleMapper(config, watcherService); } + + private Settings mergeSettings(Settings local, Settings global) { + return Settings.builder().put(global).put(local).build(); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java index 01700347f5091..ddc805288d687 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/MockLookupRealm.java @@ -22,7 +22,7 @@ public class MockLookupRealm extends Realm { private final Map<String, User> lookup; public MockLookupRealm(RealmConfig config) { - super("mock", config); + super(config); lookup = new HashMap<>(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java index 78be4b3ddf4c7..7e0ade512bf5f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RealmUserLookupTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmConfig.RealmIdentifier; import org.elasticsearch.xpack.core.security.user.User; import org.junit.Before; @@ -84,7 +85,7 @@ public void testUserNotFound() throws Exception { } public void testRealmException() { - final Realm realm = new Realm("test", new RealmConfig("test", Settings.EMPTY, globalSettings, env, threadContext)) { + final Realm realm = new Realm(new RealmConfig(new RealmIdentifier("test", "test"), globalSettings, env, threadContext)) { @Override public boolean supports(AuthenticationToken token) { return false; @@ -115,7 +116,7 @@ public void lookupUser(String username, ActionListener<User> listener) { private List<Realm> buildRealms(int realmCount) { final List<Realm> realms = new ArrayList<>(realmCount); for (int i = 1; i <= realmCount; i++) { - final RealmConfig config = new RealmConfig("lookup-" + i, Settings.EMPTY, globalSettings, env, threadContext); + final RealmConfig config = new RealmConfig(new RealmIdentifier("mock","lookup-" + i), globalSettings, env, threadContext); final MockLookupRealm realm = new MockLookupRealm(config); for (int j = 0; j < 5; j++) { realm.registerUser(new User(randomAlphaOfLengthBetween(6, 12))); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java index e0f71c40607ee..d84213726381d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java @@ -5,12 +5,6 @@ */ package org.elasticsearch.xpack.security.authc.support; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Collections; - import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.common.settings.Settings; @@ -18,16 +12,25 @@ import org.elasticsearch.env.TestEnvironment; import
org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.junit.Before; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; public class RoleMappingFileBootstrapCheckTests extends ESTestCase { - private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(); + private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "ldap-realm-name"); + private static final String ROLE_MAPPING_FILE_SETTING = RealmSettings.getFullSettingKey( + REALM_ID, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING); protected Settings settings; @@ -44,28 +47,31 @@ public void testBootstrapCheckOfValidFile() { Settings ldapSettings = Settings.builder() .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = getRealmConfig(ldapSettings); final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); assertThat(check, notNullValue()); assertThat(check.alwaysEnforce(), equalTo(true)); assertFalse(check.check(new BootstrapContext(settings, null)).isFailure()); } + private RealmConfig getRealmConfig(Settings realmSettings) { + return new RealmConfig(REALM_ID, mergeSettings(realmSettings, settings), + TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY)); + } + public void testBootstrapCheckOfMissingFile() { final String fileName = randomAlphaOfLength(10); Path file = createTempDir().resolve(fileName); Settings ldapSettings = Settings.builder() .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = getRealmConfig(ldapSettings); final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); assertThat(check, notNullValue()); assertThat(check.alwaysEnforce(), equalTo(true)); final BootstrapCheck.BootstrapCheckResult result = check.check(new BootstrapContext(settings, null)); assertTrue(result.isFailure()); - assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(REALM_ID.getName())); assertThat(result.getMessage(), containsString(fileName)); assertThat(result.getMessage(), containsString("does not exist")); } @@ -78,14 +84,13 @@ public void testBootstrapCheckWithInvalidYaml() throws IOException { Settings ldapSettings = Settings.builder() .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = getRealmConfig(ldapSettings); final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); assertThat(check, notNullValue()); assertThat(check.alwaysEnforce(), equalTo(true)); final BootstrapCheck.BootstrapCheckResult 
result = check.check(new BootstrapContext(settings, null)); assertTrue(result.isFailure()); - assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(REALM_ID.getName())); assertThat(result.getMessage(), containsString(file.toString())); assertThat(result.getMessage(), containsString("could not read")); } @@ -98,16 +103,19 @@ public void testBootstrapCheckWithInvalidDn() throws IOException { Settings ldapSettings = Settings.builder() .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) .build(); - RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = getRealmConfig(ldapSettings); final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); assertThat(check, notNullValue()); assertThat(check.alwaysEnforce(), equalTo(true)); final BootstrapCheck.BootstrapCheckResult result = check.check(new BootstrapContext(settings, null)); assertTrue(result.isFailure()); - assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(REALM_ID.getName())); assertThat(result.getMessage(), containsString(file.toString())); assertThat(result.getMessage(), containsString("invalid DN")); assertThat(result.getMessage(), containsString("not-a-dn")); } + + private Settings mergeSettings(Settings local, Settings global) { + return Settings.builder().put(global).put(local).build(); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 2ac82c8d7c063..d8785b8a72fa6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -38,8 +38,8 @@ public class ExpressionRoleMappingTests extends ESTestCase { @Before public void setupMapping() throws Exception { - realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, Mockito.mock(Environment.class), - new ThreadContext(Settings.EMPTY)); + realm = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", "ldap1"), + Settings.EMPTY, Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); } public void testParseValidJson() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 052ba38551021..0c9ea5c6de28f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -84,8 +84,8 @@ protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener } }; - final RealmConfig realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, mock(Environment.class), - new ThreadContext(Settings.EMPTY)); + final RealmConfig realm = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", "ldap1"), Settings.EMPTY, Settings.EMPTY, + mock(Environment.class), new 
ThreadContext(Settings.EMPTY)); final PlainActionFuture<Set<String>> future = new PlainActionFuture<>(); final UserRoleMapper.UserData user = new UserRoleMapper.UserData("sasquatch", @@ -197,8 +197,9 @@ private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(Atomi }).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class)); final Environment env = TestEnvironment.newEnvironment(settings); - final RealmConfig realmConfig = new RealmConfig(getTestName(), Settings.EMPTY, settings, env, threadContext); - final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm("test", realmConfig, threadPool) { + final RealmConfig realmConfig = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", getTestName()), Settings.EMPTY, + settings, env, threadContext); + final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { @Override protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) { listener.onResponse(AuthenticationResult.notHandled()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index 9c9f2b1b1a42a..66b1e9d9c2ad4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -140,7 +140,7 @@ public void testSwitchAndExecuteXpackUser() throws Exception { threadContext.putHeader(headerName, headerValue); threadContext.putTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.DEPRECATION_ORIGIN, - ClientHelper.MONITORING_ORIGIN, ClientHelper.PERSISTENT_TASK_ORIGIN)); + ClientHelper.MONITORING_ORIGIN, ClientHelper.PERSISTENT_TASK_ORIGIN, ClientHelper.INDEX_LIFECYCLE_ORIGIN)); AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, consumer); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 3e7543ffd9904..31b9551b903dd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -117,7 +117,7 @@ public void setup() { .put("cluster.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399)) .build(); - indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); + indexNameExpressionResolver = new IndexNameExpressionResolver(); final boolean withAlias = randomBoolean(); final String securityIndexName = SECURITY_INDEX_NAME + (withAlias ? 
"-" + randomAlphaOfLength(5) : ""); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java index 6ff18cc77a1e2..3e4fbee5197b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityField; -import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; import org.elasticsearch.xpack.security.LocalStateSecurity; import org.junit.BeforeClass; @@ -139,8 +138,7 @@ public void testThatConnectionToClientTypeConnectionIsRejected() throws IOExcept // test that starting up a node works Settings.Builder nodeSettings = Settings.builder() - .put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE) - .put("xpack.security.authc.realms.file.order", 0) + .put("xpack.security.authc.realms.file.file.order", 0) .put("node.name", "my-test-node") .put(SecurityField.USER_SETTING.getKey(), "test_user:" + SecuritySettingsSourceField.TEST_PASSWORD) .put("cluster.name", internalCluster().getClusterName()) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java index ee40d3e24bb8a..f17e9f4ff0871 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java @@ -57,7 +57,7 @@ public void init() throws Exception { IPFilter.PROFILE_FILTER_DENY_SETTING))); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isIpFilteringAllowed()).thenReturn(true); - AuditTrailService auditTrailService = new AuditTrailService(settings, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); IPFilter ipFilter = new IPFilter(settings, auditTrailService, clusterSettings, licenseState); ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); if (isHttpEnabled) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java index 398b783f642bd..3df00018af420 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/NioIPFilterTests.java @@ -58,7 +58,7 @@ public void init() throws Exception { IPFilter.PROFILE_FILTER_DENY_SETTING))); XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isIpFilteringAllowed()).thenReturn(true); - AuditTrailService auditTrailService = new 
AuditTrailService(settings, Collections.emptyList(), licenseState); + AuditTrailService auditTrailService = new AuditTrailService(Collections.emptyList(), licenseState); IPFilter ipFilter = new IPFilter(settings, auditTrailService, clusterSettings, licenseState); ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); if (isHttpEnabled) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index b9cec441fbb23..f513d70e881ae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -13,9 +13,11 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.Transport; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.RestrictedTrustManager; @@ -50,7 +52,6 @@ @TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { - private static final int RESOURCE_RELOAD_MILLIS = 3; private static final TimeValue MAX_WAIT_RELOAD = TimeValue.timeValueSeconds(1); private static Path configPath; @@ -129,7 +130,6 @@ public Settings nodeSettings(int nodeOrdinal) { writeRestrictions("*.trusted"); builder.put("xpack.ssl.trust_restrictions.path", restrictionsPath); - builder.put("resource.reload.interval.high", RESOURCE_RELOAD_MILLIS + "ms"); return builder.build(); } @@ -145,6 +145,7 @@ private void writeRestrictions(String trustedPattern) { } catch (IOException e) { throw new ElasticsearchException("failed to write restrictions", e); } + runResourceWatcher(); } @Override @@ -203,6 +204,23 @@ public void testRestrictionsAreReloaded() throws Exception { }, MAX_WAIT_RELOAD.millis(), TimeUnit.MILLISECONDS); } + /** + * Force the file watch to be updated. + * Ideally we'd just let the service do its thing, but that would mean waiting for five seconds. + * We could drop that interval, but then we run into resource contention issues. + * This method just tells the {@link ResourceWatcherService} to run its check at a time that suits the tests. In all other respects + * it works as normal - the usual file checks apply for detecting a file as "changed", and only the previously configured files + * are checked. 
+ */ + private void runResourceWatcher() { + final InternalTestCluster cluster = internalCluster(); + if (cluster.size() > 0) { + final ResourceWatcherService service = cluster.getInstance(ResourceWatcherService.class); + logger.info("Triggering a reload of watched resources"); + service.notifyNow(ResourceWatcherService.Frequency.HIGH); + } + } + private void tryConnect(CertificateInfo certificate) throws Exception { Settings settings = Settings.builder() .put("path.home", createTempDir()) diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 62097e76b97ea..3cde541d304d5 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -51,6 +51,16 @@ bundlePlugin { } } +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + + /********************************************** * SQL Parser regeneration * **********************************************/ diff --git a/x-pack/qa/sql/build.gradle b/x-pack/plugin/sql/qa/build.gradle similarity index 97% rename from x-pack/qa/sql/build.gradle rename to x-pack/plugin/sql/qa/build.gradle index baaf0451e51f2..6b55b6363f2ca 100644 --- a/x-pack/qa/sql/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -16,7 +16,7 @@ dependencies { // CLI testing dependencies compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') - compile "org.jline:jline:3.6.0" + compile "org.jline:jline:3.8.2" } /* disable unit tests because these are all integration tests used @@ -82,7 +82,7 @@ subprojects { * dependencies but we don't really want them because they cause * all kinds of trouble with the jar hell checks. So we suppress * them explicitly for non-es projects. */ - testCompile(xpackProject('qa:sql')) { + testCompile(xpackProject('plugin:sql:qa')) { transitive = false } testCompile "org.elasticsearch.test:framework:${version}" @@ -104,7 +104,7 @@ subprojects { testRuntime (xpackProject('plugin:sql:sql-action')) { transitive = false } - testRuntime "org.jline:jline:3.6.0" + testRuntime "org.jline:jline:3.8.2" } if (project.name != 'security') { diff --git a/x-pack/qa/sql/multinode/build.gradle b/x-pack/plugin/sql/qa/multi-node/build.gradle similarity index 100% rename from x-pack/qa/sql/multinode/build.gradle rename to x-pack/plugin/sql/qa/multi-node/build.gradle diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java similarity index 73% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java index 56a112df02135..9e5c78219100e 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliSelectIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; +import org.elasticsearch.xpack.sql.qa.cli.SelectTestCase; public class CliSelectIT extends SelectTestCase { } diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java similarity index 73% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java index 07e544094d547..0b5e97bf534b5 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/CliShowIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; +import org.elasticsearch.xpack.sql.qa.cli.ShowTestCase; public class CliShowIT extends ShowTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java similarity index 73% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java index f653049b9a1ae..344ae01f5b9cb 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcDatabaseMetaDataIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.DatabaseMetaDataTestCase; public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcErrorsIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcErrorsIT.java index 21a52b609bbb3..fec89fac11f1c 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcErrorsIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ErrorsTestCase; public class JdbcErrorsIT extends ErrorsTestCase { } diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcPreparedStatementIT.java similarity index 73% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcPreparedStatementIT.java index 155e9bf161b1c..b06746acb79ce 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcPreparedStatementIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.PreparedStatementTestCase; public class JdbcPreparedStatementIT extends PreparedStatementTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java index f68b022f6adae..0936dfabf2bf8 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/JdbcShowTablesIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ShowTablesTestCase; public class JdbcShowTablesIT extends ShowTablesTestCase { } diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java similarity index 79% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java index 231cee1f343bb..ad81bc380d3d1 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.multi_node; -import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; +import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase; /** * Integration test for the rest sql action. The one that speaks json directly to a diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java similarity index 96% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java rename to x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java index 75eaefed77925..9df646872b1e8 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java +++ b/x-pack/plugin/sql/qa/multi-node/src/test/java/org/elasticsearch/xpack/sql/qa/multi_node/RestSqlMultinodeIT.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.multi_node; import org.apache.http.HttpHost; import org.elasticsearch.client.Request; @@ -25,8 +25,8 @@ import java.util.Map; import static java.util.Collections.singletonList; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; /** * Tests specific to multiple nodes. 
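The realm-related test changes earlier in this diff (MockLookupRealm, RealmUserLookupTests, RoleMappingFileBootstrapCheckTests, ExpressionRoleMappingTests, NativeRoleMappingStoreTests) all follow the same migration: a realm is now addressed by a RealmConfig.RealmIdentifier of type plus name rather than a bare name string, and realm-scoped setting keys are derived from that identifier via RealmSettings.getFullSettingKey, giving keys of the form xpack.security.authc.realms.<type>.<name>.* (compare the ServerTransportFilterIntegrationTests hunk, where xpack.security.authc.realms.file.type disappears in favour of xpack.security.authc.realms.file.file.order). A minimal sketch of the new construction, assuming the same test helpers used in those files; the realm type, name, and file path here are illustrative only:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.RealmSettings;
import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings;

public class RealmConfigSketch {
    public static RealmConfig ldapRealmConfig(String roleMappingFile) {
        // A realm is identified by (type, name) instead of a bare name string.
        RealmConfig.RealmIdentifier id = new RealmConfig.RealmIdentifier("ldap", "ldap1");
        // Expands to the realm-scoped key, i.e. the setting now lives under
        // xpack.security.authc.realms.ldap.ldap1.*
        String roleMappingKey = RealmSettings.getFullSettingKey(id, DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING);
        Settings settings = Settings.builder()
                .put("path.home", System.getProperty("java.io.tmpdir")) // Environment requires path.home
                .put(roleMappingKey, roleMappingFile)
                .build();
        // Four-argument constructor as used by RealmUserLookupTests above; a
        // five-argument (id, realmSettings, globalSettings, env, threadContext)
        // overload appears in ExpressionRoleMappingTests.
        return new RealmConfig(id, settings, TestEnvironment.newEnvironment(settings), new ThreadContext(Settings.EMPTY));
    }
}

This is also why the bootstrap-check assertions above switch from the literal "the-realm-name" to REALM_ID.getName(): the failure message is now built from the identifier rather than from a free-standing string.
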
diff --git a/x-pack/qa/sql/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle similarity index 100% rename from x-pack/qa/sql/security/build.gradle rename to x-pack/plugin/sql/qa/security/build.gradle diff --git a/x-pack/qa/sql/security/roles.yml b/x-pack/plugin/sql/qa/security/roles.yml similarity index 100% rename from x-pack/qa/sql/security/roles.yml rename to x-pack/plugin/sql/qa/security/roles.yml diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliErrorsIT.java similarity index 80% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliErrorsIT.java index 92d9608a527f1..a37be9455705a 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliErrorsIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; public class CliErrorsIT extends ErrorsTestCase { @Override diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliFetchSizeIT.java similarity index 80% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliFetchSizeIT.java index c8ca7db71d1e2..f694d4c9118c4 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliFetchSizeIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.FetchSizeTestCase; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; public class CliFetchSizeIT extends FetchSizeTestCase { @Override diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java similarity index 96% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java index 3e0b578913841..2995cc4e57750 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSecurityIT.java @@ -3,13 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; -import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.ErrorsTestCase; import java.io.IOException; import java.net.URISyntaxException; @@ -20,7 +20,7 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase.elasticsearchAddress; +import static org.elasticsearch.xpack.sql.qa.cli.CliIntegrationTestCase.elasticsearchAddress; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSelectIT.java similarity index 80% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSelectIT.java index 596fd1e723644..93faf2d70e937 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliSelectIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; -import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.SelectTestCase; public class CliSelectIT extends SelectTestCase { @Override diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliShowIT.java similarity index 80% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliShowIT.java index c05dbcc3d1369..f527188371e84 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/CliShowIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; -import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.ShowTestCase; public class CliShowIT extends ShowTestCase { @Override diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcConnectionIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcConnectionIT.java index 08aa73f68b9ea..8a4815a4e1eaf 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcConnectionIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ConnectionTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcCsvSpecIT.java similarity index 85% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcCsvSpecIT.java index e5fdf0baf452c..ada9c85b1c1e0 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcCsvSpecIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcDatabaseMetaDataIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcDatabaseMetaDataIT.java index f4aafe4090b95..fabf2489bd8ac 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcDatabaseMetaDataIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.DatabaseMetaDataTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcErrorsIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcErrorsIT.java index 2ed8ac7941ff0..d6a3a55733462 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcErrorsIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ErrorsTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcFetchSizeIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcFetchSizeIT.java index ac239193e9938..c75bbc1a50ac0 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcFetchSizeIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.FetchSizeTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcPreparedStatementIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcPreparedStatementIT.java index 3ecb0d388c204..36a65f88a6f2d 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcPreparedStatementIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.PreparedStatementTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java similarity index 97% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index 848b98eeb7bf8..d47b06289a8b3 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -3,12 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.xpack.qa.sql.jdbc.LocalH2; +import org.elasticsearch.xpack.sql.qa.jdbc.LocalH2; import java.net.URISyntaxException; import java.nio.file.Files; @@ -24,10 +24,10 @@ import java.util.Map; import java.util.Properties; -import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; -import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.elasticsearchAddress; -import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.randomKnownTimeZone; -import static org.elasticsearch.xpack.qa.sql.security.RestSqlIT.SSL_ENABLED; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert.assertResultSets; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase.elasticsearchAddress; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase.randomKnownTimeZone; +import static org.elasticsearch.xpack.sql.qa.security.RestSqlIT.SSL_ENABLED; import static org.hamcrest.Matchers.containsString; public class JdbcSecurityIT extends SqlSecurityTestCase { diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcShowTablesIT.java similarity index 87% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcShowTablesIT.java index ab76b3f33a132..ba54795336f5f 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcShowTablesIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ShowTablesTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSimpleExampleIT.java similarity index 88% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSimpleExampleIT.java index b01fe72333b64..d7aeff61947c6 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSimpleExampleIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SimpleExampleTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSqlSpecIT.java similarity index 90% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSqlSpecIT.java index 609847f513e3a..c8ee720c39f67 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSqlSpecIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; import java.util.Properties; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlIT.java similarity index 93% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlIT.java index e5408e48dac02..362b3a93211ea 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlIT.java @@ -3,7 +3,7 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.io.PathUtils; @@ -11,20 +11,20 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase; import java.net.URISyntaxException; import java.nio.file.Files; import java.nio.file.Path; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + /** * Integration test for the rest sql action. The one that speaks json directly to a * user rather than to the JDBC driver or CLI. */ public class RestSqlIT extends RestSqlTestCase { - static final boolean SSL_ENABLED = Booleans.parseBoolean(System.getProperty("tests.ssl.enabled")); + static final boolean SSL_ENABLED = Booleans.parseBoolean(System.getProperty("tests.ssl.enabled"), false); static Settings securitySettings() { String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java similarity index 98% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index 607b37e39d192..fd00ce833b5d3 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; @@ -29,8 +29,8 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.randomMode; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java similarity index 99% rename from x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java rename to x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index 0e3d2cab2ff84..848fee17c7f1f 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.security; +package org.elasticsearch.xpack.sql.qa.security; import org.apache.lucene.util.SuppressForbidden; import org.elasticsearch.ElasticsearchParseException; diff --git a/x-pack/qa/sql/security/src/test/resources/plugin-security.policy b/x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy similarity index 100% rename from x-pack/qa/sql/security/src/test/resources/plugin-security.policy rename to x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy diff --git a/x-pack/qa/sql/security/ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle similarity index 100% rename from x-pack/qa/sql/security/ssl/build.gradle rename to x-pack/plugin/sql/qa/security/with-ssl/build.gradle diff --git a/x-pack/qa/sql/security/no-ssl/build.gradle b/x-pack/plugin/sql/qa/security/without-ssl/build.gradle similarity index 100% rename from x-pack/qa/sql/security/no-ssl/build.gradle rename to x-pack/plugin/sql/qa/security/without-ssl/build.gradle diff --git a/x-pack/qa/sql/no-security/build.gradle b/x-pack/plugin/sql/qa/single-node/build.gradle similarity index 100% rename from x-pack/qa/sql/no-security/build.gradle rename to x-pack/plugin/sql/qa/single-node/build.gradle diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java index be6c003c7460c..d8817d23efe57 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java +++ 
b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliErrorsIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.cli.ErrorsTestCase; public class CliErrorsIT extends ErrorsTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java similarity index 98% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java index cbf6d0d476e57..4296c5ae06919 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase; +import org.elasticsearch.xpack.sql.qa.cli.CliIntegrationTestCase; import java.io.IOException; diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java index e4d2ef1a0e2ca..eb1e8d32a6bf7 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliFetchSizeIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; +import org.elasticsearch.xpack.sql.qa.cli.FetchSizeTestCase; public class CliFetchSizeIT extends FetchSizeTestCase { } diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java similarity index 72% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java index af6f986e1bc46..76c2a324ed836 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliSelectIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; +import org.elasticsearch.xpack.sql.qa.cli.SelectTestCase; public class CliSelectIT extends SelectTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java similarity index 73% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java index 2720b45d2a72e..d3557d1afa434 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliShowIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; +import org.elasticsearch.xpack.sql.qa.cli.ShowTestCase; public class CliShowIT extends ShowTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcConnectionIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcConnectionIT.java index e75cf6d059dfe..fe34346458c8a 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcConnectionIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ConnectionTestCase; public class JdbcConnectionIT extends ConnectionTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java similarity index 73% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java index a245e6c85ef24..4f841e02ae3d5 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; public class JdbcCsvSpecIT extends CsvSpecTestCase { public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java similarity index 73% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java index 9bb663f190aff..13bc7c3695059 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDatabaseMetaDataIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.DatabaseMetaDataTestCase; public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java similarity index 84% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java index c8fc587bd6ac6..336b476a37f9b 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java @@ -3,26 +3,26 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.RestClient; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; -import org.elasticsearch.xpack.qa.sql.jdbc.DataLoader; -import org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert; -import org.elasticsearch.xpack.qa.sql.jdbc.SpecBaseIntegrationTestCase; -import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.DataLoader; +import org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert; +import org.elasticsearch.xpack.sql.qa.jdbc.SpecBaseIntegrationTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.List; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; /** * CSV test specification for DOC examples. 
diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcErrorsIT.java similarity index 72% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcErrorsIT.java index 946a9b6c73165..ba9ef20c34ad4 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcErrorsIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ErrorsTestCase; public class JdbcErrorsIT extends ErrorsTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFetchSizeIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFetchSizeIT.java index b64290957c088..1d08c49dae2f7 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcFetchSizeIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.FetchSizeTestCase; public class JdbcFetchSizeIT extends FetchSizeTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcPreparedStatementIT.java similarity index 73% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcPreparedStatementIT.java index 0d711c2798608..9a986a33afd7f 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcPreparedStatementIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.PreparedStatementTestCase; public class JdbcPreparedStatementIT extends PreparedStatementTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java similarity index 80% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java index 30756a11f62ec..658911fb8d4d3 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java @@ -4,9 +4,9 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ResultSetTestCase; /* * Integration testing class for "no security" (cluster running without the Security plugin, diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShardFailureIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java similarity index 94% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShardFailureIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java index e88b6b94d209f..487dc5f717348 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShardFailureIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShardFailureIT.java @@ -3,10 +3,10 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; import org.elasticsearch.client.Request; -import org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.JdbcIntegrationTestCase; import org.junit.Before; import java.io.IOException; diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java similarity index 72% rename from x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java index 7af41a5c9d8ef..f5ceefc35c1e3 100644 --- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcShowTablesIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.multinode; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.ShowTablesTestCase; public class JdbcShowTablesIT extends ShowTablesTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSimpleExampleIT.java similarity index 72% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSimpleExampleIT.java index 08539667cf92d..ca79257fc5cae 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSimpleExampleIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SimpleExampleTestCase; public class JdbcSimpleExampleIT extends SimpleExampleTestCase { } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java similarity index 81% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java index fb658270729d3..0e1867b26dfdb 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcSqlSpecIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.SqlSpecTestCase; public class JdbcSqlSpecIT extends SqlSpecTestCase { public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java similarity index 79% rename from x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java index e22c8fb085210..6bbd564e0b4cc 100644 --- a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/RestSqlIT.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.nosecurity; +package org.elasticsearch.xpack.sql.qa.single_node; -import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; +import org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase; /** * Integration test for the rest sql action. The one that speaks json directly to a diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java similarity index 96% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java index 8421ec9631150..5e3b034d75708 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql; +package org.elasticsearch.xpack.sql.qa; /** * Interface implemented once per SQL access method to ensure that we diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java similarity index 93% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java index 5f992fa06e9f1..059a4ad2233b5 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.elasticsearch.client.Request; import org.elasticsearch.common.CheckedConsumer; @@ -11,13 +11,13 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.sql.qa.cli.EmbeddedCli.SecurityConfig; import org.junit.After; import org.junit.Before; import java.io.IOException; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.assertNoSearchContexts; public abstract class CliIntegrationTestCase extends ESRestTestCase { /** diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java similarity index 94% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java index 234d229f324b8..251ff8665af68 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -150,10 +150,21 @@ protected boolean addShutdownHook() { assertEquals("", readLine()); } - // Throw out the logo - while (false == readLine().contains("SQL")) { - ; + // Read until the first "good" line (skip the logo or read until an exception) + boolean isLogoOrException = false; + while (!isLogoOrException) { + String line = readLine(); + if ("SQL".equals(line.trim())) { + // it's almost the bottom of the logo, so read the next line (the version) and break out of the loop + readLine(); + isLogoOrException = true; + } else if (line.contains("Exception")) { + // if it's an exception, just break out of the loop and don't read the next line + // as it will swallow the exception and IT tests won't catch it + isLogoOrException = true; + } } + assertConnectionTest(); } catch (IOException e) { try { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java similarity index 95% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java index dee99a7be1ca6..03267329aa353 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.elasticsearch.client.Request; @@ -14,7 +14,7 @@ /** * Tests for error messages. */ -public abstract class ErrorsTestCase extends CliIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase { +public abstract class ErrorsTestCase extends CliIntegrationTestCase implements org.elasticsearch.xpack.sql.qa.ErrorsTestCase { /** * Starting sequence commons to lots of errors. */ @@ -27,7 +27,7 @@ public abstract class ErrorsTestCase extends CliIntegrationTestCase implements o @Override public void testSelectInvalidSql() throws Exception { assertFoundOneProblem(command("SELECT * FRO")); - assertEquals("line 1:8: Cannot determine columns for *" + END, readLine()); + assertEquals("line 1:8: Cannot determine columns for [*]" + END, readLine()); } @Override diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java similarity index 98% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java index 19162c34e3ccd..afc4c302995d5 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
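The EmbeddedCli change above replaces the old busy-wait (read lines until one contains "SQL") with an explicit flag, so that a startup exception is no longer swallowed together with the logo. Reduced to its control flow, the new logic reads as below; the BufferedReader plumbing and the end-of-stream guard are additions here to make the sketch self-contained:

import java.io.BufferedReader;
import java.io.IOException;

final class LogoSkipper {
    // Consume CLI output until either the logo has been skipped or an
    // exception line shows up; in the latter case stop immediately so the
    // exception text remains visible to the assertions that follow.
    static void skipLogoOrStopAtException(BufferedReader out) throws IOException {
        boolean done = false;
        while (!done) {
            String line = out.readLine();
            if (line == null) {
                throw new IOException("CLI output ended before the logo or an exception");
            }
            if ("SQL".equals(line.trim())) {
                out.readLine(); // the version line printed right under the logo
                done = true;
            } else if (line.contains("Exception")) {
                done = true;    // leave the exception line for the caller to report
            }
        }
    }
}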
*/ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.elasticsearch.client.Request; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java index fffa8e0f72a1d..cad607f894808 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/SelectTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.elasticsearch.test.hamcrest.RegexMatcher; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java similarity index 94% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java index b4e87d3e20711..00fb96d01855c 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ShowTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; import org.elasticsearch.test.hamcrest.RegexMatcher; @@ -35,10 +35,15 @@ public void testShowFunctions() throws IOException { while (aggregateFunction.matcher(line).matches()) { line = readLine(); } + Pattern conditionalFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*CONDITIONAL\\s*"); + while (conditionalFunction.matcher(line).matches()) { + line = readLine(); + } Pattern scalarFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*SCALAR\\s*"); while (scalarFunction.matcher(line).matches()) { line = readLine(); } + assertThat(line, RegexMatcher.matches("\\s*SCORE\\s*\\|\\s*SCORE\\s*")); assertEquals("", readLine()); } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/package-info.java similarity index 89% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/package-info.java index b77e050cc3422..7eed43371d881 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/package-info.java @@ -8,4 +8,4 @@ * Support for integration tests for the Elasticsearch SQL CLI client * and integration tests shared between multiple qa projects. 
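The ShowTestCase hunk above slots a CONDITIONAL section between the existing AGGREGATE and SCALAR ones; each section is consumed with the same match-and-advance idiom, after which the listing is expected to end with the SCORE row. The idiom, condensed (the scanner class and iterator plumbing here are illustrative, not the test's actual shape):

import java.util.Arrays;
import java.util.Iterator;
import java.util.regex.Pattern;

final class FunctionListingScanner {
    private static final Pattern AGGREGATE = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*AGGREGATE\\s*");
    private static final Pattern CONDITIONAL = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*CONDITIONAL\\s*");
    private static final Pattern SCALAR = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*SCALAR\\s*");

    // Skip the AGGREGATE, CONDITIONAL and SCALAR sections, in that order,
    // and return the first line after them (expected to be the SCORE row).
    static String skipKnownSections(Iterator<String> lines) {
        String line = lines.next();
        for (Pattern section : Arrays.asList(AGGREGATE, CONDITIONAL, SCALAR)) {
            while (section.matcher(line).matches()) {
                line = lines.next();
            }
        }
        return line;
    }
}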
*/ -package org.elasticsearch.xpack.qa.sql.cli; +package org.elasticsearch.xpack.sql.qa.cli; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ConnectionTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ConnectionTestCase.java index 444142b7138b0..d416d10854b6b 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ConnectionTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.Version; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java similarity index 83% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java index 0402b312338b9..755d701c226bf 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java @@ -3,20 +3,20 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; import java.sql.Connection; import java.sql.ResultSet; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; /** * Tests comparing sql queries executed against our jdbc client @@ -29,13 +29,14 @@ public abstract class CsvSpecTestCase extends SpecBaseIntegrationTestCase { public static List readScriptSpec() throws Exception { Parser parser = specParser(); List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/select.csv-spec", parser)); tests.addAll(readScriptSpec("/command.csv-spec", parser)); tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); tests.addAll(readScriptSpec("/agg.csv-spec", parser)); tests.addAll(readScriptSpec("/columns.csv-spec", parser)); tests.addAll(readScriptSpec("/datetime.csv-spec", parser)); tests.addAll(readScriptSpec("/alias.csv-spec", parser)); - tests.addAll(readScriptSpec("/nulls.csv-spec", parser)); + tests.addAll(readScriptSpec("/null.csv-spec", parser)); tests.addAll(readScriptSpec("/nested.csv-spec", parser)); tests.addAll(readScriptSpec("/functions.csv-spec", parser)); tests.addAll(readScriptSpec("/math.csv-spec", parser)); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 937a5a70b8ad0..8cc8cf6e04044 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index 8105cbf9a5618..337706b5a5a19 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -3,7 +3,7 @@ * or more contributor license agreements. 
Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.http.HttpHost; import org.apache.logging.log4j.LogManager; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java index a5f3a5f83643d..9e4252cc27391 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.common.CheckedSupplier; @@ -12,7 +12,7 @@ import java.sql.ResultSet; import java.sql.SQLException; -import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert.assertResultSets; /** * Tests for our implementation of {@link DatabaseMetaData}. diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java similarity index 88% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java index 4e7842dd25adf..8f07fd879b73d 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java @@ -3,22 +3,22 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.logging.log4j.Logger; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.util.List; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; -import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; @TestLogging(JdbcTestUtils.SQL_TRACE) public abstract class DebugCsvSpec extends SpecBaseIntegrationTestCase { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java similarity index 95% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java index 8726d8ddb9cf5..21d2f3301fb96 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugSqlSpec.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java index 65fd0778b57f2..bb9d5f2c2fc11 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.client.Request; @@ -15,12 +15,12 @@ /** * Tests for exceptions and their messages. 
*/ -public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase { +public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elasticsearch.xpack.sql.qa.ErrorsTestCase { @Override public void testSelectInvalidSql() throws Exception { try (Connection c = esJdbc()) { SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FRO").executeQuery()); - assertEquals("Found 1 problem(s)\nline 1:8: Cannot determine columns for *", e.getMessage()); + assertEquals("Found 1 problem(s)\nline 1:8: Cannot determine columns for [*]", e.getMessage()); } } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java similarity index 96% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java index 784fd358a4548..e0a5bd26db0a2 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.client.Request; import org.junit.Before; @@ -14,7 +14,7 @@ import java.sql.SQLException; import java.sql.Statement; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.assertNoSearchContexts; /** * Tests for setting {@link Statement#setFetchSize(int)} and diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index b55383f1e3f57..5b0a2e226abe8 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; import org.relique.jdbc.csv.CsvResultSet; @@ -114,6 +114,11 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; } + // csv doesn't support NULL type so skip type checking + if (actualType == Types.NULL && expected instanceof CsvResultSet) { + expectedType = Types.NULL; + } + // when lenient is used, an int is equivalent to a short, etc... 
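// note: when the actual type is NULL and the expected result set is CSV-backed, expectedType was forced to NULL above (CSV has no NULL column type), so that pairing always passes the strict check below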
assertEquals("Different column type for column [" + expectedName + "] (" + JDBCType.valueOf(expectedType) + " != " + JDBCType.valueOf(actualType) + ")", expectedType, actualType); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java similarity index 93% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index c6594d7205112..df7e12687caa7 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; @@ -15,7 +15,6 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; -import org.joda.time.DateTimeZone; import org.junit.After; import java.io.IOException; @@ -23,15 +22,13 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Properties; import java.util.Set; -import java.util.TimeZone; -import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.assertNoSearchContexts; public abstract class JdbcIntegrationTestCase extends ESRestTestCase { @After @@ -119,8 +116,8 @@ public static String randomKnownTimeZone() { // from all available JDK timezones. While Joda and JDK are generally in sync, some timezones might not be known // to the current version of Joda and in this case the test might fail. To avoid that, we specify a timezone // known for both Joda and JDK - Set timeZones = new HashSet<>(DateTimeZone.getAvailableIDs()); - timeZones.retainAll(Arrays.asList(TimeZone.getAvailableIDs())); + Set timeZones = new HashSet<>(JODA_TIMEZONE_IDS); + timeZones.retainAll(JAVA_TIMEZONE_IDS); List ids = new ArrayList<>(timeZones); Collections.sort(ids); return randomFrom(ids); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 2bb4697749a3a..7b511ee63f943 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.sql.action.CliFormatter; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java similarity index 98% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java index 8aa5a2287a805..e6295985cf519 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/LocalH2.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedSupplier; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java index c4ac31120a3bd..34d32db16f4dc 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.common.collect.Tuple; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index 80580f3461ac3..bd7914278b360 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.elasticsearch.client.Request; import org.elasticsearch.common.CheckedBiFunction; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java similarity index 94% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java index aa250628f7361..ab2ddc9a7fe24 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java @@ -3,13 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import java.sql.Connection; import java.sql.ResultSet; import java.util.Locale; -import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcAssert.assertResultSets; public class ShowTablesTestCase extends JdbcIntegrationTestCase { public void testShowTablesWithoutAnyIndexes() throws Exception { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SimpleExampleTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SimpleExampleTestCase.java index 35f2dba77792a..4d3b7698e5b5a 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SimpleExampleTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import java.sql.Connection; import java.sql.ResultSet; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index 86cbdec197e72..a7d0332508f6d 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Request; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java similarity index 97% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java index 10fae56be48d4..d5e720cae8614 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -41,6 +41,7 @@ public static List readScriptSpec() throws Exception { tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser)); tests.addAll(readScriptSpec("/string-functions.sql-spec", parser)); tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); + tests.addAll(readScriptSpec("/null.sql-spec", parser)); return tests; } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/package-info.java similarity index 89% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/package-info.java index 1825d9033c83f..d326b0ab9daac 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/package-info.java @@ -8,4 +8,4 @@ * Support for integration tests for the Elasticsearch SQL JDBC client * and integration tests shared between multiple qa projects. */ -package org.elasticsearch.xpack.qa.sql.jdbc; +package org.elasticsearch.xpack.sql.qa.jdbc; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java similarity index 99% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 4df82119e36d0..7287784089f1b 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.qa.sql.rest; +package org.elasticsearch.xpack.sql.qa.rest; import com.fasterxml.jackson.core.io.JsonStringEncoder; @@ -21,7 +21,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.qa.sql.ErrorsTestCase; +import org.elasticsearch.xpack.sql.qa.ErrorsTestCase; import org.hamcrest.Matcher; import java.io.IOException; @@ -202,7 +202,7 @@ public void testSelectWhereExistsFails() throws Exception { @Override public void testSelectInvalidSql() { String mode = randomFrom("jdbc", "plain"); - expectBadRequest(() -> runSql(mode, "SELECT * FRO"), containsString("1:8: Cannot determine columns for *")); + expectBadRequest(() -> runSql(mode, "SELECT * FRO"), containsString("1:8: Cannot determine columns for [*]")); } @Override @@ -515,7 +515,8 @@ public void testTranslateQueryWithGroupByAndHaving() throws IOException { @SuppressWarnings("unchecked") Map filterScript = (Map) bucketSelector.get("script"); assertEquals(3, filterScript.size()); - assertEquals("InternalSqlScriptUtils.gt(params.a0,params.v0)", filterScript.get("source")); + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(params.a0,params.v0))", + filterScript.get("source")); assertEquals("painless", filterScript.get("lang")); @SuppressWarnings("unchecked") Map filterScriptParams = (Map) filterScript.get("params"); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/package-info.java similarity index 87% rename from x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java rename to x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/package-info.java index 1a061730c60bf..a07a23c7b0488 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/package-info.java @@ -7,4 +7,4 @@ /** * Integration tests shared between multiple qa projects. 
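The testTranslateQueryWithGroupByAndHaving assertion above now expects the HAVING comparison to be wrapped in InternalSqlScriptUtils.nullSafeFilter(...). The contract the new expectation encodes, namely that a null comparison result filters the bucket out instead of failing the script, can be sketched in plain Java (an illustration of the semantics only, not the actual InternalSqlScriptUtils implementation):

final class NullSafeFilterSketch {
    // A HAVING predicate evaluated over an empty bucket can come back null;
    // treat null as "does not match" rather than letting the script fail.
    static boolean nullSafeFilter(Boolean condition) {
        return condition != null && condition;
    }

    public static void main(String[] args) {
        System.out.println(nullSafeFilter(Boolean.TRUE));  // true
        System.out.println(nullSafeFilter(Boolean.FALSE)); // false
        System.out.println(nullSafeFilter(null));          // false, not a NullPointerException
    }
}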
*/ -package org.elasticsearch.xpack.qa.sql.rest; +package org.elasticsearch.xpack.sql.qa.rest; diff --git a/x-pack/qa/sql/src/main/resources/agg.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec similarity index 97% rename from x-pack/qa/sql/src/main/resources/agg.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec index 17ed219687ae9..23902516c3ba2 100644 --- a/x-pack/qa/sql/src/main/resources/agg.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec @@ -129,7 +129,7 @@ SELECT MAX(languages) max, MIN(languages) min, SUM(languages) sum, AVG(languages KURTOSIS(languages) kurtosis, SKEWNESS(languages) skewness FROM test_emp GROUP BY languages ORDER BY languages ASC LIMIT 5; - max:bt | min:bt | sum:bt | avg:bt | percent:d | percent_rank:d| kurtosis:d | skewness:d + max:bt | min:bt | sum:bt | avg:d | percent:d | percent_rank:d| kurtosis:d | skewness:d ---------------+---------------+---------------+---------------+---------------+---------------+---------------+--------------- null |null |null |null |null |null |null |null 1 |1 |15 |1 |1.0 |100.0 |NaN |NaN diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec similarity index 98% rename from x-pack/qa/sql/src/main/resources/agg.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec index 2fafb75d69bb5..9adbe79edc685 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec @@ -129,6 +129,10 @@ aggCountAndHaving SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(*) > 10 ORDER BY gender; aggCountAndHavingEquality SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(*) = 10 ORDER BY gender; +aggCountAndHavingNotEquals +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(*) != 10 ORDER BY gender; +aggCountAndHavingNegateEquality +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING NOT COUNT(*) = 10 ORDER BY gender; aggCountOnColumnAndHaving SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING COUNT(gender) > 10 ORDER BY gender; aggCountOnColumnAndWildcardAndHaving @@ -338,7 +342,7 @@ SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY gender OR aggAvgWithCastToDouble SELECT gender g, CAST(AVG(emp_no) AS DOUBLE) a FROM "test_emp" GROUP BY gender ORDER BY gender; aggAvg -SELECT AVG(salary) AS avg FROM test_emp; +SELECT CAST(FLOOR(AVG(salary)) AS INT) AS avg FROM test_emp; aggAvgWithCastAndCount SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" GROUP BY gender ORDER BY gender; aggAvgWithCastAndCountWithFilter diff --git a/x-pack/qa/sql/src/main/resources/alias.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/alias.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/alias.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/arithmetic.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/arithmetic.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/arithmetic.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.sql-spec similarity index 95% rename from x-pack/qa/sql/src/main/resources/arithmetic.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/arithmetic.sql-spec index 
c9ff79dca0d70..c6ce576d38f73 100644 --- a/x-pack/qa/sql/src/main/resources/arithmetic.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.sql-spec @@ -72,7 +72,7 @@ SELECT COUNT(*) % 10000 AS x FROM test_emp GROUP BY gender ORDER BY gender; aggVariableTwoInputs SELECT MAX(emp_no) - MIN(emp_no) AS x FROM test_emp GROUP BY gender ORDER BY gender; aggVariableThreeInputs -SELECT (MAX(emp_no) - MIN(emp_no)) + AVG(emp_no) AS x FROM test_emp GROUP BY gender ORDER BY gender; +SELECT CAST((MAX(emp_no) - MIN(emp_no)) + FLOOR(AVG(emp_no)) AS INT) AS x FROM test_emp GROUP BY gender ORDER BY gender; // ordering orderByPlus diff --git a/x-pack/qa/sql/src/main/resources/case-functions.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/case-functions.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec diff --git a/x-pack/qa/sql/src/main/resources/columns.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/columns.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/columns.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/columns.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/command-sys.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec similarity index 98% rename from x-pack/qa/sql/src/main/resources/command.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/command.csv-spec index cc71dd947129a..d15e849921592 100644 --- a/x-pack/qa/sql/src/main/resources/command.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec @@ -19,6 +19,7 @@ SKEWNESS |AGGREGATE STDDEV_POP |AGGREGATE SUM_OF_SQUARES |AGGREGATE VAR_POP |AGGREGATE +COALESCE |CONDITIONAL DAY |SCALAR DAYNAME |SCALAR DAYOFMONTH |SCALAR @@ -99,7 +100,9 @@ RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR UCASE |SCALAR -SCORE |SCORE +CAST |SCALAR +CONVERT |SCALAR +SCORE |SCORE ; showFunctionsWithExactMatch diff --git a/x-pack/qa/sql/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/datetime.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/datetime.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/datetime.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/datetime.sql-spec diff --git a/x-pack/qa/sql/src/main/resources/debug.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/debug.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/debug.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/debug.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/debug.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/debug.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/debug.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/debug.sql-spec diff --git a/x-pack/qa/sql/src/main/resources/dep_emp.csv b/x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv similarity index 100% rename from x-pack/qa/sql/src/main/resources/dep_emp.csv rename 
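The command.csv-spec expectations above now list COALESCE under a new CONDITIONAL category and expose CAST/CONVERT through SHOW FUNCTIONS, matching the FunctionRegistry additions later in this diff. A hedged sketch for reproducing the listing against a local node of this era (the /_xpack/sql endpoint path and an unsecured localhost:9200 cluster are assumptions; adjust for your setup):

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ShowFunctionsSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_xpack/sql");
            request.addParameter("format", "txt");
            request.setJsonEntity("{\"query\": \"SHOW FUNCTIONS LIKE 'C%'\"}");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}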
to x-pack/plugin/sql/qa/src/main/resources/dep_emp.csv diff --git a/x-pack/qa/sql/src/main/resources/departments.csv b/x-pack/plugin/sql/qa/src/main/resources/departments.csv similarity index 100% rename from x-pack/qa/sql/src/main/resources/departments.csv rename to x-pack/plugin/sql/qa/src/main/resources/departments.csv diff --git a/x-pack/qa/sql/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec similarity index 99% rename from x-pack/qa/sql/src/main/resources/docs.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec index 4d5c8c26b8cd0..ccb9498f3590a 100644 --- a/x-pack/qa/sql/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec @@ -196,6 +196,7 @@ SKEWNESS |AGGREGATE STDDEV_POP |AGGREGATE SUM_OF_SQUARES |AGGREGATE VAR_POP |AGGREGATE +COALESCE |CONDITIONAL DAY |SCALAR DAYNAME |SCALAR DAYOFMONTH |SCALAR @@ -276,7 +277,9 @@ RTRIM |SCALAR SPACE |SCALAR SUBSTRING |SCALAR UCASE |SCALAR -SCORE |SCORE +CAST |SCALAR +CONVERT |SCALAR +SCORE |SCORE // end::showFunctions ; @@ -645,7 +648,7 @@ groupByImplicitMultipleAggs // tag::groupByImplicitMultipleAggs SELECT MIN(salary) AS min, MAX(salary) AS max, AVG(salary) AS avg, COUNT(*) AS count FROM emp; - min | max | avg | count + min:i | max:i | avg:d | count:l ---------------+---------------+---------------+--------------- 25324 |74999 |48248 |100 @@ -765,7 +768,7 @@ aggAvg // tag::aggAvg SELECT AVG(salary) AS avg FROM emp; - avg + avg:d --------------- 48248 // end::aggAvg diff --git a/x-pack/qa/sql/src/main/resources/employees.csv b/x-pack/plugin/sql/qa/src/main/resources/employees.csv similarity index 100% rename from x-pack/qa/sql/src/main/resources/employees.csv rename to x-pack/plugin/sql/qa/src/main/resources/employees.csv diff --git a/x-pack/qa/sql/src/main/resources/example.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/example.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/example.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/example.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/example.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/example.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/example.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/example.sql-spec diff --git a/x-pack/qa/sql/src/main/resources/filter.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec similarity index 91% rename from x-pack/qa/sql/src/main/resources/filter.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec index c4ddbf66e0d4d..146a912dc7d9d 100644 --- a/x-pack/qa/sql/src/main/resources/filter.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec @@ -101,6 +101,6 @@ whereWithInAndComplexFunctions SELECT last_name l FROM "test_emp" WHERE emp_no NOT IN (10000, abs(2 - 10003), 10002, 999) AND lcase(first_name) IN ('sumant', 'mary', 'patricio', 'No''Match') ORDER BY emp_no LIMIT 5; whereWithInAndNullHandling1 -SELECT last_name l FROM "test_emp" WHERE birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) AND (emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040) ORDER BY emp_no; +SELECT last_name l FROM "test_emp" WHERE languages in (2, 10)AND (emp_no = 10018 OR emp_no = 10019 OR emp_no = 10020) ORDER BY emp_no; whereWithInAndNullHandling2 -SELECT last_name l FROM "test_emp" WHERE birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS 
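The whereWithInAndNullHandling rewrites above swap birth_date for languages, but the semantics under test are unchanged: a NULL inside an IN list can never make the predicate true, and a non-match against a list containing NULL evaluates to UNKNOWN rather than FALSE, which WHERE treats as a non-match. A small sketch of that three-valued IN:

// Sketch of SQL's three-valued IN, which the specs above exercise.
import java.util.Arrays;
import java.util.List;

public class InWithNullSketch {
    static Boolean in(Integer value, List<Integer> list) {
        if (value == null) return null;                    // NULL IN (...) is UNKNOWN
        if (list.contains(value)) return Boolean.TRUE;
        return list.contains(null) ? null : Boolean.FALSE; // UNKNOWN, not FALSE
    }

    public static void main(String[] args) {
        List<Integer> list = Arrays.asList(2, null, 10);
        System.out.println(in(2, list)); // true -> row kept by WHERE
        System.out.println(in(3, list)); // null -> row dropped by WHERE
    }
}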
TIMESTAMP)) AND (emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040) ORDER BY emp_no; +SELECT last_name l FROM "test_emp" WHERE languages in (2, null, 10) AND (emp_no = 10018 OR emp_no = 10019 OR emp_no = 10020) ORDER BY emp_no; diff --git a/x-pack/qa/sql/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/fulltext.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec diff --git a/x-pack/qa/sql/src/main/resources/functions.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/functions.csv-spec similarity index 96% rename from x-pack/qa/sql/src/main/resources/functions.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/functions.csv-spec index 6e6ccf872b585..930a15f9438a4 100644 --- a/x-pack/qa/sql/src/main/resources/functions.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/functions.csv-spec @@ -232,21 +232,13 @@ SELECT POSITION('x',LCASE("first_name")) pos, "first_name" FROM "test_emp" WHERE pos:i | first_name:s ---------------+--------------- 4 |Guoxiang -null |null -null |null -null |null -null |null -null |null -null |null -null |null -null |null -null |null -null |null -1 |Xinglin +1 |Xinglin ; selectPositionWithLcaseAndConditionWithGroupByAndOrderBy -SELECT POSITION('m',LCASE("first_name")), COUNT(*) pos FROM "test_emp" WHERE POSITION('m',LCASE("first_name")) != 0 GROUP BY POSITION('m',LCASE("first_name")) ORDER BY POSITION('m',LCASE("first_name")) DESC; +SELECT POSITION('m',LCASE("first_name")), COUNT(*) pos FROM "test_emp" + WHERE POSITION('m',LCASE("first_name")) != 0 + GROUP BY POSITION('m',LCASE("first_name")) ORDER BY POSITION('m',LCASE("first_name")) DESC; POSITION(m,LCASE(first_name)):i| pos:l -------------------------------+--------------- @@ -256,7 +248,6 @@ POSITION(m,LCASE(first_name)):i| pos:l 3 |6 2 |1 1 |9 -null |10 ; selectInsertWithPositionAndCondition diff --git a/x-pack/qa/sql/src/main/resources/ip.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/ip.csv-spec similarity index 89% rename from x-pack/qa/sql/src/main/resources/ip.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/ip.csv-spec index e8075d57c05c6..e6eb81ff8bf45 100644 --- a/x-pack/qa/sql/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/ip.csv-spec @@ -72,6 +72,26 @@ SELECT id, client_ip, dest_ip FROM logs WHERE client_ip = '10.0.1.166' ORDER BY 34 |10.0.1.166 |2001:cafe::ff07:bdcc:bc59:ff9e ; +filterExactMatchIpv4WithIn_CastAsIP +SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip IN (CAST('172.16.1.1' AS IP), CAST('2001:cafe::13e1:16fc:8726:1bf8' AS IP)) ORDER BY id DESC LIMIT 3; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +100 |10.0.0.129 |172.16.1.1 +78 |10.0.1.199 |2001:cafe::13e1:16fc:8726:1bf8 +69 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +; + +filterExactMatchIpv4WithIn_CastAsString +SELECT id, client_ip, dest_ip FROM logs WHERE CAST(dest_ip AS STRING) IN ('172.16.1.1', '2001:cafe::13e1:16fc:8726:1bf8') ORDER BY id DESC LIMIT 3; + + id | client_ip | dest_ip +---------------+---------------+------------------------------ +100 |10.0.0.129 |172.16.1.1 +78 |10.0.1.199 |2001:cafe::13e1:16fc:8726:1bf8 +69 |10.0.1.166 |2001:cafe::13e1:16fc:8726:1bf8 +; + filterExactMatchIpv6 SELECT id, client_ip, dest_ip FROM logs WHERE dest_ip = 'fe80::86ba:3bff:fe05:c3f3' ORDER BY id LIMIT 10; diff --git a/x-pack/qa/sql/src/main/resources/library.csv 
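The functions.csv-spec trims above remove the expected null rows: with scalar functions now propagating the nullability of their children (see the Function.nullable() change later in this diff), POSITION over a missing first_name yields NULL, so the != 0 predicate and the GROUP BY no longer surface a null bucket. A sketch of the propagation:

// A scalar over a NULL input is NULL, and NULL != 0 is UNKNOWN, so the row
// never reaches the result set or the GROUP BY buckets.
public class PositionNullSketch {
    static Integer position(Character needle, String haystack) {
        return haystack == null ? null : haystack.indexOf(needle) + 1; // 0 when absent
    }

    static Boolean notEqual(Integer left, int right) {
        return left == null ? null : left != right;
    }

    public static void main(String[] args) {
        System.out.println(notEqual(position('m', "Mayuko".toLowerCase()), 0)); // true
        System.out.println(notEqual(position('m', null), 0));                   // null -> filtered
    }
}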
b/x-pack/plugin/sql/qa/src/main/resources/library.csv similarity index 100% rename from x-pack/qa/sql/src/main/resources/library.csv rename to x-pack/plugin/sql/qa/src/main/resources/library.csv diff --git a/x-pack/qa/sql/src/main/resources/logs.csv b/x-pack/plugin/sql/qa/src/main/resources/logs.csv similarity index 100% rename from x-pack/qa/sql/src/main/resources/logs.csv rename to x-pack/plugin/sql/qa/src/main/resources/logs.csv diff --git a/x-pack/plugin/sql/qa/src/main/resources/math.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/math.csv-spec new file mode 100644 index 0000000000000..7a63f412f439f --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/math.csv-spec @@ -0,0 +1,178 @@ +// this one doesn't work in H2 at all +truncateWithAsciiHavingAndOrderBy +SELECT TRUNCATE(ASCII(LEFT(first_name, 1)), 1), COUNT(*) count FROM test_emp GROUP BY ASCII(LEFT(first_name, 1)) HAVING COUNT(*) > 5 ORDER BY TRUNCATE(ASCII(LEFT(first_name, 1)), 1) DESC; + +TRUNCATE(ASCII(LEFT(first_name,1)),1):i| count:l +---------------------------------------+--------------- +null |10 +66 |7 +72 |6 +75 |7 +77 |9 +83 |11 +; + +truncateWithNoSecondParameterWithAsciiHavingAndOrderBy +SELECT TRUNCATE(ASCII(LEFT(first_name, 1))), COUNT(*) count FROM test_emp GROUP BY ASCII(LEFT(first_name, 1)) HAVING COUNT(*) > 5 ORDER BY TRUNCATE(ASCII(LEFT(first_name, 1))) DESC; + +TRUNCATE(ASCII(LEFT(first_name,1)),0):i| count:l +---------------------------------------+--------------- +null |10 +66 |7 +72 |6 +75 |7 +77 |9 +83 |11 +; + +roundWithGroupByAndOrderBy +SELECT ROUND(salary, 2) ROUNDED, salary FROM test_emp GROUP BY ROUNDED, salary ORDER BY ROUNDED LIMIT 10; + + ROUNDED | salary +---------------+--------------- +25324 |25324 +25945 |25945 +25976 |25976 +26436 |26436 +27215 |27215 +28035 |28035 +28336 |28336 +28941 |28941 +29175 |29175 +30404 |30404 +; + +truncateWithGroupByAndOrderBy +SELECT TRUNCATE(salary, 2) TRUNCATED, salary FROM test_emp GROUP BY TRUNCATED, salary ORDER BY TRUNCATED LIMIT 10; + + TRUNCATED | salary +---------------+--------------- +25324 |25324 +25945 |25945 +25976 |25976 +26436 |26436 +27215 |27215 +28035 |28035 +28336 |28336 +28941 |28941 +29175 |29175 +30404 |30404 +; + +truncateWithAsciiAndOrderBy +SELECT TRUNCATE(ASCII(LEFT(first_name,1)), -1) AS initial, first_name, ASCII(LEFT(first_name, 1)) FROM test_emp ORDER BY ASCII(LEFT(first_name, 1)) DESC LIMIT 15; + + initial | first_name |ASCII(LEFT(first_name,1)) +---------------+---------------+------------------------- +90 |Zvonko |90 +90 |Zhongwei |90 +80 |Yongqiao |89 +80 |Yishay |89 +80 |Yinghua |89 +80 |Xinglin |88 +80 |Weiyi |87 +80 |Vishv |86 +80 |Valdiodio |86 +80 |Valter |86 +80 |Uri |85 +80 |Udi |85 +80 |Tzvetan |84 +80 |Tse |84 +80 |Tuval |84 +; + +truncateWithHavingAndGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, COUNT(*) c, TRUNCATE(AVG(salary)) tr FROM test_emp GROUP BY languages HAVING TRUNCATE(AVG(salary)) > 40000 ORDER BY languages; + + mi:i | ma:i | c:l | tr:d +---------------+---------------+---------------+--------------- +28336 |74999 |10 |52519.0 +25976 |73717 |15 |50576.0 +29175 |73578 |19 |48178.0 +26436 |74970 |17 |52418.0 +27215 |74572 |18 |47733.0 +25324 |66817 |21 |41680.0 +; + +minMaxTruncateAndRoundOfAverageWithHavingRoundAndTruncate +SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages), 1), TRUNCATE(AVG(languages), 1), COUNT(*) FROM test_emp GROUP BY YEAR(hire_date) HAVING ROUND(AVG(languages), 1) > 2.5 AND TRUNCATE(AVG(languages), 1) <= 3.0 ORDER BY YEAR(hire_date); + + mi:i | 
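The new math.csv-spec above and below pins down the difference between ROUND and TRUNCATE, including the negative-precision case (TRUNCATE(ASCII(...), -1) maps 89 to 80) and the one-decimal aggregates (2.9 via ROUND vs 2.8 via TRUNCATE). BigDecimal is only a convenient stand-in for the server-side math in this sketch:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class RoundTruncateSketch {
    public static void main(String[] args) {
        BigDecimal v = new BigDecimal("89");
        System.out.println(v.setScale(-1, RoundingMode.HALF_UP)); // 9E+1 (ROUND -> 90)
        System.out.println(v.setScale(-1, RoundingMode.DOWN));    // 8E+1 (TRUNCATE -> 80)

        BigDecimal avg = new BigDecimal("2.85");
        System.out.println(avg.setScale(1, RoundingMode.HALF_UP)); // 2.9 (ROUND(AVG, 1))
        System.out.println(avg.setScale(1, RoundingMode.DOWN));    // 2.8 (TRUNCATE(AVG, 1))
    }
}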
ma:i | year:i |ROUND(AVG(languages),1):d|TRUNCATE(AVG(languages),1):d| COUNT(1):l +---------------+---------------+---------------+-------------------------+----------------------------+--------------- +25324 |70011 |1987 |3.0 |3.0 |15 +25945 |73578 |1988 |2.9 |2.8 |9 +25976 |74970 |1989 |3.0 |3.0 |13 +31120 |71165 |1990 |3.1 |3.0 |12 +30404 |58715 |1993 |3.0 |3.0 |3 +35742 |67492 |1994 |2.8 |2.7 |4 +45656 |45656 |1996 |3.0 |3.0 |1 +; + +minMaxRoundWithHavingRound +SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages), 1), COUNT(*) FROM test_emp GROUP BY YEAR(hire_date) HAVING ROUND(AVG(languages), 1) > 2.5 ORDER BY YEAR(hire_date); + + mi:i | ma:i | year:i |ROUND(AVG(languages),1):d| COUNT(1):l +---------------+---------------+---------------+-------------------------+--------------- +26436 |74999 |1985 |3.1 |11 +31897 |61805 |1986 |3.5 |11 +25324 |70011 |1987 |3.0 |15 +25945 |73578 |1988 |2.9 |9 +25976 |74970 |1989 |3.0 |13 +31120 |71165 |1990 |3.1 |12 +32568 |65030 |1991 |3.3 |6 +27215 |60781 |1992 |4.1 |8 +30404 |58715 |1993 |3.0 |3 +35742 |67492 |1994 |2.8 |4 +45656 |45656 |1996 |3.0 |1 +; + +groupByAndOrderByTruncateWithPositiveParameter +SELECT TRUNCATE(AVG(salary), 2), AVG(salary), COUNT(*) FROM test_emp GROUP BY TRUNCATE(salary, 2) ORDER BY TRUNCATE(salary, 2) DESC LIMIT 10; + +TRUNCATE(AVG(salary),2):d| AVG(salary):d | COUNT(1):l +-------------------------+---------------+--------------- +74999.0 |74999.0 |1 +74970.0 |74970.0 |1 +74572.0 |74572.0 |1 +73851.0 |73851.0 |1 +73717.0 |73717.0 |1 +73578.0 |73578.0 |1 +71165.0 |71165.0 |1 +70011.0 |70011.0 |1 +69904.0 |69904.0 |1 +68547.0 |68547.0 |1 +; + +groupByAndOrderByRoundWithPositiveParameter +SELECT ROUND(AVG(salary), 2), AVG(salary), COUNT(*) FROM test_emp GROUP BY ROUND(salary, 2) ORDER BY ROUND(salary, 2) DESC LIMIT 10; + +ROUND(AVG(salary),2):d| AVG(salary):d | COUNT(1):l +----------------------+---------------+--------------- +74999.0 |74999.0 |1 +74970.0 |74970.0 |1 +74572.0 |74572.0 |1 +73851.0 |73851.0 |1 +73717.0 |73717.0 |1 +73578.0 |73578.0 |1 +71165.0 |71165.0 |1 +70011.0 |70011.0 |1 +69904.0 |69904.0 |1 +68547.0 |68547.0 |1 +; + +groupByAndOrderByRoundWithNoSecondParameter +SELECT ROUND(AVG(salary)), ROUND(salary) rounded, AVG(salary), COUNT(*) FROM test_emp GROUP BY rounded ORDER BY rounded DESC LIMIT 10; + +ROUND(AVG(salary),0):d| rounded:i | AVG(salary):d | COUNT(1):l +----------------------+---------------+---------------+--------------- +74999.0 |74999 |74999.0 |1 +74970.0 |74970 |74970.0 |1 +74572.0 |74572 |74572.0 |1 +73851.0 |73851 |73851.0 |1 +73717.0 |73717 |73717.0 |1 +73578.0 |73578 |73578.0 |1 +71165.0 |71165 |71165.0 |1 +70011.0 |70011 |70011.0 |1 +69904.0 |69904 |69904.0 |1 +68547.0 |68547 |68547.0 |1 +; diff --git a/x-pack/qa/sql/src/main/resources/math.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/math.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/math.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/math.sql-spec diff --git a/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec new file mode 100644 index 0000000000000..89808901e9cff --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/nested.csv-spec @@ -0,0 +1,224 @@ +// +// Nested documents +// +// CsvJdbc has issues with foo.bar so most fields are aliases or wrapped inside a function + +describeParent +DESCRIBE test_emp; + + column | type | mapping +--------------------+---------------+--------------- 
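The nested.csv-spec whose DESCRIBE output continues below shows how SQL surfaces a nested "dep" object: the struct is flattened into dotted column names (dep.dep_id, dep.dep_name, ...) and, conceptually, each matching inner document contributes a result row. A purely illustrative client-side sketch of that flattening (the real projection happens server-side):

import java.util.List;
import java.util.Map;

public class NestedFlattenSketch {
    public static void main(String[] args) {
        Map<String, Object> emp = Map.of(
            "first_name", "Alejandro",
            "dep", List.of(Map.of("dep_id", "d002", "dep_name", "Finance")));

        @SuppressWarnings("unchecked")
        List<Map<String, Object>> deps = (List<Map<String, Object>>) emp.get("dep");
        for (Map<String, Object> dep : deps) { // one output row per nested doc
            System.out.println(emp.get("first_name")
                + " | dep.dep_id=" + dep.get("dep_id")
                + " | dep.dep_name=" + dep.get("dep_name"));
        }
    }
}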
+birth_date |TIMESTAMP |DATE +dep |STRUCT |NESTED +dep.dep_id |VARCHAR |KEYWORD +dep.dep_name |VARCHAR |TEXT +dep.dep_name.keyword|VARCHAR |KEYWORD +dep.from_date |TIMESTAMP |DATE +dep.to_date |TIMESTAMP |DATE +emp_no |INTEGER |INTEGER +first_name |VARCHAR |TEXT +first_name.keyword |VARCHAR |KEYWORD +gender |VARCHAR |KEYWORD +hire_date |TIMESTAMP |DATE +languages |TINYINT |BYTE +last_name |VARCHAR |TEXT +last_name.keyword |VARCHAR |KEYWORD +salary |INTEGER |INTEGER +; + +nestedStar +SELECT dep.* FROM test_emp ORDER BY dep.dep_id LIMIT 5; + + dep.dep_id:s | dep.dep_name:s | dep.from_date:ts | dep.to_date:ts + +d001 |Marketing |1993-08-03T00:00:00.000Z|9999-01-01T00:00:00.000Z +d001 |Marketing |1992-04-27T00:00:00.000Z|1995-07-22T00:00:00.000Z +d001 |Marketing |1988-04-25T00:00:00.000Z|9999-01-01T00:00:00.000Z +d002 |Finance |1993-03-21T00:00:00.000Z|2000-08-10T00:00:00.000Z +d002 |Finance |1990-12-25T00:00:00.000Z|1992-11-05T00:00:00.000Z +; + +// Test for https://github.com/elastic/elasticsearch/issues/30054 +nestedSingleFieldSelect +SELECT dep.dep_id FROM test_emp LIMIT 5; + + dep.dep_id:s + +d005 +d007 +d004 +d004 +d003 +; + +filterPerNestedWithOrderByTopLevel +SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY f LIMIT 5; + +f:s | l:s | d:i + +Chirstian |Koblick |1986 +Duangkaew |Piveteau |1996 +Gino |Leonhardt |1989 +Hidefumi |Caine |1992 +Jayson |Mandell |1999 +; + +filterPerNestedWithOrderByNested +SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.from_date LIMIT 5; + +f:s | l:s | d:i + +Sreekrishna |Servieres |1985 +Zhongwei |Rosen |1986 +Chirstian |Koblick |1986 +Vishv |Zockler |1987 +null |Chappelet |1988 +; + +filterPerNestedWithOrderByNestedWithAlias +SELECT first_name f, dep.dep_id i, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY i LIMIT 5; + +f:s | i:s | d:i + +Parto | d004 | 12 +Chirstian | d004 | 12 +Duangkaew | d004 | 11 +Kazuhide | d004 | 7 +Mayuko | d004 | 12 +; + +filterPerNestedWithOrderByNestedWithoutProjection +SELECT first_name f, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.dep_id LIMIT 5; + +f:s | d:i + +Parto | 12 +Chirstian | 12 +Duangkaew | 11 +Kazuhide | 7 +Mayuko | 12 +; + +selectWithScalarOnNested +SELECT first_name f, last_name l, YEAR(dep.from_date) start FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY start LIMIT 5; + +f:s | l:s | start:i + +Sreekrishna |Servieres |1985 +Zhongwei |Rosen |1986 +Chirstian |Koblick |1986 +null |Chappelet |1988 +Zvonko |Nyanchama |1989 +; + +selectWithScalarOnNestedWithoutProjection +SELECT first_name f, last_name l FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY YEAR(dep.from_date) LIMIT 5; + +f:s | l:s + +Sreekrishna |Servieres +Zhongwei |Rosen +Chirstian |Koblick +null |Chappelet +Zvonko |Nyanchama +; + +// +// Tests for bug https://github.com/elastic/elasticsearch/issues/32951 fix +// + +selectNestedFieldFirst +SELECT dep.dep_id, last_name FROM test_emp ORDER BY last_name LIMIT 5; + +dep.dep_id:s | last_name:s + +d005 |Awdeh +d003 |Azuma +d002 |Baek +d003 |Baek +d004 |Bamford +; + +selectNestedFieldLast +SELECT first_name, dep.dep_id FROM test_emp ORDER BY first_name LIMIT 5; + +first_name:s | dep.dep_id:s +---------------+--------------- +Alejandro |d002 +Amabile |d005 +Anneke |d005 +Anoosh |d005 +Arumugam |d008 +; + +selectNestedFieldInTheMiddle +SELECT first_name, 
dep.dep_name, last_name FROM test_emp ORDER BY first_name LIMIT 5; + +first_name:s |dep.dep_name:s |last_name:s + +Alejandro |Finance |McAlpine +Amabile |Development |Gomatam +Anneke |Development |Preusig +Anoosh |Development |Peyn +Arumugam |Research |Ossenbruggen +; + +selectNestedFieldInTheMiddleAndAtTheEnd +SELECT first_name, dep.dep_name, last_name, dep.dep_id FROM test_emp ORDER BY first_name LIMIT 5; + + first_name:s |dep.dep_name:s | last_name:s | dep.dep_id:s + +Alejandro |Finance |McAlpine |d002 +Amabile |Development |Gomatam |d005 +Anneke |Development |Preusig |d005 +Anoosh |Development |Peyn |d005 +Arumugam |Research |Ossenbruggen |d008 +; + +selectNestedFieldInTheMiddleAndAtBeggining +SELECT dep.dep_id, first_name, dep.dep_name, last_name FROM test_emp ORDER BY first_name LIMIT 5; + + dep.dep_id:s | first_name:s |dep.dep_name:s | last_name:s + +d002 |Alejandro |Finance |McAlpine +d005 |Amabile |Development |Gomatam +d005 |Anneke |Development |Preusig +d005 |Anoosh |Development |Peyn +d008 |Arumugam |Research |Ossenbruggen +; + +selectNestedFieldWithWildcardAtBeggining +SELECT dep.*, first_name FROM test_emp ORDER BY first_name LIMIT 5; + + dep.dep_id:s |dep.dep_name:s | dep.from_date:ts | dep.to_date:ts | first_name:s + +d002 |Finance |1991-06-26T00:00:00.000Z|9999-01-01T00:00:00.000Z|Alejandro +d005 |Development |1992-11-18T00:00:00.000Z|9999-01-01T00:00:00.000Z|Amabile +d005 |Development |1990-08-05T00:00:00.000Z|9999-01-01T00:00:00.000Z|Anneke +d005 |Development |1991-08-30T00:00:00.000Z|9999-01-01T00:00:00.000Z|Anoosh +d008 |Research |1987-04-18T00:00:00.000Z|1997-11-08T00:00:00.000Z|Arumugam +; + +selectNestedFieldWithWildcardAtTheEnd +SELECT first_name, dep.* FROM test_emp ORDER BY first_name LIMIT 5; + + first_name:s | dep.dep_id:s |dep.dep_name:s | dep.from_date:ts | dep.to_date:ts + +Alejandro |d002 |Finance |1991-06-26T00:00:00.000Z|9999-01-01T00:00:00.000Z +Amabile |d005 |Development |1992-11-18T00:00:00.000Z|9999-01-01T00:00:00.000Z +Anneke |d005 |Development |1990-08-05T00:00:00.000Z|9999-01-01T00:00:00.000Z +Anoosh |d005 |Development |1991-08-30T00:00:00.000Z|9999-01-01T00:00:00.000Z +Arumugam |d008 |Research |1987-04-18T00:00:00.000Z|1997-11-08T00:00:00.000Z +; + +selectNestedFieldWithWildcardInTheMiddle +SELECT first_name, dep.*, last_name FROM test_emp ORDER BY first_name LIMIT 5; + + first_name:s | dep.dep_id:s |dep.dep_name:s | dep.from_date:ts | dep.to_date:ts | last_name:s + +Alejandro |d002 |Finance |1991-06-26T00:00:00.000Z|9999-01-01T00:00:00.000Z|McAlpine +Amabile |d005 |Development |1992-11-18T00:00:00.000Z|9999-01-01T00:00:00.000Z|Gomatam +Anneke |d005 |Development |1990-08-05T00:00:00.000Z|9999-01-01T00:00:00.000Z|Preusig +Anoosh |d005 |Development |1991-08-30T00:00:00.000Z|9999-01-01T00:00:00.000Z|Peyn +Arumugam |d008 |Research |1987-04-18T00:00:00.000Z|1997-11-08T00:00:00.000Z|Ossenbruggen +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec new file mode 100644 index 0000000000000..474fceaed4612 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec @@ -0,0 +1,75 @@ +// +// Null expressions +// + +dateTimeOverNull +SELECT YEAR(CAST(NULL AS DATE)) d; + +d:i +null +; + +addOfNull +SELECT CAST(NULL AS INT) + CAST(NULL AS FLOAT) AS n; + +n:d +null +; + + +divOfCastedNull +SELECT 5 / CAST(NULL AS FLOAT) + 10 AS n; + +n:d +null +; + +divNoNull +SELECT 5 / null + 1 AS n; + +n:i +null +; + +coalesceJustWithNull +SELECT COALESCE(null, null, null) AS c; + +c +null 
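The null.csv-spec cases above and below assert the standard COALESCE contract: the first non-null argument wins, and an all-null argument list stays null. A compact sketch of those semantics:

import java.util.Arrays;
import java.util.Objects;

public class CoalesceSketch {
    @SafeVarargs
    static <T> T coalesce(T... values) {
        return Arrays.stream(values).filter(Objects::nonNull).findFirst().orElse(null);
    }

    public static void main(String[] args) {
        System.out.println(coalesce(null, 123, null, 321)); // 123
        System.out.println(coalesce(null, null));           // null
    }
}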
+; + +coalesceFirstNotNull +SELECT COALESCE(123) AS c; + +c +123 +; + + +coalesceWithFirstNullOfString +SELECT COALESCE(null, 'first') AS c; + +c:s +first +; + +coalesceWithFirstNullOfNumber +SELECT COALESCE(null, 123) AS c; + +c:i +123 +; + +coalesceMixed +SELECT COALESCE(null, 123, null, 321) AS c; + +c:i +123 +; + +coalesceScalar +SELECT COALESCE(null, ABS(123) + 1) AS c; + +c:i +124 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/null.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/null.sql-spec new file mode 100644 index 0000000000000..8976c8d8a5ea9 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/null.sql-spec @@ -0,0 +1,12 @@ +// +// Null expressions +// + +coalesceField +SELECT COALESCE(null, ABS(emp_no) + 1) AS c FROM test_emp ORDER BY emp_no LIMIT 5; + +coalesceHaving +SELECT COALESCE(null, ABS(MAX(emp_no)) + 1, 123) AS c FROM test_emp GROUP BY languages HAVING c > 100 ORDER BY languages LIMIT 5; + +coalesceWhere +SELECT COALESCE(null, ABS(emp_no) + 1, 123) AS c FROM test_emp WHERE COALESCE(null, ABS(emp_no) + 1, 123, 321) > 100 ORDER BY emp_no NULLS FIRST LIMIT 5; diff --git a/x-pack/qa/sql/src/main/resources/plugin-security.policy b/x-pack/plugin/sql/qa/src/main/resources/plugin-security.policy similarity index 100% rename from x-pack/qa/sql/src/main/resources/plugin-security.policy rename to x-pack/plugin/sql/qa/src/main/resources/plugin-security.policy diff --git a/x-pack/plugin/sql/qa/src/main/resources/select.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/select.csv-spec new file mode 100644 index 0000000000000..2aa7d9bdc7b51 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/select.csv-spec @@ -0,0 +1,187 @@ +// +// SELECT with = and != +// +// Need to CAST as STRING since for boolean types jdbc CSV translates null -> false +equalsSelectClause +SELECT CAST(4 = 4 AS STRING), CAST(NOT 4 = 4 AS STRING), CAST(3 = 4 AS STRING), CAST(NOT 3 = 4 AS STRING), CAST(1 = null AS STRING), CAST(NOT null = 1 AS STRING); + + CAST(4 == 4 AS VARCHAR):s | CAST(NOT(4 == 4) AS VARCHAR):s | CAST(3 == 4 AS VARCHAR):s | CAST(NOT(3 == 4) AS VARCHAR):s | CAST(1 == null AS VARCHAR):s | CAST(NOT(null == 1) AS VARCHAR):s +----------------------------+---------------------------------+----------------------------+---------------------------------+-------------------------------+----------------------------------- +true |false |false |true |null |null +; + +notEqualsSelectClause +SELECT CAST(4 != 4 AS STRING), CAST(NOT 4 != 4 AS STRING), CAST(3 != 4 AS STRING), CAST(NOT 3 != 4 AS STRING), CAST(1 != null AS STRING), CAST(NOT 1 != null AS STRING); + + CAST(4 != 4 AS VARCHAR):s | CAST(NOT(4 != 4) AS VARCHAR):s | CAST(3 != 4 AS VARCHAR):s | CAST(NOT(3 != 4) AS VARCHAR):s | CAST(1 != null AS VARCHAR):s | CAST(NOT(1 != null) AS VARCHAR):s +----------------------------+---------------------------------+----------------------------+---------------------------------+-------------------------------+----------------------------------- +false |true |true |false |null |null +; + +equalSelectClauseWithTableColumns +SELECT CAST(languages = 2 AS STRING), CAST(NOT languages = 2 AS STRING), CAST(languages = null AS STRING), CAST(NOT languages = null AS STRING) +FROM "test_emp" WHERE emp_no IN(10018, 10019, 10020) ORDER BY emp_no; + + CAST((languages) == 2 AS VARCHAR):s | CAST(NOT((languages) == 2) AS VARCHAR):s | CAST((languages) == null AS VARCHAR):s | CAST(NOT((languages) == null) AS VARCHAR):s 
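The select.csv-spec cases around here all follow the same three-valued pattern: any comparison against NULL is UNKNOWN, and NOT(UNKNOWN) stays UNKNOWN. That is also why the specs CAST the results to STRING: as their own comments note, the CsvJdbc harness would otherwise collapse a null boolean to false. A sketch of the logic being asserted:

public class ThreeValuedLogicSketch {
    static Boolean eq(Integer l, Integer r) {
        return (l == null || r == null) ? null : l.equals(r);
    }

    static Boolean not(Boolean b) {
        return b == null ? null : !b;
    }

    public static void main(String[] args) {
        System.out.println(eq(4, 4));         // true
        System.out.println(not(eq(3, 4)));    // true
        System.out.println(eq(1, null));      // null
        System.out.println(not(eq(1, null))); // null, not true
    }
}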
+--------------------------------------+-------------------------------------------+-----------------------------------------+--------------------------------------------- +true |false |null |null +false |true |null |null +null |null |null |null +; + +notEqualsAndNotEqualsSelectClauseWithTableColumns +SELECT CAST(languages != 2 AS STRING), CAST(NOT languages != 2 AS STRING), CAST(languages != null AS STRING), CAST(NOT languages != null AS STRING) +FROM "test_emp" WHERE emp_no IN(10018, 10019, 10020) ORDER BY emp_no; + + CAST((languages) != 2 AS VARCHAR):s | CAST(NOT((languages) != 2) AS VARCHAR):s | CAST((languages) != null AS VARCHAR):s | CAST(NOT((languages) != null) AS VARCHAR):s +--------------------------------------+-------------------------------------------+-----------------------------------------+--------------------------------------------- +false |true |null |null +true |false |null |null +null |null |null |null +; + + +// +// SELECT with OR and AND and NULL handling +// +// Need to CAST as STRING since for boolean types jdbc CSV translates null -> false +selectWithOrAndNullHandling +SELECT CAST(true OR null AS STRING), CAST(null OR true AS STRING), CAST(false OR null AS STRING), CAST(null OR false AS STRING), CAST(null OR null AS STRING); + + CAST(true OR null AS VARCHAR):s | CAST(null OR true AS VARCHAR):s | CAST(false OR null AS VARCHAR):s | CAST(null OR false AS VARCHAR):s | CAST(null OR null AS VARCHAR):s +----------------------------------+----------------------------------+-----------------------------------+-----------------------------------+--------------------------------- +true |true |null |null |null +; + +selectWithAndAndNullHandling +SELECT CAST(true AND null AS STRING), CAST(null AND true AS STRING), CAST(false AND null AS STRING), CAST(null AND false AS STRING), CAST(null AND null AS STRING); + + CAST(true AND null AS VARCHAR):s | CAST(null AND true AS VARCHAR):s | CAST(false AND null AS VARCHAR):s | CAST(null AND false AS VARCHAR):s | CAST(null AND null AS VARCHAR):s +-----------------------------------+-----------------------------------+------------------------------------+------------------------------------+---------------------------------- +null |null |false |false |null +; + +selectWithOrAndAndAndNullHandling_WithTableColumns +SELECT CAST(languages = 2 OR null AS STRING), CAST(languages = 2 AND null AS STRING) FROM test_emp WHERE emp_no BETWEEN 10018 AND 10020 ORDER BY emp_no; + + CAST(((languages) == 2) OR null AS VARCHAR):s | CAST(((languages) == 2) AND null AS VARCHAR):s +-----------------------------------------------+------------------------------------------------ +true |null +null |false +null |null +; + + +// +// SELECT with IN +// +inWithLiterals +SELECT 1 IN (1, 2, 3), 1 IN (2, 3); + + 1 IN (1, 2, 3):b | 1 IN (2, 3):b +-------------------+------------- +true |false +; + +inWithLiteralsAndFunctions +SELECT 1 IN (2 - 1, 2, 3), abs(-1) IN (2, 3, abs(4 - 5)); + + 1 IN (2 - 1, 2, 3) | ABS(-1) IN (2, 3, ABS(4 - 5)) +---------------------+------------------------------ +true |true +; + + +inWithLiteralsAndNegation +SELECT 1 NOT IN (1, 1 + 1, 3), 1 NOT IN (2, 3); + + NOT(1 IN (1, 1 + 1, 3)) | NOT(1 IN (2, 3)) +--------------------------+----------------- +false |true +; + +// Need to CAST as STRING since for boolean types the jdbc CSV translates null -> false +inWithNullHandling +SELECT CAST(2 IN (1, null, 3) AS STRING), CAST(3 IN (1, null, 3) AS STRING), CAST(null IN (1, null, 3) AS STRING), CAST(null IN (1, 2, 3) AS STRING); + + CAST(2 IN (1, null, 
3) AS VARCHAR):s | CAST(3 IN (1, null, 3) AS VARCHAR):s | CAST(null IN (1, null, 3) AS VARCHAR):s | CAST(null IN (1, 2, 3) AS VARCHAR):s +---------------------------------------+--------------------------------------+------------------------------------------+-------------------------------------- +null |true |null |null +; + +inWithNullHandlingAndNegation +SELECT CAST(NOT 2 IN (1, null, 3) AS STRING), CAST(3 NOT IN (1, null, 3) AS STRING), CAST(NOT null IN (1, null, 3) AS STRING), CAST(null NOT IN (1, 2, 3) AS STRING); + + CAST(NOT(2 IN (1, null, 3)) AS VARCHAR):s | CAST(NOT(3 IN (1, null, 3)) AS VARCHAR):s | CAST(NOT(null IN (1, null, 3)) AS VARCHAR):s | CAST(NOT(null IN (1, 2, 3)) AS VARCHAR):s +--------------------------------------------+--------------------------------------------+-----------------------------------------------+------------------------------------------- +null |false |null |null +; + +// +// SELECT with IN and table columns +// +inWithTableColumn +SELECT emp_no IN (10000, 10001, 10002) FROM test_emp WHERE emp_no BETWEEN 10001 AND 10004 ORDER BY emp_no; + + emp_no IN (10000, 10001, 10002):b +---------------------------------- +true +true +false +false +; + +inWithTableColumnAndFunction +SELECT emp_no IN (10000, 10000 + 1, abs(-10000 - 2)) FROM test_emp WHERE emp_no BETWEEN 10001 AND 10004 ORDER BY emp_no; + + emp_no IN (10000, 10000 + 1, ABS(-10000 - 2)):b +------------------------------------------------ +true +true +false +false +; + +inWithTableColumnAndNegation +SELECT emp_no NOT IN (10000, 10000 + 1, 10002) FROM test_emp WHERE emp_no BETWEEN 10001 AND 10004 ORDER BY emp_no; + + NOT(emp_no IN (10000, 10000 + 1, 10002)):b +------------------------------------------- +false +false +true +true +; + +inWithTableColumnAndComplexFunctions +SELECT emp_no IN (1, abs(1 - 10002), 3) OR emp_no NOT IN (10000, 10000 + 2, 10003) FROM test_emp WHERE emp_no BETWEEN 10001 AND 10004 ORDER BY emp_no; + +(emp_no IN (1, ABS(1 - 10002), 3)) OR (NOT(emp_no IN (10000, 10000 + 2, 10003))):b +---------------------------------------------------------------------------------- +true +false +false +true +; + + +// Need to CAST as STRING since for boolean types the jdbc CSV translates null -> false +inWithTableColumnAndNullHandling +SELECT emp_no, CAST(languages IN (2, 3) AS STRING), CAST(languages IN (2, null, 3) AS STRING) FROM test_emp WHERE emp_no BETWEEN 10018 AND 10020 ORDER BY emp_no; + + emp_no:i | CAST(languages IN (2, 3) AS VARCHAR):s | CAST(languages IN (2, null, 3) AS VARCHAR):s +----------+-----------------------------------------+---------------------------------------------- +10018 |true |true +10019 |false |null +10020 |null |null +; + +inWithTableColumnAndNullHandlingAndNegation +SELECT emp_no, CAST(languages NOT IN (2, 3) AS STRING), CAST(NOT languages IN (2, null, 3) AS STRING) FROM test_emp WHERE emp_no BETWEEN 10018 AND 10020 ORDER BY emp_no; + + emp_no:i | CAST(NOT(languages IN (2, 3)) AS VARCHAR):s | CAST(NOT(languages IN (2, null, 3)) AS VARCHAR):s +----------+----------------------------------------------+--------------------------------------------------- +10018 |false |false +10019 |true |null +10020 |null |null +; diff --git a/x-pack/qa/sql/src/main/resources/select.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/select.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/select.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/select.sql-spec diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql 
b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_procedure_columns.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_procedure_columns.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_procedures.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_procedures.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_table_types.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_table_types.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_table_types.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_table_types.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_tables.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_tables.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_show_tables.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_mock_show_tables.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_mock_show_tables.sql diff --git a/x-pack/qa/sql/src/main/resources/setup_test_emp.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_test_emp.sql similarity index 100% rename from x-pack/qa/sql/src/main/resources/setup_test_emp.sql rename to x-pack/plugin/sql/qa/src/main/resources/setup_test_emp.sql diff --git a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/string-functions.sql-spec similarity index 100% rename from x-pack/qa/sql/src/main/resources/string-functions.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/string-functions.sql-spec diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..68553b80b1a1b --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 75200bc0c1525..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/build.gradle 
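The licenses/*.sha1 swap above (old lucene-core snapshot checksum removed, new one added) reflects how the build pins each dependency jar to a checksum file. A minimal sketch for producing such a value locally, using only the standard JDK (pass the jar path as the first argument):

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class Sha1Sketch {
    public static void main(String[] args) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(Path.of(args[0]))) {
            byte[] buffer = new byte[8192];
            for (int read; (read = in.read(buffer)) != -1; ) {
                digest.update(buffer, 0, read);
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : digest.digest()) {
            hex.append(String.format("%02x", b));
        }
        System.out.println(hex); // compare against the committed .sha1 file
    }
}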
b/x-pack/plugin/sql/sql-cli/build.gradle index f0022040b4909..49f2dcef3d95c 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -16,7 +16,7 @@ archivesBaseName = 'elasticsearch-sql-cli' description = 'Command line interface to Elasticsearch that speaks SQL' dependencies { - compile "org.jline:jline:3.6.0" + compile "org.jline:jline:3.8.2" compile xpackProject('plugin:sql:sql-client') compile xpackProject('plugin:sql:sql-action') compile "org.elasticsearch:elasticsearch-cli:${version}" diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 deleted file mode 100644 index d6938e6d80b0d..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8ecc302d6b7d19da41c66be7d428c17cd6b12b2 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 new file mode 100644 index 0000000000000..29e11fa3a021e --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 @@ -0,0 +1 @@ +8b81efadcb78388b662ede7965b272be56a86ec1 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java index 306189b535a03..40cce57321d65 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java @@ -8,12 +8,14 @@ import org.elasticsearch.xpack.sql.cli.Cli; import org.elasticsearch.xpack.sql.cli.CliTerminal; import org.elasticsearch.xpack.sql.cli.FatalCliException; +import org.elasticsearch.xpack.sql.client.Version; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -34,6 +36,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher public void printLogo(CliTerminal terminal) { terminal.clear(); + int lineLength = 0; try (InputStream in = Cli.class.getResourceAsStream("/logo.txt")) { if (in == null) { throw new FatalCliException("Could not find logo!"); @@ -41,6 +44,7 @@ public void printLogo(CliTerminal terminal) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { String line; while ((line = reader.readLine()) != null) { + lineLength = Math.max(lineLength, line.length()); terminal.println(line); } } @@ -48,6 +52,10 @@ public void printLogo(CliTerminal terminal) { throw new FatalCliException("Could not load logo!", e); } + // print the version centered on the last line + char[] whitespaces = new char[(lineLength - Version.CURRENT.version.length()) / 2]; + Arrays.fill(whitespaces, ' '); + terminal.println(new StringBuilder().append(whitespaces).append(Version.CURRENT.version).toString()); terminal.println(); } diff --git a/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt b/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt index 2880a1d41534d..bcf746c40c63e 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt +++ b/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt @@ -1 +1,27 @@ - Elasticsearch SQL \ No 
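A standalone sketch of the centering arithmetic added to PrintLogoCommand above. One caveat worth noting: the hunk allocates the padding array directly from (lineLength - version.length()) / 2, which assumes the version string is never wider than the logo; the Math.max guard below is a defensive variation, not the shipped code, and the width of 27 is an assumed stand-in for the logo's longest line.

import java.util.Arrays;

public class CenterLineSketch {
    static String center(String text, int width) {
        char[] pad = new char[Math.max(0, (width - text.length()) / 2)];
        Arrays.fill(pad, ' ');
        return new String(pad) + text;
    }

    public static void main(String[] args) {
        System.out.println(center("6.6.0", 27)); // version printed centered under the logo
    }
}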
newline at end of file + asticElasticE + ElasticE sticEla + sticEl ticEl Elast + lasti Elasti tic + cEl ast icE + icE as cEl + icE as cEl + icEla las El + sticElasticElast icElas + las last ticElast +El asti asti stic +El asticEla Elas icE +El Elas cElasticE ticEl cE +Ela ticEl ticElasti cE + las astic last icE + sticElas asti stic + icEl sticElasticElast + icE sticE ticEla + icE sti cEla + icEl sti Ela + cEl sti cEl + Ela astic ticE + asti ElasticElasti + ticElasti lasticElas + ElasticElast + + SQL \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java index b6a54a3a8cf5d..39c48bc31d926 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.Version; import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; @@ -93,6 +94,7 @@ public void testPrintLogo() throws Exception { testTerminal.print("not clean"); assertTrue(new PrintLogoCommand().handle(testTerminal, cliSession, "logo")); assertThat(testTerminal.toString(), containsString("SQL")); + assertThat(testTerminal.toString(), containsString(Version.CURRENT.version)); verifyNoMoreInteractions(httpClient); } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 3ad3b9090a543..e07351a877e33 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -246,4 +246,4 @@ public boolean isCompatibleWith(DataType other) { (isString() && other.isString()) || (isNumeric() && other.isNumeric()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 9876718d4d295..b376e38e40bc0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -380,7 +380,13 @@ private List expandProjections(List List output = child.output(); for (NamedExpression ne : projections) { if (ne instanceof UnresolvedStar) { - result.addAll(expandStar((UnresolvedStar) ne, output)); + List expanded = expandStar((UnresolvedStar) ne, output); + // the field exists, but cannot be expanded (no sub-fields) + if (expanded.isEmpty()) { + result.add(ne); + } else { + result.addAll(expanded); + } } else if (ne instanceof UnresolvedAlias) { UnresolvedAlias ua = (UnresolvedAlias) ne; if (ua.child() instanceof UnresolvedStar) { @@ -403,6 +409,13 @@ private List expandStar(UnresolvedStar us, List outp // since this is an unresolved start we don't know whether it's a path or an actual qualifier Attribute q = resolveAgainstList(us.qualifier(), output); + // the wildcard couldn't be expanded 
because the field doesn't exist at all + // so, add to the list of expanded attributes its qualifier (the field without the wildcard) + // the qualifier will be unresolved and later used in the error message presented to the user + if (q == null) { + expanded.add(us.qualifier()); + return expanded; + } // now use the resolved 'qualifier' to match for (Attribute attr : output) { // filter the attributes that match based on their path diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index c8834240c6ceb..5394625d88272 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.sql.expression.function.Functions; import org.elasticsearch.xpack.sql.expression.function.Score; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.sql.expression.predicate.In; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.Distinct; import org.elasticsearch.xpack.sql.plan.logical.Filter; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java index 2a30db7e44d7b..94ea45b5ec83e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.execution.search; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; @@ -16,7 +17,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; @@ -42,7 +42,7 @@ */ public class CompositeAggregationCursor implements Cursor { - private final Logger log = Loggers.getLogger(getClass()); + private final Logger log = LogManager.getLogger(getClass()); public static final String NAME = "c"; @@ -229,4 +229,4 @@ public boolean equals(Object obj) { public String toString() { return "cursor for composite on index [" + Arrays.toString(indices) + "]"; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index e3c604feda076..f3a397dc68b3d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -5,6 +5,7 @@ */ package 
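The Analyzer fallback above keeps the unresolved qualifier when a qualified star matches nothing, so the Verifier can report the bracketed "Cannot determine columns for [...]" message that the REST test earlier in this diff asserts. A simplified sketch of the expansion logic (strings stand in for attributes):

import java.util.List;
import java.util.stream.Collectors;

public class ExpandStarSketch {
    static List<String> expandStar(String qualifier, List<String> output) {
        // Keep every attribute whose dotted path sits under the qualifier.
        return output.stream()
            .filter(attr -> attr.startsWith(qualifier + "."))
            .collect(Collectors.toList());
        // An empty result means the caller retains the unresolved star so the
        // user-facing error can name it.
    }

    public static void main(String[] args) {
        List<String> output = List.of("dep.dep_id", "dep.dep_name", "first_name");
        System.out.println(expandStar("dep", output));   // [dep.dep_id, dep.dep_name]
        System.out.println(expandStar("bogus", output)); // [] -> "Cannot determine columns for [bogus.*]"
    }
}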
org.elasticsearch.xpack.sql.execution.search; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; @@ -13,7 +14,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -61,7 +61,7 @@ // TODO: add retry/back-off public class Querier { - private final Logger log = Loggers.getLogger(getClass()); + private final Logger log = LogManager.getLogger(getClass()); private final TimeValue keepAlive, timeout; private final int size; @@ -443,4 +443,4 @@ public final void onFailure(Exception ex) { listener.onFailure(ex); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java index 0fa20022f1cd3..dcaca223f4ce2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.execution.search; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.ClearScrollRequest; @@ -16,7 +17,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; @@ -28,7 +28,7 @@ public class ScrollCursor implements Cursor { - private final Logger log = Loggers.getLogger(getClass()); + private final Logger log = LogManager.getLogger(getClass()); public static final String NAME = "s"; @@ -124,4 +124,4 @@ public static void cleanCursor(Client client, String scrollId, ActionListener names(Collection e) { List names = new ArrayList<>(e.size()); for (Expression ex : e) { @@ -127,22 +142,50 @@ public static Pipe pipe(Expression e) { throw new SqlIllegalArgumentException("Cannot create pipe for {}", e); } - public static TypeResolution typeMustBe(Expression e, Predicate predicate, String message) { - return predicate.test(e) ? TypeResolution.TYPE_RESOLVED : new TypeResolution(message); + public static List pipe(List expressions) { + List pipes = new ArrayList<>(expressions.size()); + for (Expression e : expressions) { + pipes.add(pipe(e)); + } + return pipes; } - public static TypeResolution typeMustBeNumeric(Expression e) { - return e.dataType().isNumeric() ? 
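The cursor and querier hunks above all make the same mechanical swap: Elasticsearch's Loggers helper is replaced with log4j's own LogManager. A minimal sketch of the resulting idiom:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LoggerSketch {
    // Equivalent of the changed fields: resolve the logger from the class itself.
    private final Logger log = LogManager.getLogger(getClass());

    void nextPage() {
        log.trace("next page for cursor");
    }
}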
TypeResolution.TYPE_RESOLVED : new TypeResolution(incorrectTypeErrorMessage(e, "numeric")); + public static TypeResolution typeMustBeBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, dt -> dt == DataType.BOOLEAN, operationName, paramOrd, "boolean"); } - public static TypeResolution typeMustBeNumericOrDate(Expression e) { - return e.dataType().isNumeric() || e.dataType() == DataType.DATE ? - TypeResolution.TYPE_RESOLVED : - new TypeResolution(incorrectTypeErrorMessage(e, "numeric", "date")); + public static TypeResolution typeMustBeInteger(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, dt -> dt.isInteger, operationName, paramOrd, "integer"); + } + + public static TypeResolution typeMustBeNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, DataType::isNumeric, operationName, paramOrd, "numeric"); } - - private static String incorrectTypeErrorMessage(Expression e, String...acceptedTypes) { - return "Argument required to be " + Strings.arrayToDelimitedString(acceptedTypes, " or ") - + " ('" + Expressions.name(e) + "' type is '" + e.dataType().esType + "')"; + + public static TypeResolution typeMustBeString(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, DataType::isString, operationName, paramOrd, "string"); + } + + public static TypeResolution typeMustBeDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, dt -> dt == DataType.DATE, operationName, paramOrd, "date"); + } + + public static TypeResolution typeMustBeNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return typeMustBe(e, dt -> dt.isNumeric() || dt == DataType.DATE, operationName, paramOrd, "numeric", "date"); + } + + public static TypeResolution typeMustBe(Expression e, + Predicate predicate, + String operationName, + ParamOrdinal paramOrd, + String... acceptedTypes) { + return predicate.test(e.dataType()) || DataTypes.isNull(e.dataType())? + TypeResolution.TYPE_RESOLVED : + new TypeResolution(format(Locale.ROOT, "[%s]%s argument must be [%s], found value [%s] type [%s]", + operationName, + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : " " + paramOrd.name().toLowerCase(Locale.ROOT), + Strings.arrayToDelimitedString(acceptedTypes, " or "), + Expressions.name(e), + e.dataType().esType)); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java index 3c334c233f91a..7148a08a3facd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -26,6 +26,7 @@ public class Literal extends NamedExpression { public static final Literal TRUE = Literal.of(Location.EMPTY, Boolean.TRUE); public static final Literal FALSE = Literal.of(Location.EMPTY, Boolean.FALSE); + public static final Literal NULL = Literal.of(Location.EMPTY, null); private final Object value; private final DataType dataType; @@ -160,7 +161,11 @@ public static Literal of(String name, Expression foldable) { if (name == null) { name = foldable instanceof NamedExpression ? 
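The Expressions refactor above replaces the bespoke typeMustBeNumeric-style checks with one predicate-driven typeMustBe that folds the operation name and parameter ordinal into the error text. A simplified sketch of the pattern (the enum and return type are illustrative stand-ins, not the plugin's real classes):

import java.util.Locale;
import java.util.function.Predicate;

public class TypeResolutionSketch {
    enum DataTypeSketch { INTEGER, KEYWORD, BOOLEAN }

    // Returns null when resolved, or an error message mirroring the new format.
    static String typeMustBe(DataTypeSketch actual, Predicate<DataTypeSketch> predicate,
                             String operation, int ordinal, String accepted) {
        return predicate.test(actual) ? null
            : String.format(Locale.ROOT, "[%s] argument %d must be [%s], found type [%s]",
                operation, ordinal, accepted, actual.name().toLowerCase(Locale.ROOT));
    }

    public static void main(String[] args) {
        System.out.println(typeMustBe(DataTypeSketch.KEYWORD,
            dt -> dt == DataTypeSketch.INTEGER, "ABS", 1, "numeric"));
    }
}

Centralizing the check means every function reports type errors in one consistent shape instead of each builder formatting its own message.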
((NamedExpression) foldable).name() : String.valueOf(fold); } - return new Literal(foldable.location(), name, fold, foldable.dataType()); } + + public static Literal of(Expression source, Object value) { + String name = source instanceof NamedExpression ? ((NamedExpression) source).name() : String.valueOf(value); + return new Literal(source.location(), name, value, source.dataType()); + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java index 948a5465efa80..c9ef08eab5aee 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java @@ -66,12 +66,12 @@ public boolean equals(Object obj) { } private String message() { - return (qualifier() != null ? qualifier() + "." : "") + "*"; + return (qualifier() != null ? qualifier().qualifiedName() + "." : "") + "*"; } @Override public String unresolvedMessage() { - return "Cannot determine columns for " + message(); + return "Cannot determine columns for [" + message() + "]"; } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java index 6c6f1a2633ac8..000a9c097c98f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java @@ -46,7 +46,7 @@ public String name() { @Override public boolean nullable() { - return false; + return Expressions.nullable(children()); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java index d513ca07df4ae..a284ba83a972b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java @@ -19,6 +19,7 @@ public class FunctionDefinition { public interface Builder { Function build(UnresolvedFunction uf, boolean distinct, TimeZone tz); } + private final String name; private final List aliases; private final Class clazz; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 4da4cf4d02301..70c9c18a5a4c0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.sql.expression.function.aggregate.SumOfSquares; import org.elasticsearch.xpack.sql.expression.function.aggregate.VarPop; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; import 
org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; @@ -81,9 +82,11 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.Space; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Substring; import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.Coalesce; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.StringUtils; import java.util.Arrays; @@ -116,14 +119,14 @@ public class FunctionRegistry { public FunctionRegistry() { defineDefaultFunctions(); } - + /** * Constructor specifying alternate functions for testing. */ FunctionRegistry(FunctionDefinition... functions) { addToMap(functions); } - + private void defineDefaultFunctions() { // Aggregate functions addToMap(def(Avg.class, Avg::new), @@ -140,6 +143,8 @@ private void defineDefaultFunctions() { def(Skewness.class, Skewness::new), def(Kurtosis.class, Kurtosis::new)); // Scalar functions + // conditional + addToMap(def(Coalesce.class, Coalesce::new)); // Date addToMap(def(DayName.class, DayName::new, "DAYNAME"), def(DayOfMonth.class, DayOfMonth::new, "DAYOFMONTH", "DAY", "DOM"), @@ -206,11 +211,13 @@ private void defineDefaultFunctions() { def(Space.class, Space::new), def(Substring.class, Substring::new), def(UCase.class, UCase::new)); + // DataType conversion + addToMap(def(Cast.class, Cast::new, "CONVERT")); // Special addToMap(def(Score.class, Score::new)); } - - protected void addToMap(FunctionDefinition...functions) { + + void addToMap(FunctionDefinition...functions) { // temporary map to hold [function_name/alias_name : function instance] Map batchMap = new HashMap<>(); for (FunctionDefinition f : functions) { @@ -227,7 +234,7 @@ protected void addToMap(FunctionDefinition...functions) { // sort the temporary map by key name and add it to the global map of functions defs.putAll(batchMap.entrySet().stream() .sorted(Map.Entry.comparingByKey()) - .collect(Collectors., String, + .collect(Collectors., String, FunctionDefinition, LinkedHashMap> toMap(Map.Entry::getKey, Map.Entry::getValue, (oldValue, newValue) -> oldValue, LinkedHashMap::new))); } @@ -306,6 +313,26 @@ static FunctionDefinition def(Class function, return def(function, builder, false, aliases); } + /** + * Build a {@linkplain FunctionDefinition} for multi-arg function that + * is not aware of time zone and does not support {@code DISTINCT}. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + static FunctionDefinition def(Class function, + MultiFunctionBuilder ctorRef, String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (distinct) { + throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + } + return ctorRef.build(location, children); + }; + return def(function, builder, false, aliases); + } + + interface MultiFunctionBuilder { + T build(Location location, List children); + } + /** * Build a {@linkplain FunctionDefinition} for a unary function that is not * aware of time zone but does support {@code DISTINCT}. 
@@ -321,6 +348,7 @@ static FunctionDefinition def(Class function, }; return def(function, builder, false, aliases); } + interface DistinctAwareUnaryFunctionBuilder { T build(Location location, Expression target, boolean distinct); } @@ -343,6 +371,7 @@ static FunctionDefinition def(Class function, }; return def(function, builder, true, aliases); } + interface DatetimeUnaryFunctionBuilder { T build(Location location, Expression target, TimeZone tz); } @@ -369,6 +398,7 @@ static FunctionDefinition def(Class function, }; return def(function, builder, false, aliases); } + interface BinaryFunctionBuilder { T build(Location location, Expression lhs, Expression rhs); } @@ -387,10 +417,11 @@ private static FunctionDefinition def(Class function, Functi }; return new FunctionDefinition(primaryName, unmodifiableList(Arrays.asList(aliases)), function, datetime, realBuilder); } + private interface FunctionBuilder { Function build(Location location, List children, boolean distinct, TimeZone tz); } - + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do static FunctionDefinition def(Class function, ThreeParametersFunctionBuilder ctorRef, String... aliases) { @@ -408,11 +439,11 @@ static FunctionDefinition def(Class function, }; return def(function, builder, false, aliases); } - + interface ThreeParametersFunctionBuilder { T build(Location location, Expression source, Expression exp1, Expression exp2); } - + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do static FunctionDefinition def(Class function, FourParametersFunctionBuilder ctorRef, String... aliases) { @@ -427,11 +458,30 @@ static FunctionDefinition def(Class function, }; return def(function, builder, false, aliases); } - + interface FourParametersFunctionBuilder { T build(Location location, Expression source, Expression exp1, Expression exp2, Expression exp3); }
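All of the def(...) overloads in this registry, including the Cast-specific one that follows, reduce a typed constructor reference to one uniform builder shape, with flags like DISTINCT validated inside the adapter lambda. A stripped-down sketch of that adapter idea (Java 16+ for the record; the names here are illustrative, not the registry's real API):

    import java.util.List;

    public class RegistrySketch {
        interface Expression {}

        // The registry stores every function under one uniform builder signature.
        interface FunctionBuilder {
            Expression build(List<Expression> children, boolean distinct);
        }

        // What a multi-arg constructor reference such as Coalesce::new satisfies.
        interface MultiFunctionBuilder {
            Expression build(List<Expression> children);
        }

        // Adapt the constructor reference, rejecting DISTINCT the way the patch does.
        static FunctionBuilder def(MultiFunctionBuilder ctorRef) {
            return (children, distinct) -> {
                if (distinct) {
                    throw new IllegalArgumentException("does not support DISTINCT yet it was specified");
                }
                return ctorRef.build(children);
            };
        }

        record Coalesce(List<Expression> children) implements Expression {}

        public static void main(String[] args) {
            FunctionBuilder coalesce = def(Coalesce::new);
            System.out.println(coalesce.build(List.of(), false)); // Coalesce[children=[]]
            // coalesce.build(List.of(), true) would throw: DISTINCT is rejected at build time
        }
    }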
+ /** + * Special method to create function definition for {@link Cast} as its + * signature is not compatible with {@link UnresolvedFunction} + * + * @return Cast function definition + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + private static FunctionDefinition def(Class function, + CastFunctionBuilder ctorRef, + String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> + ctorRef.build(location, children.get(0), children.get(0).dataType()); + return def(function, builder, false, aliases); + } + + private interface CastFunctionBuilder { + T build(Location location, Expression expression, DataType dataType); + } + private static String normalize(String name) { // translate CamelCase to camel_case return StringUtils.camelCaseToUnderscore(name); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java index dc75f0f5be37a..b2f4ab8ef2ca2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java @@ -8,11 +8,13 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.sql.expression.function.Score; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalFunction; public enum FunctionType { + AGGREGATE(AggregateFunction.class), + CONDITIONAL(ConditionalFunction.class), SCALAR(ScalarFunction.class), SCORE(Score.class); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java index 7c413feba8448..0dfed0d5c0057 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.sql.expression.function.aggregate; -import java.util.List; - import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; +import java.util.List; + /** * Find the arithmetic mean of a field.
*/ @@ -41,6 +41,6 @@ public String innerName() { @Override public DataType dataType() { - return field().dataType(); + return DataType.DOUBLE; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java index fde06f239cb79..9c6b1374f0777 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; @@ -44,6 +45,6 @@ public String innerName() { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumericOrDate(field()); + return Expressions.typeMustBeNumericOrDate(field(), functionName(), ParamOrdinal.DEFAULT); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java index 42109aaf5d69a..e0b68999d64a1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; @@ -47,6 +48,6 @@ public String innerName() { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumericOrDate(field()); + return Expressions.typeMustBeNumericOrDate(field(), functionName(), ParamOrdinal.DEFAULT); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java index a71dcfbbb9e67..f384e157ec4ae 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; @@ -24,7 +25,7 @@ abstract class NumericAggregate extends AggregateFunction { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field()); + return Expressions.typeMustBeNumeric(field(), functionName(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java index a3293161e0879..6e644fb4f751c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -43,7 +44,7 @@ protected TypeResolution resolveType() { TypeResolution resolution = super.resolveType(); if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { - resolution = Expressions.typeMustBeNumeric(percent()); + resolution = Expressions.typeMustBeNumeric(percent(), functionName(), ParamOrdinal.DEFAULT); } return resolution; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java index dabe27a0caef4..f01dad8800ccf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -41,12 +42,11 @@ public Expression replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { TypeResolution resolution = super.resolveType(); - - if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { - resolution = Expressions.typeMustBeNumeric(value); + if (resolution.unresolved()) { + return resolution; } - return resolution; + return Expressions.typeMustBeNumeric(value, functionName(), ParamOrdinal.DEFAULT); } public Expression value() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java index 298039640446e..5c874cc7667f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java @@ -16,6 +16,7 @@ import java.util.Objects; public class Cast extends UnaryScalarFunction { + private final DataType dataType; public Cast(Location location, Expression field, DataType dataType) { @@ -65,7 +66,7 @@ public boolean nullable() { protected TypeResolution resolveType() { return DataTypeConversion.canConvert(from(), to()) ? 
TypeResolution.TYPE_RESOLVED : - new TypeResolution("Cannot cast %s to %s", from(), to()); + new TypeResolution("Cannot cast [" + from() + "] to [" + to() + "]"); } @Override @@ -102,4 +103,4 @@ public String name() { sb.insert(sb.length() - 1, " AS " + to().sqlName()); return sb.toString(); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index ae35f9c760c43..6954abbbaa7dc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -25,9 +25,15 @@ import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; import org.elasticsearch.xpack.sql.expression.gen.processor.HitExtractorProcessor; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.CoalesceProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.logical.BinaryLogicProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.logical.NotProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNullProcessor; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.InProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor; import java.util.ArrayList; import java.util.List; @@ -49,13 +55,24 @@ public static List getNamedWriteables() { entries.add(new Entry(Processor.class, CastProcessor.NAME, CastProcessor::new)); entries.add(new Entry(Processor.class, ChainingProcessor.NAME, ChainingProcessor::new)); - // comparators - entries.add(new Entry(Processor.class, BinaryComparisonProcessor.NAME, BinaryComparisonProcessor::new)); + // logical + entries.add(new Entry(Processor.class, BinaryLogicProcessor.NAME, BinaryLogicProcessor::new)); + entries.add(new Entry(Processor.class, NotProcessor.NAME, NotProcessor::new)); + // null + entries.add(new Entry(Processor.class, CoalesceProcessor.NAME, CoalesceProcessor::new)); + entries.add(new Entry(Processor.class, IsNotNullProcessor.NAME, IsNotNullProcessor::new)); // arithmetic entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); + // comparators + entries.add(new Entry(Processor.class, BinaryComparisonProcessor.NAME, BinaryComparisonProcessor::new)); + entries.add(new Entry(Processor.class, InProcessor.NAME, InProcessor::new)); + // regex + entries.add(new Entry(Processor.class, RegexProcessor.NAME, RegexProcessor::new)); + + // datetime entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); entries.add(new Entry(Processor.class, NamedDateTimeProcessor.NAME, NamedDateTimeProcessor::new)); @@ -73,4 +90,12 @@
entries.add(new Entry(Processor.class, SubstringFunctionProcessor.NAME, SubstringFunctionProcessor::new)); return entries; } -} \ No newline at end of file + + public static List process(List processors, Object input) { + List values = new ArrayList<>(processors.size()); + for (Processor p : processors) { + values.add(p.process(input)); + } + return values; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java index 952941342b583..130acd8eddcd3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -8,10 +8,10 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.type.DataType; import org.joda.time.DateTime; import java.util.Objects; @@ -42,11 +42,7 @@ protected final NodeInfo info() { @Override protected TypeResolution resolveType() { - if (field().dataType() == DataType.DATE) { - return TypeResolution.TYPE_RESOLVED; - } - return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" - + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); + return Expressions.typeMustBeDate(field(), functionName(), ParamOrdinal.DEFAULT); } public TimeZone timeZone() { @@ -90,4 +86,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(field(), timeZone()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java index 1d26a88c012aa..6b067a9a8755e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -19,7 +20,7 @@ public abstract class BinaryNumericFunction extends BinaryScalarFunction { private final BinaryMathOperation operation; - protected BinaryNumericFunction(Location location, Expression left, Expression right, BinaryMathOperation operation) { + BinaryNumericFunction(Location location, Expression left, Expression right, BinaryMathOperation operation) { super(location, left, right); this.operation = operation; } @@ -35,18 +36,12 @@ protected 
TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = resolveInputType(left().dataType()); + TypeResolution resolution = Expressions.typeMustBeNumeric(left(), functionName(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; - if (resolution == TypeResolution.TYPE_RESOLVED) { - return resolveInputType(right().dataType()); } - return resolution; - } - - protected TypeResolution resolveInputType(DataType inputType) { - return inputType.isNumeric() ? - TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires a numeric type, received %s", scriptMethodName(), inputType.esType); + return Expressions.typeMustBeNumeric(right(), functionName(), ParamOrdinal.SECOND); } @Override @@ -74,4 +69,4 @@ public boolean equals(Object obj) { && Objects.equals(other.right(), right()) && Objects.equals(other.operation, operation); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java index cd37e539bfcd9..ce6239c3cac49 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.math; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -57,8 +59,7 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - return field().dataType().isNumeric() ? 
TypeResolution.TYPE_RESOLVED - : new TypeResolution("'%s' requires a numeric type, received %s", operation(), field().dataType().esType); + return Expressions.typeMustBeNumeric(field(), operation().toString(), ParamOrdinal.DEFAULT); } @Override @@ -81,4 +82,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(field()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java index c1e7433444233..b18ebe4f4916d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java @@ -10,12 +10,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.tree.Location; -import org.elasticsearch.xpack.sql.type.DataType; import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; +import static org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import static org.elasticsearch.xpack.sql.expression.Expressions.typeMustBeString; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; /** @@ -41,14 +42,15 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - if (!left().dataType().isString()) { - return new TypeResolution("'%s' requires first parameter to be a string type, received %s", functionName(), left().dataType()); + TypeResolution resolution = typeMustBeString(left(), functionName(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - - return resolveSecondParameterInputType(right().dataType()); + + return resolveSecondParameterInputType(right()); } - protected abstract TypeResolution resolveSecondParameterInputType(DataType inputType); + protected abstract TypeResolution resolveSecondParameterInputType(Expression e); @Override public Object fold() { @@ -83,4 +85,4 @@ public boolean equals(Object obj) { return Objects.equals(other.left(), left()) && Objects.equals(other.right(), right()); } -} \ No newline at end of file +}
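BinaryStringFunction above resolves its first argument once and hands the second to an abstract hook, so the two subclasses that follow only declare what their second parameter must be. The template-method skeleton, reduced to plain Java (the types here are illustrative):

    public class BinaryResolveSketch {
        enum Type { STRING, NUMERIC }

        abstract static class BinaryStringFn {
            // Template method: the first parameter is always a string, the second is subclass-defined.
            final String resolve(Type first, Type second) {
                if (first != Type.STRING) {
                    return "first argument must be [string]";
                }
                return resolveSecond(second); // null means resolved
            }

            abstract String resolveSecond(Type second);
        }

        static class StringNumericFn extends BinaryStringFn {
            @Override
            String resolveSecond(Type second) {
                return second == Type.NUMERIC ? null : "second argument must be [numeric]";
            }
        }

        public static void main(String[] args) {
            System.out.println(new StringNumericFn().resolve(Type.STRING, Type.STRING));
            // prints: second argument must be [numeric]
        }
    }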
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java index eaddf4bc70f24..8cc90e050e09f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java @@ -25,10 +25,8 @@ public BinaryStringNumericFunction(Location location, Expression left, Expressio protected abstract BinaryStringNumericOperation operation(); @Override - protected TypeResolution resolveSecondParameterInputType(DataType inputType) { - return inputType.isNumeric() ? - TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires second parameter to be a numeric type, received %s", functionName(), inputType); + protected TypeResolution resolveSecondParameterInputType(Expression e) { + return Expressions.typeMustBeNumeric(e, functionName(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java index 0c3c8d9453ccd..3d4816cedb0df 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; @@ -19,10 +20,8 @@ public BinaryStringStringFunction(Location location, Expression left, Expression } @Override - protected TypeResolution resolveSecondParameterInputType(DataType inputType) { - return inputType.isString() ? - TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires second parameter to be a string type, received %s", functionName(), inputType); + protected TypeResolution resolveSecondParameterInputType(Expression e) { + return Expressions.typeMustBeString(e, functionName(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java index 9733160cdd993..3bd03986eb5c4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -35,12 +36,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = StringFunctionUtils.resolveStringInputType(left().dataType(), functionName()); - if (sourceResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution sourceResolution = Expressions.typeMustBeString(left(), functionName(), ParamOrdinal.FIRST); + if (sourceResolution.unresolved()) { return sourceResolution; } - return StringFunctionUtils.resolveStringInputType(right().dataType(), functionName()); + return Expressions.typeMustBeString(right(), functionName(), ParamOrdinal.SECOND); } @Override @@ -48,6 +49,11 @@ protected Pipe makePipe() { return new ConcatFunctionPipe(location(), this, Expressions.pipe(left()), Expressions.pipe(right())); }
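The nullable() override that follows is true only when both arguments are nullable; one consistent reading is that CONCAT treats a NULL argument as an empty string, so a single non-null side already guarantees a non-null result. A tiny illustration of that rule (plain Java, not the engine's evaluator, and the both-null case is an assumption here):

    public class ConcatNullSketch {
        // NULL behaves like "", so only two NULLs can produce a NULL result.
        static String concat(String left, String right) {
            if (left == null && right == null) {
                return null;
            }
            return (left == null ? "" : left) + (right == null ? "" : right);
        }

        public static void main(String[] args) {
            System.out.println(concat("foo", null)); // foo
            System.out.println(concat(null, null));  // null
        }
    }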
right().nullable(); + } + @Override public boolean foldable() { return left().foldable() && right().foldable(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java index 990cd2921dc07..c3c496fc67151 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -45,22 +46,22 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = StringFunctionUtils.resolveStringInputType(source.dataType(), functionName()); - if (sourceResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution sourceResolution = Expressions.typeMustBeString(source, functionName(), ParamOrdinal.FIRST); + if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution startResolution = StringFunctionUtils.resolveNumericInputType(start.dataType(), functionName()); - if (startResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution startResolution = Expressions.typeMustBeNumeric(start, functionName(), ParamOrdinal.SECOND); + if (startResolution.unresolved()) { return startResolution; } - TypeResolution lengthResolution = StringFunctionUtils.resolveNumericInputType(length.dataType(), functionName()); - if (lengthResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution lengthResolution = Expressions.typeMustBeNumeric(length, functionName(), ParamOrdinal.THIRD); + if (lengthResolution.unresolved()) { return lengthResolution; } - return StringFunctionUtils.resolveStringInputType(replacement.dataType(), functionName()); + return Expressions.typeMustBeString(replacement, functionName(), ParamOrdinal.FOURTH); } @Override @@ -135,4 +136,4 @@ public Expression replaceChildren(List newChildren) { return new Insert(location(), newChildren.get(0), newChildren.get(1), newChildren.get(2), newChildren.get(3)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java index 53f73c170c63c..f8650db70682a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -47,17 +48,19 @@ protected TypeResolution resolveType() { 
return new TypeResolution("Unresolved children"); } - TypeResolution patternResolution = StringFunctionUtils.resolveStringInputType(pattern.dataType(), functionName()); - if (patternResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution patternResolution = Expressions.typeMustBeString(pattern, functionName(), ParamOrdinal.FIRST); + if (patternResolution.unresolved()) { return patternResolution; } - TypeResolution sourceResolution = StringFunctionUtils.resolveStringInputType(source.dataType(), functionName()); - if (sourceResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution sourceResolution = Expressions.typeMustBeString(source, functionName(), ParamOrdinal.SECOND); + if (sourceResolution.unresolved()) { return sourceResolution; } - return start == null ? TypeResolution.TYPE_RESOLVED : StringFunctionUtils.resolveNumericInputType(start.dataType(), functionName()); + return start == null ? + TypeResolution.TYPE_RESOLVED : + Expressions.typeMustBeNumeric(start, functionName(), ParamOrdinal.THIRD); } @Override @@ -136,4 +139,4 @@ public Expression replaceChildren(List newChildren) { return new Locate(location(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java index 9c8e856dbad0d..55710047b2c19 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -29,31 +30,31 @@ public class Replace extends ScalarFunction { private final Expression source, pattern, replacement; - + public Replace(Location location, Expression source, Expression pattern, Expression replacement) { super(location, Arrays.asList(source, pattern, replacement)); this.source = source; this.pattern = pattern; this.replacement = replacement; } - + @Override protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = StringFunctionUtils.resolveStringInputType(source.dataType(), functionName()); - if (sourceResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution sourceResolution = Expressions.typeMustBeString(source, functionName(), ParamOrdinal.FIRST); + if (sourceResolution.unresolved()) { return sourceResolution; } - - TypeResolution patternResolution = StringFunctionUtils.resolveStringInputType(pattern.dataType(), functionName()); - if (patternResolution != TypeResolution.TYPE_RESOLVED) { + + TypeResolution patternResolution = Expressions.typeMustBeString(pattern, functionName(), ParamOrdinal.SECOND); + if (patternResolution.unresolved()) { return patternResolution; } - - return StringFunctionUtils.resolveStringInputType(replacement.dataType(), functionName()); + + return Expressions.typeMustBeString(replacement, functionName(), ParamOrdinal.THIRD); } @Override 
@@ -102,7 +103,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplate .script(replacementScript.params()) .build(), dataType()); } - + @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), @@ -123,4 +124,4 @@ public Expression replaceChildren(List newChildren) { return new Replace(location(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java index 33eb61012eac4..7fc55770bd3f8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java @@ -5,9 +5,6 @@ */ package org.elasticsearch.xpack.sql.expression.function.scalar.string; -import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; -import org.elasticsearch.xpack.sql.type.DataType; - abstract class StringFunctionUtils { /** @@ -74,15 +71,5 @@ private static boolean hasLength(String s) { return (s != null && s.length() > 0); } - static TypeResolution resolveStringInputType(DataType inputType, String functionName) { - return inputType.isString() ? - TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires a string type, received %s", functionName, inputType.esType); - } - - static TypeResolution resolveNumericInputType(DataType inputType, String functionName) { - return inputType.isNumeric() ? 
- TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires a numeric type, received %s", functionName, inputType.esType); - } + } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java index e147566511092..ea8378a224d91 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -44,17 +45,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = StringFunctionUtils.resolveStringInputType(source.dataType(), functionName()); - if (sourceResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution sourceResolution = Expressions.typeMustBeString(source, functionName(), ParamOrdinal.FIRST); + if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution startResolution = StringFunctionUtils.resolveNumericInputType(start.dataType(), functionName()); - if (startResolution != TypeResolution.TYPE_RESOLVED) { + TypeResolution startResolution = Expressions.typeMustBeNumeric(start, functionName(), ParamOrdinal.SECOND); + if (startResolution.unresolved()) { return startResolution; } - return StringFunctionUtils.resolveNumericInputType(length.dataType(), functionName()); + return Expressions.typeMustBeNumeric(length, functionName(), ParamOrdinal.THIRD); } @Override @@ -123,4 +124,4 @@ public Expression replaceChildren(List newChildren) { return new Substring(location(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java index af9bd05fd15cc..8c64fefc36b40 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation; @@ -41,9 +43,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - - return field().dataType().isString() ? 
TypeResolution.TYPE_RESOLVED : new TypeResolution( - "'%s' requires a string type, received %s", operation(), field().dataType().esType); + return Expressions.typeMustBeString(field(), operation().toString(), ParamOrdinal.DEFAULT); } @Override @@ -82,4 +82,4 @@ public boolean equals(Object obj) { public int hashCode() { return Objects.hash(field()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java index 0753af03f147f..a14acef60e578 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation; @@ -43,9 +45,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - - return field().dataType().isInteger ? TypeResolution.TYPE_RESOLVED : new TypeResolution( - "'%s' requires a integer type, received %s", operation(), field().dataType().esType); + return Expressions.typeMustBeInteger(field(), operation().toString(), ParamOrdinal.DEFAULT); } @Override @@ -83,4 +83,4 @@ public boolean equals(Object obj) { UnaryStringIntFunction other = (UnaryStringIntFunction) obj; return Objects.equals(other.field(), field()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index 9aabb3f10ecdc..beeb1174ba245 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -21,16 +21,19 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor; -import org.elasticsearch.xpack.sql.expression.predicate.IsNotNullProcessor; import org.elasticsearch.xpack.sql.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.CoalesceProcessor; import org.elasticsearch.xpack.sql.expression.predicate.logical.NotProcessor; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNullProcessor; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; import 
org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.InProcessor; import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.util.StringUtils; import java.time.ZonedDateTime; +import java.util.List; import java.util.Map; /** @@ -38,6 +41,7 @@ * Acts as a registry of the various static methods used internally by the scalar functions * (to simplify the whitelist definition). */ +@SuppressWarnings("unused") public final class InternalSqlScriptUtils { private InternalSqlScriptUtils() {} @@ -50,7 +54,7 @@ private InternalSqlScriptUtils() {} public static Object docValue(Map> doc, String fieldName) { if (doc.containsKey(fieldName)) { ScriptDocValues docValues = doc.get(fieldName); - if (docValues.size() > 0) { + if (!docValues.isEmpty()) { return docValues.get(0); } } @@ -81,6 +85,10 @@ public static Boolean eq(Object left, Object right) { return BinaryComparisonOperation.EQ.apply(left, right); } + public static Boolean neq(Object left, Object right) { + return BinaryComparisonOperation.NEQ.apply(left, right); + } + public static Boolean lt(Object left, Object right) { return BinaryComparisonOperation.LT.apply(left, right); } @@ -113,6 +121,17 @@ public static Boolean notNull(Object expression) { return IsNotNullProcessor.apply(expression); } + public static Boolean in(Object value, List values) { + return InProcessor.apply(value, values); + } + + // + // Null + // + public static Object coalesce(List expressions) { + return CoalesceProcessor.apply(expressions); + } + // // Regex // @@ -375,4 +394,4 @@ public static String substring(String s, Number start, Number length) { public static String ucase(String s) { return (String) StringOperation.UCASE.apply(s); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/MultiPipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/MultiPipe.java new file mode 100644 index 0000000000000..d25e7a2e660a2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/MultiPipe.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.gen.pipeline; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.ArrayList; +import java.util.List; + +public abstract class MultiPipe extends Pipe { + + protected MultiPipe(Location location, Expression expression, List children) { + super(location, expression, children); + } + + @Override + public Processor asProcessor() { + List procs = new ArrayList<>(); + for (Pipe pipe : children()) { + procs.add(pipe.asProcessor()); + } + + return asProcessor(procs); + } + + public abstract Processor asProcessor(List procs); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java index 5c96d2c9244ab..b92cb9a15eae4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/Pipe.java @@ -6,13 +6,16 @@ package org.elasticsearch.xpack.sql.expression.gen.pipeline; import org.elasticsearch.xpack.sql.capabilities.Resolvable; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.Node; +import java.util.ArrayList; import java.util.List; /** @@ -38,6 +41,27 @@ public Expression expression() { return expression; } + @Override + public boolean resolved() { + return Resolvables.resolved(children()); + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + children().forEach(c -> c.collectFields(sourceBuilder)); + } + + @Override + public boolean supportedByAggsOnlyQuery() { + for (Pipe pipe : children()) { + if (pipe.supportedByAggsOnlyQuery()) { + return true; + } + } + + return false; + } + public abstract Processor asProcessor(); /** @@ -47,9 +71,16 @@ public Expression expression() { * @return {@code this} if the resolution doesn't change the * definition, a new {@link Pipe} otherwise */ - public abstract Pipe resolveAttributes(AttributeResolver resolver); + public Pipe resolveAttributes(AttributeResolver resolver) { + List newPipes = new ArrayList<>(children().size()); + for (Pipe p : children()) { + newPipes.add(p.resolveAttributes(resolver)); + } + + return children().equals(newPipes) ? 
this : replaceChildren(newPipes); + } public interface AttributeResolver { FieldExtraction resolve(Attribute attribute); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/UnaryPipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/UnaryPipe.java index 8e36f448929ab..8e2c87dc75cda 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/UnaryPipe.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/UnaryPipe.java @@ -98,4 +98,4 @@ public boolean equals(Object obj) { && Objects.equals(child, other.child) && Objects.equals(expression(), other.expression()); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java index f9e2588a9c035..21ac12e51da89 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java @@ -87,4 +87,4 @@ public static ScriptTemplate binaryMethod(String methodName, ScriptTemplate left .build(), dataType); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryOperator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryOperator.java index 9684913dba86d..e5e6e3563a8d0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryOperator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryOperator.java @@ -6,8 +6,9 @@ package org.elasticsearch.xpack.sql.expression.predicate; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Location; -import org.elasticsearch.xpack.sql.type.DataType; /** * Operator is a specialized binary predicate where both sides have the compatible types @@ -23,7 +24,7 @@ protected BinaryOperator(Location location, Expression left, Expression right, F super(location, left, right, function); } - protected abstract TypeResolution resolveInputType(DataType inputType); + protected abstract TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal); public abstract BinaryOperator swapLeftAndRight(); @@ -32,14 +33,11 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - DataType l = left().dataType(); - DataType r = right().dataType(); - TypeResolution resolution = resolveInputType(l); - - if (resolution == TypeResolution.TYPE_RESOLVED) { - return resolveInputType(r); + TypeResolution resolution = resolveInputType(left(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - return resolution; + return resolveInputType(right(), ParamOrdinal.SECOND); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/Coalesce.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/Coalesce.java new file mode 100644 
index 0000000000000..8a2ddc4d10564 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/Coalesce.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.predicate.conditional; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +import java.util.ArrayList; +import java.util.List; +import java.util.StringJoiner; + +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +public class Coalesce extends ConditionalFunction { + + private DataType dataType = DataType.NULL; + + public Coalesce(Location location, List fields) { + super(location, fields); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Coalesce::new, children()); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new Coalesce(location(), newChildren); + } + + @Override + protected TypeResolution resolveType() { + for (Expression e : children()) { + dataType = DataTypeConversion.commonType(dataType, e.dataType()); + } + return TypeResolution.TYPE_RESOLVED; + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public boolean foldable() { + // if the first entry is foldable, so is coalesce + // that's because the nulls are eliminated by the optimizer + // and if the first expression is folded (and not null), the rest do not matter + List children = children(); + return (children.isEmpty() || (children.get(0).foldable() && children.get(0).fold() != null)); + }
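Two details of the class above are worth pausing on: resolveType() folds DataTypeConversion.commonType across the children starting from NULL, and foldable()/fold() lean on the optimizer having already pruned leading nulls. A rough sketch of the widening fold (the promotion rules below are a toy lattice, not the real conversion table):

    import java.util.List;

    public class CommonTypeSketch {
        enum DataType { NULL, INTEGER, DOUBLE }

        // Toy promotion: NULL yields the other type; mixed numerics widen to DOUBLE.
        static DataType commonType(DataType a, DataType b) {
            if (a == DataType.NULL) {
                return b;
            }
            if (b == DataType.NULL || a == b) {
                return a;
            }
            return DataType.DOUBLE; // INTEGER mixed with DOUBLE widens
        }

        public static void main(String[] args) {
            DataType result = DataType.NULL; // the same seed Coalesce uses
            for (DataType child : List.of(DataType.INTEGER, DataType.DOUBLE)) {
                result = commonType(result, child);
            }
            System.out.println(result); // DOUBLE: COALESCE(int_col, double_col) widens
        }
    }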
null : children.get(0).fold(); + } + + @Override + public ScriptTemplate asScript() { + List templates = new ArrayList<>(); + for (Expression ex : children()) { + templates.add(asScript(ex)); + } + + StringJoiner template = new StringJoiner(",", "{sql}.coalesce([", "])"); + ParamsBuilder params = paramsBuilder(); + + for (ScriptTemplate scriptTemplate : templates) { + template.add(scriptTemplate.template()); + params.script(scriptTemplate.params()); + } + + return new ScriptTemplate(template.toString(), params.build(), dataType); + } + + @Override + protected Pipe makePipe() { + return new CoalescePipe(location(), this, Expressions.pipe(children())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalescePipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalescePipe.java new file mode 100644 index 0000000000000..a19d13e02df92 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalescePipe.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.predicate.conditional; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.MultiPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; + +public class CoalescePipe extends MultiPipe { + + public CoalescePipe(Location location, Expression expression, List children) { + super(location, expression, children); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, CoalescePipe::new, expression(), children()); + } + + @Override + public Pipe replaceChildren(List newChildren) { + return new CoalescePipe(location(), expression(), newChildren); + } + + @Override + public Processor asProcessor(List procs) { + return new CoalesceProcessor(procs); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalesceProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalesceProcessor.java new file mode 100644 index 0000000000000..e23b43634238d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CoalesceProcessor.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
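Coalesce's foldable()/fold() pair above leans on the optimizer: once leading nulls are pruned, a constant first argument decides the whole expression. A minimal stand-alone sketch of the first-non-null rule it relies on (illustrative names, not the plugin's API):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

class CoalesceFoldSketch {
    // First-non-null rule that Coalesce.fold() relies on: after the optimizer
    // strips null literals, only the head of the list needs evaluating.
    static Object coalesce(List<Object> values) {
        for (Object v : values) {
            if (v != null) {
                return v; // first non-null argument wins
            }
        }
        return null; // empty list or all-null arguments
    }

    public static void main(String[] args) {
        System.out.println(coalesce(Arrays.<Object>asList(null, "a", "b"))); // prints "a"
    }
}
---------------------------------------------------------------------------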
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.conditional; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class CoalesceProcessor implements Processor { + + public static final String NAME = "nco"; + + private final List<Processor> processors; + + public CoalesceProcessor(List<Processor> processors) { + this.processors = processors; + } + + public CoalesceProcessor(StreamInput in) throws IOException { + processors = in.readNamedWriteableList(Processor.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteableList(processors); + } + + @Override + public Object process(Object input) { + for (Processor proc : processors) { + Object result = proc.process(input); + if (result != null) { + return result; + } + } + return null; + } + + public static Object apply(List<Object> values) { + if (values == null || values.isEmpty()) { + return null; + } + + for (Object object : values) { + if (object != null) { + return object; + } + } + + return null; + } + + @Override + public int hashCode() { + return Objects.hash(processors); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CoalesceProcessor that = (CoalesceProcessor) o; + return Objects.equals(processors, that.processors); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java new file mode 100644 index 0000000000000..2feff40643335 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.predicate.conditional; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +/** + * Base class for conditional predicates.
+ */ +public abstract class ConditionalFunction extends ScalarFunction { + + protected ConditionalFunction(Location location, List fields) { + super(location, fields); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java index 68147bd727ab0..93c50fbc1351b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java @@ -25,13 +25,17 @@ public DataType dataType() { } @Override - protected TypeResolution resolveInputType(DataType inputType) { - return DataType.BOOLEAN == inputType ? TypeResolution.TYPE_RESOLVED : new TypeResolution( - "'%s' requires type %s not %s", symbol(), DataType.BOOLEAN.sqlName(), inputType.sqlName()); + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { + return Expressions.typeMustBeBoolean(e, functionName(), paramOrdinal); } @Override protected Pipe makePipe() { return new BinaryLogicPipe(location(), this, Expressions.pipe(left()), Expressions.pipe(right()), function()); } + + @Override + public boolean nullable() { + return left().nullable() && right().nullable(); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java index 334a80b7d578d..15f024b4f539e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java @@ -27,7 +27,7 @@ public enum BinaryLogicOperation implements PredicateBiFunction { if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) { return Boolean.TRUE; @@ -36,7 +36,7 @@ public enum BinaryLogicOperation implements PredicateBiFunction process; private final String symbol; @@ -84,8 +84,18 @@ public String getWriteableName() { @Override protected void checkParameter(Object param) { - if (!(param instanceof Boolean)) { + if (param != null && !(param instanceof Boolean)) { throw new SqlIllegalArgumentException("A boolean is required; received {}", param); } } -} \ No newline at end of file + + @Override + public Object process(Object input) { + Object l = left().process(input); + checkParameter(l); + Object r = right().process(input); + checkParameter(r); + + return doProcess(l, r); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java index 55115ffb4df11..60546347d244a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import 
org.elasticsearch.xpack.sql.expression.gen.processor.Processor; import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; @@ -36,8 +37,7 @@ protected TypeResolution resolveType() { if (DataType.BOOLEAN == field().dataType()) { return TypeResolution.TYPE_RESOLVED; } - return new TypeResolution("Cannot negate expression ([" + Expressions.name(field()) + "] of type [" - + field().dataType().esType + "])"); + return Expressions.typeMustBeBoolean(field(), functionName(), ParamOrdinal.DEFAULT); } @Override @@ -68,4 +68,4 @@ protected Expression canonicalize() { public DataType dataType() { return DataType.BOOLEAN; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java index 14425d35578ac..3480854d5015e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.logical; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -13,12 +14,14 @@ public class NotProcessor implements Processor { - static final NotProcessor INSTANCE = new NotProcessor(); + public static final NotProcessor INSTANCE = new NotProcessor(); public static final String NAME = "ln"; private NotProcessor() {} + public NotProcessor(StreamInput in) throws IOException {} + @Override public String getWriteableName() { return NAME; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNull.java similarity index 96% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNull.java index bd3fd5bf0811b..c4c848aecff32 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNull.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
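The relaxed null check in BinaryLogicProcessor, together with its overridden process, gives AND and OR proper SQL three-valued semantics: a dominant known value (TRUE for OR, FALSE for AND) decides the result even when the other side is null. A compact sketch of those truth tables (stand-alone, not the plugin's enum):

---------------------------------------------------------------------------
final class ThreeValuedLogicSketch {
    private ThreeValuedLogicSketch() {}

    // SQL three-valued OR: TRUE dominates; null means "unknown".
    static Boolean or(Boolean l, Boolean r) {
        if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) {
            return Boolean.TRUE;
        }
        return (l == null || r == null) ? null : Boolean.FALSE;
    }

    // SQL three-valued AND: FALSE dominates.
    static Boolean and(Boolean l, Boolean r) {
        if (Boolean.FALSE.equals(l) || Boolean.FALSE.equals(r)) {
            return Boolean.FALSE;
        }
        return (l == null || r == null) ? null : Boolean.TRUE;
    }
}
---------------------------------------------------------------------------

So `true OR null` evaluates to `true`, while `false OR null` stays unknown and evaluates to `null`.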
*/ -package org.elasticsearch.xpack.sql.expression.predicate; +package org.elasticsearch.xpack.sql.expression.predicate.nulls; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNullProcessor.java similarity index 83% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNullProcessor.java index b29ae263f3907..ddc5f11975165 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/nulls/IsNotNullProcessor.java @@ -3,8 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.expression.predicate; +package org.elasticsearch.xpack.sql.expression.predicate.nulls; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -14,10 +15,12 @@ public class IsNotNullProcessor implements Processor { static final IsNotNullProcessor INSTANCE = new IsNotNullProcessor(); - public static final String NAME = "inn"; + public static final String NAME = "ninn"; private IsNotNullProcessor() {} + public IsNotNullProcessor(StreamInput in) throws IOException {} + @Override public String getWriteableName() { return NAME; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java index 049c454735577..cdc3cb5415fae 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java @@ -21,10 +21,8 @@ protected ArithmeticOperation(Location location, Expression left, Expression rig } @Override - protected TypeResolution resolveInputType(DataType inputType) { - return inputType.isNumeric() ? 
- TypeResolution.TYPE_RESOLVED : - new TypeResolution("'%s' requires a numeric type, received %s", symbol(), inputType.esType); + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { + return Expressions.typeMustBeNumeric(e, symbol(), paramOrdinal); } @Override @@ -41,4 +39,4 @@ public DataType dataType() { protected Pipe makePipe() { return new BinaryArithmeticPipe(location(), this, Expressions.pipe(left()), Expressions.pipe(right()), function()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java index 47ea773f514fa..ebd44a44abce9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -38,7 +39,7 @@ protected Neg replaceChild(Expression newChild) { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field()); + return Expressions.typeMustBeNumeric(field(), functionName(), ParamOrdinal.DEFAULT); } @Override @@ -65,4 +66,4 @@ public String processScript(String script) { protected Processor makeProcessor() { return new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java index 7e7adf2c86528..b8c21c1448acc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java @@ -21,7 +21,7 @@ protected BinaryComparison(Location location, Expression left, Expression right, } @Override - protected TypeResolution resolveInputType(DataType inputType) { + protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { return TypeResolution.TYPE_RESOLVED; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessor.java index e33d7b282006d..7c7983cf2c1bb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessor.java @@ -19,6 +19,7 @@ public class BinaryComparisonProcessor extends FunctionalBinaryProcessor { EQ(Comparisons::eq, "=="), + 
NEQ(Comparisons::neq, "!="), GT(Comparisons::gt, ">"), GTE(Comparisons::gte, ">="), LT(Comparisons::lt, "<"), @@ -62,4 +63,4 @@ public BinaryComparisonProcessor(StreamInput in) throws IOException { public String getWriteableName() { return NAME; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Comparisons.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Comparisons.java index 79d3f2b318b59..7c45371a24848 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Comparisons.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Comparisons.java @@ -19,6 +19,11 @@ public static Boolean eq(Object l, Object r) { return i == null ? null : i.intValue() == 0; } + static Boolean neq(Object l, Object r) { + Integer i = compare(l, r); + return i == null ? null : i.intValue() != 0; + } + static Boolean lt(Object l, Object r) { Integer i = compare(l, r); return i == null ? null : i.intValue() < 0; @@ -50,6 +55,9 @@ static Boolean in(Object l, Set r) { */ @SuppressWarnings({ "rawtypes", "unchecked" }) static Integer compare(Object l, Object r) { + if (l == null || r == null) { + return null; + } // typical number comparison if (l instanceof Number && r instanceof Number) { return compare((Number) l, (Number) r); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Equals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Equals.java index 15dbacafc4add..23b8b879123b3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/Equals.java @@ -6,11 +6,12 @@ package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; -public class Equals extends BinaryComparison { +public class Equals extends BinaryComparison implements BinaryOperator.Negateable { public Equals(Location location, Expression left, Expression right) { super(location, left, right, BinaryComparisonOperation.EQ); @@ -30,4 +31,9 @@ protected Equals replaceChildren(Expression newLeft, Expression newRight) { public Equals swapLeftAndRight() { return new Equals(location(), right(), left()); } + + @Override + public BinaryOperator negate() { + return new NotEquals(location(), left(), right()); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java similarity index 62% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java index 9b16b77511ca7..8a3ac8edaee77 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java @@ -3,20 +3,17 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.expression.predicate; +package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; -import org.elasticsearch.xpack.sql.expression.gen.script.Params; -import org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptWeaver; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Comparisons; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.InPipe; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; @@ -30,7 +27,6 @@ import java.util.StringJoiner; import java.util.stream.Collectors; -import static java.lang.String.format; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; public class In extends NamedExpression implements ScriptWeaver { @@ -84,29 +80,17 @@ public boolean foldable() { @Override public Boolean fold() { - if (value.dataType() == DataType.NULL) { + // Optimization for early return and Query folding to LocalExec + if (value.dataType() == DataType.NULL || + list.size() == 1 && list.get(0).dataType() == DataType.NULL) { return null; } - if (list.size() == 1 && list.get(0).dataType() == DataType.NULL) { - return false; - } - - Object foldedLeftValue = value.fold(); - Boolean result = false; - for (Expression rightValue : list) { - Boolean compResult = Comparisons.eq(foldedLeftValue, rightValue.fold()); - if (compResult == null) { - result = null; - } else if (compResult) { - return true; - } - } - return result; + return InProcessor.apply(value.fold(), Foldables.valuesOf(list, value.dataType())); } @Override public String name() { - StringJoiner sj = new StringJoiner(", ", " IN(", ")"); + StringJoiner sj = new StringJoiner(", ", " IN (", ")"); list.forEach(e -> sj.add(Expressions.name(e))); return Expressions.name(value) + sj.toString(); } @@ -122,34 +106,18 @@ public Attribute toAttribute() { @Override public ScriptTemplate asScript() { - StringJoiner sj = new StringJoiner(" || "); ScriptTemplate leftScript = asScript(value); - List rightParams = new ArrayList<>(); - String scriptPrefix = leftScript + "=="; - LinkedHashSet values = list.stream().map(Expression::fold).collect(Collectors.toCollection(LinkedHashSet::new)); - for (Object valueFromList : values) { - // if checked against null => false - if (valueFromList != null) { - if (valueFromList instanceof Expression) { - ScriptTemplate rightScript = asScript((Expression) valueFromList); - sj.add(scriptPrefix + rightScript.template()); - 
rightParams.add(rightScript.params()); - } else { - if (valueFromList instanceof String) { - sj.add(scriptPrefix + '"' + valueFromList + '"'); - } else { - sj.add(scriptPrefix + valueFromList.toString()); - } - } - } - } - ParamsBuilder paramsBuilder = paramsBuilder().script(leftScript.params()); - for (Params p : rightParams) { - paramsBuilder = paramsBuilder.script(p); - } + // fold & remove duplicates + List values = new ArrayList<>(new LinkedHashSet<>(Foldables.valuesOf(list, value.dataType()))); - return new ScriptTemplate(format(Locale.ROOT, "%s", sj.toString()), paramsBuilder.build(), dataType()); + return new ScriptTemplate( + formatTemplate(String.format(Locale.ROOT, "{sql}.in(%s, {})", leftScript.template())), + paramsBuilder() + .script(leftScript.params()) + .variable(values) + .build(), + dataType()); } @Override @@ -173,6 +141,6 @@ public boolean equals(Object obj) { In other = (In) obj; return Objects.equals(value, other.value) - && Objects.equals(list, other.list); + && Objects.equals(list, other.list); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java index 0a901b5b5e6fe..82233e250e364 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; import java.io.IOException; @@ -19,7 +20,7 @@ public class InProcessor implements Processor { private final List processsors; - public InProcessor(List processors) { + InProcessor(List processors) { this.processsors = processors; } @@ -40,14 +41,17 @@ public final void writeTo(StreamOutput out) throws IOException { @Override public Object process(Object input) { Object leftValue = processsors.get(processsors.size() - 1).process(input); - Boolean result = false; + return apply(leftValue, Processors.process(processsors.subList(0, processsors.size() - 1), leftValue)); + } - for (int i = 0; i < processsors.size() - 1; i++) { - Boolean compResult = Comparisons.eq(leftValue, processsors.get(i).process(input)); + public static Boolean apply(Object input, List values) { + Boolean result = Boolean.FALSE; + for (Object v : values) { + Boolean compResult = Comparisons.eq(input, v); if (compResult == null) { result = null; - } else if (compResult) { - return true; + } else if (compResult == Boolean.TRUE) { + return Boolean.TRUE; } } return result; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/NotEquals.java new file mode 100644 index 0000000000000..41e0a939fbd07 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/NotEquals.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
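With the new null guard in Comparisons.compare, every comparison propagates unknowns instead of misfiring, and InProcessor.apply builds IN's semantics on top of eq: the first match returns TRUE, while a null comparison downgrades a would-be FALSE result to null. Roughly (simplified stand-ins for the plugin's methods):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

final class InSemanticsSketch {
    private InSemanticsSketch() {}

    // Null-propagating equality, as in Comparisons.eq after the null guard.
    // (Simplified: the real code compares numerically across Number types.)
    static Boolean eq(Object l, Object r) {
        if (l == null || r == null) {
            return null; // unknown, not false
        }
        return Objects.equals(l, r);
    }

    // IN over already-folded values, as in InProcessor.apply: TRUE on the
    // first match, null if any comparison was unknown, otherwise FALSE.
    static Boolean in(Object input, List<Object> values) {
        Boolean result = Boolean.FALSE;
        for (Object v : values) {
            Boolean comp = eq(input, v);
            if (comp == null) {
                result = null;
            } else if (comp) {
                return Boolean.TRUE;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(in(1, Arrays.<Object>asList(2, 3)));    // false
        System.out.println(in(1, Arrays.<Object>asList(2, null))); // null (unknown)
        System.out.println(in(1, Arrays.<Object>asList(null, 1))); // true
    }
}
---------------------------------------------------------------------------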
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class NotEquals extends BinaryComparison implements BinaryOperator.Negateable { + + public NotEquals(Location location, Expression left, Expression right) { + super(location, left, right, BinaryComparisonOperation.NEQ); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, NotEquals::new, left(), right()); + } + + @Override + protected NotEquals replaceChildren(Expression newLeft, Expression newRight) { + return new NotEquals(location(), newLeft, newRight); + } + + @Override + public NotEquals swapLeftAndRight() { + return new NotEquals(location(), right(), left()); + } + + @Override + public BinaryOperator negate() { + return new Equals(location(), left(), right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 8443358a12cb2..039f25b5ef590 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -41,15 +41,19 @@ import org.elasticsearch.xpack.sql.expression.predicate.BinaryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.Predicates; import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.Coalesce; import org.elasticsearch.xpack.sql.expression.predicate.logical.And; import org.elasticsearch.xpack.sql.expression.predicate.logical.Not; import org.elasticsearch.xpack.sql.expression.predicate.logical.Or; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.EsRelation; import org.elasticsearch.xpack.sql.plan.logical.Filter; @@ -63,6 +67,7 @@ import org.elasticsearch.xpack.sql.rule.RuleExecutor; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.session.SingletonExecutable; +import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.CollectionUtils; import java.util.ArrayList; @@ -122,7 +127,9 @@ protected Iterable.Batch> batches() { new 
CombineProjections(), // folding new ReplaceFoldableAttributes(), + new FoldNull(), new ConstantFolding(), + new SimplifyCoalesce(), // boolean new BooleanSimplification(), new BooleanLiteralsOnTheRight(), @@ -682,8 +689,7 @@ protected LogicalPlan rule(Filter filter) { if (TRUE.equals(filter.condition())) { return filter.child(); } - // TODO: add comparison with null as well - if (FALSE.equals(filter.condition())) { + if (FALSE.equals(filter.condition()) || Expressions.isNull(filter.condition())) { return new LocalRelation(filter.location(), new EmptyExecutable(filter.output())); } } @@ -1112,6 +1118,37 @@ private boolean canPropagateFoldable(LogicalPlan p) { } } + static class FoldNull extends OptimizerExpressionRule { + + FoldNull() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + if (e instanceof IsNotNull) { + if (((IsNotNull) e).field().nullable() == false) { + return new Literal(e.location(), Expressions.name(e), Boolean.TRUE, DataType.BOOLEAN); + } + } + // see https://github.com/elastic/elasticsearch/issues/34876 + // similar for IsNull once it gets introduced + + if (e instanceof In) { + In in = (In) e; + if (Expressions.isNull(in.value())) { + return Literal.of(in, null); + } + } + + if (e.nullable() && Expressions.anyMatch(e.children(), Expressions::isNull)) { + return Literal.of(e, null); + } + + return e; + } + } + static class ConstantFolding extends OptimizerExpressionRule { ConstantFolding() { @@ -1128,6 +1165,38 @@ protected Expression rule(Expression e) { return e.foldable() ? Literal.of(e) : e; } } + + static class SimplifyCoalesce extends OptimizerExpressionRule { + + SimplifyCoalesce() { + super(TransformDirection.DOWN); + } + + @Override + protected Expression rule(Expression e) { + if (e instanceof Coalesce) { + Coalesce c = (Coalesce) e; + + // find the first non-null foldable child (if any) and remove the rest + // while at it, exclude any nulls found + List newChildren = new ArrayList<>(); + + for (Expression child : c.children()) { + if (Expressions.isNull(child) == false) { + newChildren.add(child); + if (child.foldable()) { + break; + } + } + } + + if (newChildren.size() < c.children().size()) { + return new Coalesce(c.location(), newChildren); + } + } + return e; + } + } static class BooleanSimplification extends OptimizerExpressionRule { @@ -1275,7 +1344,7 @@ private Expression simplify(BinaryComparison bc) { } // false for equality - if (bc instanceof GreaterThan || bc instanceof LessThan) { + if (bc instanceof NotEquals || bc instanceof GreaterThan || bc instanceof LessThan) { if (!l.nullable() && !r.nullable() && l.semanticEquals(r)) { return FALSE; } @@ -1892,4 +1961,4 @@ protected LogicalPlan rule(LogicalPlan plan) { enum TransformDirection { UP, DOWN }; -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index 30f2edc53f17c..40c9544bfd0c7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -24,8 +24,7 @@ import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; -import org.elasticsearch.xpack.sql.expression.predicate.In; -import 
org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.expression.predicate.Range; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; @@ -33,6 +32,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.logical.And; import org.elasticsearch.xpack.sql.expression.predicate.logical.Not; import org.elasticsearch.xpack.sql.expression.predicate.logical.Or; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Mod; @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.sql.expression.predicate.regex.Like; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.sql.expression.predicate.regex.RLike; @@ -165,7 +166,7 @@ public Expression visitComparison(ComparisonContext ctx) { case SqlBaseParser.EQ: return new Equals(loc, left, right); case SqlBaseParser.NEQ: - return new Not(loc, new Equals(loc, left, right)); + return new NotEquals(loc, left, right); case SqlBaseParser.LT: return new LessThan(loc, left, right); case SqlBaseParser.LTE: @@ -388,6 +389,8 @@ public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { case "varchar": case "string": return DataType.KEYWORD; + case "ip": + return DataType.IP; default: throw new ParsingException(source(ctx), "Does not recognize type {}", type); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 3605898210fc5..c17c1311cccc6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -30,7 +30,7 @@ import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.elasticsearch.xpack.sql.expression.predicate.In; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.sql.plan.physical.FilterExec; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 9fcd542ef631d..21b6f20001bf7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -31,8 +31,6 @@ import 
org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; -import org.elasticsearch.xpack.sql.expression.predicate.In; -import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; import org.elasticsearch.xpack.sql.expression.predicate.Range; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; @@ -40,12 +38,15 @@ import org.elasticsearch.xpack.sql.expression.predicate.logical.And; import org.elasticsearch.xpack.sql.expression.predicate.logical.Not; import org.elasticsearch.xpack.sql.expression.predicate.logical.Or; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.sql.expression.predicate.regex.Like; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.sql.expression.predicate.regex.RLike; @@ -536,16 +537,15 @@ protected QueryTranslation asQuery(BinaryComparison bc, boolean onAggs) { // // Agg context means HAVING -> PipelineAggs // - ScriptTemplate script = bc.asScript(); if (onAggs) { - aggFilter = new AggFilter(at.id().toString(), script); + aggFilter = new AggFilter(at.id().toString(), bc.asScript()); } else { // query directly on the field if (at instanceof FieldAttribute) { query = wrapIfNested(translateQuery(bc), ne); } else { - query = new ScriptQuery(at.location(), script); + query = new ScriptQuery(at.location(), bc.asScript()); } } return new QueryTranslation(query, aggFilter); @@ -576,7 +576,7 @@ private static Query translateQuery(BinaryComparison bc) { if (bc instanceof LessThanOrEqual) { return new RangeQuery(loc, name, null, false, value, true, format); } - if (bc instanceof Equals) { + if (bc instanceof Equals || bc instanceof NotEquals) { if (bc.left() instanceof FieldAttribute) { FieldAttribute fa = (FieldAttribute) bc.left(); // equality should always be against an exact match @@ -585,7 +585,11 @@ private static Query translateQuery(BinaryComparison bc) { name = fa.exactAttribute().name(); } } - return new TermQuery(loc, name, value); + Query query = new TermQuery(loc, name, value); + if (bc instanceof NotEquals) { + query = new NotQuery(loc, query); + } + return query; } throw new SqlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), @@ -655,11 +659,10 @@ protected QueryTranslation asQuery(Range r, boolean onAggs) { // // Agg context means HAVING -> PipelineAggs // - ScriptTemplate script = r.asScript(); Attribute at = ((NamedExpression) e).toAttribute(); if (onAggs) { - aggFilter = 
new AggFilter(at.id().toString(), script); + aggFilter = new AggFilter(at.id().toString(), r.asScript()); } else { // typical range; no scripting involved if (at instanceof FieldAttribute) { @@ -669,7 +672,7 @@ protected QueryTranslation asQuery(Range r, boolean onAggs) { } // scripted query else { - query = new ScriptQuery(at.location(), script); + query = new ScriptQuery(at.location(), r.asScript()); } } return new QueryTranslation(query, aggFilter); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java index be0aae2001c5a..39872b4da7c3c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.action.SqlClearCursorRequest; @@ -27,10 +26,9 @@ public class TransportSqlClearCursorAction extends HandledTransportAction) SqlClearCursorRequest::new); + public TransportSqlClearCursorAction(TransportService transportService, ActionFilters actionFilters, PlanExecutor planExecutor, + SqlLicenseChecker sqlLicenseChecker) { + super(NAME, transportService, actionFilters, (Writeable.Reader) SqlClearCursorRequest::new); this.planExecutor = planExecutor; this.sqlLicenseChecker = sqlLicenseChecker; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 689dd365f76e4..9e5954af737ee 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.action.SqlQueryAction; @@ -37,10 +36,9 @@ public class TransportSqlQueryAction extends HandledTransportAction) SqlQueryRequest::new); + super(SqlQueryAction.NAME, transportService, actionFilters, (Writeable.Reader) SqlQueryRequest::new); this.planExecutor = planExecutor; this.sqlLicenseChecker = sqlLicenseChecker; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java index 95a10497fdc54..2ad63490539fa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; 
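Back in QueryTranslator, `<>` now reuses the equality path: the same exact-field term query is built and, for NotEquals, simply wrapped in a NotQuery (whose boolQuery import appears just below). A sketch of the equivalent query-builder output, assuming NotQuery's usual bool/must_not rendering:

---------------------------------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;

import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;

final class NotEqualsTranslationSketch {
    private NotEqualsTranslationSketch() {}

    // WHERE keyword = 'a'   ->  term query on the exact field
    // WHERE keyword <> 'a'  ->  the same term query, negated via must_not
    static QueryBuilder translate(String field, Object value, boolean negated) {
        QueryBuilder term = termQuery(field, value);
        return negated ? boolQuery().mustNot(term) : term;
    }
}
---------------------------------------------------------------------------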
-import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.action.SqlTranslateAction; @@ -27,10 +26,9 @@ public class TransportSqlTranslateAction extends HandledTransportAction) SqlTranslateRequest::new); + super(SqlTranslateAction.NAME, transportService, actionFilters, (Writeable.Reader) SqlTranslateRequest::new); this.planExecutor = planExecutor; this.sqlLicenseChecker = sqlLicenseChecker; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java index 14b51a942ad4f..47ab30c976941 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java @@ -8,6 +8,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; import org.elasticsearch.xpack.sql.util.Check; import java.util.Collection; @@ -26,7 +27,8 @@ public class AggFilter extends PipelineAgg { public AggFilter(String name, ScriptTemplate scriptTemplate) { super(BUCKET_SELECTOR_ID_PREFIX + name); Check.isTrue(scriptTemplate != null, "a valid script is required"); - this.scriptTemplate = scriptTemplate; + // make script null safe + this.scriptTemplate = Scripts.nullSafeFilter(scriptTemplate); this.aggPaths = scriptTemplate.aggPaths(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java index b3d50b8149a43..1e76cb296fed7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java @@ -9,10 +9,10 @@ import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.xpack.sql.tree.Location; -import static org.elasticsearch.index.query.QueryBuilders.boolQuery; - import java.util.Objects; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + public class NotQuery extends Query { private final Query child; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java index 91ea49a8a3ce3..66d206f829a32 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermsQuery.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.Location; -import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; import java.util.Collections; import java.util.LinkedHashSet; @@ -27,7 +27,7 @@ public class TermsQuery extends LeafQuery { public TermsQuery(Location location, String term, List values) { super(location); this.term = term; - values.removeIf(e -> e.dataType() == DataType.NULL); + values.removeIf(e -> DataTypes.isNull(e.dataType())); if (values.isEmpty()) { 
this.values = Collections.emptySet(); } else { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java index d63357e151aed..5bdd74fefc3df 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java @@ -7,8 +7,8 @@ import java.util.function.UnaryOperator; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.util.ReflectionUtils; @@ -22,7 +22,7 @@ */ public abstract class Rule> implements UnaryOperator { - protected Logger log = Loggers.getLogger(getClass()); + protected Logger log = LogManager.getLogger(getClass()); private final String name; private final Class typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java index 2936e6342add9..90f3537c62561 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.sql.rule; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.tree.NodeUtils; @@ -18,7 +18,7 @@ public abstract class RuleExecutor> { - private final Logger log = Loggers.getLogger(getClass()); + private final Logger log = LogManager.getLogger(getClass()); public static class Limiter { public static final Limiter DEFAULT = new Limiter(100); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index 605cb11beba9d..26436c614f565 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.type; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -101,7 +102,10 @@ public static Conversion conversionFor(DataType from, DataType to) { if (from == to) { return Conversion.IDENTITY; } - if (to == DataType.NULL) { + if (to == DataType.NULL || from == DataType.NULL) { + return Conversion.NULL; + } + if (from == DataType.NULL) { return Conversion.NULL; } @@ -117,6 +121,8 @@ private static Conversion conversion(DataType from, DataType to) { case KEYWORD: case TEXT: return conversionToString(from); + case IP: + return conversionToIp(from); case LONG: return conversionToLong(from); case INTEGER: @@ -146,6 +152,13 @@ private static Conversion conversionToString(DataType from) { return Conversion.OTHER_TO_STRING; } + private static Conversion conversionToIp(DataType from) { + if (from.isString()) { + return Conversion.STRING_TO_IP; + } + return null; + } + 
private static Conversion conversionToLong(DataType from) { if (from.isRational) { return Conversion.RATIONAL_TO_LONG; @@ -409,7 +422,14 @@ public enum Conversion { STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "Boolean")), DATE_TO_BOOLEAN(fromDate(value -> value != 0)), - BOOL_TO_LONG(fromBool(value -> value ? 1L : 0L)); + BOOL_TO_LONG(fromBool(value -> value ? 1L : 0L)), + + STRING_TO_IP(o -> { + if (!InetAddresses.isInetAddress(o.toString())) { + throw new SqlIllegalArgumentException( "[" + o + "] is not a valid IPv4 or IPv6 address"); + } + return o; + }); private final Function converter; @@ -464,4 +484,4 @@ public static DataType asInteger(DataType dataType) { return dataType.isInteger ? dataType : LONG; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index 6fc7f95bef71e..92bc6f33a5de5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -8,7 +8,9 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.joda.time.DateTime; -public abstract class DataTypes { +public final class DataTypes { + + private DataTypes() {} public static boolean isNull(DataType from) { return from == DataType.NULL; @@ -118,4 +120,4 @@ public static Integer metaSqlRadix(DataType t) { // null means radix is not applicable for the given type. return t.isInteger ? Integer.valueOf(10) : (t.isRational ? Integer.valueOf(2) : null); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 998dab84783f0..dd8a8ad800a1e 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -20,10 +20,12 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS # Comparison # Boolean eq(Object, Object) + Boolean neq(Object, Object) Boolean lt(Object, Object) Boolean lte(Object, Object) Boolean gt(Object, Object) Boolean gte(Object, Object) + Boolean in(Object, java.util.List) # # Logical @@ -33,6 +35,11 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS Boolean not(Boolean) Boolean notNull(Object) +# +# Null +# + Object coalesce(java.util.List) + # # Regex # @@ -107,4 +114,4 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS String space(Number) String substring(String, Number, Number) String ucase(String) -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index e69b694968a25..3b03079ca723b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -49,6 +49,38 @@ public void testMissingIndex() { public void testMissingColumn() { assertEquals("1:8: Unknown column [xxx]", verify("SELECT xxx FROM test")); } 
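The tests added below pin down the new ordinal-aware error format, `[FUNC] <ordinal> argument must be [expected], found value [v] type [t]`, produced by the Expressions.typeMustBe* helpers that resolveInputType now delegates to. A rough reconstruction of how such a message is assembled (hypothetical helper; the real formatting lives in Expressions):

---------------------------------------------------------------------------
import java.util.Locale;

final class TypeErrorMessageSketch {
    private TypeErrorMessageSketch() {}

    // Hypothetical mirror of the message format asserted in the tests below;
    // pass an empty ordinal for single-argument functions.
    static String typeError(String function, String ordinal, String expected,
                            String valueName, String actualType) {
        String position = ordinal.isEmpty() ? "" : ordinal + " ";
        return String.format(Locale.ROOT,
                "[%s] %sargument must be [%s], found value [%s] type [%s]",
                function, position, expected, valueName, actualType);
    }

    public static void main(String[] args) {
        // [CONCAT] first argument must be [string], found value [1] type [integer]
        System.out.println(typeError("CONCAT", "first", "string", "1", "integer"));
    }
}
---------------------------------------------------------------------------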
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index e69b694968a25..3b03079ca723b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -49,6 +49,38 @@ public void testMissingIndex() { public void testMissingColumn() { assertEquals("1:8: Unknown column [xxx]", verify("SELECT xxx FROM test")); } + + public void testMissingColumnWithWildcard() { + assertEquals("1:8: Unknown column [xxx]", verify("SELECT xxx.* FROM test")); + } + + public void testMisspelledColumnWithWildcard() { + assertEquals("1:8: Unknown column [tex], did you mean [text]?", verify("SELECT tex.* FROM test")); + } + + public void testColumnWithNoSubFields() { + assertEquals("1:8: Cannot determine columns for [text.*]", verify("SELECT text.* FROM test")); + } + + public void testMultipleColumnsWithWildcard1() { + assertEquals("1:14: Unknown column [a]\n" + + "line 1:17: Unknown column [b]\n" + + "line 1:22: Unknown column [c]\n" + + "line 1:25: Unknown column [tex], did you mean [text]?", verify("SELECT bool, a, b.*, c, tex.* FROM test")); + } + + public void testMultipleColumnsWithWildcard2() { + assertEquals("1:8: Unknown column [tex], did you mean [text]?\n" + + "line 1:21: Unknown column [a]\n" + + "line 1:24: Unknown column [dat], did you mean [date]?\n" + + "line 1:31: Unknown column [c]", verify("SELECT tex.*, bool, a, dat.*, c FROM test")); + } + + public void testMultipleColumnsWithWildcard3() { + assertEquals("1:8: Unknown column [ate], did you mean [date]?\n" + + "line 1:21: Unknown column [keyw], did you mean [keyword]?\n" + + "line 1:29: Unknown column [da], did you mean [date]?", verify("SELECT ate.*, bool, keyw.*, da FROM test")); + } public void testMisspelledColumn() { assertEquals("1:8: Unknown column [txt], did you mean [text]?", verify("SELECT txt FROM test")); @@ -148,11 +180,6 @@ public void testGroupByAggregate() { verify("SELECT AVG(int) FROM test GROUP BY AVG(int)")); } - public void testNotSupportedAggregateOnDate() { - assertEquals("1:8: Argument required to be numeric ('date' type is 'date')", - verify("SELECT AVG(date) FROM test")); - } - public void testGroupByOnNested() { assertEquals("1:38: Grouping isn't (yet) compatible with nested fields [dep.dep_id]", verify("SELECT dep.dep_id FROM test GROUP BY dep.dep_id")); @@ -237,4 +264,70 @@ public void testInNestedWithDifferentDataTypesFromLeftValue_WhereClause() { assertEquals("1:46: expected data type [TEXT], value provided is of type [INTEGER]", verify("SELECT * FROM test WHERE int = 1 OR text IN (1, 2)")); } + + public void testNotSupportedAggregateOnDate() { + assertEquals("1:8: [AVG] argument must be [numeric], found value [date] type [date]", + verify("SELECT AVG(date) FROM test")); + } + + public void testNotSupportedAggregateOnString() { + assertEquals("1:8: [MAX] argument must be [numeric or date], found value [keyword] type [keyword]", + verify("SELECT MAX(keyword) FROM test")); + } + + public void testInvalidTypeForStringFunction_WithOneArg() { + assertEquals("1:8: [LENGTH] argument must be [string], found value [1] type [integer]", + verify("SELECT LENGTH(1)")); + } + + public void testInvalidTypeForNumericFunction_WithOneArg() { + assertEquals("1:8: [COS] argument must be [numeric], found value [foo] type [keyword]", + verify("SELECT COS('foo')")); + } + + public void testInvalidTypeForBooleanFunction_WithOneArg() { + assertEquals("1:8: [NOT] argument must be [boolean], found value [foo] type [keyword]", + verify("SELECT NOT 'foo'")); + } + + public void testInvalidTypeForStringFunction_WithTwoArgs() { + assertEquals("1:8: [CONCAT] first argument must be [string], found value [1] type [integer]", + verify("SELECT CONCAT(1, 'bar')")); + assertEquals("1:8: [CONCAT] second argument must be [string], found value [2] type [integer]", + verify("SELECT CONCAT('foo', 2)")); + } + + public void 
testInvalidTypeForNumericFunction_WithTwoArgs() { + assertEquals("1:8: [TRUNCATE] first argument must be [numeric], found value [foo] type [keyword]", + verify("SELECT TRUNCATE('foo', 2)")); + assertEquals("1:8: [TRUNCATE] second argument must be [numeric], found value [bar] type [keyword]", + verify("SELECT TRUNCATE(1.2, 'bar')")); + } + + public void testInvalidTypeForBooleanFunction_WithTwoArgs() { + assertEquals("1:8: [OR] first argument must be [boolean], found value [1] type [integer]", + verify("SELECT 1 OR true")); + assertEquals("1:8: [OR] second argument must be [boolean], found value [2] type [integer]", + verify("SELECT true OR 2")); + } + + public void testInvalidTypeForFunction_WithThreeArgs() { + assertEquals("1:8: [REPLACE] first argument must be [string], found value [1] type [integer]", + verify("SELECT REPLACE(1, 'foo', 'bar')")); + assertEquals("1:8: [REPLACE] second argument must be [string], found value [2] type [integer]", + verify("SELECT REPLACE('text', 2, 'bar')")); + assertEquals("1:8: [REPLACE] third argument must be [string], found value [3] type [integer]", + verify("SELECT REPLACE('text', 'foo', 3)")); + } + + public void testInvalidTypeForFunction_WithFourArgs() { + assertEquals("1:8: [INSERT] first argument must be [string], found value [1] type [integer]", + verify("SELECT INSERT(1, 1, 2, 'new')")); + assertEquals("1:8: [INSERT] second argument must be [numeric], found value [foo] type [keyword]", + verify("SELECT INSERT('text', 'foo', 2, 'new')")); + assertEquals("1:8: [INSERT] third argument must be [numeric], found value [bar] type [keyword]", + verify("SELECT INSERT('text', 1, 'bar', 'new')")); + assertEquals("1:8: [INSERT] fourth argument must be [string], found value [3] type [integer]", + verify("SELECT INSERT('text', 1, 2, 3)")); + } }
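The relocated type-check tests above all pin the Verifier to a single message shape: "[FUNCTION] nth argument must be [kind], found value [v] type [t]". A hedged sketch of a formatter producing that shape; typeError is a hypothetical helper for illustration, not the Verifier's actual code:

---------------------------------------------------------------------
// Illustrative only: formats argument-type errors in the shape the
// Verifier tests above assert on.
public class TypeErrorSketch {
    static String typeError(String function, String ordinal, String expected,
                            Object value, String actualType) {
        return "[" + function + "] " + ordinal + " argument must be [" + expected
            + "], found value [" + value + "] type [" + actualType + "]";
    }

    public static void main(String[] args) {
        // Matches the expectation in testInvalidTypeForStringFunction_WithTwoArgs
        System.out.println(typeError("CONCAT", "first", "string", 1, "integer"));
        // prints: [CONCAT] first argument must be [string], found value [1] type [integer]
    }
}
---------------------------------------------------------------------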
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java new file mode 100644 index 0000000000000..e3d51f1f7dd87 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ProcessorTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.tree.NodeSubclassTests; +import org.junit.BeforeClass; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; + +import static java.util.stream.Collectors.toCollection; + + +public class ProcessorTests extends ESTestCase { + + private static List<Class<? extends Processor>> processors; + + @BeforeClass + public static void init() throws Exception { + processors = NodeSubclassTests.subclassesOf(Processor.class); + } + + + public void testProcessorRegistration() throws Exception { + LinkedHashSet<String> registered = Processors.getNamedWriteables().stream() + .map(e -> e.name) + .collect(toCollection(LinkedHashSet::new)); + + // discover available processors + int missing = processors.size() - registered.size(); + + + if (missing > 0) { + List<String> notRegistered = new ArrayList<>(); + for (Class<? extends Processor> proc : processors) { + String procName = proc.getName(); + assertTrue(procName + " does NOT implement NamedWriteable", NamedWriteable.class.isAssignableFrom(proc)); + Field name = null; + String value = null; + try { + name = proc.getField("NAME"); + } catch (Exception ex) { + fail(procName + " does NOT provide a NAME field\n" + ex); + } + try { + value = name.get(proc).toString(); + } catch (Exception ex) { + fail(procName + " does NOT provide a static NAME field\n" + ex); + } + if (!registered.contains(value)) { + notRegistered.add(procName); + } + } + + fail(missing + " processor(s) not registered : " + notRegistered); + } else { + assertEquals("Detection failed: discovered more registered processors than classes", 0, missing); + } + } +} \ No newline at end of file
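ProcessorTests boils down to a reflective cross-check: every Processor subclass must expose a static NAME constant that appears among the registered named writeables. A reduced, self-contained sketch of that check; the Candidate class and the hard-coded registry below are stand-ins for the real processor classes:

---------------------------------------------------------------------
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

public class RegistrationCheckSketch {
    // Stand-in for a Processor subclass that declares its wire name.
    static class Candidate {
        public static final String NAME = "demo";
    }

    public static void main(String[] args) throws Exception {
        List<Class<?>> candidates = Arrays.<Class<?>>asList(Candidate.class);
        Set<String> registered = Collections.singleton("demo");

        for (Class<?> clazz : candidates) {
            Field name = clazz.getField("NAME"); // throws NoSuchFieldException if absent
            if (!Modifier.isStatic(name.getModifiers())) {
                throw new IllegalStateException(clazz.getName() + " NAME is not static");
            }
            String value = name.get(null).toString(); // static field, no instance needed
            if (!registered.contains(value)) {
                throw new IllegalStateException(clazz.getName() + " is not registered as [" + value + "]");
            }
        }
        System.out.println("all candidates registered");
    }
}
---------------------------------------------------------------------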
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessorTests.java new file mode 100644 index 0000000000000..ec29e912a2c90 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessorTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate.logical; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +public class BinaryLogicProcessorTests extends AbstractWireSerializingTestCase<BinaryLogicProcessor> { + + private static final Processor FALSE = new ConstantProcessor(false); + private static final Processor TRUE = new ConstantProcessor(true); + private static final Processor NULL = new ConstantProcessor((Object) null); + + public static BinaryLogicProcessor randomProcessor() { + return new BinaryLogicProcessor( + new ConstantProcessor(randomFrom(Boolean.FALSE, Boolean.TRUE, null)), + new ConstantProcessor(randomFrom(Boolean.FALSE, Boolean.TRUE, null)), + randomFrom(BinaryLogicProcessor.BinaryLogicOperation.values())); + } + + @Override + protected BinaryLogicProcessor createTestInstance() { + return randomProcessor(); + } + + @Override + protected Reader<BinaryLogicProcessor> instanceReader() { + return BinaryLogicProcessor::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testOR() { + assertEquals(true, new BinaryLogicProcessor(TRUE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertEquals(true, new BinaryLogicProcessor(FALSE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertEquals(false, new BinaryLogicProcessor(FALSE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertEquals(true, new BinaryLogicProcessor(TRUE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + } + + public void testORNullHandling() { + assertEquals(true, new BinaryLogicProcessor(TRUE, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertEquals(true, new BinaryLogicProcessor(NULL, TRUE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertNull(new BinaryLogicProcessor(FALSE, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertNull(new BinaryLogicProcessor(NULL, FALSE, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + assertNull(new BinaryLogicProcessor(NULL, NULL, BinaryLogicProcessor.BinaryLogicOperation.OR).process(null)); + } + + public void testAnd() { + assertEquals(false, new BinaryLogicProcessor(TRUE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertEquals(false, new BinaryLogicProcessor(FALSE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertEquals(false, new BinaryLogicProcessor(FALSE, FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertEquals(true, new BinaryLogicProcessor(TRUE, TRUE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + } + + public void testAndNullHandling() { + assertNull(new BinaryLogicProcessor(TRUE, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertNull(new BinaryLogicProcessor(NULL, TRUE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertEquals(false, new BinaryLogicProcessor(FALSE, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertEquals(false, new BinaryLogicProcessor(NULL,
FALSE, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + assertNull(new BinaryLogicProcessor(NULL, NULL, BinaryLogicProcessor.BinaryLogicOperation.AND).process(null)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessorTests.java index 0761ec5f2fa21..394818be2408a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparisonProcessorTests.java @@ -11,12 +11,6 @@ import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.LessThanOrEqual; import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; @@ -48,6 +42,11 @@ public void testEq() { assertEquals(false, new Equals(EMPTY, l(3), l(4)).makePipe().asProcessor().process(null)); } + public void testNEq() { + assertEquals(false, new NotEquals(EMPTY, l(4), l(4)).makePipe().asProcessor().process(null)); + assertEquals(true, new NotEquals(EMPTY, l(3), l(4)).makePipe().asProcessor().process(null)); + } + public void testGt() { assertEquals(true, new GreaterThan(EMPTY, l(4), l(3)).makePipe().asProcessor().process(null)); assertEquals(false, new GreaterThan(EMPTY, l(3), l(4)).makePipe().asProcessor().process(null)); @@ -73,14 +72,15 @@ public void testLte() { } public void testHandleNull() { - assertNull(new Equals(EMPTY, l(null), l(3)).makePipe().asProcessor().process(null)); - assertNull(new GreaterThan(EMPTY, l(null), l(3)).makePipe().asProcessor().process(null)); - assertNull(new GreaterThanOrEqual(EMPTY, l(null), l(3)).makePipe().asProcessor().process(null)); - assertNull(new LessThan(EMPTY, l(null), l(3)).makePipe().asProcessor().process(null)); - assertNull(new LessThanOrEqual(EMPTY, l(null), l(3)).makePipe().asProcessor().process(null)); + assertNull(new Equals(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); + assertNull(new NotEquals(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); + assertNull(new GreaterThan(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); + assertNull(new GreaterThanOrEqual(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); + assertNull(new LessThan(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); + assertNull(new LessThanOrEqual(EMPTY, Literal.NULL, l(3)).makePipe().asProcessor().process(null)); } private static Literal l(Object value) { return Literal.of(EMPTY, value); } -} \ No newline at end of file +} diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessorTests.java similarity index 92% rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessorTests.java index 12bba003115f4..3303072e50078 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/InProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessorTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.expression.predicate; +package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -11,10 +11,10 @@ import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.InProcessor; import java.util.Arrays; +import static org.elasticsearch.xpack.sql.expression.Literal.NULL; import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; public class InProcessorTests extends AbstractWireSerializingTestCase { @@ -22,7 +22,6 @@ public class InProcessorTests extends AbstractWireSerializingTestCase b) { return ((Literal) new ConstantFolding().rule(b)).value(); } + public void testNullFoldingIsNotNull() { + assertEquals(Literal.TRUE, new FoldNull().rule(new IsNotNull(EMPTY, Literal.TRUE))); + } + + public void testGenericNullableExpression() { + FoldNull rule = new FoldNull(); + // date-time + assertNullLiteral(rule.rule(new DayName(EMPTY, Literal.NULL, randomTimeZone()))); + // math function + assertNullLiteral(rule.rule(new Cos(EMPTY, Literal.NULL))); + // string function + assertNullLiteral(rule.rule(new Ascii(EMPTY, Literal.NULL))); + assertNullLiteral(rule.rule(new Repeat(EMPTY, getFieldAttribute(), Literal.NULL))); + // arithmetic + assertNullLiteral(rule.rule(new Add(EMPTY, getFieldAttribute(), Literal.NULL))); + // comparison + assertNullLiteral(rule.rule(new GreaterThan(EMPTY, getFieldAttribute(), Literal.NULL))); + // regex + assertNullLiteral(rule.rule(new RLike(EMPTY, getFieldAttribute(), Literal.NULL))); + } + + public void testSimplifyCoalesceNulls() { + Expression e = new SimplifyCoalesce().rule(new Coalesce(EMPTY, asList(Literal.NULL, Literal.NULL))); + assertEquals(Coalesce.class, e.getClass()); + assertEquals(0, e.children().size()); + } + + public void testSimplifyCoalesceRandomNulls() { + Expression e = new SimplifyCoalesce().rule(new Coalesce(EMPTY, randomListOfNulls())); + assertEquals(Coalesce.class, e.getClass()); + assertEquals(0, e.children().size()); + } + + public void testSimplifyCoalesceRandomNullsWithValue() { + Expression e = new SimplifyCoalesce().rule(new Coalesce(EMPTY, + CollectionUtils.combine( + CollectionUtils.combine(randomListOfNulls(), Literal.TRUE, Literal.FALSE, Literal.TRUE), + randomListOfNulls()))); + assertEquals(1, 
e.children().size()); + assertEquals(Literal.TRUE, e.children().get(0)); + } + + private List<Expression> randomListOfNulls() { + return asList(randomArray(1, 10, i -> new Literal[i], () -> Literal.NULL)); + } + + public void testSimplifyCoalesceFirstLiteral() { + Expression e = new SimplifyCoalesce() + .rule(new Coalesce(EMPTY, + Arrays.asList(Literal.NULL, Literal.TRUE, Literal.FALSE, new Abs(EMPTY, getFieldAttribute())))); + assertEquals(Coalesce.class, e.getClass()); + assertEquals(1, e.children().size()); + assertEquals(Literal.TRUE, e.children().get(0)); + } + // // Logical simplifications // + private void assertNullLiteral(Expression expression) { + assertEquals(Literal.class, expression.getClass()); + assertNull(expression.fold()); + } + public void testBinaryComparisonSimplification() { assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, FIVE, FIVE))); + assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new NotEquals(EMPTY, FIVE, FIVE))); assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, FIVE, FIVE))); assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, FIVE, FIVE))); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java index 0ee0c9bcca122..70ff5ac8c0e7d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.sql.type.DataType; import static org.hamcrest.core.StringStartsWith.startsWith; @@ -158,6 +160,22 @@ public void testComplexArithmetic() { assertEquals("2", ((Literal) sub2.children().get(1)).name()); } + public void testEquals() { + Expression expr = parser.createExpression("a = 10"); + assertEquals(Equals.class, expr.getClass()); + Equals eq = (Equals) expr; + assertEquals("(a) == 10", eq.name()); + assertEquals(2, eq.children().size()); + } + + public void testNotEquals() { + Expression expr = parser.createExpression("a != 10"); + assertEquals(NotEquals.class, expr.getClass()); + NotEquals neq = (NotEquals) expr; + assertEquals("(a) != 10", neq.name()); + assertEquals(2, neq.children().size()); + } + public void testCastWithUnquotedDataType() { Expression expr = parser.createExpression("CAST(10*2 AS long)"); assertEquals(Cast.class, expr.getClass()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java index 5fac14e2397db..77606ab1390dd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java @@ -5,13 +5,14 @@ */ package org.elasticsearch.xpack.sql.planner; -import org.elasticsearch.test.AbstractBuilderTestCase; +import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.optimizer.Optimizer; import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.sql.plan.physical.LocalExec; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.session.EmptyExecutable; @@ -25,7 +26,7 @@ import static org.hamcrest.Matchers.startsWith; -public class QueryFolderTests extends AbstractBuilderTestCase { +public class QueryFolderTests extends ESTestCase { private static SqlParser parser; private static Analyzer analyzer; @@ -64,6 +65,24 @@ public void testFoldingToLocalExecWithProject() { assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); } + public void testFoldingOfIsNotNull() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE (keyword IS NULL) IS NOT NULL"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec ee = (EsQueryExec) p; + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithNullFilter() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE null IN (1, 2)"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + public void testFoldingToLocalExecWithProject_FoldableIn() { PhysicalPlan p = plan("SELECT keyword FROM test WHERE int IN (null, null)"); assertEquals(LocalExec.class, p.getClass()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index c1e5a0d2dafad..4423887c16135 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.sql.planner; -import org.elasticsearch.test.AbstractBuilderTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.Project; import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; +import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; import org.elasticsearch.xpack.sql.querydsl.query.Query; import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; import org.elasticsearch.xpack.sql.querydsl.query.ScriptQuery; @@ -29,13 +30,13 @@ import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; import java.util.Map; import java.util.TimeZone; -import static org.hamcrest.core.StringStartsWith.startsWith; +import static org.hamcrest.Matchers.endsWith; +import static 
org.hamcrest.Matchers.startsWith; -public class QueryTranslatorTests extends AbstractBuilderTestCase { +public class QueryTranslatorTests extends ESTestCase { private static SqlParser parser; private static Analyzer analyzer; @@ -160,7 +161,7 @@ public void testLikeConstructsNotSupported() { assertEquals("Scalar function (LTRIM(keyword)) not allowed (yet) as arguments for LIKE", ex.getMessage()); } - public void testTranslateInExpression_WhereClause() throws IOException { + public void testTranslateInExpression_WhereClause() { LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', 'bar', 'lala', 'foo', concat('la', 'la'))"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); @@ -170,10 +171,11 @@ public void testTranslateInExpression_WhereClause() throws IOException { Query query = translation.query; assertTrue(query instanceof TermsQuery); TermsQuery tq = (TermsQuery) query; - assertEquals("keyword:(bar foo lala)", tq.asBuilder().toQuery(createShardContext()).toString()); + assertEquals("{\"terms\":{\"keyword\":[\"foo\",\"bar\",\"lala\"],\"boost\":1.0}}", + tq.asBuilder().toString().replaceAll("\\s", "")); } - public void testTranslateInExpression_WhereClauseAndNullHandling() throws IOException { + public void testTranslateInExpression_WhereClauseAndNullHandling() { LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', null, 'lala', null, 'foo', concat('la', 'la'))"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); @@ -183,7 +185,8 @@ public void testTranslateInExpression_WhereClauseAndNullHandling() throws IOExce Query query = translation.query; assertTrue(query instanceof TermsQuery); TermsQuery tq = (TermsQuery) query; - assertEquals("keyword:(foo lala)", tq.asBuilder().toQuery(createShardContext()).toString()); + assertEquals("{\"terms\":{\"keyword\":[\"foo\",\"lala\"],\"boost\":1.0}}", + tq.asBuilder().toString().replaceAll("\\s", "")); } public void testTranslateInExpressionInvalidValues_WhereClause() { @@ -194,32 +197,68 @@ public void testTranslateInExpressionInvalidValues_WhereClause() { assertFalse(condition.foldable()); SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); assertEquals("Line 1:52: Comparisons against variables are not (currently) supported; " + - "offender [keyword] in [keyword IN(foo, bar, keyword)]", ex.getMessage()); + "offender [keyword] in [keyword IN (foo, bar, keyword)]", ex.getMessage()); } - public void testTranslateInExpression_HavingClause_Painless() { - LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) in (10, 20, 30 - 10)"); + public void testTranslateInExpression_WhereClause_Painless() { + LogicalPlan p = plan("SELECT int FROM test WHERE POWER(int, 2) IN (10, null, 20, 30 - 10)"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); Expression condition = ((Filter) p.children().get(0)).condition(); assertFalse(condition.foldable()); QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); assertTrue(translation.query instanceof ScriptQuery); - ScriptQuery sq = (ScriptQuery) translation.query; - assertEquals("InternalSqlScriptUtils.nullSafeFilter(params.a0==10 || params.a0==20)", sq.script().toString()); - assertThat(sq.script().params().toString(), startsWith("[{a=MAX(int){a->")); + ScriptQuery sc = (ScriptQuery) translation.query; + 
assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.in(" + + "InternalSqlScriptUtils.power(InternalSqlScriptUtils.docValue(doc,params.v0),params.v1), params.v2))", + sc.script().toString()); + assertEquals("[{v=int}, {v=2}, {v=[10.0, null, 20.0]}]", sc.script().params().toString()); } - public void testTranslateInExpression_HavingClauseAndNullHandling_Painless() { - LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) in (10, null, 20, null, 30 - 10)"); + public void testTranslateInExpression_HavingClause_Painless() { + LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) IN (10, 20, 30 - 10)"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); Expression condition = ((Filter) p.children().get(0)).condition(); assertFalse(condition.foldable()); - QueryTranslation translation = QueryTranslator.toQuery(condition, false); - assertTrue(translation.query instanceof ScriptQuery); - ScriptQuery sq = (ScriptQuery) translation.query; - assertEquals("InternalSqlScriptUtils.nullSafeFilter(params.a0==10 || params.a0==20)", sq.script().toString()); - assertThat(sq.script().params().toString(), startsWith("[{a=MAX(int){a->")); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.in(params.a0, params.v0))", + aggFilter.scriptTemplate().toString()); + assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=MAX(int){a->")); + assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=[10, 20]}]")); + } + + public void testTranslateInExpression_HavingClause_PainlessOneArg() { + LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) IN (10, 30 - 20)"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.in(params.a0, params.v0))", + aggFilter.scriptTemplate().toString()); + assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=MAX(int){a->")); + assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=[10]}]")); + + } + + public void testTranslateInExpression_HavingClause_PainlessAndNullHandling() { + LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) IN (10, null, 20, 30, null, 30 - 10)"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, true); + assertNull(translation.query); + AggFilter aggFilter = translation.aggFilter; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.in(params.a0, params.v0))", + aggFilter.scriptTemplate().toString()); + assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=MAX(int){a->")); + assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=[10, null, 20, 30]}]")); } } diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java index 4a60224666515..363254f414c49 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java @@ -31,7 +31,7 @@ public void testSqlDisabled() { assertThat(plugin.getActions(), empty()); assertThat(plugin.getRestHandlers(Settings.EMPTY, mock(RestController.class), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new SettingsFilter(Settings.EMPTY, Collections.emptyList()), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new SettingsFilter(Collections.emptyList()), mock(IndexNameExpressionResolver.class), () -> mock(DiscoveryNodes.class)), empty()); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java index 4c763fa95cd26..9510b8d2213ff 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.tree; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; @@ -16,12 +17,17 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRanks; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentiles; import org.elasticsearch.xpack.sql.expression.gen.pipeline.AggExtractorInput; import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipesTests; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.InPipe; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.sql.tree.NodeTests.ChildrenAreAProperty; import org.elasticsearch.xpack.sql.tree.NodeTests.Dummy; @@ -41,6 +47,7 @@ import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; @@ -78,6 +85,10 @@ * */ public class NodeSubclassTests<T extends B, B extends Node<B>> extends ESTestCase { + + private static final List<Class<? extends Node<?>>> CLASSES_WITH_MIN_TWO_CHILDREN = Arrays.asList( + In.class, InPipe.class, Percentile.class, Percentiles.class, PercentileRanks.class); + + private final Class<T> subclass; public NodeSubclassTests(Class<T> subclass) { @@ -147,7 +158,6 @@ public void 
testTransform() throws Exception { /** * Test {@link Node#replaceChildren} implementation on {@link #subclass}. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34775") public void testReplaceChildren() throws Exception { Constructor<T> ctor = longestCtor(subclass); Object[] nodeCtorArgs = ctorArgs(ctor); @@ -343,20 +353,14 @@ private Object makeArg(Type argType) { */ @SuppressWarnings("unchecked") private static Object makeArg(Class<? extends Node<?>> toBuildClass, Type argType) throws Exception { + if (argType instanceof ParameterizedType) { ParameterizedType pt = (ParameterizedType) argType; if (pt.getRawType() == Map.class) { - Map<Object, Object> map = new HashMap<>(); - int size = between(0, 10); - while (map.size() < size) { - Object key = makeArg(toBuildClass, pt.getActualTypeArguments()[0]); - Object value = makeArg(toBuildClass, pt.getActualTypeArguments()[1]); - map.put(key, value); - } - return map; + return makeMap(toBuildClass, pt); } if (pt.getRawType() == List.class) { - return makeList(toBuildClass, pt, between(1, 10)); + return makeList(toBuildClass, pt); } if (pt.getRawType() == EnumSet.class) { @SuppressWarnings("rawtypes") @@ -512,6 +516,10 @@ public boolean equals(Object obj) { } } + private static List<?> makeList(Class<? extends Node<?>> toBuildClass, ParameterizedType listType) throws Exception { + return makeList(toBuildClass, listType, randomSizeForCollection(toBuildClass)); + } + private static List<?> makeList(Class<? extends Node<?>> toBuildClass, ParameterizedType listType, int size) throws Exception { List<Object> list = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -520,6 +528,27 @@ private static List<?> makeList(Class<? extends Node<?>> toBuildClass, Parameter return list; } + private static Object makeMap(Class<? extends Node<?>> toBuildClass, ParameterizedType pt) throws Exception { + Map<Object, Object> map = new HashMap<>(); + int size = randomSizeForCollection(toBuildClass); + while (map.size() < size) { + Object key = makeArg(toBuildClass, pt.getActualTypeArguments()[0]); + Object value = makeArg(toBuildClass, pt.getActualTypeArguments()[1]); + map.put(key, value); + } + return map; + } + + private static int randomSizeForCollection(Class<? extends Node<?>> toBuildClass) { + int minCollectionLength = 0; + int maxCollectionLength = 10; + + if (CLASSES_WITH_MIN_TWO_CHILDREN.stream().anyMatch(c -> c == toBuildClass)) { + minCollectionLength = 2; + } + return between(minCollectionLength, maxCollectionLength); + } + private List<?> makeListOfSameSizeOtherThan(Type listType, List<?> original) throws Exception { if (original.isEmpty()) { throw new IllegalArgumentException("Can't make a different empty list"); }
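The two hunks above route all random collection sizing through randomSizeForCollection, so node types that require at least two children (In, InPipe, and the percentile aggregates) never receive a too-small argument list. A standalone sketch of that sizing rule; the nested classes here are placeholders for the real SQL node types:

---------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.Random;

// Sketch of the sizing rule: most randomly generated collections may be
// empty, but classes needing at least two children get a floor of 2.
public class CollectionSizeSketch {
    static class In {}
    static class Percentiles {}

    private static final List<Class<?>> MIN_TWO = Arrays.asList(In.class, Percentiles.class);
    private static final Random RANDOM = new Random();

    static int randomSizeFor(Class<?> toBuild) {
        int min = MIN_TWO.stream().anyMatch(c -> c == toBuild) ? 2 : 0;
        return min + RANDOM.nextInt(10 - min + 1); // inclusive upper bound of 10
    }

    public static void main(String[] args) {
        System.out.println(randomSizeFor(In.class));     // always >= 2
        System.out.println(randomSizeFor(String.class)); // may be 0
    }
}
---------------------------------------------------------------------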
@@ -557,7 +586,7 @@ public static <T extends Node<?>> T makeNode(Class<T> nodeClass) throw /** * Find all subclasses of a particular class. */ - private static <T> List<Class<? extends T>> subclassesOf(Class<T> clazz) throws IOException { + public static <T> List<Class<? extends T>> subclassesOf(Class<T> clazz) throws IOException { @SuppressWarnings("unchecked") // The map is built this way List<Class<? extends T>> lookup = (List<Class<? extends T>>) subclassCache.get(clazz); if (lookup != null) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index f773634fe721f..7a04139430e33 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -7,10 +7,13 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + public class DataTypeConversionTests extends ESTestCase { public void testConversionToString() { Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.KEYWORD); @@ -221,6 +224,12 @@ public void testConversionToNull() { assertNull(conversion.convert(10.0)); } + public void testConversionFromNull() { + Conversion conversion = DataTypeConversion.conversionFor(DataType.NULL, DataType.INTEGER); + assertNull(conversion.convert(null)); + assertNull(conversion.convert(10)); + } + public void testConversionToIdentity() { Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, DataType.INTEGER); assertNull(conversion.convert(null)); @@ -252,4 +261,19 @@ public void testConversionToUnsupported() { () -> DataTypeConversion.conversionFor(DataType.INTEGER, DataType.UNSUPPORTED)); assertEquals("cannot convert from [INTEGER] to [UNSUPPORTED]", e.getMessage()); } + + public void testStringToIp() { + Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.IP); + assertNull(conversion.convert(null)); + assertEquals("192.168.1.1", conversion.convert("192.168.1.1")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("10.1.1.300")); + assertEquals("[10.1.1.300] is not a valid IPv4 or IPv6 address", e.getMessage()); + } + + public void testIpToString() { + Conversion ipToString = DataTypeConversion.conversionFor(DataType.IP, DataType.KEYWORD); + assertEquals("10.0.0.1", ipToString.convert(new Literal(EMPTY, "10.0.0.1", DataType.IP))); + Conversion stringToIp = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.IP); + assertEquals("10.0.0.1", ipToString.convert(stringToIp.convert(Literal.of(EMPTY, "10.0.0.1")))); + } } diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index a8d97f44b8d7d..a761755c7cbad 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.test.rest; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.http.HttpStatus; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.Request; diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.auto_follow_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.auto_follow_stats.json deleted file mode 100644 index 4d5ca5fe39cb8..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.auto_follow_stats.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "ccr.auto_follow_stats": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", - "methods": [ "GET" ], - "url": { - "path": "/_ccr/auto_follow/stats", - "paths": [ "/_ccr/auto_follow/stats" ], - "parts": {}, - "body": null - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json index a5c8de7483613..735387b173b73 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json @@ -1,6 +1,6 @@ { "ccr.delete_auto_follow_pattern": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html", "methods": [ "DELETE" ], "url": { "path": "/_ccr/auto_follow/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json index ea6c2256794a4..635a4e62683bb 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json @@ -1,6 +1,6 @@ { "ccr.follow": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html", "methods": [ "PUT" ], "url": { "path": "/{index}/_ccr/follow", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json new file mode 100644 index 0000000000000..e860a590b5247 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json @@ -0,0 +1,16 @@ +{ + "ccr.follow_stats": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html", + "methods": [ "GET" ], + "url": { + "path": "/{index}/_ccr/stats", + "paths": [ "/{index}/_ccr/stats" ], + "parts": { + "index": { + "type": "list", + "description": "A comma-separated list of index patterns; use `_all` to perform the operation on all indices" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.get_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.get_auto_follow_pattern.json index 65d0b9c8dea1e..6aea678fc004b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.get_auto_follow_pattern.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.get_auto_follow_pattern.json @@ -1,6 +1,6 @@ { "ccr.get_auto_follow_pattern": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html", "methods": [ "GET" ], "url": { "path": "/_ccr/auto_follow/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.pause_follow.json 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.pause_follow.json index b6cfbe9fd0c69..e128dab917ca4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.pause_follow.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.pause_follow.json @@ -1,6 +1,6 @@ { "ccr.pause_follow": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html", "methods": [ "POST" ], "url": { "path": "/{index}/_ccr/pause_follow", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json index 5f7aac790a00c..128b262ec7a50 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json @@ -1,6 +1,6 @@ { "ccr.put_auto_follow_pattern": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html", "methods": [ "PUT" ], "url": { "path": "/_ccr/auto_follow/{name}", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.resume_follow.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.resume_follow.json index b4f806e8b7fec..61bdf82372fc0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.resume_follow.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.resume_follow.json @@ -1,6 +1,6 @@ { "ccr.resume_follow": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html", "methods": [ "POST" ], "url": { "path": "/{index}/_ccr/resume_follow", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json index aa9e9a7fec379..ad9ab49bcbe46 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json @@ -1,16 +1,12 @@ { "ccr.stats": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html", "methods": [ "GET" ], "url": { "path": "/_ccr/stats", - "paths": [ "/_ccr/stats", "/{index}/_ccr/stats" ], - "parts": { - "index": { - "type": "list", - "description": "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" - } - } + "paths": [ "/_ccr/stats" ], + "parts": {}, + "body": null } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json new file mode 100644 index 0000000000000..4deaaaffc15b4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.delete_lifecycle.json @@ -0,0 +1,19 @@ +{ + "ilm.delete_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of 
the index lifecycle policy" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json new file mode 100644 index 0000000000000..de1b0deb5b9e0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -0,0 +1,24 @@ +{ + "ilm.explain_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/{index}/_ilm/explain", + "paths": ["/{index}/_ilm/explain"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index to explain" + } + }, + "params": { + "human": { + "type" : "boolean", + "default" : "false", + "description" : "Return data such as dates in a human readable format" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json new file mode 100644 index 0000000000000..9fbabb964792e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_lifecycle.json @@ -0,0 +1,19 @@ +{ + "ilm.get_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}", "/_ilm/policy"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of the index lifecycle policy" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json new file mode 100644 index 0000000000000..1bf2c1adf0263 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.get_status.json @@ -0,0 +1,13 @@ +{ + "ilm.get_status": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "GET" ], + "url": { + "path": "/_ilm/status", + "paths": ["/_ilm/status"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json new file mode 100644 index 0000000000000..ca3f1e76fb256 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.move_to_step.json @@ -0,0 +1,21 @@ +{ + "ilm.move_to_step": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/move/{index}", + "paths": ["/_ilm/move/{index}"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index whose lifecycle step is to change" + } + }, + "params": { + } + }, + "body": { + "description": "The new lifecycle step to move to" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json new file mode 100644 index 0000000000000..f23b3b3eb1537 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.put_lifecycle.json @@ -0,0 +1,21 @@ +{ + "ilm.put_lifecycle": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "PUT" ], + 
"url": { + "path": "/_ilm/policy/{policy}", + "paths": ["/_ilm/policy/{policy}"], + "parts": { + "policy": { + "type" : "string", + "description" : "The name of the index lifecycle policy" + } + }, + "params": { + } + }, + "body": { + "description": "The lifecycle policy definition to register" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json new file mode 100644 index 0000000000000..e05cd5df02282 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json @@ -0,0 +1,19 @@ +{ + "ilm.remove_policy": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ilm/remove", + "paths": ["/{index}/_ilm/remove", "/_ilm/remove"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the index to remove policy on" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json new file mode 100644 index 0000000000000..af3e7fd43eb56 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.retry.json @@ -0,0 +1,19 @@ +{ + "ilm.retry": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ilm/retry", + "paths": ["/{index}/_ilm/retry"], + "parts": { + "index": { + "type" : "string", + "description" : "The name of the indices (comma-separated) whose failed lifecycle step is to be retry" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json new file mode 100644 index 0000000000000..0f2c6e347c452 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.start.json @@ -0,0 +1,13 @@ +{ + "ilm.start": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/start", + "paths": ["/_ilm/start"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json new file mode 100644 index 0000000000000..f4d6526765971 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.stop.json @@ -0,0 +1,13 @@ +{ + "ilm.stop": { + "documentation": "http://www.elastic.co/guide/en/index_lifecycle/current/index_lifecycle.html", + "methods": [ "POST" ], + "url": { + "path": "/_ilm/stop", + "paths": ["/_ilm/stop"], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json index fd1cbb986a617..66dc18c8c20b3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.find_file_structure.json @@ -22,7 +22,7 @@ }, "format": { "type": "enum", - "options": [ "json", "xml", "delimited", "semi_structured_text" ], + "options": [ "ndjson", "xml", "delimited", "semi_structured_text" ], "description": "Optional parameter to 
specify the high level file format" }, "has_header_row": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json index 40eda835a4bc8..86c982a888151 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json @@ -8,14 +8,14 @@ "parts": { "metric": { "type" : "enum", - "options" : ["_all", "queued_watches", "pending_watches"], + "options" : ["_all", "queued_watches", "current_watches", "pending_watches"], "description" : "Controls what additional stat metrics should be included in the response" } }, "params": { "metric": { "type" : "enum", - "options" : ["_all", "queued_watches", "pending_watches"], + "options" : ["_all", "queued_watches", "current_watches", "pending_watches"], "description" : "Controls what additional stat metrics should be included in the response" }, "emit_stacktraces": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml index c206a08e6ca91..0497f15f757c2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index bf2f3bcec1cd8..6e11a1dddc71d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml index 667f80410e054..5ca3349d6c031 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml index 5faba0c8031b2..3ef47c2b60880 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e.
the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml index c13b2473cc785..e4f9bc579f48e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index 6e9579a061339..28b5d5c9315e8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -1,6 +1,7 @@ --- setup: - + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml index 549305579ed64..7f1cf3137a1ba 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml @@ -1,5 +1,8 @@ +setup: + - skip: + features: headers --- -"Test JSON file structure analysis without overrides": +"Test NDJSON file structure analysis without overrides": - do: headers: # This is to stop the usual content type randomization, which @@ -26,7 +29,7 @@ - match: { num_messages_analyzed: 3 } - match: { charset: "UTF-8" } - match: { has_byte_order_marker: false } - - match: { format: json } + - match: { format: ndjson } - match: { timestamp_field: time } - match: { joda_timestamp_formats.0: UNIX } - match: { java_timestamp_formats.0: UNIX } @@ -56,7 +59,7 @@ - is_false: explanation --- -"Test JSON file structure analysis with overrides": +"Test NDJSON file structure analysis with overrides": - do: headers: # This is to stop the usual content type randomization, which @@ -64,7 +67,7 @@ Content-Type: "application/json" xpack.ml.find_file_structure: charset: UTF-8 - format: json + format: ndjson timestamp_field: time timestamp_format: UNIX explain: true @@ -86,7 +89,7 @@ - match: { num_messages_analyzed: 3 } - match: { charset: "UTF-8" } - match: { has_byte_order_marker: false } - - match: { format: json } + - match: { format: ndjson } - match: { timestamp_field: time } - match: { joda_timestamp_formats.0: UNIX } - match: { java_timestamp_formats.0: UNIX } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml index 7f3250c7db614..2bc1a25ddad73 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml index 6daedaa8068cc..e1509084d9186 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml index e411251363b71..33da3db73cbf8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml index 6a60bbb96da6f..fac725e8d5efe 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -1,3 +1,6 @@ +setup: + - skip: + features: headers --- "Test CRUD on two jobs in shared index": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml index d1e2851e17690..c398cc15fafe2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 3b08753e20913..f23bce44002a5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -399,6 +399,8 @@ --- "Test cannot decrease model_memory_limit below current usage": + - skip: + features: headers - do: xpack.ml.put_job: job_id: jobs-crud-model-memory-limit-decrease @@ -527,7 +529,8 @@ --- "Test close job": - + - skip: + features: headers - do: xpack.ml.put_job: job_id: jobs-crud-close-job @@ -762,7 +765,8 @@ --- "Test force close job": - + - skip: + features: headers - do: xpack.ml.put_job: job_id: jobs-crud-force-close-job @@ -929,7 +933,8 @@ --- "Test cannot create job with existing result document": - + - skip: + features: headers - do: headers: Content-Type: application/json @@ -1068,7 +1073,8 @@ --- "Test max model memory limit": - + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser @@ -1315,7 +1321,8 @@ --- "Test open job when persistent task allocation disabled": - + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml index 7c4903bae95cf..507b6ddd45828 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml index 125f8cbf7f8d2..6feecf82333b2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml index 307a1d0a80d7e..c5545120d8c60 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml index 9b875fb1afd86..b25884da5fd00 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml index 249ff7c72d7ad..654e98c468fbe 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml index 513e1fb875774..a050ddf286297 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index b841c8c23069f..75003bbff053d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml index 0f01613203704..576b0295abd53 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -1,3 +1,6 @@ +setup: + - skip: + features: headers --- "Test new fields are mapped as keyword": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml index 7bc4f7df92acd..34f7a4bb72fa3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml index ce638fdceaa19..5c1321a04d047 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml index d216ecfe13e1a..48607dac7d5b6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: airline-data @@ -229,7 +231,7 @@ setup: job_id: "start-stop-datafeed-job-field-without-mappings" - do: - catch: /datafeed \[start-stop-datafeed-job-field-without-mappings-feed] cannot retrieve field \[airline2\] because it has no mappings/ + catch: /\[start-stop-datafeed-job-field-without-mappings-feed] cannot retrieve field \[airline2\] because it has no mappings/ xpack.ml.start_datafeed: datafeed_id: "start-stop-datafeed-job-field-without-mappings-feed" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml index 9966ae668c08f..777738cd503e4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml index 2019d4586a7ab..80123ea7c3c58 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/40_get_user_privs.yml @@ -204,7 +204,7 @@ teardown: "Test get_user_privileges for single role": - skip: reason: "contains is a newly added assertion" - features: contains + features: contains - do: headers: { Authorization: "Basic dGVzdC0xOjEyMzQ1Njc4" } # test-1 xpack.security.get_user_privileges: {} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml index 861be094fa62d..db485279b2bf2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index 759ddbad2b463..4db805ae2f9f8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index f8bb401a7721e..f39cfc6ca13a5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -1,4 +1,6 @@ setup: + - skip: + 
features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index bd49f2c338906..38b7303ecb3ae 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index cbb6f8956b14f..e0371cf5f0949 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index 3a756efc0d72a..851d1cfa172e5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo @@ -708,5 +710,180 @@ setup: - match: { aggregations.histo.buckets.3.doc_count: 10 } - match: { aggregations.histo.buckets.3.the_max.value: 3 } +--- +"Wildcards matching single rollup index": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup*" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 20 } + +--- +"Wildcards matching two rollup indices": + + - do: + indices.create: + index: bar + body: + mappings: + _doc: + properties: + timestamp: + type: date + partition: + type: keyword + price: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: bar + body: > + { + "index_pattern": "bar", + "rollup_index": "bar_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + catch: /RollupSearch currently only supports searching one rollup index at a time\./ + xpack.rollup.rollup_search: + index: "*_rollup" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + + +--- +"Rollup search via alias": + + - do: + indices.put_alias: + index: foo_rollup + name: rollup_alias + + - do: + xpack.rollup.rollup_search: + index: "rollup_alias" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 20 } + +--- +"Rollup search via alias, multiple rollup indices match": + + - do: + indices.create: + index: bar + body: + mappings: + _doc: + properties: + timestamp: + type: date + partition: + type: keyword + price: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: bar + body: > + { + "index_pattern": "bar", + "rollup_index": "bar_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + indices.put_alias: + index: foo_rollup,bar_rollup + name: rollup_alias + + - do: + catch: /RollupSearch currently only supports searching one rollup index at a time\./ + xpack.rollup.rollup_search: + index: "rollup_alias" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml index 57bfd821ea24d..3db0fa34ae2ea 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -6,8 +6,6 @@ setup: cluster.health: wait_for_status: yellow - - --- teardown: - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml index 38a357bcd6825..0ed11ab1b04a0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml index 849aca3332dfe..b77eb8a786adc 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: headers - do: indices.create: index: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml index 9844dea9135a3..5a90af3725294 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml @@ -12,3 +12,55 @@ emit_stacktraces: "true" - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } + +--- +"Test watcher stats current watches": + - skip: + version: " - 6.99.99" + reason: metrics were fixed in 7.0.0 + + - do: + xpack.watcher.stats: + metric: "current_watches" + + - is_false: stats.0.queued_watches + - is_true: stats.0.current_watches + +--- +"Test watcher stats queued watches": + - skip: + version: " - 6.99.99" + reason: metrics were fixed in 7.0.0 + + - do: + xpack.watcher.stats: + metric: "queued_watches" + + - is_false: stats.0.current_watches + - is_true: stats.0.queued_watches + +--- +"Test watcher stats queued watches using pending_watches": + - skip: + version: " - 6.99.99" + reason: metrics were fixed in 7.0.0 + features: warnings + + - do: + warnings: + - 'The pending_watches parameter is deprecated, use queued_watches instead' + + xpack.watcher.stats: + metric: "pending_watches" + + - is_false: stats.0.current_watches + - is_true: stats.0.queued_watches + +--- +"Test watcher stats all watches": + - do: + 
xpack.watcher.stats: + metric: "_all" + + - is_true: stats.0.current_watches + - is_true: stats.0.queued_watches diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml index 47e1a1160b580..81699d70c4ea1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -17,6 +17,7 @@ - contains: { nodes.$master.modules: { name: x-pack-core } } - contains: { nodes.$master.modules: { name: x-pack-deprecation } } - contains: { nodes.$master.modules: { name: x-pack-graph } } + - contains: { nodes.$master.modules: { name: x-pack-ilm } } - contains: { nodes.$master.modules: { name: x-pack-logstash } } - contains: { nodes.$master.modules: { name: x-pack-ml } } - contains: { nodes.$master.modules: { name: x-pack-monitoring } } diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java index d91cc2a6f0148..2b46a661e17b5 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; import org.elasticsearch.script.Script; @@ -43,17 +42,16 @@ public class IndexUpgradeCheck extends AbstractComponent { * Creates a new upgrade check * * @param name - the name of the check - * @param settings - system settings * @param actionRequired - a function that returns the upgrade action required for the index with the specified name * @param client - client * @param clusterService - cluster service * @param types - a list of types that the reindexing should be limited to * @param updateScript - the upgrade script that should be used during reindexing */ - public IndexUpgradeCheck(String name, Settings settings, + public IndexUpgradeCheck(String name, Function actionRequired, Client client, ClusterService clusterService, String[] types, Script updateScript) { - this(name, settings, actionRequired, client, clusterService, types, updateScript, + this(name, actionRequired, client, clusterService, types, updateScript, listener -> listener.onResponse(null), (t, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE)); } @@ -61,7 +59,6 @@ public IndexUpgradeCheck(String name, Settings settings, * Creates a new upgrade check * * @param name - the name of the check - * @param settings - system settings * @param actionRequired - a function that returns the upgrade action required for the index with the specified name * @param client - client * @param clusterService - cluster service * @param types - a list of types that the reindexing should be limited to * @param updateScript - the upgrade script that should be used during reindexing * @param preUpgrade - action that should be performed before upgrade * @param postUpgrade - action that should be performed after upgrade */ - public IndexUpgradeCheck(String name, Settings settings, + public IndexUpgradeCheck(String name, Function actionRequired, Client client, ClusterService clusterService, String[] types, Script updateScript, Consumer> preUpgrade,
BiConsumer> postUpgrade) { - super(settings); this.name = name; this.actionRequired = actionRequired; this.reindexer = new InternalIndexReindexer<>(client, clusterService, IndexUpgradeCheckVersion.UPRADE_VERSION, updateScript, diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java index ad0ebd6815f2d..1f51ff391ce2f 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; @@ -29,10 +28,9 @@ public class IndexUpgradeService extends AbstractComponent { private final IndexNameExpressionResolver indexNameExpressionResolver; - public IndexUpgradeService(Settings settings, List upgradeChecks) { - super(settings); + public IndexUpgradeService(List upgradeChecks) { this.upgradeChecks = upgradeChecks; - this.indexNameExpressionResolver = new IndexNameExpressionResolver(settings); + this.indexNameExpressionResolver = new IndexNameExpressionResolver(); } /** diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java index e454ac4a0140b..985baeaf9ab3f 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java @@ -46,11 +46,9 @@ public class Upgrade extends Plugin implements ActionPlugin { public static final Version UPGRADE_INTRODUCED = Version.CURRENT.minimumCompatibilityVersion(); - private final Settings settings; private final List> upgradeCheckFactories; - public Upgrade(Settings settings) { - this.settings = settings; + public Upgrade() { this.upgradeCheckFactories = new ArrayList<>(); } @@ -63,7 +61,7 @@ public Collection createComponents(Client client, ClusterService cluster for (BiFunction checkFactory : upgradeCheckFactories) { upgradeChecks.add(checkFactory.apply(client, clusterService)); } - return Collections.singletonList(new IndexUpgradeService(settings, Collections.unmodifiableList(upgradeChecks))); + return Collections.singletonList(new IndexUpgradeService(Collections.unmodifiableList(upgradeChecks))); } @Override diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java index edb560174390c..ff1a80d4edd01 100644 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -28,11 +27,11 @@ public class TransportIndexUpgradeAction extends TransportMasterNodeAction( - "test", Settings.EMPTY, + "test", indexMetaData -> { if (indexMetaData.getIndex().getName().equals(testIndex)) { return UpgradeActionRequired.UPGRADE; @@ -116,7 +115,7 @@ public void testInternalUpgradePrePostChecks() throws Exception { ); ensureYellow(testIndex); - IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Collections.singletonList(check)); + IndexUpgradeService service = new IndexUpgradeService(Collections.singletonList(check)); PlainActionFuture future = PlainActionFuture.newFuture(); service.upgrade(new TaskId("abc", 123), testIndex, clusterService().state(), future); diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java index f980450c07f7c..961b86a53cdbf 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java @@ -27,7 +27,7 @@ public class IndexUpgradeServiceTests extends ESTestCase { - private IndexUpgradeCheck upgradeBarCheck = new IndexUpgradeCheck("upgrade_bar", Settings.EMPTY, + private IndexUpgradeCheck upgradeBarCheck = new IndexUpgradeCheck("upgrade_bar", (Function) indexMetaData -> { if ("bar".equals(indexMetaData.getSettings().get("test.setting"))) { return UpgradeActionRequired.UPGRADE; @@ -36,7 +36,7 @@ public class IndexUpgradeServiceTests extends ESTestCase { } }, null, null, null, null); - private IndexUpgradeCheck reindexFooCheck = new IndexUpgradeCheck("reindex_foo", Settings.EMPTY, + private IndexUpgradeCheck reindexFooCheck = new IndexUpgradeCheck("reindex_foo", (Function) indexMetaData -> { if ("foo".equals(indexMetaData.getSettings().get("test.setting"))) { return UpgradeActionRequired.REINDEX; @@ -45,10 +45,10 @@ public class IndexUpgradeServiceTests extends ESTestCase { } }, null, null, null, null); - private IndexUpgradeCheck everythingIsFineCheck = new IndexUpgradeCheck("everything_is_fine", Settings.EMPTY, + private IndexUpgradeCheck everythingIsFineCheck = new IndexUpgradeCheck("everything_is_fine", indexMetaData -> UpgradeActionRequired.UP_TO_DATE, null, null, null, null); - private IndexUpgradeCheck unreachableCheck = new IndexUpgradeCheck("unreachable", Settings.EMPTY, + private IndexUpgradeCheck unreachableCheck = new IndexUpgradeCheck("unreachable", (Function) indexMetaData -> { fail("Unreachable check is called"); return null; @@ -57,14 +57,14 @@ public class IndexUpgradeServiceTests extends ESTestCase { public void testIndexUpgradeServiceMultipleCheck() throws Exception { IndexUpgradeService service; if (randomBoolean()) { - service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + service = new IndexUpgradeService(Arrays.asList( upgradeBarCheck, reindexFooCheck, everythingIsFineCheck, unreachableCheck // This one should never be called )); } else { - service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + service = new IndexUpgradeService(Arrays.asList( reindexFooCheck, upgradeBarCheck, everythingIsFineCheck, @@ -93,7 +93,7 @@ public void testIndexUpgradeServiceMultipleCheck() throws Exception { public void 
testNoMatchingChecks() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( upgradeBarCheck, reindexFooCheck )); @@ -113,7 +113,7 @@ public void testNoMatchingChecks() throws Exception { } public void testEarlierChecksWin() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( everythingIsFineCheck, upgradeBarCheck, reindexFooCheck @@ -132,7 +132,7 @@ public void testEarlierChecksWin() throws Exception { } public void testGenericTest() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( upgradeBarCheck, reindexFooCheck )); diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java index 1cc6ce8e54777..1b9ad0a79fc38 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.upgrade; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -16,7 +17,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; @@ -97,8 +97,8 @@ public String pluginScriptLang() { public MockUpgradePlugin(Settings settings) { this.settings = settings; - this.upgrade = new Upgrade(settings); - Loggers.getLogger(IndexUpgradeTasksIT.class).info("MockUpgradePlugin is created"); + this.upgrade = new Upgrade(); + LogManager.getLogger(IndexUpgradeTasksIT.class).info("MockUpgradePlugin is created"); } @@ -121,8 +121,8 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - return Arrays.asList(new IndexUpgradeService(settings, Collections.singletonList( - new IndexUpgradeCheck("test", settings, + return Arrays.asList(new IndexUpgradeService(Collections.singletonList( + new IndexUpgradeCheck("test", new Function() { @Override public UpgradeActionRequired apply(IndexMetaData indexMetaData) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index f87d9454f2d77..ce9db36eef08d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -272,7 +272,7 @@ public Collection createComponents(Client client, ClusterService cluster throw new UncheckedIOException(e); } - new 
WatcherIndexTemplateRegistry(settings, clusterService, threadPool, client); + new WatcherIndexTemplateRegistry(clusterService, threadPool, client); // http client httpClient = new HttpClient(settings, getSslService(), cryptoService); @@ -290,7 +290,7 @@ public Collection createComponents(Client client, ClusterService cluster reloadableServices.add(slackService); reloadableServices.add(pagerDutyService); - TextTemplateEngine templateEngine = new TextTemplateEngine(settings, scriptService); + TextTemplateEngine templateEngine = new TextTemplateEngine(scriptService); Map emailAttachmentParsers = new HashMap<>(); emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, templateEngine)); emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); @@ -376,7 +376,7 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) .setConcurrentRequests(SETTING_BULK_CONCURRENT_REQUESTS.get(settings)) .build(); - HistoryStore historyStore = new HistoryStore(settings, bulkProcessor); + HistoryStore historyStore = new HistoryStore(bulkProcessor); // schedulers final Set scheduleParsers = new HashSet<>(); @@ -395,15 +395,15 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) final Set triggerEngines = new HashSet<>(); triggerEngines.add(manualTriggerEngine); triggerEngines.add(configuredTriggerEngine); - final TriggerService triggerService = new TriggerService(settings, triggerEngines); + final TriggerService triggerService = new TriggerService(triggerEngines); - final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(settings, triggerService); + final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(triggerService); final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser, bulkProcessor); final WatcherSearchTemplateService watcherSearchTemplateService = - new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + new WatcherSearchTemplateService(scriptService, xContentRegistry); final WatchExecutor watchExecutor = getWatchExecutor(threadPool); - final WatchParser watchParser = new WatchParser(settings, triggerService, registry, inputRegistry, cryptoService, getClock()); + final WatchParser watchParser = new WatchParser(triggerService, registry, inputRegistry, cryptoService, getClock()); final ExecutionService executionService = new ExecutionService(settings, historyStore, triggeredWatchStore, watchExecutor, getClock(), watchParser, clusterService, client, threadPool.generic()); @@ -415,9 +415,9 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) watchParser, client); final WatcherLifeCycleService watcherLifeCycleService = - new WatcherLifeCycleService(settings, clusterService, watcherService); + new WatcherLifeCycleService(clusterService, watcherService); - listener = new WatcherIndexingListener(settings, watchParser, getClock(), triggerService); + listener = new WatcherIndexingListener(watchParser, getClock(), triggerService); clusterService.addListener(listener); return Arrays.asList(registry, inputRegistry, historyStore, triggerService, triggeredWatchParser, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 086528054bcf3..156a6e5de2818 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.Engine; @@ -65,8 +64,7 @@ final class WatcherIndexingListener extends AbstractComponent implements Indexin private final TriggerService triggerService; private volatile Configuration configuration = INACTIVE; - WatcherIndexingListener(Settings settings, WatchParser parser, Clock clock, TriggerService triggerService) { - super(settings); + WatcherIndexingListener(WatchParser parser, Clock clock, TriggerService triggerService) { this.parser = parser; this.clock = clock; this.triggerService = triggerService; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index a67101e35871f..1bf9c4ded606b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; @@ -42,8 +41,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. 
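// The hunk below repeats a pattern that runs through this whole diff: AbstractComponent no longer
// takes Settings, so constructors that only forwarded it via super(settings) drop the parameter,
// and callers are updated in the same commit. A minimal sketch of the pattern, using a hypothetical
// FooService (the name is illustrative, not part of this change):
//
//     FooService(Settings settings, Bar bar) { super(settings); this.bar = bar; }   // before
//     FooService(Bar bar) { this.bar = bar; }                                       // after
//
// Components that still read values from Settings themselves (e.g. WatcherService and HttpClient
// further down) keep the Settings argument but no longer pass it to super().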
private volatile WatcherService watcherService; - WatcherLifeCycleService(Settings settings, ClusterService clusterService, WatcherService watcherService) { - super(settings); + WatcherLifeCycleService(ClusterService clusterService, WatcherService watcherService) { this.watcherService = watcherService; clusterService.addListener(this); // Close if the indices service is being stopped, so we don't run into search failures (locally) that will diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 75fd13915de3e..f26ab5a14fb06 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -83,7 +83,6 @@ public class WatcherService extends AbstractComponent { WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore, ExecutionService executionService, WatchParser parser, Client client, ExecutorService executor) { - super(settings); this.triggerService = triggerService; this.triggeredWatchStore = triggeredWatchStore; this.executionService = executionService; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index cd682260ad9a4..2a327851558e5 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -75,11 +75,10 @@ public class HttpClient extends AbstractComponent implements Closeable { private final CryptoService cryptoService; public HttpClient(Settings settings, SSLService sslService, CryptoService cryptoService) { - super(settings); this.defaultConnectionTimeout = HttpSettings.CONNECTION_TIMEOUT.get(settings); this.defaultReadTimeout = HttpSettings.READ_TIMEOUT.get(settings); this.maxResponseSize = HttpSettings.MAX_HTTP_RESPONSE_SIZE.get(settings); - this.settingsProxy = getProxyFromSettings(); + this.settingsProxy = getProxyFromSettings(settings); this.cryptoService = cryptoService; HttpClientBuilder clientBuilder = HttpClientBuilder.create(); @@ -228,7 +227,7 @@ static void setProxy(RequestConfig.Builder config, HttpRequest request, HttpProx * * @return An HTTP proxy instance; if no proxy is configured in the settings, this will be an HttpProxy.NO_PROXY instance */ - private HttpProxy getProxyFromSettings() { + private HttpProxy getProxyFromSettings(Settings settings) { String proxyHost = HttpSettings.PROXY_HOST.get(settings); Scheme proxyScheme = HttpSettings.PROXY_SCHEME.exists(settings) ?
Scheme.parse(HttpSettings.PROXY_SCHEME.get(settings)) : Scheme.HTTP; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java index b7198f90c59ec..2f2d3d7b9f31a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.watcher.common.text; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -17,12 +15,11 @@ import java.util.HashMap; import java.util.Map; -public class TextTemplateEngine extends AbstractComponent { +public class TextTemplateEngine { private final ScriptService service; - public TextTemplateEngine(Settings settings, ScriptService service) { - super(settings); + public TextTemplateEngine(ScriptService service) { this.service = service; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/AbstractCompareCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/AbstractCompareCondition.java index 81e3eb464e69b..7b0c176927d8d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/AbstractCompareCondition.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/AbstractCompareCondition.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.watcher.support.Variables; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java index c1c458b1d661a..47af1bf13d820 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java @@ -8,8 +8,8 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.XContentUtils; +import org.elasticsearch.common.xcontent.ObjectPath; import java.io.IOException; import java.time.Clock; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java index a3b21dec295d2..7c990e6582ea2 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java @@ -8,8 +8,8 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.XContentUtils; +import org.elasticsearch.common.xcontent.ObjectPath; import java.io.IOException; import java.time.Clock; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index 7a0b7f14bcaf6..2beafa9be2804 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -101,7 +101,6 @@ public class ExecutionService extends AbstractComponent { public ExecutionService(Settings settings, HistoryStore historyStore, TriggeredWatchStore triggeredWatchStore, WatchExecutor executor, Clock clock, WatchParser parser, ClusterService clusterService, Client client, ExecutorService genericExecutor) { - super(settings); this.historyStore = historyStore; this.triggeredWatchStore = triggeredWatchStore; this.executor = executor; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java index 51998a14bd770..91475f499714e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java @@ -8,8 +8,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -48,12 +46,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static class Parser extends AbstractComponent { + public static class Parser { private final TriggerService triggerService; - public Parser(Settings settings, TriggerService triggerService) { - super(settings); + public Parser(TriggerService triggerService) { this.triggerService = triggerService; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index 9a4b555d63355..049a7ff200e77 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -60,7 +60,6 @@ public class TriggeredWatchStore extends AbstractComponent { private 
final BulkProcessor bulkProcessor; public TriggeredWatchStore(Settings settings, Client client, TriggeredWatch.Parser triggeredWatchParser, BulkProcessor bulkProcessor) { - super(settings); this.scrollSize = settings.getAsInt("xpack.watcher.execution.scroll.size", 1000); this.client = ClientHelper.clientWithOrigin(client, WATCHER_ORIGIN); this.scrollTimeout = settings.getAsTime("xpack.watcher.execution.scroll.timeout", TimeValue.timeValueMinutes(5)); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java index 723568f8ba75d..a20eef6b4895b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; @@ -32,8 +31,7 @@ public class HistoryStore extends AbstractComponent { private final BulkProcessor bulkProcessor; - public HistoryStore(Settings settings, BulkProcessor bulkProcessor) { - super(settings); + public HistoryStore(BulkProcessor bulkProcessor) { this.bulkProcessor = bulkProcessor; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java index e6c78f6ac0203..9ca790c3df5b8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java @@ -26,7 +26,7 @@ public SearchInputFactory(Settings settings, Client client, NamedXContentRegistr ScriptService scriptService) { this.client = client; this.defaultTimeout = settings.getAsTime("xpack.watcher.input.search.default_timeout", TimeValue.timeValueMinutes(1)); - this.searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + this.searchTemplateService = new WatcherSearchTemplateService(scriptService, xContentRegistry); } @Override
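The hunks above all follow the same recipe: AbstractComponent no longer takes Settings, so each subclass drops its super(settings) call, while constructors that genuinely read configuration (scroll sizes, timeouts) keep their injected Settings parameter. A minimal sketch of the pattern (ExampleStore is hypothetical, not a class touched by this PR):

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class ExampleStore extends AbstractComponent {
    private final TimeValue scrollTimeout;

    public ExampleStore(Settings settings) {
        // pre-PR code called super(settings) here; the base class no longer stores Settings,
        // but component-specific values are still read from the injected instance
        this.scrollTimeout = settings.getAsTime("xpack.watcher.execution.scroll.timeout", TimeValue.timeValueMinutes(5));
    }
}

diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java index 027825ab77871..0b545c7942821 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java @@ -28,15 +28,14 @@ public abstract class NotificationService<Account> extends AbstractComponent { private Map<String, Account> accounts; private Account defaultAccount; - public NotificationService(Settings settings, String type, + public NotificationService(String type, ClusterSettings clusterSettings, List<Setting<?>> pluginSettings) { - this(settings, type); + this(type); clusterSettings.addSettingsUpdateConsumer(this::reload, pluginSettings); } // Used for testing only -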
NotificationService(Settings settings, String type) { - super(settings); + NotificationService(String type) { this.type = type; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java index e45ed55cee3ac..70922e57bd078 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -104,7 +104,7 @@ public class EmailService extends NotificationService { private final CryptoService cryptoService; public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) { - super(settings, "email", clusterSettings, EmailService.getSettings()); + super("email", clusterSettings, EmailService.getSettings()); this.cryptoService = cryptoService; // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java index f6026c0efce3b..d0d9ecc78104c 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.watcher.notification.email.attachment; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -22,13 +22,13 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.http.HttpClient; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpProxy; import org.elasticsearch.xpack.watcher.common.http.HttpRequest; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; import org.elasticsearch.xpack.watcher.notification.email.Attachment; @@ -74,7 +74,7 @@ public ReportingAttachmentParser(Settings settings, HttpClient httpClient, TextT this.retries = RETRIES_SETTING.get(settings); this.httpClient = httpClient; this.templateEngine = templateEngine; - this.logger = Loggers.getLogger(getClass()); + 
this.logger = LogManager.getLogger(getClass()); } @Override diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java index 2f21c2299a9a9..58840aec9775b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java @@ -65,7 +65,7 @@ public class HipChatService extends NotificationService { private HipChatServer defaultServer; public HipChatService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "hipchat", clusterSettings, HipChatService.getSettings()); + super("hipchat", clusterSettings, HipChatService.getSettings()); this.httpClient = httpClient; // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java index 49c05f36b2445..d7b7fe2003b95 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java @@ -62,7 +62,7 @@ public class JiraService extends NotificationService { private final HttpClient httpClient; public JiraService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "jira", clusterSettings, JiraService.getSettings()); + super("jira", clusterSettings, JiraService.getSettings()); this.httpClient = httpClient; // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java index c10bcf4782f4c..6834fcd4e2e1a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java @@ -40,7 +40,7 @@ public class PagerDutyService extends NotificationService { private final HttpClient httpClient; public PagerDutyService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "pagerduty", clusterSettings, PagerDutyService.getSettings()); + super("pagerduty", clusterSettings, PagerDutyService.getSettings()); this.httpClient = httpClient; clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_SERVICE_API_KEY, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java index 2a38e08d59903..888da55430a8b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java +++ 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java @@ -40,7 +40,7 @@ public class SlackService extends NotificationService { private final HttpClient httpClient; public SlackService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "slack", clusterSettings, SlackService.getSettings()); + super("slack", clusterSettings, SlackService.getSettings()); this.httpClient = httpClient; clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java index 4c53fc767c788..3a5eae2eb56dd 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; import java.io.IOException; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java index 90c756c1323be..fad5b9cf93fa3 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java @@ -5,6 +5,9 @@ */ package org.elasticsearch.xpack.watcher.rest.action; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; @@ -21,6 +24,9 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestWatcherStatsAction extends WatcherRestHandler { + private static final Logger logger = LogManager.getLogger(RestWatcherStatsAction.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + public RestWatcherStatsAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(GET, URI_BASE + "/stats", this); @@ -41,8 +47,12 @@ protected RestChannelConsumer doPrepareRequest(final RestRequest restRequest, Wa request.includeCurrentWatches(true); request.includeQueuedWatches(true); } else { - request.includeCurrentWatches(metrics.contains("queued_watches")); - request.includeQueuedWatches(metrics.contains("pending_watches")); + request.includeCurrentWatches(metrics.contains("current_watches")); + request.includeQueuedWatches(metrics.contains("queued_watches") || metrics.contains("pending_watches")); + } + + if (metrics.contains("pending_watches")) { + deprecationLogger.deprecated("The pending_watches parameter is deprecated, 
use queued_watches instead"); }
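The RestWatcherStatsAction hunk above is the one behavioral fix in this section: the old code wired queued_watches to the *current* watches section and pending_watches to the queued section, so requesting current_watches did nothing. Now current_watches selects current watches, and queued_watches (or its deprecated alias pending_watches) selects queued watches. Assuming the usual _xpack/watcher/stats/{metric} route registered by this handler, the observable behavior is roughly:

GET _xpack/watcher/stats/current_watches   (now actually includes currently-executing watches)
GET _xpack/watcher/stats/queued_watches    (includes queued watches)
GET _xpack/watcher/stats/pending_watches   (same as queued_watches, plus a deprecation warning)

diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java index 36ec856beddff..6d0a02062ee17 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; @@ -51,8 +50,7 @@ public class WatcherIndexTemplateRegistry extends AbstractComponent implements C private final TemplateConfig[] indexTemplates; private final ConcurrentMap<String, AtomicBoolean> templateCreationsInProgress = new ConcurrentHashMap<>(); - public WatcherIndexTemplateRegistry(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) { - super(settings); + public WatcherIndexTemplateRegistry(ClusterService clusterService, ThreadPool threadPool, Client client) { this.client = client; this.threadPool = threadPool; this.indexTemplates = TEMPLATE_CONFIGS; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java index 2208aab428a39..d8451fbc7b3b3 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -36,8 +35,7 @@ public class WatcherSearchTemplateService extends AbstractComponent { private final ScriptService scriptService; private final NamedXContentRegistry xContentRegistry; - public WatcherSearchTemplateService(Settings settings, ScriptService scriptService, NamedXContentRegistry xContentRegistry) { - super(settings); + public WatcherSearchTemplateService(ScriptService scriptService, NamedXContentRegistry xContentRegistry) { this.scriptService = scriptService; this.xContentRegistry = xContentRegistry; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java index 862936688727c..72b91d0756240 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java @@ -27,7 +27,7 @@ public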
SearchTransformFactory(Settings settings, Client client, NamedXContentReg super(LogManager.getLogger(ExecutableSearchTransform.class)); this.client = client; this.defaultTimeout = settings.getAsTime("xpack.watcher.transform.search.default_timeout", TimeValue.timeValueMinutes(1)); - this.searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + this.searchTemplateService = new WatcherSearchTemplateService(scriptService, xContentRegistry); } @Override diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java index 56ce9f6d4a280..5033535c2fd58 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; @@ -24,9 +23,9 @@ public abstract class WatcherTransportAction request) { - super(settings, actionName, transportService, actionFilters, request); + super(actionName, transportService, actionFilters, request); this.licenseState = licenseState; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java index 98c98ca1b537b..9db741ecf4773 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -52,10 +51,10 @@ public class TransportAckWatchAction extends WatcherTransportAction) DeleteWatchRequest::new); + public TransportDeleteWatchAction(TransportService transportService, ActionFilters actionFilters, Client client) { + super(DeleteWatchAction.NAME, transportService, actionFilters, (Supplier) DeleteWatchRequest::new); this.client = client; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java index 0cc9af6aafa7b..eca30cfca8c56 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java @@ -15,7 +15,6 @@ import 
org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -65,11 +64,11 @@ public class TransportExecuteWatchAction extends WatcherTransportAction engines; private final Map perWatchStats = new HashMap<>(); - public TriggerService(Settings settings, Set<TriggerEngine> engines) { - super(settings); + public TriggerService(Set<TriggerEngine> engines) { Map<String, TriggerEngine> builder = new HashMap<>(); for (TriggerEngine engine : engines) { builder.put(engine.type(), engine); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java index dfee54c391aff..c53162bd7d9c8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java @@ -7,7 +7,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; @@ -33,8 +32,7 @@ public abstract class ScheduleTriggerEngine extends AbstractComponent implements protected final ScheduleRegistry scheduleRegistry; protected final Clock clock; - public ScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { - super(settings); + public ScheduleTriggerEngine(ScheduleRegistry scheduleRegistry, Clock clock) { this.scheduleRegistry = scheduleRegistry; this.clock = clock; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index bd0204766aff4..2fe74be2b0c2e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -41,7 +41,7 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { private Ticker ticker; public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { - super(settings, scheduleRegistry, clock); + super(scheduleRegistry, clock); this.tickInterval = TICKER_INTERVAL_SETTING.get(settings); this.schedules = new ConcurrentHashMap<>(); this.ticker = new Ticker(Node.NODE_DATA_SETTING.get(settings));
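With Settings gone from its constructor, a TriggerService is now built purely from its engines; note the asymmetry above, where the abstract ScheduleTriggerEngine drops Settings entirely while TickerScheduleTriggerEngine keeps it to read the tick interval and node role. A hedged, test-style sketch of the new wiring, mirroring the test changes later in this diff (TriggerServiceWiringExample is illustrative only):

import java.util.Collections;

import org.elasticsearch.xpack.watcher.trigger.TriggerEngine;
import org.elasticsearch.xpack.watcher.trigger.TriggerService;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class TriggerServiceWiringExample {
    TriggerService build() {
        // no Settings argument anymore; the engine set is the only input
        TriggerEngine engine = mock(TriggerEngine.class);
        when(engine.type()).thenReturn("schedule");
        return new TriggerService(Collections.singleton(engine));
    }
}

diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java index fe2f80d07f4f0..6c0880299376d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java @@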
-10,7 +10,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -60,9 +59,8 @@ public class WatchParser extends AbstractComponent { private final ExecutableCondition defaultCondition; private final List defaultActions; - public WatchParser(Settings settings, TriggerService triggerService, ActionRegistry actionRegistry, InputRegistry inputRegistry, + public WatchParser(TriggerService triggerService, ActionRegistry actionRegistry, InputRegistry inputRegistry, @Nullable CryptoService cryptoService, Clock clock) { - super(settings); this.triggerService = triggerService; this.actionRegistry = actionRegistry; this.inputRegistry = inputRegistry; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java index e1e8b5b2ddd7d..46b0fc0c30e7b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.common.stats.Counters; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java index 752f97b6b1a22..f351ed2e154ed 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java @@ -86,7 +86,7 @@ public class WatcherIndexingListenerTests extends ESTestCase { @Before public void setup() throws Exception { clock.freeze(); - listener = new WatcherIndexingListener(Settings.EMPTY, parser, clock, triggerService); + listener = new WatcherIndexingListener(parser, clock, triggerService); Map map = new HashMap<>(); map.put(shardId, new ShardAllocationConfiguration(0, 1, Collections.singletonList("foo"))); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 384338af5a284..467966e96fdbf 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -78,7 +78,7 @@ public void prepareServices() { }; 
doAnswer(answer).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class)); watcherService = mock(WatcherService.class); - lifeCycleService = new WatcherLifeCycleService(Settings.EMPTY, clusterService, watcherService); + lifeCycleService = new WatcherLifeCycleService(clusterService, watcherService); } public void testNoRestartWithoutAllocationIdsConfigured() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 73ad045652435..0f670ea4cde9e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -223,7 +223,7 @@ public void testPausingWatcherServiceAlsoPausesTriggerService() { String engineType = "foo"; TriggerEngine triggerEngine = mock(TriggerEngine.class); when(triggerEngine.type()).thenReturn(engineType); - TriggerService triggerService = new TriggerService(Settings.EMPTY, Collections.singleton(triggerEngine)); + TriggerService triggerService = new TriggerService(Collections.singleton(triggerEngine)); Trigger trigger = mock(Trigger.class); when(trigger.type()).thenReturn(engineType); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java index 9b851131d7d6d..a753bf05b8229 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import java.util.Map; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java index aa03bf4545e5b..c4604d8e2a14d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java @@ -300,7 +300,7 @@ private WatchExecutionContext createWatchExecutionContext() { class UpperCaseTextTemplateEngine extends TextTemplateEngine { UpperCaseTextTemplateEngine() { - super(Settings.EMPTY, mock(ScriptService.class)); + super(mock(ScriptService.class)); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java index 22a6ced9e7e08..a0d09e39c029e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java @@ 
-293,7 +293,7 @@ class ModelTextTemplateEngine extends TextTemplateEngine { private final Map model; ModelTextTemplateEngine(Map model) { - super(Settings.EMPTY, mock(ScriptService.class)); + super(mock(ScriptService.class)); this.model = model; } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java index b2a1e7bb2cd58..ccb25de61e46e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import org.elasticsearch.xpack.core.watcher.watch.Watch; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java index 002d833c20913..ecc071d598105 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.watcher.common.text; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; @@ -47,7 +46,7 @@ public class TextTemplateTests extends ESTestCase { @Before public void init() throws Exception { service = mock(ScriptService.class); - engine = new TextTemplateEngine(Settings.EMPTY, service); + engine = new TextTemplateEngine(service); } public void testRender() throws Exception { @@ -131,7 +130,7 @@ private void assertNoCompilation(String input) { private void assertScriptServiceInvoked(final String input) { ScriptService scriptService = mock(ScriptService.class); - TextTemplateEngine e = new TextTemplateEngine(Settings.EMPTY, scriptService); + TextTemplateEngine e = new TextTemplateEngine(scriptService); TemplateScript.Factory compiledTemplate = templateParams -> new TemplateScript(templateParams) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 069dca8f2b187..13761948adccf 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -53,7 +53,7 @@ import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import 
org.elasticsearch.xpack.core.watcher.input.ExecutableInput; import org.elasticsearch.xpack.core.watcher.input.Input; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; import org.elasticsearch.xpack.core.watcher.transform.Transform; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 4012c8d24b5b5..428ec96df97e0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -398,10 +398,10 @@ public void testTriggeredWatchParser() throws Exception { triggeredWatch.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); ScheduleRegistry scheduleRegistry = new ScheduleRegistry(Collections.singleton(new CronSchedule.Parser())); - TriggerEngine triggerEngine = new WatchTests.ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, new ClockMock()); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new WatchTests.ParseOnlyScheduleTriggerEngine(scheduleRegistry, new ClockMock()); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); - TriggeredWatch.Parser parser = new TriggeredWatch.Parser(Settings.EMPTY, triggerService); + TriggeredWatch.Parser parser = new TriggeredWatch.Parser(triggerService); TriggeredWatch parsedTriggeredWatch = parser.parse(triggeredWatch.id().value(), 0, BytesReference.bytes(jsonBuilder)); XContentBuilder jsonBuilder2 = XContentFactory.jsonBuilder(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java index 19bf1ba5a1fd3..2ea364de18b4e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -71,7 +71,7 @@ public void init() { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(settings)); BulkProcessor.Listener listener = mock(BulkProcessor.Listener.class); BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener).setConcurrentRequests(0).setBulkActions(1).build(); - historyStore = new HistoryStore(settings, bulkProcessor); + historyStore = new HistoryStore(bulkProcessor); } public void testPut() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java index 0e120793fbc18..a93b1e94ce957 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.test.http.MockWebServer; import 
org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java index c29b3e1f8b458..42788b2ea7ad1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.common.http.HttpClient; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java index cb86913678a96..ddf45de816367 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java @@ -81,7 +81,7 @@ public void testAccountDoesNotExist() throws Exception{ private static class TestNotificationService extends NotificationService { TestNotificationService(Settings settings) { - super(settings, "test"); + super("test"); reload(settings); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java index 97907bbef8a59..d46524b93e508 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java @@ -352,7 +352,7 @@ public void testThatUrlIsTemplatable() throws Exception { .thenReturn(new HttpResponse(503)) .thenReturn(new HttpResponse(200, randomAlphaOfLength(10))); - TextTemplateEngine replaceHttpWithHttpsTemplateEngine = new TextTemplateEngine(Settings.EMPTY, null) { + TextTemplateEngine replaceHttpWithHttpsTemplateEngine = new TextTemplateEngine(null) { @Override public String render(TextTemplate textTemplate, Map model) { return textTemplate.getTemplate().replaceAll("REPLACEME", "REPLACED"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java index eadf739b175a3..fbe0b98f4ca63 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.hamcrest.Matchers; import java.io.IOException; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java index 4ab5b7b7b87aa..cd544c85b30bb 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.execution.Wid; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index 3201a69b486d3..e93a86b93eb23 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -72,7 +72,7 @@ public void createRegistryAndClient() { }).when(indicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); ClusterService clusterService = mock(ClusterService.class); - registry = new WatcherIndexTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client); + registry = new WatcherIndexTemplateRegistry(clusterService, threadPool, client); } public void testThatNonExistingTemplatesAreAddedImmediately() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java index eeefa20c3a158..a8db182bda054 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.watcher.test; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; @@ -13,7 +12,7 @@ public class MockTextTemplateEngine extends TextTemplateEngine { public MockTextTemplateEngine() { - super(Settings.EMPTY, null); + 
super(null); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java index 99cf45e583dc7..23f7e6e7c79b5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java @@ -59,7 +59,7 @@ protected Clock getClock() { @Override protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry){ - return new ScheduleTriggerEngineMock(settings, scheduleRegistry, clock); + return new ScheduleTriggerEngineMock(scheduleRegistry, clock); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java index 80e802cf817ac..8a7eb84157610 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java @@ -212,7 +212,7 @@ public BenchmarkWatcher(Settings settings) { @Override protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry) { - return new ScheduleTriggerEngineMock(settings, scheduleRegistry, clock); + return new ScheduleTriggerEngineMock(scheduleRegistry, clock); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java index 90b14233b8d27..9b92294a07022 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import org.elasticsearch.xpack.watcher.condition.ScriptCondition; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index 5c0562c0a0075..8714d89415195 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -189,6 +189,6 @@ public void testThatEmptyRequestBodyWorks() throws Exception { private WatcherSearchTemplateService watcherSearchTemplateService() { SearchModule module = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); - return new WatcherSearchTemplateService(Settings.EMPTY, scriptService, new 
NamedXContentRegistry(module.getNamedXContents())); + return new WatcherSearchTemplateService(scriptService, new NamedXContentRegistry(module.getNamedXContents())); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java index 87c10c97c8fc6..143f760f23c1e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java index edf05fac33828..ddc1e07534ef5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java @@ -5,9 +5,9 @@ */ package org.elasticsearch.xpack.watcher.transform.chain; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -144,7 +144,7 @@ private static class NamedExecutableTransform extends ExecutableTransform watches = new ConcurrentHashMap<>(); - public ScheduleTriggerEngineMock(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { - super(settings, scheduleRegistry, clock); + public ScheduleTriggerEngineMock(ScheduleRegistry scheduleRegistry, Clock clock) { + super(scheduleRegistry, clock); } @Override
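All the logging changes in this section (ReportingAttachmentParser above, ChainTransformTests here, WatchTests below) follow one recipe: drop the Elasticsearch Loggers helper and obtain loggers directly from log4j. A minimal sketch (LoggingExample is hypothetical):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class LoggingExample {
    // before: private final Logger logger = Loggers.getLogger(getClass());
    private final Logger logger = LogManager.getLogger(getClass());
}

diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java index f0d3e88b127eb..6372823d36d97 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.watcher.trigger; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; @@ -39,7 +38,7 @@ public class TriggerServiceTests extends ESTestCase { public void setupTriggerService() { TriggerEngine triggerEngine = mock(TriggerEngine.class);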
when(triggerEngine.type()).thenReturn(ENGINE_TYPE); - service = new TriggerService(Settings.EMPTY, Collections.singleton(triggerEngine)); + service = new TriggerService(Collections.singleton(triggerEngine)); // simple watch, input and simple action watch1 = createWatch("1"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java index 2e09c7446db05..daf1f18f3bd2f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.watch; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.SearchRequest; @@ -12,7 +13,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -172,7 +172,7 @@ public void init() throws Exception { templateEngine = mock(TextTemplateEngine.class); htmlSanitizer = mock(HtmlSanitizer.class); licenseState = mock(XPackLicenseState.class); - logger = Loggers.getLogger(WatchTests.class); + logger = LogManager.getLogger(WatchTests.class); searchTemplateService = mock(WatcherSearchTemplateService.class); } @@ -184,8 +184,8 @@ public void testParserSelfGenerated() throws Exception { Schedule schedule = randomSchedule(); Trigger trigger = new ScheduleTrigger(schedule); ScheduleRegistry scheduleRegistry = registry(schedule); - TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, clock); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(scheduleRegistry, clock); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); ExecutableInput input = randomInput(); InputRegistry inputRegistry = registry(input.type()); @@ -212,7 +212,7 @@ public void testParserSelfGenerated() throws Exception { BytesReference bytes = BytesReference.bytes(jsonBuilder().value(watch)); logger.info("{}", bytes.utf8ToString()); - WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, clock); + WatchParser watchParser = new WatchParser(triggerService, actionRegistry, inputRegistry, null, clock); Watch parsedWatch = watchParser.parse("_name", includeStatus, bytes, XContentType.JSON); @@ -233,7 +233,7 @@ public void testThatBothStatusFieldsCanBeRead() throws Exception { InputRegistry inputRegistry = mock(InputRegistry.class); ActionRegistry actionRegistry = mock(ActionRegistry.class); // a fake trigger service that advances past the trigger end object, which cannot be done with mocking - TriggerService triggerService = new TriggerService(Settings.EMPTY, Collections.emptySet()) { + TriggerService triggerService = new TriggerService(Collections.emptySet()) { @Override public Trigger parseTrigger(String jobName, XContentParser parser) throws IOException { while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -254,7 +254,7 @@ 
public Trigger parseTrigger(String jobName, XContentParser parser) throws IOExce } WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), unmodifiableMap(actionsStatuses)); - WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, clock); + WatchParser watchParser = new WatchParser(triggerService, actionRegistry, inputRegistry, null, clock); XContentBuilder builder = jsonBuilder().startObject().startObject("trigger").endObject().field("status", watchStatus).endObject(); Watch watch = watchParser.parse("foo", true, BytesReference.bytes(builder), XContentType.JSON); assertThat(watch.status().state().getTimestamp().getMillis(), is(clock.millis())); @@ -266,8 +266,8 @@ public Trigger parseTrigger(String jobName, XContentParser parser) throws IOExce public void testParserBadActions() throws Exception { ClockMock clock = ClockMock.frozen(); ScheduleRegistry scheduleRegistry = registry(randomSchedule()); - TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, clock); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(scheduleRegistry, clock); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); ConditionRegistry conditionRegistry = conditionRegistry(); ExecutableInput input = randomInput(); InputRegistry inputRegistry = registry(input.type()); @@ -282,7 +282,7 @@ public void testParserBadActions() throws Exception { .startObject() .startArray("actions").endArray() .endObject(); - WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, clock); + WatchParser watchParser = new WatchParser(triggerService, actionRegistry, inputRegistry, null, clock); try { watchParser.parse("failure", false, BytesReference.bytes(jsonBuilder), XContentType.JSON); fail("This watch should fail to parse as actions is an array"); @@ -294,8 +294,8 @@ public void testParserBadActions() throws Exception { public void testParserDefaults() throws Exception { Schedule schedule = randomSchedule(); ScheduleRegistry scheduleRegistry = registry(schedule); - TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); ConditionRegistry conditionRegistry = conditionRegistry(); InputRegistry inputRegistry = registry(new ExecutableNoneInput().type()); @@ -308,7 +308,7 @@ public void testParserDefaults() throws Exception { .field(ScheduleTrigger.TYPE, schedule(schedule).build()) .endObject(); builder.endObject(); - WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + WatchParser watchParser = new WatchParser(triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); Watch watch = watchParser.parse("failure", false, BytesReference.bytes(builder), XContentType.JSON); assertThat(watch, notNullValue()); assertThat(watch.trigger(), instanceOf(ScheduleTrigger.class)); @@ -322,16 +322,16 @@ public void testParserDefaults() throws Exception { public void testParseWatch_verifyScriptLangDefault() throws Exception { ScheduleRegistry 
scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, IntervalSchedule.Interval.Unit.SECONDS))); - TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); ConditionRegistry conditionRegistry = conditionRegistry(); InputRegistry inputRegistry = registry(SearchInput.TYPE); TransformRegistry transformRegistry = transformRegistry(); ActionRegistry actionRegistry = registry(Collections.emptyList(), conditionRegistry, transformRegistry); - WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + WatchParser watchParser = new WatchParser(triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); - WatcherSearchTemplateService searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry()); + WatcherSearchTemplateService searchTemplateService = new WatcherSearchTemplateService(scriptService, xContentRegistry()); XContentBuilder builder = jsonBuilder(); builder.startObject(); @@ -441,15 +441,15 @@ private WatchParser createWatchparser() throws Exception { ScheduleRegistry scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, IntervalSchedule.Interval.Unit.SECONDS))); - TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); - TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(singleton(triggerEngine)); ConditionRegistry conditionRegistry = conditionRegistry(); InputRegistry inputRegistry = registry(SimpleInput.TYPE); TransformRegistry transformRegistry = transformRegistry(); ActionRegistry actionRegistry = registry(actions, conditionRegistry, transformRegistry); - return new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + return new WatchParser(triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); } private static Schedule randomSchedule() { @@ -645,8 +645,8 @@ protected NamedXContentRegistry xContentRegistry() { public static class ParseOnlyScheduleTriggerEngine extends ScheduleTriggerEngine { - public ParseOnlyScheduleTriggerEngine(Settings settings, ScheduleRegistry registry, Clock clock) { - super(settings, registry, clock); + public ParseOnlyScheduleTriggerEngine(ScheduleRegistry registry, Clock clock) { + super(registry, clock); } @Override diff --git a/x-pack/protocol/LICENSE.txt b/x-pack/protocol/LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/x-pack/protocol/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
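Stepping back from the deleted license text: the WatchTests hunks earlier in this diff show the two mechanical migrations that recur throughout it. Constructors such as TriggerService, WatchParser and ScheduleTriggerEngine lose their Settings parameter, and loggers come from the plain Log4j 2 LogManager instead of the removed org.elasticsearch.common.logging.Loggers wrapper. A minimal sketch of both call-site changes, using a hypothetical WatcherComponent stand-in rather than the real Watcher classes:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Collection;
import java.util.Collections;

public class SettingsRemovalSketch {

    // Hypothetical stand-in for TriggerService/WatchParser: the Settings
    // constructor parameter is gone, so every call site simply drops it.
    static class WatcherComponent {
        private final Collection<String> engines;

        WatcherComponent(Collection<String> engines) { // was (Settings settings, Collection<String> engines)
            this.engines = engines;
        }

        Collection<String> engines() {
            return engines;
        }
    }

    // was: Loggers.getLogger(SettingsRemovalSketch.class); the Log4j 2
    // factory method is a drop-in replacement for the removed wrapper.
    private static final Logger logger = LogManager.getLogger(SettingsRemovalSketch.class);

    public static void main(String[] args) {
        WatcherComponent component = new WatcherComponent(Collections.singleton("schedule"));
        logger.info("engines: {}", component.engines());
    }
}

The same two-step pattern, drop the Settings argument and swap the logger factory, accounts for most of the test-only churn in the hunks that follow.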
diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java deleted file mode 100644 index 1ec83a8c05ae7..0000000000000 --- a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/watcher/DeleteWatchRequest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.protocol.xpack.watcher; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.uid.Versions; - -import java.io.IOException; - -/** - * A delete watch request to delete an watch by name (id) - */ -public class DeleteWatchRequest extends ActionRequest { - - private String id; - private long version = Versions.MATCH_ANY; - - public DeleteWatchRequest() { - this(null); - } - - public DeleteWatchRequest(String id) { - this.id = id; - } - - /** - * @return The name of the watch to be deleted - */ - public String getId() { - return id; - } - - /** - * Sets the name of the watch to be deleted - */ - public void setId(String id) { - this.id = id; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (id == null){ - validationException = ValidateActions.addValidationError("watch id is missing", validationException); - } else if (PutWatchRequest.isValidId(id) == false) { - validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); - } - return validationException; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - id = in.readString(); - version = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(id); - out.writeLong(version); - } - - @Override - public String toString() { - return "delete [" + id + "]"; - } -} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java deleted file mode 100644 index a4daa39566462..0000000000000 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.protocol.xpack.migration; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - -public class IndexUpgradeInfoRequestTests extends AbstractWireSerializingTestCase { - @Override - protected IndexUpgradeInfoRequest createTestInstance() { - int indexCount = randomInt(4); - String[] indices = new String[indexCount]; - for (int i = 0; i < indexCount; i++) { - indices[i] = randomAlphaOfLength(10); - } - IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(indices); - if (randomBoolean()) { - request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - } - return request; - } - - @Override - protected Writeable.Reader instanceReader() { - return IndexUpgradeInfoRequest::new; - } - - public void testNullIndices() { - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); - } -} diff --git a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java deleted file mode 100644 index 42de1ae60908a..0000000000000 --- a/x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.protocol.xpack.migration; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; - -public class IndexUpgradeInfoResponseTests extends AbstractStreamableXContentTestCase { - @Override - protected IndexUpgradeInfoResponse doParseInstance(XContentParser parser) { - return IndexUpgradeInfoResponse.fromXContent(parser); - } - - @Override - protected IndexUpgradeInfoResponse createBlankInstance() { - return new IndexUpgradeInfoResponse(); - } - - @Override - protected IndexUpgradeInfoResponse createTestInstance() { - return randomIndexUpgradeInfoResponse(randomIntBetween(0, 10)); - } - - private static IndexUpgradeInfoResponse randomIndexUpgradeInfoResponse(int numIndices) { - Map actions = new HashMap<>(); - for (int i = 0; i < numIndices; i++) { - actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); - } - return new IndexUpgradeInfoResponse(actions); - } - - @Override - protected IndexUpgradeInfoResponse mutateInstance(IndexUpgradeInfoResponse instance) { - if (instance.getActions().size() == 0) { - return randomIndexUpgradeInfoResponse(1); - } - Map actions = new HashMap<>(instance.getActions()); - if (randomBoolean()) { - Iterator> iterator = actions.entrySet().iterator(); - iterator.next(); - iterator.remove(); - } else { - actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); - } - return new IndexUpgradeInfoResponse(actions); - } -} diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle index 8af672fe92aee..126e3834bab4a 100644 --- a/x-pack/qa/audit-tests/build.gradle +++ b/x-pack/qa/audit-tests/build.gradle @@ -17,6 +17,7 @@ project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) integTestCluster { distribution 'zip' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/build.gradle b/x-pack/qa/build.gradle index ae77c8f893543..7d705d5b0dc9f 100644 --- a/x-pack/qa/build.gradle +++ b/x-pack/qa/build.gradle @@ -18,18 +18,3 @@ subprojects { } } } - -/* Remove assemble on all qa projects because we don't need to publish - * artifacts for them. 
*/ -gradle.projectsEvaluated { - subprojects { - Task assemble = project.tasks.findByName('assemble') - if (assemble) { - assemble.enabled = false - } - Task dependenciesInfo = project.tasks.findByName('dependenciesInfo') - if (dependenciesInfo) { - dependenciesInfo.enabled = false - } - } -} diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index 35b08de01252a..dbf5aa6fc227c 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -22,6 +22,7 @@ integTestRunner { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index e483645390be4..5011aa1d307f1 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -6,17 +6,21 @@ package org.elasticsearch.xpack.security.authc.kerberos; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import javax.security.auth.Subject; import java.io.IOException; import java.nio.file.Path; import java.security.AccessController; @@ -28,8 +32,6 @@ import java.util.Locale; import java.util.Set; -import javax.security.auth.Subject; - /** * Base Test class for Kerberos. *

      @@ -41,6 +43,8 @@ */ public abstract class KerberosTestCase extends ESTestCase { + protected static final String REALM_NAME = "test-kerb-realm"; + protected Settings globalSettings; protected Settings settings; protected List serviceUserNames; @@ -51,6 +55,7 @@ public abstract class KerberosTestCase extends ESTestCase { private static Locale restoreLocale; private static Set unsupportedLocaleLanguages; + static { unsupportedLocaleLanguages = new HashSet<>(); /* @@ -82,9 +87,9 @@ public abstract class KerberosTestCase extends ESTestCase { @BeforeClass public static void setupKerberos() throws Exception { if (isLocaleUnsupported()) { - Logger logger = Loggers.getLogger(KerberosTestCase.class); + Logger logger = LogManager.getLogger(KerberosTestCase.class); logger.warn("Attempting to run Kerberos test on {} locale, but that breaks SimpleKdcServer. Switching to English.", - Locale.getDefault()); + Locale.getDefault()); restoreLocale = Locale.getDefault(); Locale.setDefault(Locale.ENGLISH); } @@ -126,7 +131,7 @@ public void startSimpleKdcLdapServer() throws Exception { throw ExceptionsHelper.convertToRuntime(e); } }); - settings = KerberosRealmTestCase.buildKerberosRealmSettings(ktabPathForService.toString()); + settings = KerberosRealmTestCase.buildKerberosRealmSettings(REALM_NAME, ktabPathForService.toString()); } @After @@ -136,10 +141,15 @@ public void tearDownMiniKdc() throws IOException, PrivilegedActionException { } } + protected Path getKeytabPath(Environment env) { + final Setting setting = KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.getConcreteSettingForNamespace(REALM_NAME); + return env.configFile().resolve(setting.get(settings)); + } + /** * Creates principals and exports them to the keytab created in the directory. * - * @param dir Directory where the key tab would be created. + * @param dir Directory where the key tab would be created. * @param princNames principal names to be created * @return {@link Path} to key tab file. * @throws Exception thrown if principal or keytab could not be created @@ -154,7 +164,7 @@ protected Path createPrincipalKeyTab(final Path dir, final String... princNames) * Creates principal with given name and password. 
* * @param principalName Principal name - * @param password Password + * @param password Password * @throws Exception thrown if principal could not be created */ protected void createPrincipal(final String principalName, final char[] password) throws Exception { @@ -175,8 +185,8 @@ protected String principalName(final String user) { * Invokes Subject.doAs inside a doPrivileged block * * @param subject {@link Subject} - * @param action {@link PrivilegedExceptionAction} action for performing inside - * Subject.doAs + * @param action {@link PrivilegedExceptionAction} action for performing inside + * Subject.doAs * @return T Type of value as returned by PrivilegedAction * @throws PrivilegedActionException when privileged action threw exception */ diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java index 340d05ce35e0f..53d7b38a33986 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java @@ -13,17 +13,15 @@ import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.ietf.jgss.GSSException; +import javax.security.auth.login.LoginException; import java.io.IOException; import java.nio.file.Path; import java.security.PrivilegedActionException; import java.util.Base64; import java.util.concurrent.ExecutionException; -import javax.security.auth.login.LoginException; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -40,12 +38,12 @@ public void testKerbTicketGeneratedForDifferentServerFailsValidation() throws Ex // Client login and init token preparation final String clientUserName = randomFrom(clientUserNames); try (SpnegoClient spnegoClient = - new SpnegoClient(principalName(clientUserName), new SecureString("pwd".toCharArray()), principalName("differentServer"));) { + new SpnegoClient(principalName(clientUserName), new SecureString("pwd".toCharArray()), principalName("differentServer"))) { final String base64KerbToken = spnegoClient.getBase64EncodedTokenForSpnegoHeader(); assertThat(base64KerbToken, is(notNullValue())); final Environment env = TestEnvironment.newEnvironment(globalSettings); - final Path keytabPath = env.configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings)); + final Path keytabPath = getKeytabPath(env); final PlainActionFuture> future = new PlainActionFuture<>(); kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future); final GSSException gssException = expectThrows(GSSException.class, () -> unwrapExpectedExceptionFromFutureAndThrow(future)); @@ -57,7 +55,7 @@ public void testInvalidKerbTicketFailsValidation() throws Exception { final String base64KerbToken = Base64.getEncoder().encodeToString(randomByteArrayOfLength(5)); final Environment env = TestEnvironment.newEnvironment(globalSettings); - final Path keytabPath = env.configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings)); + final Path keytabPath = 
getKeytabPath(env); kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, new ActionListener>() { boolean exceptionHandled = false; @@ -87,9 +85,9 @@ public void testWhenKeyTabWithInvalidContentFailsValidation() assertThat(base64KerbToken, is(notNullValue())); final Path ktabPath = KerberosRealmTestCase.writeKeyTab(workDir.resolve("invalid.keytab"), "not - a - valid - key - tab"); - settings = KerberosRealmTestCase.buildKerberosRealmSettings(ktabPath.toString()); + settings = KerberosRealmTestCase.buildKerberosRealmSettings(REALM_NAME, ktabPath.toString()); final Environment env = TestEnvironment.newEnvironment(globalSettings); - final Path keytabPath = env.configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings)); + final Path keytabPath = getKeytabPath(env); final PlainActionFuture> future = new PlainActionFuture<>(); kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future); final GSSException gssException = expectThrows(GSSException.class, () -> unwrapExpectedExceptionFromFutureAndThrow(future)); @@ -100,13 +98,14 @@ public void testWhenKeyTabWithInvalidContentFailsValidation() public void testValidKebrerosTicket() throws PrivilegedActionException, GSSException, LoginException { // Client login and init token preparation final String clientUserName = randomFrom(clientUserNames); - try (SpnegoClient spnegoClient = new SpnegoClient(principalName(clientUserName), new SecureString("pwd".toCharArray()), - principalName(randomFrom(serviceUserNames)));) { + final SecureString password = new SecureString("pwd".toCharArray()); + final String servicePrincipalName = principalName(randomFrom(serviceUserNames)); + try (SpnegoClient spnegoClient = new SpnegoClient(principalName(clientUserName), password, servicePrincipalName)) { final String base64KerbToken = spnegoClient.getBase64EncodedTokenForSpnegoHeader(); assertThat(base64KerbToken, is(notNullValue())); final Environment env = TestEnvironment.newEnvironment(globalSettings); - final Path keytabPath = env.configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings)); + final Path keytabPath = getKeytabPath(env); final PlainActionFuture> future = new PlainActionFuture<>(); kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future); assertThat(future.actionGet(), is(notNullValue())); diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java index 53bf9e2b78d0b..13601d2fe202f 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java @@ -14,10 +14,10 @@ import org.apache.kerby.kerberos.kerb.server.KdcConfigKey; import org.apache.kerby.kerberos.kerb.server.SimpleKdcServer; import org.apache.kerby.util.NetworkUtil; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -37,7 +37,7 @@ * Starts in memory Ldap server and then uses it as backend for Kdc 
Server. */ public class SimpleKdcLdapServer { - private static final Logger logger = Loggers.getLogger(SimpleKdcLdapServer.class); + private static final Logger logger = LogManager.getLogger(SimpleKdcLdapServer.class); private Path workDir = null; private SimpleKdcServer simpleKdc; @@ -221,4 +221,4 @@ public Void run() throws Exception { logger.info("SimpleKdcServer stoppped."); } -} \ No newline at end of file +} diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServerTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServerTests.java index b1c75d957a7c8..6d9aae49a483d 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServerTests.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServerTests.java @@ -15,9 +15,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; -import org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationToken; -import org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidator; import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; import org.ietf.jgss.GSSException; @@ -61,7 +58,7 @@ public void testClientServiceMutualAuthentication() throws PrivilegedActionExcep // Service Login final Environment env = TestEnvironment.newEnvironment(globalSettings); - final Path keytabPath = env.configFile().resolve(KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH.get(settings)); + final Path keytabPath = getKeytabPath(env); // Handle Authz header which contains base64 token final PlainActionFuture> future = new PlainActionFuture<>(); new KerberosTicketValidator().validateTicket((byte[]) kerbAuthnToken.credentials(), keytabPath, true, future); diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index c0fb7eb2b77d7..9504f527cdd63 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -142,7 +142,12 @@ subprojects { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { dependsOn copyTestNodeKeystore if (version.before('6.3.0')) { - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" + } bwcVersion = version numBwcNodes = 2 @@ -237,7 +242,7 @@ subprojects { // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsIndexCompatible) { + for (final def version : bwcVersions.unreleasedIndexCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index c112709bbe04b..254fa37f94463 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java 
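Before the full-cluster-restart hunk below, note what the KerberosTestCase and KerberosTicketValidatorTests changes above have in common: a flat HTTP_SERVICE_KEYTAB_PATH lookup becomes a per-realm one. The setting is now namespaced by realm name, so tests resolve the concrete setting for REALM_NAME via getConcreteSettingForNamespace before reading it, which is exactly what the new getKeytabPath(env) helper wraps. A sketch of that lookup shape, with a hypothetical affix setting and key prefix (the real definition lives in KerberosRealmSettings):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class AffixSettingSketch {

    // Hypothetical affix setting mirroring the keytab lookup: one definition,
    // concretized per realm name at lookup time.
    static final Setting.AffixSetting<String> KEYTAB_PATH = Setting.affixKeySetting(
            "xpack.security.authc.realms.kerberos.", "keytab.path",
            key -> Setting.simpleString(key, Setting.Property.NodeScope));

    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("xpack.security.authc.realms.kerberos.test-kerb-realm.keytab.path", "es.keytab")
                .build();
        // Same shape as getKeytabPath(env) in KerberosTestCase: resolve the
        // concrete setting for one realm, then read its value.
        Setting<String> concrete = KEYTAB_PATH.getConcreteSettingForNamespace("test-kerb-realm");
        System.out.println(concrete.get(settings)); // -> es.keytab
    }
}

Concretizing per namespace is what lets several realms of the same type coexist, each with its own keytab path.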
@@ -19,7 +19,7 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index e79490df829e3..c31b2c0ad1d5e 100644 --- a/x-pack/qa/multi-cluster-search-security/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -16,6 +16,7 @@ remoteClusterTestCluster { numNodes = 2 clusterName = 'remote-cluster' setting 'cluster.remote.connect', false + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle index 4369287caba32..243a6f40438cc 100644 --- a/x-pack/qa/multi-node/build.gradle +++ b/x-pack/qa/multi-node/build.gradle @@ -8,6 +8,7 @@ dependencies { integTestCluster { numNodes = 2 clusterName = 'multi-node' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.watcher.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java index 3ea1b8e67471c..d4f35c5099064 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import java.io.IOException; import java.time.Instant; diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index f96823df019a5..9abac404cea14 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -19,8 +19,11 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.ldap.LdapSessionFactory; @@ -37,6 +40,7 @@ import java.util.Map; import 
java.util.concurrent.ExecutionException; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -93,23 +97,23 @@ public void initializeSslSocketFactory() throws Exception { mockSecureSettings.setString("xpack.ssl.truststore.secure_password", "changeit"); // configure realm to load config with certificate verification mode - builder.put("xpack.security.authc.realms." + REALM_NAME + ".ssl.truststore.path", truststore); - mockSecureSettings.setString("xpack.security.authc.realms." + REALM_NAME + ".ssl.truststore.secure_password", "changeit"); - builder.put("xpack.security.authc.realms." + REALM_NAME + ".ssl.verification_mode", VerificationMode.CERTIFICATE); + builder.put("xpack.security.authc.realms.ldap." + REALM_NAME + ".ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.ldap." + REALM_NAME + ".ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.ldap." + REALM_NAME + ".ssl.verification_mode", VerificationMode.CERTIFICATE); } else { // fake realms so ssl will get loaded - builder.put("xpack.security.authc.realms.foo.ssl.truststore.path", truststore); - mockSecureSettings.setString("xpack.security.authc.realms.foo.ssl.truststore.secure_password", "changeit"); - builder.put("xpack.security.authc.realms.foo.ssl.verification_mode", VerificationMode.FULL); - builder.put("xpack.security.authc.realms." + REALM_NAME + ".ssl.truststore.path", truststore); - mockSecureSettings.setString("xpack.security.authc.realms." + REALM_NAME + ".ssl.truststore.secure_password", "changeit"); - builder.put("xpack.security.authc.realms." + REALM_NAME + ".ssl.verification_mode", VerificationMode.CERTIFICATE); + builder.put("xpack.security.authc.realms.ldap.foo.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.ldap.foo.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.ldap.foo.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.ldap." + REALM_NAME + ".ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.ldap." + REALM_NAME + ".ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.ldap." 
+ REALM_NAME + ".ssl.verification_mode", VerificationMode.CERTIFICATE); // If not using global ssl, need to set the truststore for the "full verification" realm - builder.put("xpack.security.authc.realms.vmode_full.ssl.truststore.path", truststore); - mockSecureSettings.setString("xpack.security.authc.realms.vmode_full.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.ldap.vmode_full.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.ldap.vmode_full.ssl.truststore.secure_password", "changeit"); } - builder.put("xpack.security.authc.realms.vmode_full.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.ldap.vmode_full.ssl.verification_mode", VerificationMode.FULL); globalSettings = builder.setSecureSettings(mockSecureSettings).build(); Environment environment = TestEnvironment.newEnvironment(globalSettings); @@ -120,12 +124,13 @@ public void testConnect() throws Exception { //openldap does not use cn as naming attributes by default String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; - RealmConfig config = new RealmConfig("oldap-test", buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, - LdapSearchScope.ONE_LEVEL), globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); + RealmConfig config = new RealmConfig(realmId, + buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL), + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); - String[] users = new String[] { "blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor" }; + String[] users = new String[]{"blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor"}; for (String user : users) { logger.info("testing connect as user [{}]", user); try (LdapSession ldap = session(sessionFactory, user, PASSWORD_SECURE_STRING)) { @@ -139,11 +144,13 @@ public void testGroupSearchScopeBase() throws Exception { String groupSearchBase = "cn=Avengers,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; - RealmConfig config = new RealmConfig(REALM_NAME, buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, - LdapSearchScope.BASE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", REALM_NAME); + RealmConfig config = new RealmConfig(realmId, + buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.BASE), + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); - String[] users = new String[] { "blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor" }; + String[] users = new String[]{"blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor"}; for (String user : users) { try (LdapSession ldap = session(sessionFactory, user, PASSWORD_SECURE_STRING)) { assertThat(groups(ldap), hasItem(containsString("Avengers"))); @@ 
-154,13 +161,14 @@ public void testGroupSearchScopeBase() throws Exception { public void testCustomFilter() throws Exception { String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); Settings settings = Settings.builder() - .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) - .put("group_search.filter", "(&(objectclass=posixGroup)(memberUid={0}))") - .put("group_search.user_attribute", "uid") + .put(buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.FILTER), "(&(objectclass=posixGroup)(memberUid={0}))") + .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .build(); - RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); try (LdapSession ldap = session(sessionFactory, "selvig", PASSWORD_SECURE_STRING)) { @@ -170,16 +178,17 @@ public void testCustomFilter() throws Exception { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29758") public void testTcpTimeout() throws Exception { + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; Settings settings = Settings.builder() - .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) - .put("group_search.filter", "(objectClass=*)") - .put("ssl.verification_mode", VerificationMode.CERTIFICATE) - .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "1ms") //1 millisecond + .put(buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.FILTER), "(objectClass=*)") + .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), VerificationMode.CERTIFICATE) + .put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "1ms") .build(); - RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); LDAPException expected = expectThrows(LDAPException.class, @@ -191,14 +200,13 @@ public void testStandardLdapConnectionHostnameVerificationFailure() throws Excep //openldap does not use cn as naming attributes by default String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + final RealmConfig.RealmIdentifier realmId = new 
RealmConfig.RealmIdentifier("ldap", "vmode_full"); Settings settings = Settings.builder() // The certificate used in the vagrant box is valid for "localhost", but not for "127.0.0.1" - .put(buildLdapSettings(OPEN_LDAP_IP_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put(buildLdapSettings(realmId, OPEN_LDAP_IP_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) .build(); - - // Pick up the "full" verification mode config - RealmConfig config = new RealmConfig("vmode_full", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + RealmConfig config = new RealmConfig(realmId, settings, env, new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); String user = "blackwidow"; @@ -214,14 +222,14 @@ public void testStandardLdapConnectionHostnameVerificationSuccess() throws Excep //openldap does not use cn as naming attributes by default String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "vmode_full"); Settings settings = Settings.builder() // The certificate used in the vagrant box is valid for "localhost" (but not for "127.0.0.1") - .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put(buildLdapSettings(realmId, OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) .build(); - // Pick up the "full" verification mode config - RealmConfig config = new RealmConfig("vmode_full", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), - new ThreadContext(Settings.EMPTY)); + RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); final String user = "blackwidow"; @@ -232,7 +240,13 @@ public void testStandardLdapConnectionHostnameVerificationSuccess() throws Excep } public void testResolveSingleValuedAttributeFromConnection() throws Exception { - LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "cn", "sn").build(), true); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); + final Settings settings = Settings.builder() + .putList(getFullSettingKey(realmId.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), "cn", "sn") + .build(); + final RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + LdapMetaDataResolver resolver = new LdapMetaDataResolver(config, true); try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { final Map map = resolve(ldapConnection, resolver); assertThat(map.size(), equalTo(2)); @@ -242,7 +256,13 @@ public void testResolveSingleValuedAttributeFromConnection() throws Exception { } public void testResolveMultiValuedAttributeFromConnection() throws Exception { - LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "objectClass").build(), true); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", 
"oldap-test"); + final Settings settings = Settings.builder() + .putList(getFullSettingKey(realmId.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), "objectClass") + .build(); + final RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + LdapMetaDataResolver resolver = new LdapMetaDataResolver(config, true); try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { final Map map = resolve(ldapConnection, resolver); assertThat(map.size(), equalTo(1)); @@ -252,23 +272,33 @@ public void testResolveMultiValuedAttributeFromConnection() throws Exception { } public void testResolveMissingAttributeFromConnection() throws Exception { - LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "alias").build(), true); + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); + final Settings settings = Settings.builder() + .putList(getFullSettingKey(realmId.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), "alias") + .build(); + final RealmConfig config = new RealmConfig(realmId, settings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + LdapMetaDataResolver resolver = new LdapMetaDataResolver(config, true); try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { final Map map = resolve(ldapConnection, resolver); assertThat(map.size(), equalTo(0)); } } - private Settings buildLdapSettings(String ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { + private Settings buildLdapSettings(RealmConfig.RealmIdentifier realmId, String ldapUrl, String userTemplate, + String groupSearchBase, LdapSearchScope scope) { + final String[] urls = {ldapUrl}; + final String[] templates = {userTemplate}; Settings.Builder builder = Settings.builder() - .put(LdapTestCase.buildLdapSettings(ldapUrl, userTemplate, groupSearchBase, scope)); - builder.put("group_search.user_attribute", "uid"); + .put(LdapTestCase.buildLdapSettings(realmId, urls, templates, groupSearchBase, scope, null, false)); + builder.put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid"); if (useGlobalSSL) { return builder.build(); } return builder - .put("ssl.truststore.path", getDataPath(LDAPTRUST_PATH)) - .put("ssl.truststore.password", "changeit") + .put(getFullSettingKey(realmId, SSLConfigurationSettings.TRUST_STORE_PATH_REALM), getDataPath(LDAPTRUST_PATH)) + .put(getFullSettingKey(realmId, SSLConfigurationSettings.LEGACY_TRUST_STORE_PASSWORD_REALM), "changeit") + .put(globalSettings) .build(); } diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java index 42c2d9cd07e7f..5030fdecadf64 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java @@ -19,8 +19,11 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import 
diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java index 42c2d9cd07e7f..5030fdecadf64 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java @@ -19,8 +19,11 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; @@ -34,6 +37,7 @@ import java.util.Locale; import java.util.Objects; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -70,34 +74,29 @@ public void testUserSearchWithBindUserOpenLDAP() throws Exception { final boolean useSecureBindPassword = randomBoolean(); String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; String userSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "oldap-test"); final Settings.Builder realmSettings = Settings.builder() - .put(LdapTestCase.buildLdapSettings(new String[]{OpenLdapTests.OPEN_LDAP_DNS_URL}, Strings.EMPTY_ARRAY, groupSearchBase, - LdapSearchScope.ONE_LEVEL)) - .put("user_search.base_dn", userSearchBase) - .put("group_search.user_attribute", "uid") - .put("bind_dn", "uid=blackwidow,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("user_search.pool.enabled", randomBoolean()) - .put("ssl.verification_mode", "full"); + .put(LdapTestCase.buildLdapSettings(realmId, new String[]{OpenLdapTests.OPEN_LDAP_DNS_URL}, Strings.EMPTY_ARRAY, + groupSearchBase, LdapSearchScope.ONE_LEVEL, null, false)) + .put(getFullSettingKey(realmId.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), userSearchBase) + .put(getFullSettingKey(realmId.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") + .put(getFullSettingKey(realmId, PoolingSessionFactorySettings.BIND_DN), + "uid=blackwidow,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(realmId.getName(), LdapUserSearchSessionFactorySettings.POOL_ENABLED), randomBoolean()) + .put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), "full"); if (useSecureBindPassword) { final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("secure_bind_password", OpenLdapTests.PASSWORD); + secureSettings.setString(getFullSettingKey(realmId, PoolingSessionFactorySettings.SECURE_BIND_PASSWORD), + OpenLdapTests.PASSWORD); realmSettings.setSecureSettings(secureSettings); } else { - realmSettings.put("bind_password", OpenLdapTests.PASSWORD); + realmSettings.put(getFullSettingKey(realmId, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), OpenLdapTests.PASSWORD); }
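The bind credential now has an explicit secure flavor next to the deprecated plain-text one. A condensed sketch of the two paths exercised above (names from the hunk; PASSWORD stands in for the test credential):

    if (useSecureBindPassword) {
        // Preferred: keystore-backed secure setting.
        MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString(
                getFullSettingKey(realmId, PoolingSessionFactorySettings.SECURE_BIND_PASSWORD), PASSWORD);
        realmSettings.setSecureSettings(secureSettings);
    } else {
        // Legacy: plain setting, which now emits a deprecation warning.
        realmSettings.put(
                getFullSettingKey(realmId, PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD), PASSWORD);
    }

Because LEGACY_BIND_PASSWORD is realm-scoped, the deprecation assertion later in this file resolves the concrete per-realm instance via config.getConcreteSetting(...) rather than naming the setting definition itself.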
- RealmConfig config = new RealmConfig("oldap-test", realmSettings.build(), globalSettings, + final Settings settings = realmSettings.put(globalSettings).build(); + RealmConfig config = new RealmConfig(realmId, settings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); - Settings.Builder builder = Settings.builder() - .put(globalSettings, false); - builder.put(Settings.builder().put(config.settings(), false).normalizePrefix("xpack.security.authc.realms.oldap-test.").build()); - final MockSecureSettings secureSettings = new MockSecureSettings(); - if (useSecureBindPassword) { - secureSettings.setString("xpack.security.authc.realms.oldap-test.secure_bind_password", OpenLdapTests.PASSWORD); - } - builder.setSecureSettings(secureSettings); - Settings settings = builder.build(); - SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); String[] users = new String[]{"cap", "hawkeye", "hulk", "ironman", "thor"}; try (LdapUserSearchSessionFactory sessionFactory = new LdapUserSearchSessionFactory(config, sslService, threadPool)) { @@ -119,7 +118,9 @@ public void testUserSearchWithBindUserOpenLDAP() throws Exception { } if (useSecureBindPassword == false) { - assertSettingDeprecationsAndWarnings(new Setting<?>[]{PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD}); + assertSettingDeprecationsAndWarnings(new Setting<?>[]{ + config.getConcreteSetting(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD) + }); } } diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java index b55431dee1b00..98794dd4f705c 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java @@ -9,11 +9,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.OpenLdapTests; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import java.util.List; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; @@ -24,14 +27,15 @@ public class SearchGroupsResolverTests extends GroupsResolverTestCase { private static final String BRUCE_BANNER_DN = "uid=hulk,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "my-ldap-realm"); public void testResolveSubTree() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("group_search.user_attribute", "uid") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, containsInAnyOrder(
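These resolver tests now obtain their RealmConfig through a config(...) helper instead of passing raw Settings. The helper's definition is not part of this diff; presumably it lives in GroupsResolverTestCase and looks roughly like this hypothetical sketch:

    // Hypothetical -- the real helper may differ in name, signature and defaults.
    static RealmConfig config(RealmConfig.RealmIdentifier realmId, Settings realmSettings) {
        Settings merged = Settings.builder().put(realmSettings).put(globalSettings).build();
        return new RealmConfig(realmId, merged,
                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY));
    }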
@@ -43,12 +47,13 @@ public void testResolveOneLevel() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), + "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") .put("group_search.scope", LdapSearchScope.ONE_LEVEL) - .put("group_search.user_attribute", "uid") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, containsInAnyOrder( @@ -60,12 +65,13 @@ public void testResolveOneLevel() throws Exception { public void testResolveBase() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "cn=Avengers,ou=People,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), + "cn=Avengers,ou=People,dc=oldap,dc=test,dc=elasticsearch,dc=com") .put("group_search.scope", LdapSearchScope.BASE) - .put("group_search.user_attribute", "uid") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, hasItem(containsString("Avengers"))); @@ -73,28 +79,28 @@ public void testResolveBase() throws Exception { public void testResolveCustomFilter() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") .put("group_search.filter", "(&(objectclass=posixGroup)(memberUID={0}))") - .put("group_search.user_attribute", "uid") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); List<String> groups = resolveBlocking(resolver, ldapConnection, "uid=selvig,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", - TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, hasItem(containsString("Geniuses"))); }
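In testResolveCustomFilter above, the {0} placeholder in the group-search filter is substituted before the LDAP query runs, which is why the test expects the "Geniuses" group for user selvig. A sketch of the assumed substitution (semantics inferred from the test's inputs and expectations, not from the resolver's source):

    String template = "(&(objectclass=posixGroup)(memberUID={0}))";
    // For uid=selvig,... the resolver presumably issues:
    // (&(objectclass=posixGroup)(memberUID=selvig))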
"uid=selvig,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", - TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, hasItem(containsString("Geniuses"))); } @@ -104,7 +110,7 @@ public void testCreateWithoutSpecifyingBaseDN() throws Exception { .build(); try { - new SearchGroupsResolver(settings); + new SearchGroupsResolver(config(REALM_ID, settings)); fail("base_dn must be specified and an exception should have been thrown"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("base_dn must be specified")); @@ -113,9 +119,9 @@ public void testCreateWithoutSpecifyingBaseDN() throws Exception { public void testReadUserAttributeUid() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("group_search.user_attribute", "uid").build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "uid").build(); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); PlainActionFuture future = new PlainActionFuture<>(); resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); assertThat(future.actionGet(), is("hulk")); @@ -123,10 +129,10 @@ public void testReadUserAttributeUid() throws Exception { public void testReadUserAttributeCn() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("group_search.user_attribute", "cn") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "cn") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); PlainActionFuture future = new PlainActionFuture<>(); resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); @@ -135,10 +141,10 @@ public void testReadUserAttributeCn() throws Exception { public void testReadNonExistentUserAttribute() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("group_search.user_attribute", "doesntExists") + .put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "doesntExists") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); PlainActionFuture future = new PlainActionFuture<>(); resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); @@ -147,10 +153,10 @@ public void testReadNonExistentUserAttribute() throws Exception { public void testReadBinaryUserAttribute() throws Exception { Settings settings = Settings.builder() - .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") - .put("group_search.user_attribute", "userPassword") + 
.put(getFullSettingKey(REALM_ID, SearchGroupsResolverSettings.BASE_DN), "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put(getFullSettingKey(REALM_ID.getName(), SearchGroupsResolverSettings.USER_ATTRIBUTE), "userPassword") .build(); - SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + SearchGroupsResolver resolver = new SearchGroupsResolver(config(REALM_ID, settings)); PlainActionFuture<String> future = new PlainActionFuture<>(); resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 97c0e8e17fee7..ea2b7d6990622 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -12,6 +12,7 @@ dependencies { integTestCluster { // Whitelist reindexing from the local node so we can test it. setting 'reindex.remote.whitelist', '127.0.0.1:*' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml index 407a1c1849526..a5779ff94d06d 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml @@ -1,3 +1,6 @@ +setup: + - skip: + features: headers --- "Reindex as same user works": diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml index 3c31b8cc5b039..b558ad7267718 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml @@ -163,6 +163,8 @@ --- "Using a script to write to an index to which you don't have access is forbidden even if you read as a superuser": + - skip: + features: headers - do: index: index: source diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml index 627b29ea8b529..caf29f2fc6b6c 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml @@ -1,3 +1,6 @@ +setup: + - skip: + features: headers --- "Update_by_query as same user works": diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml index 59b6f2b7792a6..deffc4ce5e2c5 100644 --- a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml @@ -1,3 +1,6 @@ +setup: + - skip: + features: headers --- "Delete_by_query as same user works": diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle
b/x-pack/qa/rolling-upgrade-basic/build.gradle index 5774e5d78561d..c3cec6695bbe6 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -30,7 +30,12 @@ for (Version version : bwcVersions.wireCompatible) { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { if (version.before('6.3.0')) { - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" + } bwcVersion = version numBwcNodes = 3 @@ -125,7 +130,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } }
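Unreleased wire-compatible versions are now checked out and built from source, so their Maven coordinates need a -SNAPSHOT suffix. The Gradle logic above, restated as a small Java-style sketch for clarity (names mirror the build script; illustrative only):

    // Unreleased versions resolve to snapshot artifacts built from source.
    static String xpackCoordinate(Version version, Set<Version> unreleased) {
        String depVersion = version.toString();
        if (unreleased.contains(version)) {
            depVersion += "-SNAPSHOT";
        }
        return "org.elasticsearch.plugin:x-pack:" + depVersion;
    }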
diff --git a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index b9f8481d0bb97..e013ffd017574 100644 --- a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -29,6 +29,11 @@ protected boolean preserveRollupJobsUponCompletion() { return true; } + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } + enum CLUSTER_TYPE { OLD, MIXED, diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 90da6cf4e58b8..ca149ba7e16ff 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -124,7 +124,11 @@ subprojects { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { dependsOn copyTestNodeKeystore if (version.before('6.3.0')) { - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" } String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' @@ -148,6 +152,16 @@ subprojects { setting 'xpack.ssl.keystore.password', 'testnode' dependsOn copyTestNodeKeystore extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (version.onOrAfter('7.0.0')) { + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' + } else { + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' + } + if (withSystemKey) { if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { // The setting didn't exist until 5.1.0 @@ -281,7 +295,7 @@ subprojects { // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index a1430965339c3..3b72674ed0751 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -43,6 +43,11 @@ protected boolean preserveRollupJobsUponCompletion() { return true; } + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } + enum ClusterType { OLD, MIXED, diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java index 0da79b27bfaf6..f51835af7196b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupIDUpgradeIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.ObjectPath; import org.hamcrest.Matcher; import java.io.IOException; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index bd1b4ec28feee..a6e6da57b8ac6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -47,6 +47,11 @@ protected boolean preserveRollupJobsUponCompletion() { return true; } + @Override + protected boolean preserveILMPoliciesUponCompletion() { + return true; + } + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git
a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml index c145d4394243e..7350557e82f0c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml @@ -1,5 +1,7 @@ --- "Verify native store security actions": + - skip: + features: headers # create native user and role - do: xpack.security.put_user: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml index 46ade4823a221..a265605c536d2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml @@ -1,5 +1,7 @@ --- "Verify user and role in upgraded cluster": + - skip: + features: headers - do: headers: Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 11e89d93c8e6a..8e6672f21e9d5 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -35,31 +35,28 @@ integTestCluster { setting 'xpack.security.enabled', 'true' setting 'xpack.security.http.ssl.enabled', 'false' setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.authc.realms.file.type', 'file' - setting 'xpack.security.authc.realms.file.order', '0' + setting 'xpack.security.authc.realms.file.file.order', '0' // SAML realm 1 (no authorization_realms) - setting 'xpack.security.authc.realms.shibboleth.type', 'saml' - setting 'xpack.security.authc.realms.shibboleth.order', '1' - setting 'xpack.security.authc.realms.shibboleth.idp.entity_id', 'https://test.shibboleth.elastic.local/' - setting 'xpack.security.authc.realms.shibboleth.idp.metadata.path', 'idp-metadata.xml' - setting 'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock1.http.elastic.local/' + setting 'xpack.security.authc.realms.saml.shibboleth.order', '1' + setting 'xpack.security.authc.realms.saml.shibboleth.idp.entity_id', 'https://test.shibboleth.elastic.local/' + setting 'xpack.security.authc.realms.saml.shibboleth.idp.metadata.path', 'idp-metadata.xml' + setting 'xpack.security.authc.realms.saml.shibboleth.sp.entity_id', 'http://mock1.http.elastic.local/' // The port in the ACS URL is fake - the test will bind the mock webserver // to a random port and then whenever it needs to connect to a URL on the // mock webserver it will replace 54321 with the real port - setting 'xpack.security.authc.realms.shibboleth.sp.acs', 'http://localhost:54321/saml/acs1' - setting 'xpack.security.authc.realms.shibboleth.attributes.principal', 'uid' - setting 'xpack.security.authc.realms.shibboleth.attributes.name', 'urn:oid:2.5.4.3' + setting 'xpack.security.authc.realms.saml.shibboleth.sp.acs', 'http://localhost:54321/saml/acs1' + setting 'xpack.security.authc.realms.saml.shibboleth.attributes.principal', 'uid' + setting 'xpack.security.authc.realms.saml.shibboleth.attributes.name', 'urn:oid:2.5.4.3' // SAML realm 2 (uses authorization_realms) - setting 'xpack.security.authc.realms.shibboleth_native.type', 'saml' - setting 'xpack.security.authc.realms.shibboleth_native.order', 
'2' - setting 'xpack.security.authc.realms.shibboleth_native.idp.entity_id', 'https://test.shibboleth.elastic.local/' - setting 'xpack.security.authc.realms.shibboleth_native.idp.metadata.path', 'idp-metadata.xml' - setting 'xpack.security.authc.realms.shibboleth_native.sp.entity_id', 'http://mock2.http.elastic.local/' - setting 'xpack.security.authc.realms.shibboleth_native.sp.acs', 'http://localhost:54321/saml/acs2' - setting 'xpack.security.authc.realms.shibboleth_native.attributes.principal', 'uid' - setting 'xpack.security.authc.realms.shibboleth_native.authorization_realms', 'native' - setting 'xpack.security.authc.realms.native.type', 'native' - setting 'xpack.security.authc.realms.native.order', '3' + setting 'xpack.security.authc.realms.saml.shibboleth_native.type', 'saml' + setting 'xpack.security.authc.realms.saml.shibboleth_native.order', '2' + setting 'xpack.security.authc.realms.saml.shibboleth_native.idp.entity_id', 'https://test.shibboleth.elastic.local/' + setting 'xpack.security.authc.realms.saml.shibboleth_native.idp.metadata.path', 'idp-metadata.xml' + setting 'xpack.security.authc.realms.saml.shibboleth_native.sp.entity_id', 'http://mock2.http.elastic.local/' + setting 'xpack.security.authc.realms.saml.shibboleth_native.sp.acs', 'http://localhost:54321/saml/acs2' + setting 'xpack.security.authc.realms.saml.shibboleth_native.attributes.principal', 'uid' + setting 'xpack.security.authc.realms.saml.shibboleth_native.authorization_realms', 'native' + setting 'xpack.security.authc.realms.native.native.order', '3' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle index e676e55a152d4..556e36e51467f 100644 --- a/x-pack/qa/security-client-tests/build.gradle +++ b/x-pack/qa/security-client-tests/build.gradle @@ -19,6 +19,7 @@ integTestRunner { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'true' setting 'xpack.ml.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index aef4fc33f6abe..e3bbf6e613f4e 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -19,14 +19,12 @@ integTestRunner { integTestCluster { dependsOn buildZip - setting 'xpack.security.authc.realms.custom.order', '0' - setting 'xpack.security.authc.realms.custom.type', 'custom' - setting 'xpack.security.authc.realms.custom.filtered_setting', 'should be filtered' - setting 'xpack.security.authc.realms.esusers.order', '1' - setting 'xpack.security.authc.realms.esusers.type', 'file' - setting 'xpack.security.authc.realms.native.type', 'native' - setting 'xpack.security.authc.realms.native.order', '2' + setting 'xpack.security.authc.realms.custom.custom.order', '0' + setting 'xpack.security.authc.realms.custom.custom.filtered_setting', 'should be filtered' + setting 'xpack.security.authc.realms.file.esusers.order', '1' + setting 'xpack.security.authc.realms.native.native.order', '2' setting 'xpack.security.enabled', 'true' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial'
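The cluster-setting migrations above all follow one rule: the realm type moves out of a 'type' value and into the settings key itself. The same change expressed with the Settings API (keys copied from this diff, values illustrative):

    // Before: type is a value under xpack.security.authc.realms.<name>.*
    Settings before = Settings.builder()
            .put("xpack.security.authc.realms.custom.type", "custom")
            .put("xpack.security.authc.realms.custom.order", 0)
            .build();

    // After: type is part of the key: xpack.security.authc.realms.<type>.<name>.*
    Settings after = Settings.builder()
            .put("xpack.security.authc.realms.custom.custom.order", 0)
            .build();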
diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java index 90b5eefcb56d4..3f85d8086d678 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java @@ -56,11 +56,11 @@ public AuthenticationFailureHandler getAuthenticationFailureHandler() { @Override public List<BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>>> getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) { - CustomInMemoryRolesProvider rp1 = new CustomInMemoryRolesProvider(settings, Collections.singletonMap(ROLE_A, "read")); + CustomInMemoryRolesProvider rp1 = new CustomInMemoryRolesProvider(Collections.singletonMap(ROLE_A, "read")); Map<String, String> roles = new HashMap<>(); roles.put(ROLE_A, "all"); roles.put(ROLE_B, "all"); - CustomInMemoryRolesProvider rp2 = new CustomInMemoryRolesProvider(settings, roles); + CustomInMemoryRolesProvider rp2 = new CustomInMemoryRolesProvider(roles); return Arrays.asList(rp1, rp2); } } diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java index 07f769849d5d0..9314b6a675056 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java @@ -5,13 +5,15 @@ */ package org.elasticsearch.example; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.example.realm.CustomRealm; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; /** @@ -25,7 +27,9 @@ public Collection<String> getRestHeaders() { } @Override - public List<String> getSettingsFilter() { - return Collections.singletonList("xpack.security.authc.realms.*.filtered_setting"); + public List<Setting<?>> getSettings() { + List<Setting<?>> list = new ArrayList<>(RealmSettings.getStandardSettings(CustomRealm.TYPE)); + list.add(RealmSettings.simpleString(CustomRealm.TYPE, "filtered_setting", Setting.Property.NodeScope, Setting.Property.Filtered)); + return list; } } diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java index dfd4a81ea2157..b62eb4cae0eb9 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java @@ -28,7 +28,7 @@ public class CustomRealm extends Realm { static final String[] ROLES = new String[] { "superuser" }; public CustomRealm(RealmConfig config) { - super(TYPE, config); + super(config); } @Override
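Together, the SpiExtensionPlugin and CustomRealm hunks show what a custom realm needs under the new settings infrastructure: register its settings explicitly (with filtering expressed as Setting.Property.Filtered rather than a getSettingsFilter() glob, so unregistered realm settings are rejected) and let RealmConfig carry the realm type. A condensed sketch assembled from the hunks above (not a complete plugin):

    @Override
    public List<Setting<?>> getSettings() {
        // Standard realm settings (order, enabled, ...) for this type, plus one
        // custom setting that is filtered out of node-info responses.
        List<Setting<?>> list = new ArrayList<>(RealmSettings.getStandardSettings(CustomRealm.TYPE));
        list.add(RealmSettings.simpleString(CustomRealm.TYPE, "filtered_setting",
                Setting.Property.NodeScope, Setting.Property.Filtered));
        return list;
    }

    public CustomRealm(RealmConfig config) {
        super(config); // was super(TYPE, config): the type now comes from the config's RealmIdentifier
    }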
diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java index 0d5a71e6244b4..44616054dc7a8 100644 --- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; @@ -30,8 +29,7 @@ public class CustomInMemoryRolesProvider private final Map<String, String> rolePermissionSettings; - public CustomInMemoryRolesProvider(Settings settings, Map<String, String> rolePermissionSettings) { - super(settings); + public CustomInMemoryRolesProvider(Map<String, String> rolePermissionSettings) { this.rolePermissionSettings = rolePermissionSettings; } diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java index f7bdb0d0baa9a..4487187a80b6d 100644 --- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java @@ -118,8 +118,8 @@ public void testSettingsFiltering() throws Exception { for(NodeInfo info : nodeInfos.getNodes()) { Settings settings = info.getSettings(); assertNotNull(settings); - assertNull(settings.get("xpack.security.authc.realms.custom.filtered_setting")); - assertEquals(CustomRealm.TYPE, settings.get("xpack.security.authc.realms.custom.type")); + assertNull(settings.get("xpack.security.authc.realms.custom.custom.filtered_setting")); + assertEquals("0", settings.get("xpack.security.authc.realms.custom.custom.order")); } } } diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java index d1435ebaa3c28..cc7579df27fb3 100644 --- a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java @@ -22,8 +22,8 @@ public class CustomRealmTests extends ESTestCase { public void testAuthenticate() { Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); - CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings, - TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + CustomRealm realm = new CustomRealm(new RealmConfig(new RealmConfig.RealmIdentifier(CustomRealm.TYPE, "test"), + globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); SecureString password = CustomRealm.KNOWN_PW.clone(); UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER, password); PlainActionFuture<AuthenticationResult> plainActionFuture = new PlainActionFuture<>(); @@ -36,8 +36,8 @@ public void testAuthenticate() { public void testAuthenticateBadUser() { Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); - CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings, - TestEnvironment.newEnvironment(globalSettings),
new ThreadContext(globalSettings))); + CustomRealm realm = new CustomRealm(new RealmConfig(new RealmConfig.RealmIdentifier(CustomRealm.TYPE, "test"), + globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); SecureString password = CustomRealm.KNOWN_PW.clone(); UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER + "1", password); PlainActionFuture<AuthenticationResult> plainActionFuture = new PlainActionFuture<>(); diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java index d80f98964f3a6..0be06995502ba 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java @@ -46,7 +46,7 @@ public class UsersToolTests extends CommandTestCase { // the mock filesystem we use so permissions/users/groups can be modified static FileSystem jimfs; String pathHomeParameter; - String fileTypeParameter; + String fileOrderParameter; // the config dir for each test to use Path confDir; @@ -92,10 +92,10 @@ public void setupHome() throws IOException { settings = Settings.builder() .put("path.home", homeDir) - .put("xpack.security.authc.realms.file.type", "file") + .put("xpack.security.authc.realms.file.file.order", 0) .build(); pathHomeParameter = "-Epath.home=" + homeDir; - fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + fileOrderParameter = "-Expack.security.authc.realms.file.file.order=0"; } @AfterClass @@ -329,18 +329,18 @@ public void testParseMultipleRoles() throws Exception { public void testUseraddNoPassword() throws Exception { terminal.addSecretInput(SecuritySettingsSourceField.TEST_PASSWORD); terminal.addSecretInput(SecuritySettingsSourceField.TEST_PASSWORD); - execute("useradd", pathHomeParameter, fileTypeParameter, "username"); + execute("useradd", pathHomeParameter, fileOrderParameter, "username"); assertUser("username", SecuritySettingsSourceField.TEST_PASSWORD); } public void testUseraddPasswordOption() throws Exception { - execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileOrderParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); assertUser("username", SecuritySettingsSourceField.TEST_PASSWORD); } public void testUseraddUserExists() throws Exception { UserException e = expectThrows(UserException.class, () -> { - execute("useradd", pathHomeParameter, fileTypeParameter, "existing_user", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileOrderParameter, "existing_user", "-p", SecuritySettingsSourceField.TEST_PASSWORD); }); assertEquals(ExitCodes.CODE_ERROR, e.exitCode); assertEquals("User [existing_user] already exists", e.getMessage()); @@ -349,7 +349,7 @@ public void testUseraddUserExists() throws Exception { public void testUseraddReservedUser() throws Exception { final String name = randomFrom(ElasticUser.NAME, KibanaUser.NAME); UserException e = expectThrows(UserException.class, () -> { - execute("useradd", pathHomeParameter, fileTypeParameter, name, "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileOrderParameter, name, "-p",
SecuritySettingsSourceField.TEST_PASSWORD); }); assertEquals(ExitCodes.DATA_ERROR, e.exitCode); assertEquals("Invalid username [" + name + "]... Username [" + name + "] is reserved and may not be used.", e.getMessage()); @@ -358,27 +358,27 @@ public void testUseraddReservedUser() throws Exception { public void testUseraddNoRoles() throws Exception { Files.delete(confDir.resolve("users_roles")); Files.createFile(confDir.resolve("users_roles")); - execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileOrderParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); List<String> lines = Files.readAllLines(confDir.resolve("users_roles"), StandardCharsets.UTF_8); assertTrue(lines.toString(), lines.isEmpty()); } public void testUserdelUnknownUser() throws Exception { UserException e = expectThrows(UserException.class, () -> { - execute("userdel", pathHomeParameter, fileTypeParameter, "unknown"); + execute("userdel", pathHomeParameter, fileOrderParameter, "unknown"); }); assertEquals(ExitCodes.NO_USER, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); } public void testUserdel() throws Exception { - execute("userdel", pathHomeParameter, fileTypeParameter, "existing_user"); + execute("userdel", pathHomeParameter, fileOrderParameter, "existing_user"); assertNoUser("existing_user"); } public void testPasswdUnknownUser() throws Exception { UserException e = expectThrows(UserException.class, () -> { - execute("passwd", pathHomeParameter, fileTypeParameter, "unknown", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("passwd", pathHomeParameter, fileOrderParameter, "unknown", "-p", SecuritySettingsSourceField.TEST_PASSWORD); }); assertEquals(ExitCodes.NO_USER, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); @@ -387,65 +387,65 @@ public void testPasswdUnknownUser() throws Exception { public void testPasswdNoPasswordOption() throws Exception { terminal.addSecretInput("newpassword"); terminal.addSecretInput("newpassword"); - execute("passwd", pathHomeParameter, fileTypeParameter, "existing_user"); + execute("passwd", pathHomeParameter, fileOrderParameter, "existing_user"); assertUser("existing_user", "newpassword"); assertRole("test_admin", "existing_user", "existing_user2"); // roles unchanged } public void testPasswd() throws Exception { - execute("passwd", pathHomeParameter, fileTypeParameter, "existing_user", "-p", "newpassword"); + execute("passwd", pathHomeParameter, fileOrderParameter, "existing_user", "-p", "newpassword"); assertUser("existing_user", "newpassword"); assertRole("test_admin", "existing_user"); // roles unchanged } public void testRolesUnknownUser() throws Exception { UserException e = expectThrows(UserException.class, () -> { - execute("roles", pathHomeParameter, fileTypeParameter, "unknown"); + execute("roles", pathHomeParameter, fileOrderParameter, "unknown"); }); assertEquals(ExitCodes.NO_USER, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); } public void testRolesAdd() throws Exception { - execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-a", "test_r1"); + execute("roles", pathHomeParameter, fileOrderParameter, "existing_user", "-a", "test_r1"); assertRole("test_admin", "existing_user"); assertRole("test_r1", "existing_user"); } public void testRolesRemove() throws Exception { -
execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-r", "test_admin"); + execute("roles", pathHomeParameter, fileOrderParameter, "existing_user", "-r", "test_admin"); assertRole("test_admin", "existing_user2"); } public void testRolesAddAndRemove() throws Exception { - execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-a", "test_r1", "-r", "test_admin"); + execute("roles", pathHomeParameter, fileOrderParameter, "existing_user", "-a", "test_r1", "-r", "test_admin"); assertRole("test_admin", "existing_user2"); assertRole("test_r1", "existing_user"); } public void testRolesRemoveLeavesExisting() throws Exception { - execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, + execute("useradd", pathHomeParameter, fileOrderParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, "-r", "test_admin"); - execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-r", "test_admin"); + execute("roles", pathHomeParameter, fileOrderParameter, "existing_user", "-r", "test_admin"); assertRole("test_admin", "username"); } public void testRolesNoAddOrRemove() throws Exception { - String output = execute("roles", pathHomeParameter, fileTypeParameter, "existing_user"); + String output = execute("roles", pathHomeParameter, fileOrderParameter, "existing_user"); assertTrue(output, output.contains("existing_user")); assertTrue(output, output.contains("test_admin")); } public void testListUnknownUser() throws Exception { UserException e = expectThrows(UserException.class, () -> { - execute("list", pathHomeParameter, fileTypeParameter, "unknown"); + execute("list", pathHomeParameter, fileOrderParameter, "unknown"); }); assertEquals(ExitCodes.NO_USER, e.exitCode); assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); } public void testListAllUsers() throws Exception { - String output = execute("list", pathHomeParameter, fileTypeParameter); + String output = execute("list", pathHomeParameter, fileOrderParameter); assertTrue(output, output.contains("existing_user")); assertTrue(output, output.contains("test_admin")); assertTrue(output, output.contains("existing_user2")); @@ -456,7 +456,7 @@ public void testListAllUsers() throws Exception { } public void testListSingleUser() throws Exception { - String output = execute("list", pathHomeParameter, fileTypeParameter, "existing_user"); + String output = execute("list", pathHomeParameter, fileOrderParameter, "existing_user"); assertTrue(output, output.contains("existing_user")); assertTrue(output, output.contains("test_admin")); assertFalse(output, output.contains("existing_user2")); @@ -467,9 +467,9 @@ public void testListSingleUser() throws Exception { } public void testListUnknownRoles() throws Exception { - execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, + execute("useradd", pathHomeParameter, fileOrderParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, "-r", "test_r1,r2,r3"); - String output = execute("list", pathHomeParameter, fileTypeParameter, "username"); + String output = execute("list", pathHomeParameter, fileOrderParameter, "username"); assertTrue(output, output.contains("username")); assertTrue(output, output.contains("r2*,r3*,test_r1")); } @@ -479,14 +479,14 @@ public void testListNoUsers() throws Exception { Files.createFile(confDir.resolve("users")); Files.delete(confDir.resolve("users_roles")); 
Files.createFile(confDir.resolve("users_roles")); - String output = execute("list", pathHomeParameter, fileTypeParameter); + String output = execute("list", pathHomeParameter, fileOrderParameter); assertTrue(output, output.contains("No users found")); } public void testListUserWithoutRoles() throws Exception { - String output = execute("list", pathHomeParameter, fileTypeParameter, "existing_user3"); + String output = execute("list", pathHomeParameter, fileOrderParameter, "existing_user3"); assertTrue(output, output.contains("existing_user3")); - output = execute("list", pathHomeParameter, fileTypeParameter); + output = execute("list", pathHomeParameter, fileOrderParameter); assertTrue(output, output.contains("existing_user3")); // output should not contain '*' which indicates unknown role @@ -497,9 +497,9 @@ public void testUserAddNoConfig() throws Exception { Path homeDir = jimfs.getPath("eshome"); IOUtils.rm(confDir.resolve("users")); pathHomeParameter = "-Epath.home=" + homeDir; - fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + fileOrderParameter = "-Expack.security.authc.realms.file.file.order=0"; UserException e = expectThrows(UserException.class, () -> { - execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileOrderParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); }); assertEquals(ExitCodes.CONFIG, e.exitCode); assertThat(e.getMessage(), containsString("Configuration file [eshome/config/users] is missing")); @@ -509,9 +509,9 @@ public void testUserListNoConfig() throws Exception { Path homeDir = jimfs.getPath("eshome"); IOUtils.rm(confDir.resolve("users")); pathHomeParameter = "-Epath.home=" + homeDir; - fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + fileOrderParameter = "-Expack.security.authc.realms.file.file.order=0"; UserException e = expectThrows(UserException.class, () -> { - execute("list", pathHomeParameter, fileTypeParameter); + execute("list", pathHomeParameter, fileOrderParameter); }); assertEquals(ExitCodes.CONFIG, e.exitCode); assertThat(e.getMessage(), containsString("Configuration file [eshome/config/users] is missing")); @@ -521,9 +521,9 @@ public void testUserDelNoConfig() throws Exception { Path homeDir = jimfs.getPath("eshome"); IOUtils.rm(confDir.resolve("users")); pathHomeParameter = "-Epath.home=" + homeDir; - fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + fileOrderParameter = "-Expack.security.authc.realms.file.file.order=0"; UserException e = expectThrows(UserException.class, () -> { - execute("userdel", pathHomeParameter, fileTypeParameter, "username"); + execute("userdel", pathHomeParameter, fileOrderParameter, "username"); }); assertEquals(ExitCodes.CONFIG, e.exitCode); assertThat(e.getMessage(), containsString("Configuration file [eshome/config/users] is missing")); @@ -533,9 +533,9 @@ public void testListUserRolesNoConfig() throws Exception { Path homeDir = jimfs.getPath("eshome"); IOUtils.rm(confDir.resolve("users_roles")); pathHomeParameter = "-Epath.home=" + homeDir; - fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + fileOrderParameter = "-Expack.security.authc.realms.file.file.order=0"; UserException e = expectThrows(UserException.class, () -> { - execute("roles", pathHomeParameter, fileTypeParameter, "username"); + execute("roles", pathHomeParameter, fileOrderParameter, "username"); }); assertEquals(ExitCodes.CONFIG, e.exitCode); 
assertThat(e.getMessage(), containsString("Configuration file [eshome/config/users_roles] is missing")); diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle index 7813ff3d3d56c..57be337f634f2 100644 --- a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -8,6 +8,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'true' setting 'xpack.watcher.enabled', 'true' setting 'xpack.security.enabled', 'false' diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 4f338d07fb531..2304421bee724 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -71,6 +71,7 @@ integTestCluster { setting 'xpack.security.http.ssl.certificate', 'testnode.crt' keystoreSetting 'xpack.security.http.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' // copy keystores, keys and certificates into config/ extraConfigFile nodeKeystore.name, nodeKeystore diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle index 50e217b28b270..f5007e5b0910b 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/build.gradle +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -14,6 +14,7 @@ task copyWatcherRestTests(type: Copy) { integTestCluster { dependsOn copyWatcherRestTests + setting 'xpack.ilm.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml index 0b74ebb0c0058..e3c512560a992 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml @@ -1,5 +1,7 @@ --- "Test watcher is protected by security": + - skip: + features: headers - do: headers: { es-security-runas-user: powerless_user } catch: forbidden diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml index 7a0634f5187b1..779b41748acf0 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml @@ -28,6 +28,8 @@ teardown: --- "Test watch search input is run as user who added the watch": + - skip: + features: headers - do: xpack.watcher.put_watch: id: "my_watch" @@ -81,6 +83,8 @@ teardown: --- "Test watch is runas user properly recorded": + - skip: + features: headers - do: xpack.watcher.put_watch: id: "my_watch" @@ -133,7 +137,8 @@ 
teardown: --- "Test watch search input does not work against index user is not allowed to read": - + - skip: + features: headers - do: # by impersonating this request as powerless user we cannot query the my_test_index # headers: { es-security-runas-user: powerless_user } @@ -290,6 +295,8 @@ teardown: --- "Test watch index action requires permission to write to an index": + - skip: + features: headers - do: xpack.watcher.put_watch: id: "my_watch" @@ -339,6 +346,8 @@ teardown: --- "Test watch index action does not work without permissions": + - skip: + features: headers - do: xpack.watcher.put_watch: id: "my_watch" diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle index 5923afcacad94..fc22fe9aa065f 100644 --- a/x-pack/qa/smoke-test-watcher/build.gradle +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -14,6 +14,7 @@ dependencies { } integTestCluster { + setting 'xpack.ilm.enabled', 'false' setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java index feeea24871240..e9c5106d44e87 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java @@ -41,7 +41,7 @@ public void init() throws Exception { Map<String, ScriptContext<?>> contexts = Collections.singletonMap(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, contexts); - textTemplateEngine = new TextTemplateEngine(Settings.EMPTY, scriptService); + textTemplateEngine = new TextTemplateEngine(scriptService); } public void testEscaping() throws Exception {
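The TextTemplateEngine change repeats a cleanup that runs through this PR (compare CustomInMemoryRolesProvider above): components that accepted Settings only on behalf of their base class lose that constructor argument, presumably because AbstractComponent no longer requires it. Callers simply drop the first argument:

    // Before (shape per the removed line): new TextTemplateEngine(Settings.EMPTY, scriptService)
    TextTemplateEngine textTemplateEngine = new TextTemplateEngine(scriptService);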
diff --git a/x-pack/qa/sql/src/main/resources/math.csv-spec b/x-pack/qa/sql/src/main/resources/math.csv-spec
deleted file mode 100644
index 9ec40848b3089..0000000000000
--- a/x-pack/qa/sql/src/main/resources/math.csv-spec
+++ /dev/null
@@ -1,184 +0,0 @@
-// this one doesn't work in H2 at all
-truncateWithAsciiHavingAndOrderBy
-SELECT TRUNCATE(ASCII(LEFT(first_name, 1)), 1), COUNT(*) count FROM test_emp GROUP BY ASCII(LEFT(first_name, 1)) HAVING COUNT(*) > 5 ORDER BY TRUNCATE(ASCII(LEFT(first_name, 1)), 1) DESC;
-
-TRUNCATE(ASCII(LEFT(first_name,1)),1):i| count:l
----------------------------------------+---------------
-null |10
-66 |7
-72 |6
-75 |7
-77 |9
-83 |11
-;
-
-truncateWithNoSecondParameterWithAsciiHavingAndOrderBy
-SELECT TRUNCATE(ASCII(LEFT(first_name, 1))), COUNT(*) count FROM test_emp GROUP BY ASCII(LEFT(first_name, 1)) HAVING COUNT(*) > 5 ORDER BY TRUNCATE(ASCII(LEFT(first_name, 1))) DESC;
-
-TRUNCATE(ASCII(LEFT(first_name,1)),0):i| count:l
----------------------------------------+---------------
-null |10
-66 |7
-72 |6
-75 |7
-77 |9
-83 |11
-;
-
-roundWithGroupByAndOrderBy
-SELECT ROUND(salary, 2) ROUNDED, salary FROM test_emp GROUP BY ROUNDED, salary ORDER BY ROUNDED LIMIT 10;
-
- ROUNDED | salary
----------------+---------------
-25324 |25324
-25945 |25945
-25976 |25976
-26436 |26436
-27215 |27215
-28035 |28035
-28336 |28336
-28941 |28941
-29175 |29175
-30404 |30404
-;
-
-truncateWithGroupByAndOrderBy
-SELECT TRUNCATE(salary, 2) TRUNCATED, salary FROM test_emp GROUP BY TRUNCATED, salary ORDER BY TRUNCATED LIMIT 10;
-
- TRUNCATED | salary
----------------+---------------
-25324 |25324
-25945 |25945
-25976 |25976
-26436 |26436
-27215 |27215
-28035 |28035
-28336 |28336
-28941 |28941
-29175 |29175
-30404 |30404
-;
-
-truncateWithAsciiAndOrderBy
-SELECT TRUNCATE(ASCII(LEFT(first_name,1)), -1) AS initial, first_name, ASCII(LEFT(first_name, 1)) FROM test_emp ORDER BY ASCII(LEFT(first_name, 1)) DESC LIMIT 15;
-
- initial | first_name |ASCII(LEFT(first_name,1))
----------------+---------------+-------------------------
-90 |Zvonko |90
-90 |Zhongwei |90
-80 |Yongqiao |89
-80 |Yishay |89
-80 |Yinghua |89
-80 |Xinglin |88
-80 |Weiyi |87
-80 |Vishv |86
-80 |Valdiodio |86
-80 |Valter |86
-80 |Uri |85
-80 |Udi |85
-80 |Tzvetan |84
-80 |Tse |84
-80 |Tuval |84
-;
-
-truncateWithHavingAndGroupBy
-SELECT MIN(salary) mi, MAX(salary) ma, COUNT(*) c, TRUNCATE(AVG(salary)) tr FROM test_emp GROUP BY languages HAVING TRUNCATE(AVG(salary)) > 40000 ORDER BY languages;
-
- mi:i | ma:I | c:l | tr:i
----------------+---------------+-----------------+-----------------
-28336 |74999 |10 |52519
-25976 |73717 |15 |50576
-29175 |73578 |19 |48178
-26436 |74970 |17 |52418
-27215 |74572 |18 |47733
-25324 |66817 |21 |41680
-;
-
-// https://github.com/elastic/elasticsearch/issues/33773
-minMaxTruncateAndRoundOfAverageWithHavingRoundAndTruncate-Ignore
-SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages), 1), TRUNCATE(AVG(languages), 1), COUNT(*) FROM test_emp GROUP BY YEAR(hire_date) HAVING ROUND(AVG(languages), 1) > 2.5 AND TRUNCATE(AVG(languages), 1) <= 3.0 ORDER BY YEAR(hire_date);
-
- mi | ma | year |ROUND(AVG(languages),1)|TRUNCATE(AVG(languages),1)| COUNT(1)
--------------+-------------+---------------+-----------------------+--------------------------+--------------
-26436 |74999 |1985 |3.1 |3.0 |11
-25976 |74970 |1989 |3.1 |3.0 |13
-31120 |71165 |1990 |3.1 |3.0 |12
-32568 |65030 |1991 |2.8 |2.8 |6
-30404 |58715 |1993 |3.0 |3.0 |3
-35742 |67492 |1994 |2.8 |2.7 |4
-28035 |65367 |1995 |2.6 |2.6 |5
-45656 |45656 |1996 |3.0 |3.0 |1
-64675 |64675 |1997 |3.0 |3.0 |1
-;
-
-// https://github.com/elastic/elasticsearch/issues/33773
-minMaxRoundWithHavingRound-Ignore
-SELECT MIN(salary) mi, MAX(salary) ma, YEAR(hire_date) year, ROUND(AVG(languages), 1), COUNT(*) FROM test_emp GROUP BY YEAR(hire_date) HAVING ROUND(AVG(languages), 1) > 2.5 ORDER BY YEAR(hire_date);
-
- mi | ma | year |ROUND(AVG(languages),1)| COUNT(1)
--------------+-------------+---------------+-----------------------+--------------
-26436 |74999 |1985 |3.1 |11
-31897 |61805 |1986 |3.5 |11
-25324 |70011 |1987 |3.1 |15
-25945 |73578 |1988 |3.1 |9
-25976 |74970 |1989 |3.1 |13
-31120 |71165 |1990 |3.1 |12
-32568 |65030 |1991 |2.8 |6
-27215 |60781 |1992 |4.1 |8
-30404 |58715 |1993 |3.0 |3
-35742 |67492 |1994 |2.8 |4
-28035 |65367 |1995 |2.6 |5
-45656 |45656 |1996 |3.0 |1
-64675 |64675 |1997 |3.0 |1
-;
-
-groupByAndOrderByTruncateWithPositiveParameter
-SELECT TRUNCATE(AVG(salary), 2), AVG(salary), COUNT(*) FROM test_emp GROUP BY TRUNCATE(salary, 2) ORDER BY TRUNCATE(salary, 2) DESC LIMIT 10;
-
-TRUNCATE(AVG(salary),2):i|AVG(salary):i | COUNT(1):l
--------------------------+---------------+---------------
-74999 |74999 |1
-74970 |74970 |1
-74572 |74572 |1
-73851 |73851 |1
-73717 |73717 |1
-73578 |73578 |1
-71165 |71165 |1
-70011 |70011 |1
-69904 |69904 |1
-68547 |68547 |1
-;
-
-groupByAndOrderByRoundWithPositiveParameter
-SELECT ROUND(AVG(salary), 2), AVG(salary), COUNT(*) FROM test_emp GROUP BY ROUND(salary, 2) ORDER BY ROUND(salary, 2) DESC LIMIT 10;
-
-ROUND(AVG(salary),2):i| AVG(salary):i | COUNT(1):l
-----------------------+-----------------+---------------
-74999 |74999 |1
-74970 |74970 |1
-74572 |74572 |1
-73851 |73851 |1
-73717 |73717 |1
-73578 |73578 |1
-71165 |71165 |1
-70011 |70011 |1
-69904 |69904 |1
-68547 |68547 |1
-;
-
-groupByAndOrderByRoundWithNoSecondParameter
-SELECT ROUND(AVG(salary)), ROUND(salary) rounded, AVG(salary), COUNT(*) FROM test_emp GROUP BY rounded ORDER BY rounded DESC LIMIT 10;
-
-ROUND(AVG(salary),0):i| rounded:i | AVG(salary):i | COUNT(1):l
-----------------------+-----------------+-----------------+---------------
-74999 |74999 |74999 |1
-74970 |74970 |74970 |1
-74572 |74572 |74572 |1
-73851 |73851 |73851 |1
-73717 |73717 |73717 |1
-73578 |73578 |73578 |1
-71165 |71165 |71165 |1
-70011 |70011 |70011 |1
-69904 |69904 |69904 |1
-68547 |68547 |68547 |1
-;
diff --git a/x-pack/qa/sql/src/main/resources/nested.csv-spec b/x-pack/qa/sql/src/main/resources/nested.csv-spec
deleted file mode 100644
index 428ed78120440..0000000000000
--- a/x-pack/qa/sql/src/main/resources/nested.csv-spec
+++ /dev/null
@@ -1,112 +0,0 @@
-//
-// Nested documents
-//
-// CsvJdbc has issues with foo.bar so msot fields are aliases or wrapped inside a function
-
-describeParent
-DESCRIBE test_emp;
-
- column | type | mapping
---------------------+---------------+---------------
-birth_date |TIMESTAMP |DATE
-dep |STRUCT |NESTED
-dep.dep_id |VARCHAR |KEYWORD
-dep.dep_name |VARCHAR |TEXT
-dep.dep_name.keyword|VARCHAR |KEYWORD
-dep.from_date |TIMESTAMP |DATE
-dep.to_date |TIMESTAMP |DATE
-emp_no |INTEGER |INTEGER
-first_name |VARCHAR |TEXT
-first_name.keyword |VARCHAR |KEYWORD
-gender |VARCHAR |KEYWORD
-hire_date |TIMESTAMP |DATE
-languages |TINYINT |BYTE
-last_name |VARCHAR |TEXT
-last_name.keyword |VARCHAR |KEYWORD
-salary |INTEGER |INTEGER
-;
-
-// disable until we figure out how to use field names with . in their name
-//nestedStar
-//SELECT dep.* FROM test_emp ORDER BY dep.dep_id LIMIT 5;
-
-//dep.dep_id:s | dep.dep_name:s | dep.from_date:ts | dep.to_date:ts
-
-//d001 | Marketing | 744336000000 | 253370764800000
-//d001 | Marketing | 704332800000 | 806371200000
-//d001 | Marketing | 577929600000 | 253370764800000
-//d002 | Finance | 732672000000 | 965865600000
-//d007 | Sales | 720921600000 | 253370764800000
-//;
-
-filterPerNestedWithOrderByTopLevel
-SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY f LIMIT 5;
-
-f:s | l:s | d:i
-
-Chirstian |Koblick |1986
-Duangkaew |Piveteau |1996
-Gino |Leonhardt |1989
-Hidefumi |Caine |1992
-Jayson |Mandell |1999
-;
-
-filterPerNestedWithOrderByNested
-SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.from_date LIMIT 5;
-
-f:s | l:s | d:i
-
-Sreekrishna |Servieres |1985
-Zhongwei |Rosen |1986
-Chirstian |Koblick |1986
-Vishv |Zockler |1987
-null |Chappelet |1988
-;
-
-filterPerNestedWithOrderByNestedWithAlias
-SELECT first_name f, dep.dep_id i, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY i LIMIT 5;
-
-f:s | i:s | d:i
-
-Parto | d004 | 12
-Chirstian | d004 | 12
-Duangkaew | d004 | 11
-Kazuhide | d004 | 7
-Mayuko | d004 | 12
-;
-
-filterPerNestedWithOrderByNestedWithoutProjection
-SELECT first_name f, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.dep_id LIMIT 5;
-
-f:s | d:i
-
-Parto | 12
-Chirstian | 12
-Duangkaew | 11
-Kazuhide | 7
-Mayuko | 12
-;
-
-selectWithScalarOnNested
-SELECT first_name f, last_name l, YEAR(dep.from_date) start FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY start LIMIT 5;
-
-f:s | l:s | start:i
-
-Sreekrishna |Servieres |1985
-Zhongwei |Rosen |1986
-Chirstian |Koblick |1986
-null |Chappelet |1988
-Zvonko |Nyanchama |1989
-;
-
-selectWithScalarOnNestedWithoutProjection
-SELECT first_name f, last_name l FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY YEAR(dep.from_date) LIMIT 5;
-
-f:s | l:s
-
-Sreekrishna |Servieres
-Zhongwei |Rosen
-Chirstian |Koblick
-null |Chappelet
-Zvonko |Nyanchama
-;
diff --git a/x-pack/qa/sql/src/main/resources/nulls.csv-spec b/x-pack/qa/sql/src/main/resources/nulls.csv-spec
deleted file mode 100644
index 1cb9a1ed7f319..0000000000000
--- a/x-pack/qa/sql/src/main/resources/nulls.csv-spec
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Null expressions
-//
-
-nullDate
-SELECT YEAR(CAST(NULL AS DATE)) d;
-
-d:i
-null
-;
-
-nullAdd
-SELECT CAST(NULL AS INT) + CAST(NULL AS FLOAT) AS n;
-
-n:d
-null
-;
-
-
-nullDiv
-SELECT 5 / CAST(NULL AS FLOAT) + 10 AS n;
-
-n:d
-null
-;
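The deleted nulls.csv-spec cases pin down SQL's NULL-propagation rule for scalar expressions: if any operand is NULL, the whole expression is NULL. A minimal Java model of that rule, with boxed types standing in for nullable SQL values (class and method names are illustrative only):

final class SqlNullMath {
    // If either operand is SQL NULL (modeled as Java null), the result is NULL.
    static Double plus(Double a, Double b) {
        return (a == null || b == null) ? null : a + b;
    }

    static Double divide(Double a, Double b) {
        return (a == null || b == null) ? null : a / b;
    }

    public static void main(String[] args) {
        // Matches the deleted spec: SELECT 5 / CAST(NULL AS FLOAT) + 10 yields null.
        System.out.println(plus(divide(5.0, null), 10.0)); // prints: null
    }
}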
diff --git a/x-pack/qa/sql/src/main/resources/select.csv-spec b/x-pack/qa/sql/src/main/resources/select.csv-spec
deleted file mode 100644
index bf208c62026df..0000000000000
--- a/x-pack/qa/sql/src/main/resources/select.csv-spec
+++ /dev/null
@@ -1,102 +0,0 @@
-// SELECT with IN
-inWithLiterals
-SELECT 1 IN (1, 2, 3), 1 IN (2, 3);
-
- 1 IN (1, 2, 3) | 1 IN (2, 3)
------------------+-------------
-true |false
-;
-
-inWithLiteralsAndFunctions
-SELECT 1 IN (2 - 1, 2, 3), abs(-1) IN (2, 3, abs(4 - 5));
-
- 1 IN (1, 2, 3) | 1 IN (2, 3)
------------------+-------------
-true |false
-;
-
-
-inWithLiteralsAndNegation
-SELECT NOT 1 IN (1, 1 + 1, 3), NOT 1 IN (2, 3);
-
- 1 IN (1, 2, 3) | 1 IN (2, 3)
------------------+-------------
-false |true
-;
-
-
-inWithNullHandling
-SELECT 2 IN (1, null, 3), 3 IN (1, null, 3), null IN (1, null, 3), null IN (1, 2, 3);
-
- 2 IN (1, null, 3) | 3 IN (1, null, 3) | null IN (1, null, 3) | null IN (1, 2, 3)
---------------------+--------------------+-----------------------+-------------------
-null |true |null | null
-;
-
-inWithNullHandlingAndNegation
-SELECT NOT 2 IN (1, null, 3), NOT 3 IN (1, null, 3), NOT null IN (1, null, 3), NOT null IN (1, 2, 3);
-
- NOT 2 IN (1, null, 3) | NOT 3 IN (1, null, 3) | NOT null IN (1, null, 3) | null IN (1, 2, 3)
-------------------------+------------------------+---------------------------+--------------------
-null |false |null | null
-;
-
-//
-// SELECT with IN and table columns
-//
-inWithTableColumn
-SELECT emp_no IN (10000, 10001, 10002) FROM test_emp ORDER BY 1;
-
- emp_no
--------
-10001
-10002
-;
-
-inWithTableColumnAndFunction
-SELECT emp_no IN (10000, 10000 + 1, abs(-10000 - 2)) FROM test_emp;
-
- emp_no
--------
-10001
-10002
-;
-
-inWithTableColumnAndNegation
-SELECT emp_no NOT IN (10000, 10000 + 1, 10002) FROM test_emp ORDER BY 1 LIMIT 3;
-
- emp_no
--------
-10003
-10004
-10005
-;
-
-inWithTableColumnAndComplexFunctions
-SELECT 1 IN (1, abs(2 - 4), 3) OR emp_no NOT IN (10000, 10000 + 1, 10002) FROM test_emp ORDER BY 1 LIMIT 3;
-
- emp_no
--------
-10003
-10004
-10005
-;
-
-inWithTableColumnAndNullHandling
-SELECT emp_no, birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)), birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) FROM test_emp WHERE emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040 ORDER BY 1;
-
- emp_no | birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) | birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP))
---------+-------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------
-10038 | true | true
-10039 | null | null
-10040 | false | null
-
-
-inWithTableColumnAndNullHandlingAndNegation
-SELECT emp_no, NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)), NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) FROM test_emp WHERE emp_no = 10038 OR emp_no = 10039 OR emp_no = 10040 ORDER BY 1;
-
- emp_no | NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), CAST('1959-10-01T00:00:00Z' AS TIMESTAMP)) | NOT birth_date in (CAST('2018-10-01T00:00:00Z' AS TIMESTAMP), null, CAST('1959-10-01T00:00:00Z' AS TIMESTAMP))
---------+-----------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------
-10038 | false | false
-10039 | null | null
-10040 | true | null
\ No newline at end of file
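The deleted select.csv-spec expectations encode SQL's three-valued logic for IN: the result is TRUE if some element equals the tested value, FALSE only if the value is non-NULL and no element matches in a NULL-free list, and UNKNOWN (NULL) otherwise, including whenever the tested value itself is NULL. A compact Java model of those rules, with a Boolean null standing in for UNKNOWN (class and method names are illustrative):

import java.util.Arrays;
import java.util.List;

final class SqlIn {
    // SQL three-valued IN; Boolean null models UNKNOWN.
    static Boolean in(Object x, List<Object> values) {
        if (x == null) {
            return null;                    // NULL IN (...) is always UNKNOWN
        }
        boolean sawNull = false;
        for (Object v : values) {
            if (v == null) {
                sawNull = true;             // a NULL in the list blocks a definite FALSE
            } else if (v.equals(x)) {
                return true;                // a definite match wins
            }
        }
        return sawNull ? null : false;
    }

    public static void main(String[] args) {
        List<Object> vals = Arrays.asList(1, null, 3);
        System.out.println(in(2, vals));    // null, as in "2 IN (1, null, 3)"
        System.out.println(in(3, vals));    // true, as in "3 IN (1, null, 3)"
        System.out.println(in(null, vals)); // null, as in "null IN (1, null, 3)"
    }
}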
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
index 9f97ebc6d03f6..a839a12e8da97 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
@@ -90,8 +90,8 @@ public void testUserSearchWithActiveDirectory() throws Exception {
         });
         Settings fullSettings = builder.build();
         sslService = new SSLService(fullSettings, TestEnvironment.newEnvironment(fullSettings));
-        RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings),
-            new ThreadContext(globalSettings));
+        RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("ad", "ad-as-ldap-test"), settings, globalSettings,
+            TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
         LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool);
 
         String user = "Bruce Banner";
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
index 829e87c849df6..d5274ab8deef9 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
@@ -9,16 +9,17 @@
 import com.unboundid.ldap.sdk.LDAPConnectionPool;
 import com.unboundid.ldap.sdk.LDAPException;
 import com.unboundid.ldap.sdk.LDAPInterface;
-
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings;
+import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.core.ssl.VerificationMode;
 import org.junit.Before;
@@ -34,6 +35,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
+
 public abstract class AbstractActiveDirectoryTestCase extends ESTestCase {
 
     // follow referrals defaults to false here which differs from the default value of the setting
@@ -98,25 +101,27 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO
         sslService = new SSLService(globalSettings, environment);
     }
 
-    Settings buildAdSettings(String ldapUrl, String adDomainName, String userSearchDN, LdapSearchScope scope,
-                             boolean hostnameVerification) {
+    Settings buildAdSettings(RealmConfig.RealmIdentifier realmId, String ldapUrl, String adDomainName, String userSearchDN,
+                             LdapSearchScope scope, boolean hostnameVerification) {
+        final String realmName = realmId.getName();
         Settings.Builder builder = Settings.builder()
-            .putList(SessionFactorySettings.URLS_SETTING, ldapUrl)
-            .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, adDomainName)
-            .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, userSearchDN)
-            .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING, scope)
-            .put(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), AD_LDAP_PORT)
-            .put(ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.getKey(), AD_LDAPS_PORT)
-            .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.getKey(), AD_GC_LDAP_PORT)
-            .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.getKey(), AD_GC_LDAPS_PORT)
-            .put("follow_referrals", FOLLOW_REFERRALS);
+            .putList(getFullSettingKey(realmId, SessionFactorySettings.URLS_SETTING), ldapUrl)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING), userSearchDN)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), scope)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT)
+            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT)
+            .put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS);
         if (randomBoolean()) {
-            builder.put("ssl.verification_mode", hostnameVerification ? VerificationMode.FULL : VerificationMode.CERTIFICATE);
+            builder.put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM),
+                hostnameVerification ? VerificationMode.FULL : VerificationMode.CERTIFICATE);
         } else {
-            builder.put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, hostnameVerification);
+            builder.put(getFullSettingKey(realmId, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), hostnameVerification);
         }
         if (useGlobalSSL == false) {
-            builder.putList("ssl.certificate_authorities", certificatePaths);
+            builder.putList(getFullSettingKey(realmId, SSLConfigurationSettings.CAPATH_SETTING_REALM), certificatePaths);
         }
         return builder.build();
     }
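The recurring pattern in these hunks is that realm settings are no longer written relative to the realm's own subtree but fully qualified under xpack.security.authc.realms.<type>.<name>, with getFullSettingKey doing the prefixing. A sketch of the shape of that key construction, inferred from the qualified keys visible in this diff (the helper below is an illustration, not the RealmSettings implementation):

final class RealmKeys {
    // Assumed shape, matching keys seen in this diff such as
    // "xpack.security.authc.realms.active_directory.ad.domain_name".
    static String fullSettingKey(String realmType, String realmName, String suffix) {
        return "xpack.security.authc.realms." + realmType + "." + realmName + "." + suffix;
    }

    public static void main(String[] args) {
        System.out.println(fullSettingKey("active_directory", "ad", "group_search.base_dn"));
        // -> xpack.security.authc.realms.active_directory.ad.group_search.base_dn
    }
}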
.put("xpack.security.authc.realms.active_directory.ad.group_search.base_dn", + "CN=Builtin, DC=ad, DC=test, DC=elasticsearch,DC=com") + .put("xpack.security.authc.realms.active_directory.ad.domain_name", "ad.test.elasticsearch.com") .build(); - ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(settings); + ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(config(REALM_ID, settings)); List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, hasItem(containsString("Users"))); @@ -65,11 +69,12 @@ public void testResolveOneLevel() throws Exception { public void testResolveBaseLevel() throws Exception { Settings settings = Settings.builder() - .put("group_search.scope", LdapSearchScope.BASE) - .put("group_search.base_dn", "CN=Users, CN=Builtin, DC=ad, DC=test, DC=elasticsearch, DC=com") - .put("domain_name", "ad.test.elasticsearch.com") + .put("xpack.security.authc.realms.active_directory.ad.group_search.scope", LdapSearchScope.BASE) + .put("xpack.security.authc.realms.active_directory.ad.group_search.base_dn", + "CN=Users, CN=Builtin, DC=ad, DC=test, DC=elasticsearch, DC=com") + .put("xpack.security.authc.realms.active_directory.ad.domain_name", "ad.test.elasticsearch.com") .build(); - ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(settings); + ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(config(REALM_ID, settings)); List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); assertThat(groups, hasItem(containsString("CN=Users,CN=Builtin"))); @@ -108,7 +113,7 @@ private void assertValidSidQuery(Filter query, String[] expectedSids) { Pattern sidQueryPattern = Pattern.compile("\\(\\|(\\(objectSid=S(-\\d+)+\\))+\\)"); assertThat("[" + queryString + "] didn't match the search filter pattern", sidQueryPattern.matcher(queryString).matches(), is(true)); - for (String sid: expectedSids) { + for (String sid : expectedSids) { assertThat(queryString, containsString(sid)); } } diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java index 7861557709ed1..721c2d066778a 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -19,8 +19,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; @@ -33,6 +35,7 @@ import java.util.List; import 
java.util.concurrent.ExecutionException; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItem; @@ -41,6 +44,8 @@ public class ActiveDirectorySessionFactoryTests extends AbstractActiveDirectoryTestCase { + private static final String REALM_NAME = "ad-test"; + private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ad", REALM_NAME); private final SecureString SECURED_PASSWORD = new SecureString(PASSWORD); private ThreadPool threadPool; @@ -90,7 +95,8 @@ private RealmConfig configureRealm(String name, Settings settings) { .put(globalSettings) .build(); this.sslService = new SSLService(mergedSettings, env); - return new RealmConfig(name, settings, globalSettings, env, new ThreadContext(globalSettings)); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier(LdapRealmSettings.AD_TYPE, name); + return new RealmConfig(identifier, mergedSettings, env, new ThreadContext(globalSettings)); } @SuppressWarnings("unchecked") @@ -133,7 +139,7 @@ public void testAdAuthAvengers() throws Exception { @SuppressWarnings("unchecked") public void testAuthenticate() throws Exception { - Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); RealmConfig config = configureRealm("ad-test", settings); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -157,8 +163,8 @@ public void testAuthenticate() throws Exception { @SuppressWarnings("unchecked") public void testAuthenticateBaseUserSearch() throws Exception { - Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", - LdapSearchScope.BASE, false); + Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, + "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.BASE, false); RealmConfig config = configureRealm("ad-test", settings); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -181,7 +187,7 @@ public void testAuthenticateBaseUserSearch() throws Exception { public void testAuthenticateBaseGroupSearch() throws Exception { Settings settings = Settings.builder() - .put(buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + .put(buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false)) .put(ActiveDirectorySessionFactorySettings.AD_GROUP_SEARCH_BASEDN_SETTING, "CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") @@ -202,7 +208,7 @@ public void testAuthenticateBaseGroupSearch() throws Exception { @SuppressWarnings("unchecked") public void testAuthenticateWithUserPrincipalName() throws Exception { - Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); RealmConfig config = configureRealm("ad-test", settings); try 
(ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -223,7 +229,7 @@ public void testAuthenticateWithUserPrincipalName() throws Exception { @SuppressWarnings("unchecked") public void testAuthenticateWithSAMAccountName() throws Exception { - Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + Settings settings = buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.ONE_LEVEL, false); RealmConfig config = configureRealm("ad-test", settings); try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { @@ -246,9 +252,9 @@ public void testAuthenticateWithSAMAccountName() throws Exception { @SuppressWarnings("unchecked") public void testCustomUserFilter() throws Exception { Settings settings = Settings.builder() - .put(buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + .put(buildAdSettings(REALM_ID, AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", LdapSearchScope.SUB_TREE, false)) - .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, + .put(getFullSettingKey(REALM_ID.getName(), ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING), "(&(objectclass=user)(userPrincipalName={0}@ad.test.elasticsearch.com))") .build(); RealmConfig config = configureRealm("ad-test", settings); @@ -273,12 +279,12 @@ public void testStandardLdapConnection() throws Exception { String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; Settings settings = Settings.builder() .put(LdapTestCase.buildLdapSettings( - new String[] { AD_LDAP_URL }, - new String[] { userTemplate }, - groupSearchBase, - LdapSearchScope.SUB_TREE, - null, - true)) + new String[]{AD_LDAP_URL}, + new String[]{userTemplate}, + groupSearchBase, + LdapSearchScope.SUB_TREE, + null, + true)) .put("follow_referrals", FOLLOW_REFERRALS) .build(); if (useGlobalSSL == false) { @@ -309,8 +315,8 @@ public void testHandlingLdapReferralErrors() throws Exception { String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; final boolean ignoreReferralErrors = false; Settings settings = LdapTestCase.buildLdapSettings( - new String[] { AD_LDAP_URL }, - new String[] { userTemplate }, + new String[]{AD_LDAP_URL}, + new String[]{userTemplate}, groupSearchBase, LdapSearchScope.SUB_TREE, null, @@ -321,7 +327,8 @@ public void testHandlingLdapReferralErrors() throws Exception { .putList("ssl.certificate_authorities", certificatePaths) .build(); } - RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + RealmConfig config = new RealmConfig(new RealmConfig.RealmIdentifier("ad", "ad-as-ldap-test"), + settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); @@ -342,7 +349,7 @@ public void testHandlingLdapReferralErrors() throws Exception { @SuppressWarnings("unchecked") public void testStandardLdapWithAttributeGroups() throws Exception { String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; - Settings settings = LdapTestCase.buildLdapSettings(new String[] { AD_LDAP_URL }, userTemplate, false); + Settings settings = LdapTestCase.buildLdapSettings(new 
String[]{AD_LDAP_URL}, userTemplate, false); if (useGlobalSSL == false) { settings = Settings.builder() .put(settings) @@ -389,21 +396,22 @@ private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean ho private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean hostnameVerification, boolean useBindUser) { Settings.Builder builder = Settings.builder() - .put(SessionFactorySettings.URLS_SETTING, ldapUrl) - .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, adDomainName) - .put(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), AD_LDAP_PORT) - .put(ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.getKey(), AD_LDAPS_PORT) - .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.getKey(), AD_GC_LDAP_PORT) - .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.getKey(), AD_GC_LDAPS_PORT) - .put("follow_referrals", FOLLOW_REFERRALS); + .put(getFullSettingKey(REALM_ID, SessionFactorySettings.URLS_SETTING), ldapUrl) + .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName) + .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT) + .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT) + .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT) + .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT) + .put(getFullSettingKey(REALM_ID, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS); if (randomBoolean()) { - builder.put("ssl.verification_mode", hostnameVerification ? VerificationMode.FULL : VerificationMode.CERTIFICATE); + builder.put(getFullSettingKey(REALM_ID, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), + hostnameVerification ? 
VerificationMode.FULL : VerificationMode.CERTIFICATE); } else { - builder.put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, hostnameVerification); + builder.put(getFullSettingKey(REALM_ID, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), hostnameVerification); } if (useGlobalSSL == false) { - builder.putList("ssl.certificate_authorities", certificatePaths); + builder.putList(getFullSettingKey(REALM_ID, SSLConfigurationSettings.CAPATH_SETTING_REALM), certificatePaths); } if (useBindUser) { diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java index d6fc22a5cf579..126cde20686c6 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java @@ -10,6 +10,7 @@ import com.unboundid.ldap.sdk.SearchScope; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.support.NoOpLogger; import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; @@ -24,11 +25,12 @@ public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase { public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1"); @SuppressWarnings("unchecked") public void testResolve() throws Exception { //falling back on the 'memberOf' attribute - UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY); + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY)); List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, null); assertThat(groups, containsInAnyOrder( @@ -43,7 +45,7 @@ public void testResolveFromPreloadedAttributes() throws Exception { SearchRequest preSearch = new SearchRequest(BRUCE_BANNER_DN, SearchScope.BASE, LdapUtils.OBJECT_CLASS_PRESENCE_FILTER, "memberOf"); final Collection attributes = ldapConnection.searchForEntry(preSearch).getAttributes(); - UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY); + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY)); List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, attributes); assertThat(groups, containsInAnyOrder( @@ -58,7 +60,7 @@ public void testResolveCustomGroupAttribute() throws Exception { Settings settings = Settings.builder() .put("user_group_attribute", "seeAlso") .build(); - UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(settings); + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, settings)); List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, null); assertThat(groups, hasItems(containsString("Avengers"))); //seeAlso only has Avengers @@ 
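Across these tests the RealmConfig constructor migrates from a bare realm name to a RealmConfig.RealmIdentifier(type, name) pair, which both identifies the realm and scopes its settings keys. A minimal sketch of the after-state construction, using only shapes that appear in the hunks above; the surrounding Settings values are assumed test fixtures:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;

final class RealmConfigMigration {
    // Mirrors the new constructor call in testHandlingLdapReferralErrors:
    // the realm is identified by (type, name) rather than name alone.
    static RealmConfig adAsLdapTestRealm(Settings settings, Settings globalSettings) {
        RealmConfig.RealmIdentifier id = new RealmConfig.RealmIdentifier("ad", "ad-as-ldap-test");
        return new RealmConfig(id, settings, globalSettings,
                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
    }
}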
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
index d6fc22a5cf579..126cde20686c6 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
@@ -10,6 +10,7 @@
 import com.unboundid.ldap.sdk.SearchScope;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.support.NoOpLogger;
 import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils;
 
@@ -24,11 +25,12 @@ public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase {
 
     public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com";
+    private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1");
 
     @SuppressWarnings("unchecked")
     public void testResolve() throws Exception {
         //falling back on the 'memberOf' attribute
-        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY);
+        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY));
         List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20),
             NoOpLogger.INSTANCE, null);
         assertThat(groups, containsInAnyOrder(
@@ -43,7 +45,7 @@ public void testResolveFromPreloadedAttributes() throws Exception {
         SearchRequest preSearch = new SearchRequest(BRUCE_BANNER_DN, SearchScope.BASE, LdapUtils.OBJECT_CLASS_PRESENCE_FILTER, "memberOf");
         final Collection<Attribute> attributes = ldapConnection.searchForEntry(preSearch).getAttributes();
 
-        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY);
+        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY));
         List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20),
             NoOpLogger.INSTANCE, attributes);
         assertThat(groups, containsInAnyOrder(
@@ -58,7 +60,7 @@ public void testResolveCustomGroupAttribute() throws Exception {
         Settings settings = Settings.builder()
             .put("user_group_attribute", "seeAlso")
             .build();
-        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(settings);
+        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, settings));
         List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20),
             NoOpLogger.INSTANCE, null);
         assertThat(groups, hasItems(containsString("Avengers")));   //seeAlso only has Avengers
@@ -68,7 +70,7 @@ public void testResolveInvalidGroupAttribute() throws Exception {
         Settings settings = Settings.builder()
             .put("user_group_attribute", "doesntExist")
             .build();
-        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(settings);
+        UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, settings));
         List<String> groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20),
             NoOpLogger.INSTANCE, null);
         assertThat(groups, empty());
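The resolver tests above now call a config(realmId, settings) helper instead of handing raw Settings to the resolvers. That helper is not part of this diff; a plausible shape for it, stated purely as an assumption, is a small RealmConfig factory over the identifier-scoped settings, built the same way the session-factory tests build theirs (the path.home value below is a placeholder required by Environment construction):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;

final class GroupsResolverTestSupport {
    // Hypothetical helper, not taken from this diff: wraps the given settings
    // in a RealmConfig for the identifier so resolvers can read realm-scoped keys.
    static RealmConfig config(RealmConfig.RealmIdentifier realmId, Settings settings) {
        Settings globalSettings = Settings.builder()
                .put("path.home", "/tmp/es-test")   // placeholder; Environment requires path.home
                .put(settings)
                .build();
        return new RealmConfig(realmId, settings, globalSettings,
                TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings));
    }
}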