From e0e696727667f0584b56f75a4de5297ecf1e6f3d Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Wed, 6 Jun 2018 11:59:16 -0400
Subject: [PATCH] QA: Switch xpack rolling upgrades to three nodes (#31112)

This is much more realistic and can find more issues. It causes the
"mixed cluster" tests to be run twice, so I had to fix the tests to work
in that case. In most cases I did as little as possible to get them
working, but in a few cases I went a little beyond that to make them
easier for me to debug while getting them to work.

My test changes:
1. Remove the "basic indexing" tests and replace them with a copy of the
   tests used in the OSS rolling upgrade tests. We have no way of sharing
   code between these two projects, so for now I copy.
2. Skip a few tests in the "one third upgraded" scenario:
   * creating a scroll to be reused when the cluster is fully upgraded
   * creating some ml data to be used when the cluster is fully upgraded
3. Drop many "assert yellow and that the cluster has two nodes"
   assertions. These assertions duplicate those made by the wait
   condition and they fail now that we have three nodes.
4. Switch many "assert green and that the cluster has two nodes"
   assertions to three nodes. These assertions are distinct from the wait
   condition and, while I imagine they aren't required in all cases, now
   is not the time to find that out. Thus, I made them work.
5. Rework the index audit trail test so it is more obvious that it is the
   same test expecting different numbers based on the shape of the
   cluster. The conditions for which number is expected are fairly
   complex because the index audit trail is shut down until its template
   is upgraded, and the template is only upgraded once a master node
   running the new version of the software is elected.
6. Add some more information to debug the index audit trail test because
   it helped me figure out what was going on.

I also dropped the `waitCondition` from the `rolling-upgrade-basic` tests
because it wasn't needed.

Closes #25336
---
 .../elasticsearch/upgrades/IndexingIT.java    |   4 +
 x-pack/qa/rolling-upgrade/build.gradle        | 151 ++++++++---------
 .../upgrades/AbstractUpgradeTestCase.java     |   6 +-
 .../upgrades/IndexAuditUpgradeIT.java         | 144 +++++++++-------
 .../elasticsearch/upgrades/IndexingIT.java    | 124 ++++++++++++++
 .../TokenBackwardsCompatibilityIT.java        |   8 +-
 .../test/mixed_cluster/10_basic.yml           | 159 +-----------------
 .../test/mixed_cluster/20_security.yml        |  11 --
 .../test/mixed_cluster/30_ml_jobs_crud.yml    |   7 -
 .../mixed_cluster/40_ml_datafeed_crud.yml     |   6 -
 .../test/old_cluster/10_basic.yml             |  35 ----
 .../test/upgraded_cluster/10_basic.yml        |  43 +----
 .../test/upgraded_cluster/20_security.yml     |   2 +-
 .../test/upgraded_cluster/30_ml_jobs_crud.yml |   2 +-
 .../upgraded_cluster/40_ml_datafeed_crud.yml  |   3 +-
 15 files changed, 297 insertions(+), 408 deletions(-)
 create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
 delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml

diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
index f1e01d24acff6..3898746e5c374 100644
--- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java
@@ -30,6 +30,10 @@
 * Basic test that indexed documents survive the rolling restart.
See * {@link RecoveryIT} for much more in depth testing of the mechanism * by which they survive. + *

+ * This test is an almost exact copy of IndexingIT in the + * xpack rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. */ public class IndexingIT extends AbstractRollingTestCase { public void testIndexing() throws IOException { diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 1a53654b38233..13b18b727f23d 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -32,7 +32,7 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> HttpURLConnection httpURLConnection = null; try { // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=3&wait_for_status=yellow").openConnection(); httpURLConnection.setRequestProperty("Authorization", "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); httpURLConnection.setRequestMethod("GET"); @@ -124,9 +124,9 @@ subprojects { String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - minimumMasterNodes = { 2 } + numBwcNodes = 3 + numNodes = 3 + minimumMasterNodes = { 3 } clusterName = 'rolling-upgrade' waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' @@ -179,87 +179,88 @@ subprojects { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) - - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - if (version.onOrAfter('6.0.0') == false) { - // this is needed since in 5.6 we don't bootstrap the token service if there is no explicit initial password - keystoreSetting 'xpack.security.authc.token.passphrase', 'xpack_token_passphrase' - } - setting 'node.attr.upgraded', 'first' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' - setting 'xpack.security.audit.enabled', 'true' - setting 
'xpack.security.audit.outputs', 'index' - setting 'node.name', 'mixed-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + if (version.onOrAfter('6.0.0') == false) { + // this is needed since in 5.6 we don't bootstrap the token service if there is no explicit initial password + keystoreSetting 'xpack.security.authc.token.passphrase', 'xpack_token_passphrase' + } + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + } } } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, + 0, { oldClusterTest.nodes.get(1).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', + 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', + ].join(',') + finalizedBy 
"${baseName}#oldClusterTestCluster#node1.stop" } - Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - if (version.onOrAfter('6.0.0') == false) { - // this is needed since in 5.6 we don't bootstrap the token service if there is no explicit initial password - keystoreSetting 'xpack.security.authc.token.passphrase', 'xpack_token_passphrase' - } - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'node.name', 'upgraded-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, + 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } + Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) + + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, + 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) + Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. 
// this stinks but we do the check here since our rest tests do not support conditionals @@ -272,13 +273,9 @@ subprojects { systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' } } - if (version.before('6.1.0') || version.onOrAfter('6.3.0')) { systemProperty 'tests.rest.blacklist', '/30_ml_jobs_crud/Test model memory limit is updated' } - - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 65b1a7c85dcb1..a3576b7b8c3e8 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -37,12 +37,12 @@ protected boolean preserveTemplatesUponCompletion() { return true; } - enum CLUSTER_TYPE { + enum ClusterType { OLD, MIXED, UPGRADED; - public static CLUSTER_TYPE parse(String value) { + public static ClusterType parse(String value) { switch (value) { case "old_cluster": return OLD; @@ -56,7 +56,7 @@ public static CLUSTER_TYPE parse(String value) { } } - protected final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); + protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index 97a61f25d7b03..d8c508d730cf2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; +import org.elasticsearch.common.Booleans; import org.hamcrest.Matchers; import org.junit.Before; @@ -19,77 +20,80 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isOneOf; public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { - private Version minVersionInCluster; @Before public void findMinVersionInCluster() throws IOException { - Response response = client().performRequest("GET", "_nodes"); - ObjectPath objectPath = ObjectPath.createFromResponse(response); - Map nodesAsMap = objectPath.evaluate("nodes"); - Version version = Version.CURRENT; - for (String id : nodesAsMap.keySet()) { - Version nodeVersion = Version.fromString(objectPath.evaluate("nodes." 
+ id + ".version")); - if (nodeVersion.before(version)) { - version = nodeVersion; + if (CLUSTER_TYPE == ClusterType.UPGRADED) { + // cannot use minVersion in cluster as we are upgraded on all nodes, + // BUT we can check the version created on the index + Response response = client().performRequest("GET", "/.security_audit_log*/_settings/index.version.created", + Collections.singletonMap("flat_settings", "true")); + Map responseMap = entityAsMap(response); + logger.info("get settings response {}", responseMap); + for (Map.Entry entry : responseMap.entrySet()) { + Map indexEntry = (Map) entry.getValue(); + Map indexSettings = (Map) indexEntry.get("settings"); + String versionCreated = (String) indexSettings.get("index.version.created"); + if (versionCreated != null) { + Version indexVersionCreated = Version.fromId(Integer.valueOf(versionCreated)); + if (minVersionInCluster == null || indexVersionCreated.before(minVersionInCluster)) { + minVersionInCluster = indexVersionCreated; + } + } } - } - minVersionInCluster = version; - } - public void testDocsAuditedInOldCluster() throws Exception { - assumeTrue("only runs against old cluster", clusterType == CLUSTER_TYPE.OLD); - final Matcher expectedBuckets = minVersionInCluster.onOrAfter(Version.V_6_2_3) ? is(2) : is(0); - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(expectedBuckets); - }); + assertNotNull(minVersionInCluster); + } else { + Response response = client().performRequest("GET", "_nodes"); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + minVersionInCluster = Version.CURRENT; + for (String id : nodesAsMap.keySet()) { + Version nodeVersion = Version.fromString(objectPath.evaluate("nodes." + id + ".version")); + if (nodeVersion.before(minVersionInCluster)) { + minVersionInCluster = nodeVersion; + } + } + + assertTrue(minVersionInCluster + " is older than " + Version.CURRENT, + minVersionInCluster.before(Version.CURRENT)); + } } - public void testDocsAuditedInMixedCluster() throws Exception { - assumeTrue("only runs against mixed cluster", clusterType == CLUSTER_TYPE.MIXED); - // the isOneOf(0, 1) check is necessary for instances where this test runs across an - // an index rollover and the audit trail on the upgraded node starts so we get a bucket - // with a node name - final Matcher expectedBuckets = minVersionInCluster.onOrAfter(Version.V_6_2_3) ? 
is(2) : isOneOf(0, 1); + public void testAuditLogs() throws Exception { assertBusy(() -> { assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(expectedBuckets); + assertNumUniqueNodeNameBuckets(expectedNumUniqueNodeNameBuckets()); }); } - public void testDocsAuditedInUpgradedCluster() throws Exception { - assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED); - // cannot use minVersion in cluster as we are upgraded on all nodes, - // BUT we can check the version created on the index - Response response = client().performRequest("GET", "/.security_audit_log*/_settings/index.version.created", - Collections.singletonMap("flat_settings", "true")); - Map responseMap = entityAsMap(response); - logger.error("get settings response {}", responseMap); - Version minVersion = null; - for (Map.Entry entry : responseMap.entrySet()) { - Map indexEntry = (Map) entry.getValue(); - Map indexSettings = (Map) indexEntry.get("settings"); - String versionCreated = (String) indexSettings.get("index.version.created"); - if (versionCreated != null) { - Version indexVersionCreated = Version.fromId(Integer.valueOf(versionCreated)); - if (minVersion == null || indexVersionCreated.before(minVersion)) { - minVersion = indexVersionCreated; + private int expectedNumUniqueNodeNameBuckets() throws IOException { + int oldVal = minVersionInCluster.onOrAfter(Version.V_6_2_3) ? 3 : 0; + switch (CLUSTER_TYPE) { + case OLD: + return oldVal; + case MIXED: + if (false == masterIsNewVersion()) { + return oldVal; + } + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + return oldVal + 1; } + return oldVal + 2; + case UPGRADED: + return oldVal + 3; + default: + throw new IllegalArgumentException("Unsupported cluster type [" + CLUSTER_TYPE + "]"); } - } - - assertNotNull(minVersion); - final Matcher expectedBuckets = minVersion.onOrAfter(Version.V_6_2_3) ? 
is(4) : is(2); - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(expectedBuckets); - }); } private void assertAuditDocsExist() throws Exception { @@ -101,28 +105,42 @@ private void assertAuditDocsExist() throws Exception { assertThat((Integer) responseMap.get("count"), Matchers.greaterThanOrEqualTo(1)); } - private void assertNumUniqueNodeNameBuckets(Matcher numBucketsMatcher) throws Exception { + private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { // call API that will hit all nodes - assertEquals(200, client().performRequest("GET", "/_nodes").getStatusLine().getStatusCode()); + Map nodesResponse = entityAsMap(client().performRequest("GET", "/_nodes/_all/info/version")); + logger.info("all nodes {}", nodesResponse); HttpEntity httpEntity = new StringEntity( "{\n" + - " \"aggs\" : {\n" + - " \"nodes\" : {\n" + - " \"terms\" : { \"field\" : \"node_name\" }\n" + - " }\n" + - " }\n" + - "}", ContentType.APPLICATION_JSON); + " \"aggs\" : {\n" + + " \"nodes\" : {\n" + + " \"terms\" : { \"field\" : \"node_name\" }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search", Collections.singletonMap("pretty", "true"), httpEntity); Map aggResponseMap = entityAsMap(aggResponse); logger.debug("aggResponse {}", aggResponseMap); - Map aggregations = (Map) aggResponseMap.get("aggregations"); + Map aggregations = (Map) aggResponseMap.get("aggregations"); assertNotNull(aggregations); - Map nodesAgg = (Map) aggregations.get("nodes"); + Map nodesAgg = (Map) aggregations.get("nodes"); assertNotNull(nodesAgg); - List> buckets = (List>) nodesAgg.get("buckets"); + List buckets = (List) nodesAgg.get("buckets"); assertNotNull(buckets); - assertThat("Found node buckets " + buckets, buckets.size(), numBucketsMatcher); + assertThat("Found node buckets " + buckets, buckets, hasSize(numBuckets)); + } + + /** + * Has the master been upgraded to the new version? + * @throws IOException + */ + private boolean masterIsNewVersion() throws IOException { + Map map = entityAsMap(client().performRequest("GET", "/_nodes/_master")); + map = (Map) map.get("nodes"); + assertThat(map.values(), hasSize(1)); + map = (Map) map.values().iterator().next(); + Version masterVersion = Version.fromString(map.get("version").toString()); + return Version.CURRENT.equals(masterVersion); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java new file mode 100644 index 0000000000000..3448117cd2c88 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Basic test that indexed documents survive the rolling restart. + *

+ * This test is an almost exact copy of IndexingIT in the + * oss rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. + */ +public class IndexingIT extends AbstractUpgradeTestCase { + public void testIndexing() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + if (CLUSTER_TYPE == ClusterType.OLD) { + Request createTestIndex = new Request("PUT", "/test_index"); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas"); + createIndexWithReplicas.setJsonEntity(recoverQuickly); + client().performRequest(createIndexWithReplicas); + + Request createEmptyIndex = new Request("PUT", "/empty_index"); + // Ask for recovery to be quick + createEmptyIndex.setJsonEntity(recoverQuickly); + client().performRequest(createEmptyIndex); + + bulk("test_index", "_OLD", 5); + bulk("index_with_replicas", "_OLD", 5); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount("test_index", expectedCount); + assertCount("index_with_replicas", 5); + assertCount("empty_index", 0); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk("test_index", "_" + CLUSTER_TYPE, 5); + Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount("test_index", expectedCount + 6); + + Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + + assertCount("test_index", expectedCount + 5); + } + } + + private void bulk(String index, String valueSuffix, int count) throws IOException { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < count; i++) { + b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(b.toString()); + 
client().performRequest(bulk); + } + + private void assertCount(String index, int count) throws IOException { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); + searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", + EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 4fa0c9a535f6c..705122252e7c3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -25,7 +25,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testGeneratingTokenInOldCluster() throws Exception { - assumeTrue("this test should only run against the old cluster", clusterType == CLUSTER_TYPE.OLD); + assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD); final StringEntity tokenPostBody = new StringEntity("{\n" + " \"username\": \"test_user\",\n" + " \"password\": \"x-pack-test-password\",\n" + @@ -61,7 +61,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed or upgraded cluster", - clusterType == CLUSTER_TYPE.MIXED || clusterType == CLUSTER_TYPE.UPGRADED); + CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -69,7 +69,7 @@ public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { } public void testMixedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED); + assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); @@ -117,7 +117,7 @@ public void testMixedCluster() throws Exception { } public void testUpgradedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED); + assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 5c4808edb8a14..770e694524fe3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -1,166 +1,13 @@ ---- -setup: - - do: - cluster.health: - # if the primary shard of an index with (number_of_replicas > 0) ends up on the new node, the replica cannot be - # allocated to the old node (see NodeVersionAllocationDecider). x-pack automatically creates indices with - # replicas, for example monitoring-data-*. - wait_for_status: yellow - wait_for_nodes: 2 - ---- -"Index data and search on the mixed cluster": - - do: - search: - index: test_index - - - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_mixed", "f2": 5}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_mixed", "f2": 6}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_mixed", "f2": 7}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_mixed", "f2": 8}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_mixed", "f2": 9}' - - - do: - index: - index: test_index - type: test_type - id: d10 - body: {"f1": "v6_mixed", "f2": 10} - - - do: - index: - index: test_index - type: test_type - id: d11 - body: {"f1": "v7_mixed", "f2": 11} - - - do: - index: - index: test_index - type: test_type - id: d12 - body: {"f1": "v8_mixed", "f2": 12} - - - do: - indices.flush: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 13 } # 5 docs from old cluster, 8 docs from mixed cluster - - - do: - delete: - index: test_index - type: test_type - id: d10 - - - do: - delete: - index: test_index - type: test_type - id: d11 - - - do: - delete: - index: test_index - type: test_type - id: d12 - - - do: - indices.flush: - index: test_index - ---- -"Basic scroll mixed": - - do: - indices.create: - index: test_scroll - - do: - index: - index: test_scroll - type: test - id: 42 - body: { foo: 1 } - - - do: - index: - index: test_scroll - type: test - id: 43 - body: { foo: 2 } - - - do: - indices.refresh: {} - - - do: - search: - index: test_scroll - size: 1 - scroll: 1m - sort: foo - body: - query: - match_all: {} - - - set: {_scroll_id: scroll_id} - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: {hits.hits.0._id: "42" } - - - do: - index: - index: test_scroll - type: test - id: 44 - body: { foo: 3 } - - - do: - indices.refresh: {} - - - do: - scroll: - body: { "scroll_id": "$scroll_id", "scroll": "1m"} - - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: {hits.hits.0._id: "43" } - - - do: - scroll: - scroll_id: $scroll_id - scroll: 1m - - - match: {hits.total: 2 } - - length: {hits.hits: 0 } - - - do: - clear_scroll: - scroll_id: $scroll_id ---- -"Start scroll in mixed cluster for upgraded": +"Start scroll in mixed cluster on upgraded node that we will continue after upgrade": - do: indices.create: index: upgraded_scroll wait_for_active_shards: all body: settings: - number_of_replicas: "0" - index.routing.allocation.include.upgraded: "first" + number_of_replicas: 0 + index.routing.allocation.include.upgraded: true - do: index: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml index e9189a916bba5..ae2275e972cdc 100644 --- 
a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml @@ -1,13 +1,5 @@ --- "Verify user and role in mixed cluster": - - do: - headers: - Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - - match: { timed_out: false } - - do: xpack.security.get_user: username: "native_user" @@ -39,6 +31,3 @@ username: "kibana,logstash_system" - match: { kibana.enabled: false } - match: { logstash_system.enabled: true } - - - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index d77cc8436defe..1551b6cb94e40 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,11 +1,4 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 ---- "Test get old cluster job": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 8a06c91cc8a01..0ec288f90973c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,3 @@ -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - --- "Test old cluster datafeed": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml deleted file mode 100644 index 7a0231240aa9c..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -"Index data and search on the old cluster": - - do: - indices.create: - index: test_index - wait_for_active_shards : all - body: - settings: - index: - number_of_replicas: 1 - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - indices.flush: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 5 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 2657bb0ea1a36..0cd51af1f7a0a 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,46 +1,5 @@ ---- -"Index data and search on the 
upgraded cluster": - - do: - cluster.health: - wait_for_status: green - wait_for_nodes: 2 - # wait for long enough that we give delayed unassigned shards to stop being delayed - timeout: 70s - level: shards - - do: - search: - index: test_index - - - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_upgraded", "f2": 10}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_upgraded", "f2": 11}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_upgraded", "f2": 12}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_upgraded", "f2": 13}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_upgraded", "f2": 14}' - - - do: - indices.flush: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs - ---- -"Get indexed scroll and execute scroll": +"Continue scroll after upgrade": - do: get: index: scroll_index diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml index 0d18bcc0bee0a..46ade4823a221 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml @@ -5,7 +5,7 @@ Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s - match: { timed_out: false } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 00d5c9b9280a3..8f52a1afb0943 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index ed6a66ae1a51f..6b4c963dd533b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s @@ -97,4 +97,3 @@ setup: xpack.ml.delete_job: job_id: mixed-cluster-datafeed-job - match: { acknowledged: true } -