diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
index 542a241cb673..34a559f9ce47 100644
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
@@ -85,6 +85,11 @@ public static Builder<Builder<?>> builder()
         return new Builder<>();
     }
 
+    public static Builder<Builder<?>> builder(Session defaultSession)
+    {
+        return new Builder<>(defaultSession);
+    }
+
     public static class Builder<SELF extends Builder<?>>
             extends DistributedQueryRunner.Builder<SELF>
     {
@@ -107,7 +112,12 @@ public static class Builder<SELF extends Builder<?>>
 
         protected Builder()
         {
-            super(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))));
+            this(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))));
+        }
+
+        protected Builder(Session defaultSession)
+        {
+            super(defaultSession);
         }
 
         public SELF setSkipTimezoneSetup(boolean skipTimezoneSetup)
diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreQueries.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreQueries.java
deleted file mode 100644
index ad5fa1ec0752..000000000000
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreQueries.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.trino.plugin.hive.metastore.cache;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import io.trino.plugin.hive.HiveQueryRunner;
-import io.trino.testing.AbstractTestQueryFramework;
-import io.trino.testing.QueryRunner;
-import org.testng.annotations.Test;
-
-import static com.google.common.base.Verify.verify;
-import static io.trino.tpch.TpchTable.NATION;
-import static java.util.Collections.nCopies;
-
-public class TestCachingHiveMetastoreQueries
-        extends AbstractTestQueryFramework
-{
-    private static final int nodeCount = 3;
-
-    @Override
-    protected QueryRunner createQueryRunner()
-            throws Exception
-    {
-        verify(nodeCount > 1, "this test requires a multinode query runner");
-        return HiveQueryRunner.builder()
-                .setHiveProperties(ImmutableMap.of("hive.metastore-cache-ttl", "60m"))
-                // only so that tpch schema is created (TODO https://github.com/trinodb/trino/issues/6861)
-                .setInitialTables(ImmutableList.of(NATION))
-                .setNodeCount(nodeCount)
-                // Exclude coordinator from workers to make testPartitionAppend deterministically reproduce the original problem
-                .setCoordinatorProperties(ImmutableMap.of("node-scheduler.include-coordinator", "false"))
-                .build();
-    }
-
-    @Test
-    public void testPartitionAppend()
-    {
-        getQueryRunner().execute("CREATE TABLE test_part_append " +
-                "(name varchar, partkey varchar) " +
-                "WITH (partitioned_by = ARRAY['partkey'])");
-
-        String row = "('some name', 'part1')";
-
-        // if metastore caching was enabled on workers than any worker which tries to INSERT into same partition twice
-        // will fail because it would've cached the absence of the partition
-        for (int i = 0; i < nodeCount + 1; i++) {
-            getQueryRunner().execute("INSERT INTO test_part_append VALUES " + row);
-        }
-
-        String expected = Joiner.on(",").join(nCopies(nodeCount + 1, row));
-        assertQuery("SELECT * FROM test_part_append", "VALUES " + expected);
-    }
-}
diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java
index e2c781c09a13..3621e93b83a3 100644
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java
@@ -13,10 +13,11 @@
  */
 package io.trino.plugin.hive.metastore.cache;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import io.trino.Session;
-import io.trino.plugin.hive.TestingHivePlugin;
+import io.trino.plugin.hive.HiveQueryRunner;
 import io.trino.plugin.hive.metastore.file.FileHiveMetastore;
 import io.trino.spi.security.Identity;
 import io.trino.spi.security.SelectedRole;
@@ -30,20 +31,23 @@
 import java.util.List;
 import java.util.Optional;
 
+import static com.google.common.base.Verify.verify;
 import static com.google.common.collect.Lists.cartesianProduct;
 import static com.google.common.io.MoreFiles.deleteRecursively;
 import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
 import static io.trino.plugin.hive.authentication.HiveIdentity.none;
+import static io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore;
 import static io.trino.spi.security.SelectedRole.Type.ROLE;
 import static io.trino.testing.TestingSession.testSessionBuilder;
 import static java.nio.file.Files.createTempDirectory;
+import static java.util.Collections.nCopies;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 @Test(singleThreaded = true)
 public class TestCachingHiveMetastoreWithQueryRunner
         extends AbstractTestQueryFramework
 {
-    private static final String CATALOG = "test";
+    private static final String CATALOG = HiveQueryRunner.HIVE_CATALOG;
     private static final String SCHEMA = "test";
     private static final Session ADMIN = getTestSession(Identity.forUser("admin")
             .withConnectorRole(CATALOG, new SelectedRole(ROLE, Optional.of("admin")))
@@ -57,22 +61,22 @@ public class TestCachingHiveMetastoreWithQueryRunner
     protected QueryRunner createQueryRunner()
             throws Exception
     {
-        DistributedQueryRunner queryRunner = DistributedQueryRunner
-                .builder(ADMIN)
-                .setNodeCount(1)
-                // Exclude coordinator from workers to make testPartitionAppend deterministically reproduce the original problem
-                .setCoordinatorProperties(ImmutableMap.of("node-scheduler.include-coordinator", "false"))
-                .build();
-
         Path temporaryMetastoreDirectory = createTempDirectory(null);
         closeAfterClass(() -> deleteRecursively(temporaryMetastoreDirectory, ALLOW_INSECURE));
 
-        fileHiveMetastore = FileHiveMetastore.createTestingFileHiveMetastore(temporaryMetastoreDirectory.toFile());
-        queryRunner.installPlugin(new TestingHivePlugin(fileHiveMetastore));
-        queryRunner.createCatalog(CATALOG, "hive", ImmutableMap.of(
-                "hive.security", "sql-standard",
-                "hive.metastore-cache-ttl", "60m",
-                "hive.metastore-refresh-interval", "10m"));
+        DistributedQueryRunner queryRunner = HiveQueryRunner.builder(ADMIN)
+                .setNodeCount(3)
+                // Required by the testPartitionAppend test.
+                // The coordinator needs to be excluded from the workers to deterministically reproduce the original problem:
+                // https://github.com/trinodb/trino/pull/6853
+                .setCoordinatorProperties(ImmutableMap.of("node-scheduler.include-coordinator", "false"))
+                .setMetastore(distributedQueryRunner -> fileHiveMetastore = createTestingFileHiveMetastore(temporaryMetastoreDirectory.toFile()))
+                .setHiveProperties(ImmutableMap.of(
+                        "hive.security", "sql-standard",
+                        "hive.metastore-cache-ttl", "60m",
+                        "hive.metastore-refresh-interval", "10m"))
+                .build();
+
         queryRunner.execute(ADMIN, "CREATE SCHEMA " + SCHEMA);
         queryRunner.execute("CREATE TABLE test (test INT)");
 
@@ -155,6 +159,28 @@ public void testIllegalFlushHiveMetastoreCacheProcedureCalls()
                 .hasMessage("Parameters partition_column and partition_value should have same length");
     }
 
+    @Test
+    public void testPartitionAppend()
+    {
+        int nodeCount = getQueryRunner().getNodeCount();
+        verify(nodeCount > 1, "this test requires a multinode query runner");
+
+        getQueryRunner().execute("CREATE TABLE test_part_append " +
+                "(name varchar, partkey varchar) " +
+                "WITH (partitioned_by = ARRAY['partkey'])");
+
+        String row = "('some name', 'part1')";
+
+        // if metastore caching were enabled on workers, then any worker that tries to INSERT into the same partition twice
+        // would fail because it would have cached the absence of the partition
+        for (int i = 0; i < nodeCount + 1; i++) {
+            getQueryRunner().execute("INSERT INTO test_part_append VALUES " + row);
+        }
+
+        String expected = Joiner.on(",").join(nCopies(nodeCount + 1, row));
+        assertQuery("SELECT * FROM test_part_append", "VALUES " + expected);
+    }
+
     @DataProvider
     public Object[][] testCacheRefreshOnRoleGrantAndRevokeParams()
     {
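
Illustrative usage sketch (not part of the diff above): a minimal example of how the new HiveQueryRunner.builder(Session) overload can be used by a test to supply its own default session instead of the built-in admin session. The class name, identity, schema, and property values below are hypothetical; the builder methods used (setNodeCount, setHiveProperties, build) all appear in the diff.

// Hypothetical example class, not part of this change.
import com.google.common.collect.ImmutableMap;
import io.trino.Session;
import io.trino.plugin.hive.HiveQueryRunner;
import io.trino.spi.security.Identity;
import io.trino.testing.DistributedQueryRunner;

import static io.trino.testing.TestingSession.testSessionBuilder;

public class HiveQueryRunnerBuilderExample
{
    private HiveQueryRunnerBuilderExample() {}

    public static DistributedQueryRunner createQueryRunner()
            throws Exception
    {
        // Caller-supplied default session; HIVE_CATALOG is the catalog constant
        // referenced in the diff, the schema name is arbitrary for this example.
        Session defaultSession = testSessionBuilder()
                .setIdentity(Identity.forUser("admin").build())
                .setCatalog(HiveQueryRunner.HIVE_CATALOG)
                .setSchema("test")
                .build();

        // The builder(Session) overload threads this session through the new
        // protected Builder(Session) constructor down to DistributedQueryRunner.Builder.
        return HiveQueryRunner.builder(defaultSession)
                .setNodeCount(3)
                .setHiveProperties(ImmutableMap.of("hive.metastore-cache-ttl", "60m"))
                .build();
    }
}

This mirrors what the modified TestCachingHiveMetastoreWithQueryRunner does with its ADMIN session, minus the metastore and coordinator-scheduling overrides it needs for testPartitionAppend.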