Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use external table in Hive sync partition tests #12448

Merged
merged 1 commit into from
May 20, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,6 @@
*/
package io.trino.tests.product.hive;

import io.trino.tempto.AfterTestWithContext;
import io.trino.tempto.BeforeTestWithContext;
import io.trino.tempto.ProductTest;
import io.trino.tempto.assertions.QueryAssert;
import io.trino.tempto.query.QueryResult;
Expand All @@ -23,7 +21,6 @@
import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure;
import static io.trino.tempto.assertions.QueryAssert.assertThat;
import static io.trino.tests.product.hive.util.TableLocationUtils.getTableLocation;
import static io.trino.tests.product.utils.QueryExecutors.onHive;
import static io.trino.tests.product.utils.QueryExecutors.onTrino;
import static java.lang.String.format;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
Expand All @@ -34,19 +31,6 @@
public abstract class BaseTestSyncPartitionMetadata
extends ProductTest
{
@BeforeTestWithContext
public void setUp()
{
removeHdfsDirectory(schemaLocation());
makeHdfsDirectory(schemaLocation());
}

@AfterTestWithContext
public void tearDown()
{
removeHdfsDirectory(schemaLocation());
}

public void testAddPartition()
{
String tableName = "test_sync_partition_metadata_add_partition";
Expand Down Expand Up @@ -214,9 +198,8 @@ private void prepare(String tableName)
String tableLocation = tableLocation(tableName);
onTrino().executeQuery("DROP TABLE IF EXISTS " + tableName);
removeHdfsDirectory(tableLocation);
makeHdfsDirectory(tableLocation);

onHive().executeQuery("CREATE TABLE " + tableName + " (payload bigint) PARTITIONED BY (col_x string, col_y string) STORED AS ORC LOCATION '" + tableLocation + "'");
createTable(tableName, tableLocation);
onTrino().executeQuery("INSERT INTO " + tableName + " VALUES (1, 'a', '1'), (2, 'b', '2')");

// remove partition col_x=b/col_y=2
Expand All @@ -238,6 +221,8 @@ private void prepare(String tableName)
assertPartitions(tableName, row("a", "1"), row("b", "2"));
}

/**
 * Creates the partitioned test table {@code tableName}. Implementations choose the
 * engine used (Hive vs. Trino) and whether the explicit {@code location} is honored.
 */
protected abstract void createTable(String tableName, String location);

/** Removes the directory {@code path} on the backing file system, if it exists. */
protected abstract void removeHdfsDirectory(String path);

/** Creates the directory {@code path} on the backing file system. */
protected abstract void makeHdfsDirectory(String path);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
*/
package io.trino.tests.product.hive;

import io.trino.tempto.AfterTestWithContext;
import io.trino.tempto.BeforeTestWithContext;
import io.trino.testng.services.Flaky;
import org.testng.annotations.Test;

Expand All @@ -33,6 +35,19 @@ public class TestAbfsSyncPartitionMetadata
{
private final String schema = "test_" + randomTableSuffix();

@BeforeTestWithContext
public void setUp()
{
    // Start every test run from a fresh, empty schema directory.
    String location = schemaLocation();
    removeHdfsDirectory(location);
    makeHdfsDirectory(location);
}

@AfterTestWithContext
public void tearDown()
{
    // Clean up the schema directory created in setUp().
    String location = schemaLocation();
    removeHdfsDirectory(location);
}

@Override
protected String schemaLocation()
{
Expand Down Expand Up @@ -125,6 +140,13 @@ protected void copyOrcFileToHdfsDirectory(String tableName, String targetDirecto
onHive().executeQuery(format("dfs -cp %s %s", orcFilePath, targetDirectory));
}

@Override
protected void createTable(String tableName, String tableLocation)
{
    // The table is declared with an explicit LOCATION, so the target directory
    // must exist before the DDL runs.
    makeHdfsDirectory(tableLocation);
    String sql = "CREATE TABLE " + tableName
            + " (payload bigint) PARTITIONED BY (col_x string, col_y string) STORED AS ORC LOCATION '"
            + tableLocation + "'";
    onHive().executeQuery(sql);
}

// Drop and create a table. Then, return single ORC file path
private String generateOrcFile()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@
import static io.trino.tests.product.TestGroups.TRINO_JDBC;
import static io.trino.tests.product.hive.HiveProductTest.ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE;
import static io.trino.tests.product.hive.HiveProductTest.ERROR_COMMITTING_WRITE_TO_HIVE_MATCH;
import static io.trino.tests.product.hive.util.TemporaryHiveTable.randomTableSuffix;
import static java.lang.String.format;
import static io.trino.tests.product.utils.QueryExecutors.onTrino;

public class TestHdfsSyncPartitionMetadata
extends BaseTestSyncPartitionMetadata
Expand All @@ -41,12 +40,10 @@ public class TestHdfsSyncPartitionMetadata
@Inject
private HdfsDataSourceWriter hdfsDataSourceWriter;

private final String schema = "test_" + randomTableSuffix();

@Override
protected String schemaLocation()
{
return format("%s/%s", warehouseDirectory, schema);
return warehouseDirectory;
}

@Test(groups = {HIVE_PARTITIONING, SMOKE, TRINO_JDBC})
Expand Down Expand Up @@ -131,4 +128,10 @@ protected void copyOrcFileToHdfsDirectory(String tableName, String targetDirecto
HiveDataSource dataSource = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.orc");
hdfsDataSourceWriter.ensureDataOnHdfs(targetDirectory, dataSource);
}

@Override
protected void createTable(String tableName, String tableLocation)
{
    // NOTE: tableLocation is not referenced here — Trino places the table itself
    // (presumably under the warehouse directory; confirm against schemaLocation()).
    String sql = "CREATE TABLE " + tableName
            + " (payload bigint, col_x varchar, col_y varchar) WITH (format = 'ORC', partitioned_by = ARRAY[ 'col_x', 'col_y' ])";
    onTrino().executeQuery(sql);
}
}