
Commit

Add TestTrinoHive4CatalogWithHiveMetastore test
mayankvadariya committed Dec 12, 2024
1 parent b60eb5c · commit 4d83587
Showing 2 changed files with 74 additions and 3 deletions.
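
This commit adds a Hive 4 variant of the Iceberg Hive-metastore catalog test. The existing TestTrinoHiveCatalogWithHiveMetastore is opened up for subclassing: its bucketName field becomes protected, and the Thrift metastore endpoint and MinIO address move behind overridable hiveMetastoreEndpoint() and minioAddress() methods. The new TestTrinoHive4CatalogWithHiveMetastore subclass overrides both hooks to run the inherited test suite against a Hive4MinioDataLake container.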
TestTrinoHive4CatalogWithHiveMetastore.java (new file)
@@ -0,0 +1,60 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.plugin.iceberg.catalog.hms;

import io.trino.plugin.base.util.AutoCloseableCloser;
import io.trino.plugin.hive.containers.Hive4MinioDataLake;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;

import java.net.URI;

import static io.trino.testing.TestingNames.randomNameSuffix;

public class TestTrinoHive4CatalogWithHiveMetastore
        extends TestTrinoHiveCatalogWithHiveMetastore
{
    private final AutoCloseableCloser closer = AutoCloseableCloser.create();
    private Hive4MinioDataLake dataLake;

    @Override
    @BeforeAll
    public void setUp()
    {
        bucketName = "test-hive-catalog-with-hms-" + randomNameSuffix();
        dataLake = closer.register(new Hive4MinioDataLake(bucketName));
        dataLake.start();
    }

    @Override
    @AfterAll
    public void tearDown()
            throws Exception
    {
        dataLake = null;
        closer.close();
    }

    @Override
    protected URI hiveMetastoreEndpoint()
    {
        return dataLake.getHiveMetastore().getHiveMetastoreEndpoint();
    }

    @Override
    protected String minioAddress()
    {
        return dataLake.getMinio().getMinioAddress();
    }
}
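
The subclass works because the base test resolves its endpoints through overridable methods instead of reaching into its own container. Below is a minimal, self-contained sketch of that template-method pattern; the class names, endpoints, and the wireCatalog() method are hypothetical stand-ins for illustration, not Trino code.

import java.net.URI;

// Hypothetical base test: endpoint lookup is a protected hook, so the
// wiring code never names a concrete container version.
class BaseHiveCatalogTest
{
    protected URI hiveMetastoreEndpoint()
    {
        return URI.create("thrift://hive3:9083"); // assumed default backend
    }

    void wireCatalog()
    {
        // Clients are built through the hook; swapping the Hive version
        // requires no change here.
        System.out.println("connecting to " + hiveMetastoreEndpoint());
    }
}

// Hypothetical Hive 4 variant: only the hook is overridden.
class Hive4CatalogTest
        extends BaseHiveCatalogTest
{
    @Override
    protected URI hiveMetastoreEndpoint()
    {
        return URI.create("thrift://hive4:9083");
    }
}

public class TemplateMethodSketch
{
    public static void main(String[] args)
    {
        new Hive4CatalogTest().wireCatalog(); // prints the Hive 4 endpoint
    }
}

The second diff below applies the same rewiring to the real base class: createTrinoCatalog() now calls minioAddress() and hiveMetastoreEndpoint() rather than dataLake accessors.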
TestTrinoHiveCatalogWithHiveMetastore.java
@@ -57,6 +57,7 @@
 import org.junit.jupiter.api.parallel.Execution;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -91,7 +92,7 @@ public class TestTrinoHiveCatalogWithHiveMetastore
     // Use MinIO for storage, since HDFS is hard to get working in a unit test
     private HiveMinioDataLake dataLake;
     private TrinoFileSystem fileSystem;
-    private String bucketName;
+    protected String bucketName;
 
     @BeforeAll
     public void setUp()
@@ -117,7 +118,7 @@ protected TrinoCatalog createTrinoCatalog(boolean useUniqueTableLocations)
                 new HdfsConfigurationInitializer(
                         new HdfsConfig(),
                         Set.of(new TrinoS3ConfigurationInitializer(new HiveS3Config()
-                                .setS3Endpoint(dataLake.getMinio().getMinioAddress())
+                                .setS3Endpoint(minioAddress())
                                 .setS3SslEnabled(false)
                                 .setS3AwsAccessKey(MINIO_ACCESS_KEY)
                                 .setS3AwsSecretKey(MINIO_SECRET_KEY)
@@ -130,7 +131,7 @@ protected TrinoCatalog createTrinoCatalog(boolean useUniqueTableLocations)
                 .thriftMetastoreConfig(new ThriftMetastoreConfig()
                         // Read timed out sometimes happens with the default timeout
                         .setReadTimeout(new Duration(1, MINUTES)))
-                .metastoreClient(dataLake.getHiveHadoop().getHiveMetastoreEndpoint())
+                .metastoreClient(hiveMetastoreEndpoint())
                 .build(closer::register);
         CachingHiveMetastore metastore = createPerTransactionCache(new BridgingHiveMetastore(thriftMetastore), 1000);
         fileSystem = fileSystemFactory.create(SESSION);
@@ -229,6 +230,16 @@ public void testCreateMaterializedView()
         }
     }
 
+    protected URI hiveMetastoreEndpoint()
+    {
+        return dataLake.getHiveHadoop().getHiveMetastoreEndpoint();
+    }
+
+    protected String minioAddress()
+    {
+        return dataLake.getMinio().getMinioAddress();
+    }
+
     @Override
     protected Map<String, Object> defaultNamespaceProperties(String namespaceName)
     {
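
Both test classes manage container lifecycles through the closer used in setUp() and tearDown(): each resource is registered as it is created, and tearDown() releases everything with one close() call. The sketch below illustrates that idiom with SimpleCloser, a hypothetical stand-in for io.trino.plugin.base.util.AutoCloseableCloser; the reverse-order, exception-suppressing close behavior is an assumption modeled on Guava's Closer.

import java.util.ArrayDeque;
import java.util.Deque;

// Stand-in closer: collects resources and closes them in reverse
// registration order, suppressing secondary failures.
class SimpleCloser
        implements AutoCloseable
{
    private final Deque<AutoCloseable> resources = new ArrayDeque<>();

    // Returns its argument so registration can be inlined, as in
    // dataLake = closer.register(new Hive4MinioDataLake(bucketName)).
    public <T extends AutoCloseable> T register(T resource)
    {
        resources.addFirst(resource);
        return resource;
    }

    @Override
    public void close()
            throws Exception
    {
        Exception failure = null;
        for (AutoCloseable resource : resources) { // last registered closes first
            try {
                resource.close();
            }
            catch (Exception e) {
                if (failure == null) {
                    failure = e;
                }
                else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }

    public static void main(String[] args)
            throws Exception
    {
        SimpleCloser closer = new SimpleCloser();
        closer.register(() -> System.out.println("closing minio"));
        closer.register(() -> System.out.println("closing metastore"));
        closer.close(); // prints "closing metastore", then "closing minio"
    }
}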
