Remove supported statistics from metastore recording
No need to record that, since it's a pure local operation.
findepi committed Sep 21, 2022
1 parent 581842e commit 1115c56
Showing 3 changed files with 2 additions and 35 deletions.
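
Context for the change: HiveMetastoreRecording funnels metastore calls through a loadValue helper that either captures the live result into a per-call cache (recording mode) or serves it back from a previously saved recording (replay mode). The following is a minimal sketch of that pattern, assuming the record/replay semantics implied by the hunks below; it is not the verbatim Trino source.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.Optional;
import java.util.function.Supplier;

// Minimal sketch of the record/replay pattern behind loadValue; the names
// mirror the diff below, but the behavior is an assumption, not the source.
final class RecordReplayCache<K, V>
{
    private final Cache<K, V> cache = CacheBuilder.newBuilder().build();
    private final boolean replay;

    RecordReplayCache(boolean replay)
    {
        this.replay = replay;
    }

    V loadValue(K key, Supplier<V> valueSupplier)
    {
        if (replay) {
            // Replay mode: the value must already be in the recording; calling
            // the live metastore here would defeat the point of replaying.
            return Optional.ofNullable(cache.getIfPresent(key))
                    .orElseThrow(() -> new IllegalStateException("Missing entry for key: " + key));
        }
        // Recording mode: delegate to the live metastore and remember the
        // result so it can be serialized into the recording file later.
        V value = valueSupplier.get();
        cache.put(key, value);
        return value;
    }
}

Seen through that lens, recording getSupportedColumnStatistics only bloats the recording file: its answer is computed from the Type alone, so record and replay return the same value with or without a metastore.
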
HiveMetastoreRecording.java:

@@ -35,7 +35,6 @@
 import io.trino.plugin.hive.metastore.UserTableKey;
 import io.trino.spi.TrinoException;
 import io.trino.spi.security.RoleGrant;
-import io.trino.spi.statistics.ColumnStatisticType;
 import org.weakref.jmx.Managed;
 
 import javax.annotation.concurrent.Immutable;
@@ -70,7 +69,6 @@ public class HiveMetastoreRecording
     private volatile Optional<Set<String>> allRoles = Optional.empty();
     private final NonEvictableCache<String, Optional<Database>> databaseCache;
     private final NonEvictableCache<HiveTableName, Optional<Table>> tableCache;
-    private final NonEvictableCache<String, Set<ColumnStatisticType>> supportedColumnStatisticsCache;
     private final NonEvictableCache<HiveTableName, PartitionStatistics> tableStatisticsCache;
     private final NonEvictableCache<HivePartitionName, PartitionStatistics> partitionStatisticsCache;
     private final NonEvictableCache<String, List<String>> allTablesCache;
@@ -95,7 +93,6 @@ public HiveMetastoreRecording(RecordingMetastoreConfig config, JsonCodec<Recordi
         Duration recordingDuration = config.getRecordingDuration();
         databaseCache = createCache(replay, recordingDuration);
         tableCache = createCache(replay, recordingDuration);
-        supportedColumnStatisticsCache = createCache(replay, recordingDuration);
         tableStatisticsCache = createCache(replay, recordingDuration);
         partitionStatisticsCache = createCache(replay, recordingDuration);
         allTablesCache = createCache(replay, recordingDuration);
@@ -127,7 +124,6 @@ void loadRecording()
         allRoles = recording.getAllRoles();
         databaseCache.putAll(toMap(recording.getDatabases()));
         tableCache.putAll(toMap(recording.getTables()));
-        supportedColumnStatisticsCache.putAll(toMap(recording.getSupportedColumnStatistics()));
         tableStatisticsCache.putAll(toMap(recording.getTableStatistics()));
         partitionStatisticsCache.putAll(toMap(recording.getPartitionStatistics()));
         allTablesCache.putAll(toMap(recording.getAllTables()));
@@ -168,11 +164,6 @@ public Optional<Table> getTable(HiveTableName hiveTableName, Supplier<Optional<T
         return loadValue(tableCache, hiveTableName, valueSupplier);
     }
 
-    public Set<ColumnStatisticType> getSupportedColumnStatistics(String type, Supplier<Set<ColumnStatisticType>> valueSupplier)
-    {
-        return loadValue(supportedColumnStatisticsCache, type, valueSupplier);
-    }
-
     public PartitionStatistics getTableStatistics(HiveTableName hiveTableName, Supplier<PartitionStatistics> valueSupplier)
     {
         return loadValue(tableStatisticsCache, hiveTableName, valueSupplier);
@@ -262,7 +253,6 @@ public void writeRecording()
                 allRoles,
                 toPairs(databaseCache),
                 toPairs(tableCache),
-                toPairs(supportedColumnStatisticsCache),
                 toPairs(tableStatisticsCache),
                 toPairs(partitionStatisticsCache),
                 toPairs(allTablesCache),
@@ -331,7 +321,6 @@ public static class Recording
         private final Optional<Set<String>> allRoles;
         private final List<Pair<String, Optional<Database>>> databases;
         private final List<Pair<HiveTableName, Optional<Table>>> tables;
-        private final List<Pair<String, Set<ColumnStatisticType>>> supportedColumnStatistics;
         private final List<Pair<HiveTableName, PartitionStatistics>> tableStatistics;
         private final List<Pair<HivePartitionName, PartitionStatistics>> partitionStatistics;
         private final List<Pair<String, List<String>>> allTables;
@@ -351,7 +340,6 @@ public Recording(
                 @JsonProperty("allRoles") Optional<Set<String>> allRoles,
                 @JsonProperty("databases") List<Pair<String, Optional<Database>>> databases,
                 @JsonProperty("tables") List<Pair<HiveTableName, Optional<Table>>> tables,
-                @JsonProperty("supportedColumnStatistics") List<Pair<String, Set<ColumnStatisticType>>> supportedColumnStatistics,
                 @JsonProperty("tableStatistics") List<Pair<HiveTableName, PartitionStatistics>> tableStatistics,
                 @JsonProperty("partitionStatistics") List<Pair<HivePartitionName, PartitionStatistics>> partitionStatistics,
                 @JsonProperty("allTables") List<Pair<String, List<String>>> allTables,
@@ -369,7 +357,6 @@ public Recording(
             this.allRoles = allRoles;
             this.databases = databases;
             this.tables = tables;
-            this.supportedColumnStatistics = supportedColumnStatistics;
             this.tableStatistics = tableStatistics;
             this.partitionStatistics = partitionStatistics;
             this.allTables = allTables;
@@ -414,12 +401,6 @@ public List<Pair<TablesWithParameterCacheKey, List<String>>> getTablesWithParame
             return tablesWithParameter;
         }
 
-        @JsonProperty
-        public List<Pair<String, Set<ColumnStatisticType>>> getSupportedColumnStatistics()
-        {
-            return supportedColumnStatistics;
-        }
-
         @JsonProperty
         public List<Pair<HiveTableName, PartitionStatistics>> getTableStatistics()
         {

RecordingHiveMetastore.java:

@@ -78,7 +78,8 @@ public Optional<Table> getTable(String databaseName, String tableName)
     @Override
     public Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
     {
-        return recording.getSupportedColumnStatistics(type.getTypeSignature().toString(), () -> delegate.getSupportedColumnStatistics(type));
+        // No need to record that, since it's a pure local operation.
+        return delegate.getSupportedColumnStatistics(type);
     }
 
     @Override
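
Why it is safe to drop the recording here: the supported statistics are derived from the passed Type alone, with no thrift RPC or other I/O, so the result is deterministic and available in replay mode without ever having been recorded. Below is a hypothetical illustration of such a pure function; the real derivation lives elsewhere in the metastore code and is more detailed.

import io.trino.spi.statistics.ColumnStatisticType;
import io.trino.spi.type.Type;

import java.util.EnumSet;
import java.util.Set;

import static io.trino.spi.statistics.ColumnStatisticType.MAX_VALUE;
import static io.trino.spi.statistics.ColumnStatisticType.MIN_VALUE;
import static io.trino.spi.statistics.ColumnStatisticType.NUMBER_OF_NON_NULL_VALUES;

final class SupportedStatistics
{
    private SupportedStatistics() {}

    // Hypothetical rule for illustration only: everything is computed from
    // the Type object itself, so no metastore round trip is involved.
    static Set<ColumnStatisticType> supportedColumnStatistics(Type type)
    {
        Set<ColumnStatisticType> result = EnumSet.of(NUMBER_OF_NON_NULL_VALUES);
        if (type.isOrderable()) {
            result.add(MIN_VALUE);
            result.add(MAX_VALUE);
        }
        return result;
    }
}

The remaining metastore calls stay recorded because their answers do depend on remote state.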

TestRecordingHiveMetastore.java:

@@ -49,7 +49,6 @@
 import io.trino.spi.predicate.TupleDomain;
 import io.trino.spi.security.RoleGrant;
 import io.trino.spi.security.TrinoPrincipal;
-import io.trino.spi.statistics.ColumnStatisticType;
 import io.trino.spi.type.TestingTypeManager;
 import io.trino.spi.type.Type;
 import org.testng.annotations.Test;
@@ -66,10 +65,7 @@
 import static io.trino.plugin.hive.HiveBasicStatistics.createEmptyStatistics;
 import static io.trino.plugin.hive.util.HiveBucketing.BucketingVersion.BUCKETING_V1;
 import static io.trino.spi.security.PrincipalType.USER;
-import static io.trino.spi.statistics.ColumnStatisticType.MAX_VALUE;
-import static io.trino.spi.statistics.ColumnStatisticType.MIN_VALUE;
 import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;
-import static io.trino.spi.type.VarcharType.createVarcharType;
 import static org.testng.Assert.assertEquals;
 
 public class TestRecordingHiveMetastore
@@ -181,7 +177,6 @@ private void validateMetadata(HiveMetastore hiveMetastore)
         assertEquals(hiveMetastore.getDatabase("database"), Optional.of(DATABASE));
         assertEquals(hiveMetastore.getAllDatabases(), ImmutableList.of("database"));
         assertEquals(hiveMetastore.getTable("database", "table"), Optional.of(TABLE));
-        assertEquals(hiveMetastore.getSupportedColumnStatistics(createVarcharType(123)), ImmutableSet.of(MIN_VALUE, MAX_VALUE));
         assertEquals(hiveMetastore.getTableStatistics(TABLE), PARTITION_STATISTICS);
         assertEquals(hiveMetastore.getPartitionStatistics(TABLE, ImmutableList.of(PARTITION, OTHER_PARTITION)), ImmutableMap.of(
                 "column=value", PARTITION_STATISTICS,
@@ -238,16 +233,6 @@ public Optional<Table> getTable(String databaseName, String tableName)
             return Optional.empty();
         }
 
-        @Override
-        public Set<ColumnStatisticType> getSupportedColumnStatistics(Type type)
-        {
-            if (type.equals(createVarcharType(123))) {
-                return ImmutableSet.of(MIN_VALUE, MAX_VALUE);
-            }
-
-            return ImmutableSet.of();
-        }
-
         @Override
         public PartitionStatistics getTableStatistics(Table table)
         {
