From c3d1365aeb4cfbd87b817ca0bb0fec04453735f5 Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Sat, 10 Dec 2022 17:16:07 +0530 Subject: [PATCH 1/2] Revert "Add a builder for IcebergTableHandle" This reverts commit 34efd535573f89928405e4868b6eb23457b8cd58. --- .../trino/plugin/iceberg/IcebergMetadata.java | 102 +++--- .../plugin/iceberg/IcebergTableHandle.java | 306 ++++++------------ ...stIcebergNodeLocalDynamicSplitPruning.java | 30 +- .../TestIcebergProjectionPushdownPlans.java | 8 +- .../iceberg/TestIcebergSplitSource.java | 30 +- ...TestConnectorPushdownRulesWithIceberg.java | 136 ++++---- 6 files changed, 288 insertions(+), 324 deletions(-) diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java index 49b352865ce2..e12f937af007 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java @@ -369,18 +369,24 @@ public IcebergTableHandle getTableHandle( Map tableProperties = table.properties(); String nameMappingJson = tableProperties.get(TableProperties.DEFAULT_NAME_MAPPING); - return IcebergTableHandle.builder() - .withSchemaName(tableName.getSchemaName()) - .withTableName(name.getTableName()) - .withTableType(name.getTableType()) - .withSnapshotId(tableSnapshotId) - .withTableSchemaJson(SchemaParser.toJson(tableSchema)) - .withPartitionSpecJson(partitionSpec.map(PartitionSpecParser::toJson)) - .withFormatVersion(table.operations().current().formatVersion()) - .withNameMappingJson(Optional.ofNullable(nameMappingJson)) - .withTableLocation(table.location()) - .withStorageProperties(table.properties()) - .build(); + return new IcebergTableHandle( + tableName.getSchemaName(), + name.getTableName(), + name.getTableType(), + tableSnapshotId, + SchemaParser.toJson(tableSchema), + partitionSpec.map(PartitionSpecParser::toJson), + table.operations().current().formatVersion(), + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.ofNullable(nameMappingJson), + table.location(), + table.properties(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); } private static long getSnapshotIdFromVersion(Table table, ConnectorTableVersion version) @@ -1076,10 +1082,7 @@ private BeginTableExecuteResult( executeHandle, - IcebergTableHandle.buildFrom(table) - .withRecordScannedFiles(true) - .withMaxScannedFileSize(Optional.of(optimizeHandle.getMaxScannedFileSize())) - .build()); + table.forOptimize(true, optimizeHandle.getMaxScannedFileSize())); } @Override @@ -1690,9 +1693,7 @@ public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTable validateNotPartitionedByNestedField(icebergTable.schema(), icebergTable.spec()); beginTransaction(icebergTable); - return IcebergTableHandle.buildFrom(table) - .withRetryMode(retryMode) - .build(); + return table.withRetryMode(retryMode); } @Override @@ -1718,12 +1719,10 @@ public ConnectorTableHandle beginUpdate(ConnectorSession session, ConnectorTable validateNotPartitionedByNestedField(icebergTable.schema(), icebergTable.spec()); beginTransaction(icebergTable); - return IcebergTableHandle.buildFrom(table) - .withRetryMode(retryMode) + return table.withRetryMode(retryMode) .withUpdatedColumns(updatedColumns.stream() .map(IcebergColumnHandle.class::cast) - .collect(toImmutableList())) - .build(); + .collect(toImmutableList())); } @Override @@ -1793,9 +1792,7 @@ public 
ConnectorMergeTableHandle beginMerge(ConnectorSession session, ConnectorT beginTransaction(icebergTable); - IcebergTableHandle newTableHandle = IcebergTableHandle.buildFrom(table) - .withRetryMode(retryMode) - .build(); + IcebergTableHandle newTableHandle = table.withRetryMode(retryMode); IcebergWritableTableHandle insertHandle = newWritableTableHandle(table.getSchemaTableName(), icebergTable, retryMode); return new IcebergMergeTableHandle(newTableHandle, insertHandle); @@ -2117,11 +2114,26 @@ else if (isMetadataColumnId(columnHandle.getId())) { && newUnenforcedConstraint.equals(table.getUnenforcedPredicate())) { return Optional.empty(); } + return Optional.of(new ConstraintApplicationResult<>( - IcebergTableHandle.buildFrom(table) - .withUnenforcedPredicate(newUnenforcedConstraint) - .withEnforcedPredicate(newEnforcedConstraint) - .build(), + new IcebergTableHandle( + table.getSchemaName(), + table.getTableName(), + table.getTableType(), + table.getSnapshotId(), + table.getTableSchemaJson(), + table.getPartitionSpecJson(), + table.getFormatVersion(), + newUnenforcedConstraint, + newEnforcedConstraint, + table.getProjectedColumns(), + table.getNameMappingJson(), + table.getTableLocation(), + table.getStorageProperties(), + table.getRetryMode(), + table.getUpdatedColumns(), + table.isRecordScannedFiles(), + table.getMaxScannedFileSize()), remainingConstraint.transformKeys(ColumnHandle.class::cast), extractionResult.remainingExpression(), false)); @@ -2175,9 +2187,7 @@ public Optional> applyProjecti .collect(toImmutableList()); return Optional.of(new ProjectionApplicationResult<>( - IcebergTableHandle.buildFrom(icebergTableHandle) - .withProjectedColumns(projectedColumns) - .build(), + icebergTableHandle.withProjectedColumns(projectedColumns), projections, assignmentsList, false)); @@ -2211,9 +2221,7 @@ public Optional> applyProjecti List outputAssignments = ImmutableList.copyOf(newAssignments.values()); return Optional.of(new ProjectionApplicationResult<>( - IcebergTableHandle.buildFrom(icebergTableHandle) - .withProjectedColumns(projectedColumnsBuilder.build()) - .build(), + icebergTableHandle.withProjectedColumns(projectedColumnsBuilder.build()), newProjections, outputAssignments, false)); @@ -2257,10 +2265,24 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab checkArgument(originalHandle.getMaxScannedFileSize().isEmpty(), "Unexpected max scanned file size set"); return tableStatisticsCache.computeIfAbsent( - IcebergTableHandle.buildFrom(originalHandle) - .withProjectedColumns(ImmutableSet.of()) // projectedColumns don't affect stats - .withRetryMode(NO_RETRIES) // retry mode doesn't affect stats - .build(), + new IcebergTableHandle( + originalHandle.getSchemaName(), + originalHandle.getTableName(), + originalHandle.getTableType(), + originalHandle.getSnapshotId(), + originalHandle.getTableSchemaJson(), + originalHandle.getPartitionSpecJson(), + originalHandle.getFormatVersion(), + originalHandle.getUnenforcedPredicate(), + originalHandle.getEnforcedPredicate(), + ImmutableSet.of(), // projectedColumns don't affect stats + originalHandle.getNameMappingJson(), + originalHandle.getTableLocation(), + originalHandle.getStorageProperties(), + NO_RETRIES, // retry mode doesn't affect stats + originalHandle.getUpdatedColumns(), + originalHandle.isRecordScannedFiles(), + originalHandle.getMaxScannedFileSize()), handle -> { Table icebergTable = catalog.loadTable(session, handle.getSchemaTableName()); return TableStatisticsMaker.getTableStatistics(typeManager, 
session, handle, icebergTable); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableHandle.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableHandle.java index fe89301c8b41..8cd1d3f58299 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableHandle.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableHandle.java @@ -32,7 +32,6 @@ import java.util.Optional; import java.util.Set; -import static java.util.Collections.emptyMap; import static java.util.Objects.requireNonNull; import static java.util.stream.Collectors.joining; @@ -85,26 +84,27 @@ public static IcebergTableHandle fromJsonForDeserializationOnly( @JsonProperty("retryMode") RetryMode retryMode, @JsonProperty("updatedColumns") List updatedColumns) { - return builder() - .withSchemaName(schemaName) - .withTableName(tableName) - .withTableType(tableType) - .withSnapshotId(snapshotId) - .withTableSchemaJson(tableSchemaJson) - .withPartitionSpecJson(partitionSpecJson) - .withFormatVersion(formatVersion) - .withUnenforcedPredicate(unenforcedPredicate) - .withEnforcedPredicate(enforcedPredicate) - .withProjectedColumns(projectedColumns) - .withNameMappingJson(nameMappingJson) - .withTableLocation(tableLocation) - .withStorageProperties(storageProperties) - .withRetryMode(retryMode) - .withUpdatedColumns(updatedColumns) - .build(); + return new IcebergTableHandle( + schemaName, + tableName, + tableType, + snapshotId, + tableSchemaJson, + partitionSpecJson, + formatVersion, + unenforcedPredicate, + enforcedPredicate, + projectedColumns, + nameMappingJson, + tableLocation, + storageProperties, + retryMode, + updatedColumns, + false, + Optional.empty()); } - private IcebergTableHandle( + public IcebergTableHandle( String schemaName, String tableName, TableType tableType, @@ -254,6 +254,94 @@ public SchemaTableName getSchemaTableNameWithType() return new SchemaTableName(schemaName, tableName + "$" + tableType.name().toLowerCase(Locale.ROOT)); } + public IcebergTableHandle withProjectedColumns(Set projectedColumns) + { + return new IcebergTableHandle( + schemaName, + tableName, + tableType, + snapshotId, + tableSchemaJson, + partitionSpecJson, + formatVersion, + unenforcedPredicate, + enforcedPredicate, + projectedColumns, + nameMappingJson, + tableLocation, + storageProperties, + retryMode, + updatedColumns, + recordScannedFiles, + maxScannedFileSize); + } + + public IcebergTableHandle withRetryMode(RetryMode retryMode) + { + return new IcebergTableHandle( + schemaName, + tableName, + tableType, + snapshotId, + tableSchemaJson, + partitionSpecJson, + formatVersion, + unenforcedPredicate, + enforcedPredicate, + projectedColumns, + nameMappingJson, + tableLocation, + storageProperties, + retryMode, + updatedColumns, + recordScannedFiles, + maxScannedFileSize); + } + + public IcebergTableHandle withUpdatedColumns(List updatedColumns) + { + return new IcebergTableHandle( + schemaName, + tableName, + tableType, + snapshotId, + tableSchemaJson, + partitionSpecJson, + formatVersion, + unenforcedPredicate, + enforcedPredicate, + projectedColumns, + nameMappingJson, + tableLocation, + storageProperties, + retryMode, + updatedColumns, + recordScannedFiles, + maxScannedFileSize); + } + + public IcebergTableHandle forOptimize(boolean recordScannedFiles, DataSize maxScannedFileSize) + { + return new IcebergTableHandle( + schemaName, + tableName, + tableType, + snapshotId, + tableSchemaJson, + partitionSpecJson, + formatVersion, + 
unenforcedPredicate, + enforcedPredicate, + projectedColumns, + nameMappingJson, + tableLocation, + storageProperties, + retryMode, + updatedColumns, + recordScannedFiles, + Optional.of(maxScannedFileSize)); + } + @Override public boolean equals(Object o) { @@ -307,184 +395,4 @@ else if (!enforcedPredicate.isAll()) { } return builder.toString(); } - - public static Builder builder() - { - return new Builder(); - } - - public static Builder buildFrom(IcebergTableHandle table) - { - return new Builder(table); - } - - public static class Builder - { - private String schemaName; - private String tableName; - private TableType tableType; - private Optional snapshotId = Optional.empty(); - private String tableSchemaJson; - private Optional partitionSpecJson = Optional.empty(); - private int formatVersion; - private String tableLocation; - private Map storageProperties = emptyMap(); - private RetryMode retryMode = RetryMode.NO_RETRIES; - private List updatedColumns = ImmutableList.of(); - private TupleDomain unenforcedPredicate = TupleDomain.all(); - private TupleDomain enforcedPredicate = TupleDomain.all(); - private Set projectedColumns = ImmutableSet.of(); - private Optional nameMappingJson = Optional.empty(); - private boolean recordScannedFiles; - private Optional maxScannedFileSize = Optional.empty(); - - private Builder() - { - } - - private Builder(IcebergTableHandle table) - { - this.schemaName = table.schemaName; - this.tableName = table.tableName; - this.tableType = table.tableType; - this.snapshotId = table.snapshotId; - this.tableSchemaJson = table.tableSchemaJson; - this.partitionSpecJson = table.partitionSpecJson; - this.formatVersion = table.formatVersion; - this.tableLocation = table.tableLocation; - this.storageProperties = table.storageProperties; - this.retryMode = table.retryMode; - this.updatedColumns = table.updatedColumns; - this.unenforcedPredicate = table.unenforcedPredicate; - this.enforcedPredicate = table.enforcedPredicate; - this.projectedColumns = table.projectedColumns; - this.nameMappingJson = table.nameMappingJson; - this.recordScannedFiles = table.recordScannedFiles; - this.maxScannedFileSize = table.maxScannedFileSize; - } - - public Builder withSchemaName(String schemaName) - { - this.schemaName = schemaName; - return this; - } - - public Builder withTableName(String tableName) - { - this.tableName = tableName; - return this; - } - - public Builder withTableType(TableType tableType) - { - this.tableType = tableType; - return this; - } - - public Builder withSnapshotId(Optional snapshotId) - { - this.snapshotId = snapshotId; - return this; - } - - public Builder withTableSchemaJson(String tableSchemaJson) - { - this.tableSchemaJson = tableSchemaJson; - return this; - } - - public Builder withPartitionSpecJson(Optional partitionSpecJson) - { - this.partitionSpecJson = partitionSpecJson; - return this; - } - - public Builder withFormatVersion(int formatVersion) - { - this.formatVersion = formatVersion; - return this; - } - - public Builder withTableLocation(String tableLocation) - { - this.tableLocation = tableLocation; - return this; - } - - public Builder withStorageProperties(Map storageProperties) - { - this.storageProperties = storageProperties; - return this; - } - - public Builder withRetryMode(RetryMode retryMode) - { - this.retryMode = retryMode; - return this; - } - - public Builder withUpdatedColumns(List updatedColumns) - { - this.updatedColumns = updatedColumns; - return this; - } - - public Builder withUnenforcedPredicate(TupleDomain 
unenforcedPredicate) - { - this.unenforcedPredicate = unenforcedPredicate; - return this; - } - - public Builder withEnforcedPredicate(TupleDomain enforcedPredicate) - { - this.enforcedPredicate = enforcedPredicate; - return this; - } - - public Builder withProjectedColumns(Set projectedColumns) - { - this.projectedColumns = projectedColumns; - return this; - } - - public Builder withNameMappingJson(Optional nameMappingJson) - { - this.nameMappingJson = nameMappingJson; - return this; - } - - public Builder withRecordScannedFiles(boolean recordScannedFiles) - { - this.recordScannedFiles = recordScannedFiles; - return this; - } - - public Builder withMaxScannedFileSize(Optional maxScannedFileSize) - { - this.maxScannedFileSize = maxScannedFileSize; - return this; - } - - public IcebergTableHandle build() - { - return new IcebergTableHandle( - schemaName, - tableName, - tableType, - snapshotId, - tableSchemaJson, - partitionSpecJson, - formatVersion, - unenforcedPredicate, - enforcedPredicate, - projectedColumns, - nameMappingJson, - tableLocation, - storageProperties, - retryMode, - updatedColumns, - recordScannedFiles, - maxScannedFileSize); - } - } } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergNodeLocalDynamicSplitPruning.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergNodeLocalDynamicSplitPruning.java index e818e6cddcce..7b45bd5dad50 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergNodeLocalDynamicSplitPruning.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergNodeLocalDynamicSplitPruning.java @@ -42,6 +42,7 @@ import io.trino.spi.connector.ColumnHandle; import io.trino.spi.connector.ConnectorPageSource; import io.trino.spi.connector.DynamicFilter; +import io.trino.spi.connector.RetryMode; import io.trino.spi.predicate.Domain; import io.trino.spi.predicate.TupleDomain; import io.trino.spi.type.Type; @@ -178,17 +179,24 @@ private static ConnectorPageSource createTestingPageSource(HiveTransactionHandle String tablePath = filePath.substring(0, filePath.lastIndexOf("/")); TableHandle tableHandle = new TableHandle( TEST_CATALOG_HANDLE, - IcebergTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(TABLE_NAME) - .withTableType(TableType.DATA) - .withTableSchemaJson(SchemaParser.toJson(TABLE_SCHEMA)) - .withPartitionSpecJson(Optional.of(PartitionSpecParser.toJson(PartitionSpec.unpartitioned()))) - .withFormatVersion(2) - .withUnenforcedPredicate(TupleDomain.withColumnDomains(ImmutableMap.of(KEY_ICEBERG_COLUMN_HANDLE, Domain.singleValue(INTEGER, (long) KEY_COLUMN_VALUE)))) - .withProjectedColumns(ImmutableSet.of(KEY_ICEBERG_COLUMN_HANDLE)) - .withTableLocation(tablePath) - .build(), + new IcebergTableHandle( + SCHEMA_NAME, + TABLE_NAME, + TableType.DATA, + Optional.empty(), + SchemaParser.toJson(TABLE_SCHEMA), + Optional.of(PartitionSpecParser.toJson(PartitionSpec.unpartitioned())), + 2, + TupleDomain.withColumnDomains(ImmutableMap.of(KEY_ICEBERG_COLUMN_HANDLE, Domain.singleValue(INTEGER, (long) KEY_COLUMN_VALUE))), + TupleDomain.all(), + ImmutableSet.of(KEY_ICEBERG_COLUMN_HANDLE), + Optional.empty(), + tablePath, + ImmutableMap.of(), + RetryMode.NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()), transaction); FileFormatDataSourceStats stats = new FileFormatDataSourceStats(); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java 
b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java index 0d86d5bc4c58..59eefbdb779b 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java @@ -166,9 +166,7 @@ public void testDereferencePushdown() assertPlan( "SELECT col0.x expr_x, col0.y expr_y FROM " + testTable, any(tableScan( - equalTo(IcebergTableHandle.buildFrom((IcebergTableHandle) tableHandle.get().getConnectorHandle()) - .withProjectedColumns(Set.of(columnX, columnY)) - .build()), + equalTo(((IcebergTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(Set.of(columnX, columnY))), TupleDomain.all(), ImmutableMap.of("col0#x", equalTo(columnX), "col0#y", equalTo(columnY))))); @@ -234,9 +232,7 @@ public void testDereferencePushdown() .right( anyTree( tableScan( - equalTo(IcebergTableHandle.buildFrom((IcebergTableHandle) tableHandle.get().getConnectorHandle()) - .withProjectedColumns(Set.of(column1Handle)) - .build()), + equalTo(((IcebergTableHandle) tableHandle.get().getConnectorHandle()).withProjectedColumns(Set.of(column1Handle))), TupleDomain.all(), ImmutableMap.of("s_expr_1", equalTo(column1Handle))))))))); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java index bb8c9651cd30..fa9dd805ebf6 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java @@ -62,8 +62,8 @@ import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT; import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.memoizeMetastore; import static io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore; -import static io.trino.plugin.iceberg.TableType.DATA; import static io.trino.spi.connector.Constraint.alwaysTrue; +import static io.trino.spi.connector.RetryMode.NO_RETRIES; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.testing.TestingConnectorSession.SESSION; import static io.trino.tpch.TpchTable.NATION; @@ -118,16 +118,24 @@ public void testIncompleteDynamicFilterTimeout() long startMillis = System.currentTimeMillis(); SchemaTableName schemaTableName = new SchemaTableName("tpch", "nation"); Table nationTable = catalog.loadTable(SESSION, schemaTableName); - IcebergTableHandle tableHandle = IcebergTableHandle.builder() - .withSchemaName(schemaTableName.getSchemaName()) - .withTableName(schemaTableName.getTableName()) - .withTableType(DATA) - .withTableSchemaJson(SchemaParser.toJson(nationTable.schema())) - .withPartitionSpecJson(Optional.of(PartitionSpecParser.toJson(nationTable.spec()))) - .withFormatVersion(1) - .withTableLocation(nationTable.location()) - .withStorageProperties(nationTable.properties()) - .build(); + IcebergTableHandle tableHandle = new IcebergTableHandle( + schemaTableName.getSchemaName(), + schemaTableName.getTableName(), + TableType.DATA, + Optional.empty(), + SchemaParser.toJson(nationTable.schema()), + Optional.of(PartitionSpecParser.toJson(nationTable.spec())), + 1, + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.empty(), + nationTable.location(), + nationTable.properties(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); 
try (IcebergSplitSource splitSource = new IcebergSplitSource( HDFS_ENVIRONMENT, diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/optimizer/TestConnectorPushdownRulesWithIceberg.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/optimizer/TestConnectorPushdownRulesWithIceberg.java index 295fb409cd41..b0202f59aa7a 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/optimizer/TestConnectorPushdownRulesWithIceberg.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/optimizer/TestConnectorPushdownRulesWithIceberg.java @@ -72,6 +72,7 @@ import static io.trino.plugin.iceberg.ColumnIdentity.primitiveColumnIdentity; import static io.trino.plugin.iceberg.TableType.DATA; import static io.trino.plugin.iceberg.catalog.hms.IcebergHiveMetastoreCatalogModule.HIDE_DELTA_LAKE_TABLES_IN_ICEBERG; +import static io.trino.spi.connector.RetryMode.NO_RETRIES; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.IntegerType.INTEGER; import static io.trino.spi.type.RowType.field; @@ -165,16 +166,24 @@ public void testProjectionPushdown() BIGINT, Optional.empty()); - IcebergTableHandle icebergTable = IcebergTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .withTableType(DATA) - .withSnapshotId(Optional.of(1L)) - .withTableSchemaJson("") - .withPartitionSpecJson(Optional.of("")) - .withFormatVersion(1) - .withTableLocation("") - .build(); + IcebergTableHandle icebergTable = new IcebergTableHandle( + SCHEMA_NAME, + tableName, + DATA, + Optional.of(1L), + "", + Optional.of(""), + 1, + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.empty(), + "", + ImmutableMap.of(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, icebergTable, new HiveTransactionHandle(false)); IcebergColumnHandle fullColumn = partialColumn.getBaseColumn(); @@ -192,9 +201,7 @@ public void testProjectionPushdown() project( ImmutableMap.of("expr", expression("col")), tableScan( - IcebergTableHandle.buildFrom(icebergTable) - .withProjectedColumns(ImmutableSet.of(fullColumn)) - .build()::equals, + icebergTable.withProjectedColumns(ImmutableSet.of(fullColumn))::equals, TupleDomain.all(), ImmutableMap.of("col", fullColumn::equals)))); @@ -206,9 +213,7 @@ public void testProjectionPushdown() p.tableScan( new TableHandle( TEST_CATALOG_HANDLE, - IcebergTableHandle.buildFrom(icebergTable) - .withProjectedColumns(ImmutableSet.of(fullColumn)) - .build(), + icebergTable.withProjectedColumns(ImmutableSet.of(fullColumn)), new HiveTransactionHandle(false)), ImmutableList.of(p.symbol("struct_of_int", baseType)), ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn)))) @@ -227,9 +232,7 @@ public void testProjectionPushdown() .matches(project( ImmutableMap.of("expr_deref", expression(new SymbolReference("struct_of_int#a"))), tableScan( - IcebergTableHandle.buildFrom(icebergTable) - .withProjectedColumns(ImmutableSet.of(partialColumn)) - .build()::equals, + icebergTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals, TupleDomain.all(), ImmutableMap.of("struct_of_int#a", partialColumn::equals)))); @@ -245,16 +248,24 @@ public void testPredicatePushdown() PushPredicateIntoTableScan pushPredicateIntoTableScan = new PushPredicateIntoTableScan(tester().getPlannerContext(), tester().getTypeAnalyzer()); - IcebergTableHandle icebergTable = IcebergTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - 
.withTableName(tableName) - .withTableType(DATA) - .withSnapshotId(Optional.of(snapshotId)) - .withTableSchemaJson("") - .withPartitionSpecJson(Optional.of("")) - .withFormatVersion(1) - .withTableLocation("") - .build(); + IcebergTableHandle icebergTable = new IcebergTableHandle( + SCHEMA_NAME, + tableName, + DATA, + Optional.of(snapshotId), + "", + Optional.of(""), + 1, + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.empty(), + "", + ImmutableMap.of(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, icebergTable, new HiveTransactionHandle(false)); IcebergColumnHandle column = new IcebergColumnHandle(primitiveColumnIdentity(1, "a"), INTEGER, ImmutableList.of(), INTEGER, Optional.empty()); @@ -286,15 +297,24 @@ public void testColumnPruningProjectionPushdown() PruneTableScanColumns pruneTableScanColumns = new PruneTableScanColumns(tester().getMetadata()); - IcebergTableHandle icebergTable = IcebergTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .withTableType(DATA) - .withTableSchemaJson("") - .withPartitionSpecJson(Optional.of("")) - .withFormatVersion(1) - .withTableLocation("") - .build(); + IcebergTableHandle icebergTable = new IcebergTableHandle( + SCHEMA_NAME, + tableName, + DATA, + Optional.empty(), + "", + Optional.of(""), + 1, + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.empty(), + "", + ImmutableMap.of(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, icebergTable, new HiveTransactionHandle(false)); IcebergColumnHandle columnA = new IcebergColumnHandle(primitiveColumnIdentity(0, "a"), INTEGER, ImmutableList.of(), INTEGER, Optional.empty()); @@ -317,9 +337,7 @@ public void testColumnPruningProjectionPushdown() strictProject( ImmutableMap.of("expr", expression("COLA")), tableScan( - IcebergTableHandle.buildFrom(icebergTable) - .withProjectedColumns(ImmutableSet.of(columnA)) - .build()::equals, + icebergTable.withProjectedColumns(ImmutableSet.of(columnA))::equals, TupleDomain.all(), ImmutableMap.of("COLA", columnA::equals)))); @@ -339,16 +357,24 @@ public void testPushdownWithDuplicateExpressions() tester().getTypeAnalyzer(), new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer())); - IcebergTableHandle icebergTable = IcebergTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .withTableType(DATA) - .withSnapshotId(Optional.of(1L)) - .withTableSchemaJson("") - .withPartitionSpecJson(Optional.of("")) - .withFormatVersion(1) - .withTableLocation("") - .build(); + IcebergTableHandle icebergTable = new IcebergTableHandle( + SCHEMA_NAME, + tableName, + DATA, + Optional.of(1L), + "", + Optional.of(""), + 1, + TupleDomain.all(), + TupleDomain.all(), + ImmutableSet.of(), + Optional.empty(), + "", + ImmutableMap.of(), + NO_RETRIES, + ImmutableList.of(), + false, + Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, icebergTable, new HiveTransactionHandle(false)); IcebergColumnHandle bigintColumn = new IcebergColumnHandle(primitiveColumnIdentity(1, "just_bigint"), BIGINT, ImmutableList.of(), BIGINT, Optional.empty()); @@ -379,9 +405,7 @@ public void testPushdownWithDuplicateExpressions() "column_ref", expression("just_bigint_0"), "negated_column_ref", expression("- just_bigint_0")), tableScan( - IcebergTableHandle.buildFrom(icebergTable) - 
.withProjectedColumns(ImmutableSet.of(bigintColumn)) - .build()::equals, + icebergTable.withProjectedColumns(ImmutableSet.of(bigintColumn))::equals, TupleDomain.all(), ImmutableMap.of("just_bigint_0", bigintColumn::equals)))); @@ -405,9 +429,7 @@ public void testPushdownWithDuplicateExpressions() "expr_deref", expression(new SymbolReference("struct_of_bigint#a")), "expr_deref_2", expression(new ArithmeticBinaryExpression(ADD, new SymbolReference("struct_of_bigint#a"), new LongLiteral("2")))), tableScan( - IcebergTableHandle.buildFrom(icebergTable) - .withProjectedColumns(ImmutableSet.of(partialColumn)) - .build()::equals, + icebergTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals, TupleDomain.all(), ImmutableMap.of("struct_of_bigint#a", partialColumn::equals)))); From 36f079d1cb1303251b40c431a895c7d01989a3d9 Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Sat, 10 Dec 2022 17:16:11 +0530 Subject: [PATCH 2/2] Revert "Add a builder for HiveTableHandle" This reverts commit 3350aa808316408aec382bfdc93636da546ff843. --- .../io/trino/plugin/hive/HiveMetadata.java | 71 ++-- .../plugin/hive/HivePartitionManager.java | 28 +- .../io/trino/plugin/hive/HiveTableHandle.java | 356 ++++++++---------- .../hive/PartitionsSystemTableProvider.java | 16 +- .../trino/plugin/hive/AbstractTestHive.java | 9 +- .../trino/plugin/hive/TestHivePageSink.java | 5 +- .../plugin/hive/TestHiveTableHandle.java | 9 +- .../TestNodeLocalDynamicSplitPruning.java | 15 +- .../hive/benchmark/AbstractFileFormat.java | 9 +- .../TestConnectorPushdownRulesWithHive.java | 44 +-- ...stHiveProjectionPushdownIntoTableScan.java | 10 +- 11 files changed, 252 insertions(+), 320 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java index b6eeb44d7226..f041f9ba2dce 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java @@ -482,14 +482,13 @@ public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName verifyOnline(tableName, Optional.empty(), getProtectMode(table), table.getParameters()); - return HiveTableHandle.builder() - .withSchemaName(tableName.getSchemaName()) - .withTableName(tableName.getTableName()) - .withTableParameters(Optional.of(table.getParameters())) - .withPartitionColumns(getPartitionKeyColumnHandles(table, typeManager)) - .withDataColumns(getRegularColumnHandles(table, typeManager, getTimestampPrecision(session))) - .withBucketHandle(getHiveBucketHandle(session, table, typeManager)) - .build(); + return new HiveTableHandle( + tableName.getSchemaName(), + tableName.getTableName(), + table.getParameters(), + getPartitionKeyColumnHandles(table, typeManager), + getRegularColumnHandles(table, typeManager, getTimestampPrecision(session)), + getHiveBucketHandle(session, table, typeManager)); } @Override @@ -515,9 +514,7 @@ public ConnectorAnalyzeMetadata getStatisticsCollectionMetadata(ConnectorSession } } - handle = HiveTableHandle.buildFrom(handle) - .withAnalyzePartitionValues(Optional.of(list)) - .build(); + handle = handle.withAnalyzePartitionValues(list); HivePartitionResult partitions = partitionManager.getPartitions(handle, list); handle = partitionManager.applyPartitionResult(handle, partitions, alwaysTrue()); } @@ -1889,9 +1886,7 @@ public ConnectorTableHandle beginUpdate(ConnectorSession session, ConnectorTable HiveUpdateProcessor updateProcessor = new 
HiveUpdateProcessor(allDataColumns, hiveUpdatedColumns); AcidTransaction transaction = metastore.beginUpdate(session, table, updateProcessor); - HiveTableHandle updateHandle = HiveTableHandle.buildFrom(hiveTableHandle) - .withTransaction(transaction) - .build(); + HiveTableHandle updateHandle = hiveTableHandle.withTransaction(transaction); WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle); metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName); @@ -1952,11 +1947,7 @@ public ConnectorMergeTableHandle beginMerge(ConnectorSession session, ConnectorT } HiveInsertTableHandle insertHandle = beginInsertOrMerge(session, tableHandle, retryMode, "Merging into", true); - return new HiveMergeTableHandle( - HiveTableHandle.buildFrom(hiveTableHandle) - .withTransaction(insertHandle.getTransaction()) - .build(), - insertHandle); + return new HiveMergeTableHandle(hiveTableHandle.withTransaction(insertHandle.getTransaction()), insertHandle); } @Override @@ -2429,10 +2420,9 @@ private BeginTableExecuteResult( hiveExecuteHandle .withWriteDeclarationId(writeDeclarationId), - HiveTableHandle.buildFrom(hiveSourceTableHandle) + hiveSourceTableHandle .withMaxScannedFileSize(hiveExecuteHandle.getMaxScannedFileSize()) - .withRecordScannedFiles(true) - .build()); + .withRecordScannedFiles(true)); } @Override @@ -2780,9 +2770,7 @@ public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTable WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle); metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), handle.getSchemaTableName()); - return HiveTableHandle.buildFrom(handle) - .withTransaction(transaction) - .build(); + return handle.withTransaction(transaction); } @Override @@ -3054,9 +3042,7 @@ public Optional> applyProjecti ((HiveColumnHandle) assignment.getValue()).getType())) .collect(toImmutableList()); return Optional.of(new ProjectionApplicationResult<>( - HiveTableHandle.buildFrom(hiveTableHandle) - .withProjectedColumns(projectedColumns) - .build(), + hiveTableHandle.withProjectedColumns(projectedColumns), projections, assignmentsList, false)); @@ -3103,9 +3089,7 @@ public Optional> applyProjecti List outputAssignments = ImmutableList.copyOf(newAssignments.values()); return Optional.of(new ProjectionApplicationResult<>( - HiveTableHandle.buildFrom(hiveTableHandle) - .withProjectedColumns(projectedColumnsBuilder.build()) - .build(), + hiveTableHandle.withProjectedColumns(projectedColumnsBuilder.build()), newProjections, outputAssignments, false)); @@ -3229,16 +3213,29 @@ public ConnectorTableHandle makeCompatiblePartitioning(ConnectorSession session, largerBucketCount % smallerBucketCount == 0 && Integer.bitCount(largerBucketCount / smallerBucketCount) == 1, "The requested partitioning is not a valid alternative for the table layout"); - return HiveTableHandle.buildFrom(hiveTable) - .withBucketHandle(Optional.of(new HiveBucketHandle( + return new HiveTableHandle( + hiveTable.getSchemaName(), + hiveTable.getTableName(), + hiveTable.getTableParameters(), + hiveTable.getPartitionColumns(), + hiveTable.getDataColumns(), + hiveTable.getPartitionNames(), + hiveTable.getPartitions(), + hiveTable.getCompactEffectivePredicate(), + hiveTable.getEnforcedConstraint(), + Optional.of(new HiveBucketHandle( bucketHandle.getColumns(), bucketHandle.getBucketingVersion(), bucketHandle.getTableBucketCount(), hivePartitioningHandle.getBucketCount(), - bucketHandle.getSortedBy()))) - 
.withConstraintColumns(ImmutableSet.of()) - .withProjectedColumns(ImmutableSet.of()) // Projected columns is used only during optimization phase of planning - .build(); + bucketHandle.getSortedBy())), + hiveTable.getBucketFilter(), + hiveTable.getAnalyzePartitionValues(), + ImmutableSet.of(), + ImmutableSet.of(), // Projected columns is used only during optimization phase of planning + hiveTable.getTransaction(), + hiveTable.isRecordScannedFiles(), + hiveTable.getMaxScannedFileSize()); } @VisibleForTesting diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java index fc4ca58df8c3..1a285a3dd26b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java @@ -168,16 +168,24 @@ public HiveTableHandle applyPartitionResult(HiveTableHandle handle, HivePartitio List partitionColumns = partitions.getPartitionColumns(); enforcedConstraint = partitions.getEffectivePredicate().filter((column, domain) -> partitionColumns.contains(column)); } - return HiveTableHandle.buildFrom(handle) - .withPartitionColumns(ImmutableList.copyOf(partitions.getPartitionColumns())) - .withPartitionNames(partitionNames) - .withPartitions(partitionList) - .withCompactEffectivePredicate(partitions.getCompactEffectivePredicate()) - .withEnforcedConstraint(enforcedConstraint) - .withBucketHandle(partitions.getBucketHandle()) - .withBucketFilter(partitions.getBucketFilter()) - .withConstraintColumns(Sets.union(handle.getConstraintColumns(), constraint.getPredicateColumns().orElseGet(ImmutableSet::of))) - .build(); + return new HiveTableHandle( + handle.getSchemaName(), + handle.getTableName(), + handle.getTableParameters(), + ImmutableList.copyOf(partitions.getPartitionColumns()), + handle.getDataColumns(), + partitionNames, + partitionList, + partitions.getCompactEffectivePredicate(), + enforcedConstraint, + partitions.getBucketHandle(), + partitions.getBucketFilter(), + handle.getAnalyzePartitionValues(), + Sets.union(handle.getConstraintColumns(), constraint.getPredicateColumns().orElseGet(ImmutableSet::of)), + handle.getProjectedColumns(), + handle.getTransaction(), + handle.isRecordScannedFiles(), + handle.getMaxScannedFileSize()); } public Iterator getPartitions(SemiTransactionalHiveMetastore metastore, HiveTableHandle table) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTableHandle.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTableHandle.java index 13f95e9fb97f..416a315bdaed 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTableHandle.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTableHandle.java @@ -60,7 +60,7 @@ public class HiveTableHandle private final Optional maxScannedFileSize; @JsonCreator - public static HiveTableHandle fromJsonForDeserializationOnly( + public HiveTableHandle( @JsonProperty("schemaName") String schemaName, @JsonProperty("tableName") String tableName, @JsonProperty("partitionColumns") List partitionColumns, @@ -72,21 +72,55 @@ public static HiveTableHandle fromJsonForDeserializationOnly( @JsonProperty("analyzePartitionValues") Optional>> analyzePartitionValues, @JsonProperty("transaction") AcidTransaction transaction) { - return builder() - .withSchemaName(schemaName) - .withTableName(tableName) - .withPartitionColumns(partitionColumns) - .withDataColumns(dataColumns) - 
.withCompactEffectivePredicate(compactEffectivePredicate) - .withEnforcedConstraint(enforcedConstraint) - .withBucketHandle(bucketHandle) - .withBucketFilter(bucketFilter) - .withAnalyzePartitionValues(analyzePartitionValues) - .withTransaction(transaction) - .build(); - } - - private HiveTableHandle( + this( + schemaName, + tableName, + Optional.empty(), + partitionColumns, + dataColumns, + Optional.empty(), + Optional.empty(), + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + analyzePartitionValues, + ImmutableSet.of(), + ImmutableSet.of(), + transaction, + false, + Optional.empty()); + } + + public HiveTableHandle( + String schemaName, + String tableName, + Map tableParameters, + List partitionColumns, + List dataColumns, + Optional bucketHandle) + { + this( + schemaName, + tableName, + Optional.of(tableParameters), + partitionColumns, + dataColumns, + Optional.empty(), + Optional.empty(), + TupleDomain.all(), + TupleDomain.all(), + bucketHandle, + Optional.empty(), + Optional.empty(), + ImmutableSet.of(), + ImmutableSet.of(), + NO_ACID_TRANSACTION, + false, + Optional.empty()); + } + + public HiveTableHandle( String schemaName, String tableName, Optional> tableParameters, @@ -125,6 +159,116 @@ private HiveTableHandle( this.maxScannedFileSize = requireNonNull(maxSplitFileSize, "maxSplitFileSize is null"); } + public HiveTableHandle withAnalyzePartitionValues(List> analyzePartitionValues) + { + return new HiveTableHandle( + schemaName, + tableName, + tableParameters, + partitionColumns, + dataColumns, + partitionNames, + partitions, + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + Optional.of(analyzePartitionValues), + constraintColumns, + projectedColumns, + transaction, + recordScannedFiles, + maxScannedFileSize); + } + + public HiveTableHandle withTransaction(AcidTransaction transaction) + { + return new HiveTableHandle( + schemaName, + tableName, + tableParameters, + partitionColumns, + dataColumns, + partitionNames, + partitions, + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + analyzePartitionValues, + constraintColumns, + projectedColumns, + transaction, + recordScannedFiles, + maxScannedFileSize); + } + + public HiveTableHandle withProjectedColumns(Set projectedColumns) + { + return new HiveTableHandle( + schemaName, + tableName, + tableParameters, + partitionColumns, + dataColumns, + partitionNames, + partitions, + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + analyzePartitionValues, + constraintColumns, + projectedColumns, + transaction, + recordScannedFiles, + maxScannedFileSize); + } + + public HiveTableHandle withRecordScannedFiles(boolean recordScannedFiles) + { + return new HiveTableHandle( + schemaName, + tableName, + tableParameters, + partitionColumns, + dataColumns, + partitionNames, + partitions, + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + analyzePartitionValues, + constraintColumns, + projectedColumns, + transaction, + recordScannedFiles, + maxScannedFileSize); + } + + public HiveTableHandle withMaxScannedFileSize(Optional maxScannedFileSize) + { + return new HiveTableHandle( + schemaName, + tableName, + tableParameters, + partitionColumns, + dataColumns, + partitionNames, + partitions, + compactEffectivePredicate, + enforcedConstraint, + bucketHandle, + bucketFilter, + analyzePartitionValues, + constraintColumns, + projectedColumns, + transaction, + recordScannedFiles, + 
maxScannedFileSize); + } + @JsonProperty public String getSchemaName() { @@ -343,184 +487,4 @@ public String toString() }); return builder.toString(); } - - public static Builder builder() - { - return new Builder(); - } - - public static Builder buildFrom(HiveTableHandle table) - { - return new Builder(table); - } - - public static class Builder - { - private String schemaName; - private String tableName; - private Optional> tableParameters = Optional.empty(); - private List partitionColumns = ImmutableList.of(); - private List dataColumns = ImmutableList.of(); - private Optional> partitionNames = Optional.empty(); - private Optional> partitions = Optional.empty(); - private TupleDomain compactEffectivePredicate = TupleDomain.all(); - private TupleDomain enforcedConstraint = TupleDomain.all(); - private Optional bucketHandle = Optional.empty(); - private Optional bucketFilter = Optional.empty(); - private Optional>> analyzePartitionValues = Optional.empty(); - private Set constraintColumns = ImmutableSet.of(); - private Set projectedColumns = ImmutableSet.of(); - private AcidTransaction transaction = NO_ACID_TRANSACTION; - private boolean recordScannedFiles; - private Optional maxScannedFileSize = Optional.empty(); - - private Builder() - { - } - - private Builder(HiveTableHandle table) - { - this.schemaName = table.schemaName; - this.tableName = table.tableName; - this.tableParameters = table.tableParameters; - this.partitionColumns = table.partitionColumns; - this.dataColumns = table.dataColumns; - this.partitionNames = table.partitionNames; - this.partitions = table.partitions; - this.compactEffectivePredicate = table.compactEffectivePredicate; - this.enforcedConstraint = table.enforcedConstraint; - this.bucketHandle = table.bucketHandle; - this.bucketFilter = table.bucketFilter; - this.analyzePartitionValues = table.analyzePartitionValues; - this.constraintColumns = table.constraintColumns; - this.projectedColumns = table.projectedColumns; - this.transaction = table.transaction; - this.recordScannedFiles = table.recordScannedFiles; - this.maxScannedFileSize = table.maxScannedFileSize; - } - - public Builder withSchemaName(String schemaName) - { - this.schemaName = schemaName; - return this; - } - - public Builder withTableName(String tableName) - { - this.tableName = tableName; - return this; - } - - public Builder withTableParameters(Optional> tableParameters) - { - this.tableParameters = tableParameters; - return this; - } - - public Builder withPartitionColumns(List partitionColumns) - { - this.partitionColumns = partitionColumns; - return this; - } - - public Builder withDataColumns(List dataColumns) - { - this.dataColumns = dataColumns; - return this; - } - - public Builder withPartitionNames(Optional> partitionNames) - { - this.partitionNames = partitionNames; - return this; - } - - public Builder withPartitions(Optional> partitions) - { - this.partitions = partitions; - return this; - } - - public Builder withCompactEffectivePredicate(TupleDomain compactEffectivePredicate) - { - this.compactEffectivePredicate = compactEffectivePredicate; - return this; - } - - public Builder withEnforcedConstraint(TupleDomain enforcedConstraint) - { - this.enforcedConstraint = enforcedConstraint; - return this; - } - - public Builder withBucketHandle(Optional bucketHandle) - { - this.bucketHandle = bucketHandle; - return this; - } - - public Builder withBucketFilter(Optional bucketFilter) - { - this.bucketFilter = bucketFilter; - return this; - } - - public Builder 
withAnalyzePartitionValues(Optional>> analyzePartitionValues) - { - this.analyzePartitionValues = analyzePartitionValues; - return this; - } - - public Builder withConstraintColumns(Set constraintColumns) - { - this.constraintColumns = constraintColumns; - return this; - } - - public Builder withProjectedColumns(Set projectedColumns) - { - this.projectedColumns = projectedColumns; - return this; - } - - public Builder withTransaction(AcidTransaction transaction) - { - this.transaction = transaction; - return this; - } - - public Builder withRecordScannedFiles(boolean recordScannedFiles) - { - this.recordScannedFiles = recordScannedFiles; - return this; - } - - public Builder withMaxScannedFileSize(Optional maxScannedFileSize) - { - this.maxScannedFileSize = maxScannedFileSize; - return this; - } - - public HiveTableHandle build() - { - return new HiveTableHandle( - schemaName, - tableName, - tableParameters, - partitionColumns, - dataColumns, - partitionNames, - partitions, - compactEffectivePredicate, - enforcedConstraint, - bucketHandle, - bucketFilter, - analyzePartitionValues, - constraintColumns, - projectedColumns, - transaction, - recordScannedFiles, - maxScannedFileSize); - } - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java index 548efc10153d..24c8a45d06ec 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java @@ -86,15 +86,13 @@ public Optional getSystemTable(HiveMetadata metadata, ConnectorSess return Optional.empty(); } verifyOnline(sourceTableName, Optional.empty(), getProtectMode(sourceTable), sourceTable.getParameters()); - - HiveTableHandle sourceTableHandle = HiveTableHandle.builder() - .withSchemaName(sourceTableName.getSchemaName()) - .withTableName(sourceTableName.getTableName()) - .withTableParameters(Optional.of(sourceTable.getParameters())) - .withPartitionColumns(getPartitionKeyColumnHandles(sourceTable, typeManager)) - .withDataColumns(getRegularColumnHandles(sourceTable, typeManager, getTimestampPrecision(session))) - .withBucketHandle(getHiveBucketHandle(session, sourceTable, typeManager)) - .build(); + HiveTableHandle sourceTableHandle = new HiveTableHandle( + sourceTableName.getSchemaName(), + sourceTableName.getTableName(), + sourceTable.getParameters(), + getPartitionKeyColumnHandles(sourceTable, typeManager), + getRegularColumnHandles(sourceTable, typeManager, getTimestampPrecision(session)), + getHiveBucketHandle(session, sourceTable, typeManager)); List partitionColumns = sourceTableHandle.getPartitionColumns(); if (partitionColumns.isEmpty()) { diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index 3b1f887e2c53..e7339589f2a5 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -710,10 +710,7 @@ protected void setupHive(String databaseName) tablePartitionSchemaChangeNonCanonical = new SchemaTableName(database, "trino_test_partition_schema_change_non_canonical"); tableBucketEvolution = new SchemaTableName(database, "trino_test_bucket_evolution"); - invalidTableHandle = HiveTableHandle.builder() - .withSchemaName(database) - 
.withTableName(INVALID_TABLE) - .build(); + invalidTableHandle = new HiveTableHandle(database, INVALID_TABLE, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); dsColumn = createBaseColumn("ds", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty()); fileFormatColumn = createBaseColumn("file_format", -1, HIVE_STRING, VARCHAR, PARTITION_KEY, Optional.empty()); @@ -3719,9 +3716,7 @@ public void testApplyProjection() // Extra columns handles in HiveTableHandle should get pruned projectionResult = metadata.applyProjection( session, - HiveTableHandle.buildFrom((HiveTableHandle) tableHandle) - .withProjectedColumns(ImmutableSet.copyOf(columnHandles)) - .build(), + ((HiveTableHandle) tableHandle).withProjectedColumns(ImmutableSet.copyOf(columnHandles)), inputProjections, inputAssignments); assertProjectionResult(projectionResult, false, inputProjections, expectedAssignments); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java index d5e2e3a13b0b..e420894d7db8 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java @@ -265,10 +265,7 @@ private static ConnectorPageSource createPageSource(HiveTransactionHandle transa Optional.empty(), 0, SplitWeight.standard()); - ConnectorTableHandle table = HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(TABLE_NAME) - .build(); + ConnectorTableHandle table = new HiveTableHandle(SCHEMA_NAME, TABLE_NAME, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); HivePageSourceProvider provider = new HivePageSourceProvider( TESTING_TYPE_MANAGER, HDFS_ENVIRONMENT, diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveTableHandle.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveTableHandle.java index 6a1ba9437a29..f31eb71c4ff3 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveTableHandle.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveTableHandle.java @@ -13,9 +13,13 @@ */ package io.trino.plugin.hive; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import io.airlift.json.JsonCodec; import org.testng.annotations.Test; +import java.util.Optional; + import static org.testng.Assert.assertEquals; public class TestHiveTableHandle @@ -25,10 +29,7 @@ public class TestHiveTableHandle @Test public void testRoundTrip() { - HiveTableHandle expected = HiveTableHandle.builder() - .withSchemaName("schema") - .withTableName("table") - .build(); + HiveTableHandle expected = new HiveTableHandle("schema", "table", ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); String json = codec.toJson(expected); HiveTableHandle actual = codec.fromJson(json); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java index 7eee8607372a..823a8f029885 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java @@ -141,17 +141,18 @@ private static ConnectorPageSource createTestingPageSource(HiveTransactionHandle TableHandle tableHandle = new TableHandle( 
TEST_CATALOG_HANDLE, - HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(TABLE_NAME) - .withDataColumns(ImmutableList.of(BUCKET_HIVE_COLUMN_HANDLE)) - .withBucketHandle(Optional.of(new HiveBucketHandle( + new HiveTableHandle( + SCHEMA_NAME, + TABLE_NAME, + ImmutableMap.of(), + ImmutableList.of(), + ImmutableList.of(BUCKET_HIVE_COLUMN_HANDLE), + Optional.of(new HiveBucketHandle( ImmutableList.of(BUCKET_HIVE_COLUMN_HANDLE), BUCKETING_V1, 20, 20, - ImmutableList.of()))) - .build(), + ImmutableList.of()))), transaction); HivePageSourceProvider provider = new HivePageSourceProvider( diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/benchmark/AbstractFileFormat.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/benchmark/AbstractFileFormat.java index 608bc8a1f232..c61aab051f71 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/benchmark/AbstractFileFormat.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/benchmark/AbstractFileFormat.java @@ -14,6 +14,7 @@ package io.trino.plugin.hive.benchmark; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import io.trino.hdfs.HdfsEnvironment; import io.trino.plugin.hive.GenericHiveRecordCursorProvider; @@ -159,12 +160,8 @@ public ConnectorPageSource createGenericReader( return factory.createPageSource( TestingConnectorTransactionHandle.INSTANCE, - session, - split, - HiveTableHandle.builder() - .withSchemaName("schema_name") - .withTableName("table_name") - .build(), + session, split, + new HiveTableHandle("schema_name", "table_name", ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()), readColumns, DynamicFilter.EMPTY); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java index 8b1627761f98..4d93f627c4e9 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java @@ -165,10 +165,7 @@ public void testProjectionPushdown() REGULAR, Optional.empty()); - HiveTableHandle hiveTable = HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .build(); + HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle fullColumn = partialColumn.getBaseColumn(); @@ -186,9 +183,7 @@ public void testProjectionPushdown() project( ImmutableMap.of("expr", expression("col")), tableScan( - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(fullColumn)) - .build()::equals, + hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn))::equals, TupleDomain.all(), ImmutableMap.of("col", fullColumn::equals)))); @@ -200,9 +195,7 @@ public void testProjectionPushdown() p.tableScan( new TableHandle( TEST_CATALOG_HANDLE, - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(fullColumn)) - .build(), + hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn)), new HiveTransactionHandle(false)), ImmutableList.of(p.symbol("struct_of_int", baseType)), 
ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn)))) @@ -221,9 +214,7 @@ public void testProjectionPushdown() .matches(project( ImmutableMap.of("expr_deref", expression(new SymbolReference("struct_of_int#a"))), tableScan( - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(partialColumn)) - .build()::equals, + hiveTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals, TupleDomain.all(), ImmutableMap.of("struct_of_int#a", partialColumn::equals)))); @@ -238,10 +229,7 @@ public void testPredicatePushdown() PushPredicateIntoTableScan pushPredicateIntoTableScan = new PushPredicateIntoTableScan(tester().getPlannerContext(), tester().getTypeAnalyzer()); - HiveTableHandle hiveTable = HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .build(); + HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle column = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty()); @@ -273,10 +261,7 @@ public void testColumnPruningProjectionPushdown() PruneTableScanColumns pruneTableScanColumns = new PruneTableScanColumns(tester().getMetadata()); - HiveTableHandle hiveTable = HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .build(); + HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle columnA = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty()); @@ -299,9 +284,7 @@ public void testColumnPruningProjectionPushdown() strictProject( ImmutableMap.of("expr", expression("COLA")), tableScan( - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(columnA)) - .build()::equals, + hiveTable.withProjectedColumns(ImmutableSet.of(columnA))::equals, TupleDomain.all(), ImmutableMap.of("COLA", columnA::equals)))); @@ -321,10 +304,7 @@ public void testPushdownWithDuplicateExpressions() tester().getTypeAnalyzer(), new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer())); - HiveTableHandle hiveTable = HiveTableHandle.builder() - .withSchemaName(SCHEMA_NAME) - .withTableName(tableName) - .build(); + HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); TableHandle table = new TableHandle(TEST_CATALOG_HANDLE, hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle bigintColumn = createBaseColumn("just_bigint", 1, toHiveType(BIGINT), BIGINT, REGULAR, Optional.empty()); @@ -361,9 +341,7 @@ public void testPushdownWithDuplicateExpressions() "column_ref", expression("just_bigint_0"), "negated_column_ref", expression("- just_bigint_0")), tableScan( - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(bigintColumn)) - .build()::equals, + hiveTable.withProjectedColumns(ImmutableSet.of(bigintColumn))::equals, TupleDomain.all(), ImmutableMap.of("just_bigint_0", bigintColumn::equals)))); @@ -387,9 +365,7 @@ public void testPushdownWithDuplicateExpressions() "expr_deref", expression(new SymbolReference("struct_of_bigint#a")), "expr_deref_2", expression(new 
ArithmeticBinaryExpression(ADD, new SymbolReference("struct_of_bigint#a"), new LongLiteral("2")))), tableScan( - HiveTableHandle.buildFrom(hiveTable) - .withProjectedColumns(ImmutableSet.of(partialColumn)) - .build()::equals, + hiveTable.withProjectedColumns(ImmutableSet.of(partialColumn))::equals, TupleDomain.all(), ImmutableMap.of("struct_of_bigint#a", partialColumn::equals)))); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java index 0d6fb07f7496..96c3d25bde61 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java @@ -164,9 +164,8 @@ public void testDereferencePushdown() assertPlan( "SELECT col0.x expr_x, col0.y expr_y FROM " + testTable, any(tableScan( - HiveTableHandle.buildFrom((HiveTableHandle) tableHandle.get().getConnectorHandle()) - .withProjectedColumns(ImmutableSet.of(columnX, columnY)) - .build()::equals, + ((HiveTableHandle) tableHandle.get().getConnectorHandle()) + .withProjectedColumns(ImmutableSet.of(columnX, columnY))::equals, TupleDomain.all(), ImmutableMap.of("col0#x", columnX::equals, "col0#y", columnY::equals)))); @@ -227,9 +226,8 @@ public void testDereferencePushdown() .right( anyTree( tableScan( - HiveTableHandle.buildFrom((HiveTableHandle) tableHandle.get().getConnectorHandle()) - .withProjectedColumns(ImmutableSet.of(column1Handle)) - .build()::equals, + ((HiveTableHandle) tableHandle.get().getConnectorHandle()) + .withProjectedColumns(ImmutableSet.of(column1Handle))::equals, TupleDomain.all(), ImmutableMap.of("s_expr_1", column1Handle::equals)))))))); }
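
Note for reviewers: the snippet below is a minimal, hypothetical sketch (not applied by these patches and not part of Trino) of the immutable-handle style that both reverts return to — a full constructor plus narrow with*() copy methods on the handle class, instead of the general-purpose Builder being removed. The class and field names (ExampleHandle, snapshotId, recordScannedFiles) are illustrative assumptions only.

// Illustrative only: hypothetical handle class, not Trino code.
import java.util.Objects;
import java.util.Optional;

public final class ExampleHandle
{
    private final String schemaName;
    private final String tableName;
    private final Optional<Long> snapshotId;
    private final boolean recordScannedFiles;

    // The constructor takes every field, so each call site states all state explicitly.
    public ExampleHandle(String schemaName, String tableName, Optional<Long> snapshotId, boolean recordScannedFiles)
    {
        this.schemaName = Objects.requireNonNull(schemaName, "schemaName is null");
        this.tableName = Objects.requireNonNull(tableName, "tableName is null");
        this.snapshotId = Objects.requireNonNull(snapshotId, "snapshotId is null");
        this.recordScannedFiles = recordScannedFiles;
    }

    // Each with*() method copies every field and overrides exactly one,
    // producing a new immutable instance rather than mutating this one.
    public ExampleHandle withSnapshotId(Optional<Long> snapshotId)
    {
        return new ExampleHandle(schemaName, tableName, snapshotId, recordScannedFiles);
    }

    public ExampleHandle withRecordScannedFiles(boolean recordScannedFiles)
    {
        return new ExampleHandle(schemaName, tableName, snapshotId, recordScannedFiles);
    }
}

Usage under the same assumptions: handle = handle.withRecordScannedFiles(true). The trade-off, as this sketch suggests, is verbosity at constructor call sites in exchange for making every field explicit, whereas a builder can silently leave fields at their defaults.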