diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataWriter.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataWriter.java
index 0997f1dc925c2..7b3c4b73424a4 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataWriter.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataWriter.java
@@ -69,5 +69,5 @@ public interface HoodieTableMetadataWriter extends Serializable, AutoCloseable {
    * @param partitions - list of {@link MetadataPartitionType} to drop
    * @throws IOException
    */
-  void dropPartitions(String instantTime, List<MetadataPartitionType> partitions);
+  void deletePartitions(String instantTime, List<MetadataPartitionType> partitions);
 }
diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/metadata/FlinkHoodieBackedTableMetadataWriter.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/metadata/FlinkHoodieBackedTableMetadataWriter.java
index a1e921c905a6f..d295ae384d1d7 100644
--- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/metadata/FlinkHoodieBackedTableMetadataWriter.java
+++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/metadata/FlinkHoodieBackedTableMetadataWriter.java
@@ -158,7 +158,7 @@ protected void commit(String instantTime, Map partitions) {
   }
 
   @Override
-  public void dropPartitions(String instantTime, List<MetadataPartitionType> partitions) {
+  public void deletePartitions(String instantTime, List<MetadataPartitionType> partitions) {
     throw new HoodieNotSupportedException("Dropping metadata index not supported for Flink metadata table yet.");
   }
 }
\ No newline at end of file
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriter.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriter.java
index 3a1e469568d64..7d94b2d4f53f1 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriter.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriter.java
@@ -183,9 +183,9 @@ protected void commit(String instantTime, Map partitions) {
   }
 
   @Override
-  public void dropPartitions(String instantTime, List<MetadataPartitionType> partitions) {
+  public void deletePartitions(String instantTime, List<MetadataPartitionType> partitions) {
     List<String> partitionsToDrop = partitions.stream().map(MetadataPartitionType::getPartitionPath).collect(Collectors.toList());
-    LOG.warn("Deleting Metadata Table partitions: " + partitionsToDrop);
+    LOG.info("Deleting Metadata Table partitions: " + partitionsToDrop);
     try (SparkRDDWriteClient writeClient = new SparkRDDWriteClient(engineContext, metadataWriteConfig, true)) {
       String actionType = CommitUtils.getCommitActionType(WriteOperationType.DELETE_PARTITION, HoodieTableType.MERGE_ON_READ);
diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
index 83b7aa43b08af..74c26a4f43284 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
@@ -482,12 +482,11 @@ public void testMetadataTableDeletePartition(HoodieTableType tableType) throws I
       records = dataGen.generateInserts(newCommitTime, 10);
       writeStatuses = client.upsert(jsc.parallelize(records, 1), newCommitTime).collect();
       assertNoWriteErrors(writeStatuses);
-      validateMetadata(client);
 
       // metadata writer to delete column_stats partition
       HoodieBackedTableMetadataWriter metadataWriter = metadataWriter(client);
       assertNotNull(metadataWriter, "MetadataWriter should have been initialized");
-      metadataWriter.dropPartitions("0000003", Arrays.asList(MetadataPartitionType.COLUMN_STATS));
+      metadataWriter.deletePartitions("0000003", Arrays.asList(MetadataPartitionType.COLUMN_STATS));
 
       HoodieTableMetaClient metadataMetaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(metadataTableBasePath).build();
       List<String> metadataTablePartitions = FSUtils.getAllPartitionPaths(engineContext, metadataMetaClient.getBasePath(), false, false);
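For context, here is a minimal usage sketch of the renamed API (not part of the patch). It assumes a `HoodieTableMetadataWriter` has already been created for the table, and mirrors the test above: the same instant time and `Arrays.asList(MetadataPartitionType.COLUMN_STATS)` argument. The class and helper names are hypothetical.

```java
import java.util.Arrays;

import org.apache.hudi.metadata.HoodieTableMetadataWriter;
import org.apache.hudi.metadata.MetadataPartitionType;

class DeletePartitionsUsageSketch {
  // Hypothetical helper: 'metadataWriter' is assumed to be obtained elsewhere,
  // e.g. the way the test above gets it via metadataWriter(client).
  static void deleteColumnStatsPartition(HoodieTableMetadataWriter metadataWriter, String instantTime) {
    // Renamed API: was dropPartitions(...) before this patch. Per the diff, the Spark
    // writer commits a DELETE_PARTITION action on the metadata table, while the Flink
    // writer throws HoodieNotSupportedException.
    metadataWriter.deletePartitions(instantTime, Arrays.asList(MetadataPartitionType.COLUMN_STATS));
  }
}
```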