diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/GenericRecordValidationTestUtils.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/GenericRecordValidationTestUtils.java
index 36ef74b60558a..6b2fa59564a61 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/GenericRecordValidationTestUtils.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/GenericRecordValidationTestUtils.java
@@ -18,11 +18,6 @@
 
 package org.apache.hudi.testutils;
 
-import org.apache.avro.Schema;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.ArrayWritable;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hudi.avro.HoodieAvroUtils;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.util.CollectionUtils;
@@ -31,6 +26,12 @@
 import org.apache.hudi.hadoop.config.HoodieRealtimeConfig;
 import org.apache.hudi.hadoop.utils.HoodieRealtimeRecordReaderUtils;
 
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.ArrayWritable;
+import org.apache.hadoop.mapred.JobConf;
+
 import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.List;
@@ -88,7 +89,7 @@ public static void assertDataInMORTable(HoodieWriteConfig config, String instant
         .collect(Collectors.toList());
 
     jobConf.set(String.format(HOODIE_CONSUME_COMMIT, config.getTableName()), instant1);
-    jobConf.set(HoodieRealtimeConfig.USE_LOG_RECORD_READER_SCAN_V2, "true");
+    jobConf.set(HoodieRealtimeConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN, "true");
     List<GenericRecord> records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(
         hadoopConf, fullPartitionPaths, config.getBasePath(), jobConf, true);
     Map<String, GenericRecord> prevRecordsMap = records.stream()
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/config/HoodieRealtimeConfig.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/config/HoodieRealtimeConfig.java
index de062bc251af2..a9688fed5b8b2 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/config/HoodieRealtimeConfig.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/config/HoodieRealtimeConfig.java
@@ -17,6 +17,8 @@
 
 package org.apache.hudi.hadoop.config;
 
+import org.apache.hudi.common.config.HoodieMetadataConfig;
+
 /**
  * Class to hold props related to Hoodie RealtimeInputFormat and RealtimeRecordReader.
  */
@@ -39,5 +41,6 @@ public final class HoodieRealtimeConfig {
   public static final String SPILLABLE_MAP_BASE_PATH_PROP = "hoodie.memory.spillable.map.path";
   // Default file path prefix for spillable file
   public static final String DEFAULT_SPILLABLE_MAP_BASE_PATH = "/tmp/";
-  public static final String USE_LOG_RECORD_READER_SCAN_V2 = "hoodie.log.record.reader.use.scanV2";
+  public static final String ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN =
+      "hoodie" + HoodieMetadataConfig.OPTIMIZED_LOG_BLOCKS_SCAN;
 }
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeCompactedRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeCompactedRecordReader.java
index 2cf372384e9fc..2a271203d77b6 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeCompactedRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/RealtimeCompactedRecordReader.java
@@ -97,7 +97,7 @@ private HoodieMergedLogRecordScanner getMergedLogRecordScanner() throws IOExcept
         .withDiskMapType(jobConf.getEnum(HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.key(), HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.defaultValue()))
         .withBitCaskDiskMapCompressionEnabled(jobConf.getBoolean(HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(), HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue()))
-        .withOptimizedLogBlocksScan(jobConf.getBoolean(HoodieRealtimeConfig.USE_LOG_RECORD_READER_SCAN_V2, false))
+        .withOptimizedLogBlocksScan(jobConf.getBoolean(HoodieRealtimeConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN, false))
         .withInternalSchema(schemaEvolutionContext.internalSchemaOption.orElse(InternalSchema.getEmptyInternalSchema()))
         .build();
   }
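
For reference, a minimal usage sketch of the renamed flag (not part of this patch; the wrapper class and main method below are illustrative only). It mirrors the test change above: the flag is set as a string on the JobConf, and RealtimeCompactedRecordReader reads it through jobConf.getBoolean with a false default, so the optimized log-blocks scan stays off unless a job opts in explicitly.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hudi.hadoop.config.HoodieRealtimeConfig;

    // Illustrative-only wrapper class; not part of the Hudi codebase.
    public class EnableOptimizedLogBlocksScanExample {
      public static void main(String[] args) {
        JobConf jobConf = new JobConf();
        // Opt in to the optimized log-blocks scan for realtime (MOR) reads.
        // The key resolves to "hoodie" + HoodieMetadataConfig.OPTIMIZED_LOG_BLOCKS_SCAN,
        // per the HoodieRealtimeConfig change in this diff.
        jobConf.set(HoodieRealtimeConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN, "true");
      }
    }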