From db057b18ade8b991aaab3c1f59cb3a746f26bb56 Mon Sep 17 00:00:00 2001 From: CodeCheef Date: Wed, 27 Apr 2022 21:13:48 +0800 Subject: [PATCH] [HUDI-3943] Some description fixes for 0.10.1 docs --- .../hudi/config/HoodieClusteringConfig.java | 2 +- .../apache/hudi/config/HoodieMemoryConfig.java | 6 +++--- .../apache/hudi/config/HoodieStorageConfig.java | 16 ++++++++-------- .../apache/hudi/config/HoodieWriteConfig.java | 2 +- .../table/view/FileSystemViewStorageConfig.java | 2 +- .../apache/hudi/configuration/FlinkOptions.java | 6 +++--- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieClusteringConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieClusteringConfig.java index 36f9d169faa47..eee6f4f4927e0 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieClusteringConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieClusteringConfig.java @@ -87,7 +87,7 @@ public class HoodieClusteringConfig extends HoodieConfig { .key(CLUSTERING_STRATEGY_PARAM_PREFIX + "small.file.limit") .defaultValue(String.valueOf(300 * 1024 * 1024L)) .sinceVersion("0.7.0") - .withDocumentation("Files smaller than the size specified here are candidates for clustering"); + .withDocumentation("Files smaller than the size in bytes specified here are candidates for clustering"); public static final ConfigProperty PARTITION_REGEX_PATTERN = ConfigProperty .key(CLUSTERING_STRATEGY_PARAM_PREFIX + "partition.regex.pattern") diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java index 8845ccbeeec65..4e37796393a73 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieMemoryConfig.java @@ -65,17 +65,17 @@ public class HoodieMemoryConfig extends HoodieConfig { public static final ConfigProperty MAX_MEMORY_FOR_MERGE = ConfigProperty .key("hoodie.memory.merge.max.size") .defaultValue(DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES) - .withDocumentation("Maximum amount of memory used for merge operations, before spilling to local storage."); + .withDocumentation("Maximum amount of memory used in bytes for merge operations, before spilling to local storage."); public static final ConfigProperty MAX_MEMORY_FOR_COMPACTION = ConfigProperty .key("hoodie.memory.compaction.max.size") .noDefaultValue() - .withDocumentation("Maximum amount of memory used for compaction operations, before spilling to local storage."); + .withDocumentation("Maximum amount of memory used in bytes for compaction operations, before spilling to local storage."); public static final ConfigProperty MAX_DFS_STREAM_BUFFER_SIZE = ConfigProperty .key("hoodie.memory.dfs.buffer.max.size") .defaultValue(16 * 1024 * 1024) - .withDocumentation("Property to control the max memory for dfs input stream buffer size"); + .withDocumentation("Property to control the max memory in bytes for dfs input stream buffer size"); public static final ConfigProperty SPILLABLE_MAP_BASE_PATH = ConfigProperty .key("hoodie.memory.spillable.map.path") diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java index 6447a039cc069..ba3888863d557 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieStorageConfig.java @@ -42,25 +42,25 @@ public class HoodieStorageConfig extends HoodieConfig { public static final ConfigProperty PARQUET_MAX_FILE_SIZE = ConfigProperty .key("hoodie.parquet.max.file.size") .defaultValue(String.valueOf(120 * 1024 * 1024)) - .withDocumentation("Target size for parquet files produced by Hudi write phases. " + .withDocumentation("Target size in bytes for parquet files produced by Hudi write phases. " + "For DFS, this needs to be aligned with the underlying filesystem block size for optimal performance."); public static final ConfigProperty PARQUET_BLOCK_SIZE = ConfigProperty .key("hoodie.parquet.block.size") .defaultValue(String.valueOf(120 * 1024 * 1024)) - .withDocumentation("Parquet RowGroup size. It's recommended to make this large enough that scan costs can be" + .withDocumentation("Parquet RowGroup size in bytes. It's recommended to make this large enough that scan costs can be" + " amortized by packing enough column values into a single row group."); public static final ConfigProperty PARQUET_PAGE_SIZE = ConfigProperty .key("hoodie.parquet.page.size") .defaultValue(String.valueOf(1 * 1024 * 1024)) - .withDocumentation("Parquet page size. Page is the unit of read within a parquet file. " + .withDocumentation("Parquet page size in bytes. Page is the unit of read within a parquet file. " + "Within a block, pages are compressed separately."); public static final ConfigProperty ORC_FILE_MAX_SIZE = ConfigProperty .key("hoodie.orc.max.file.size") .defaultValue(String.valueOf(120 * 1024 * 1024)) - .withDocumentation("Target file size for ORC base files."); + .withDocumentation("Target file size in bytes for ORC base files."); public static final ConfigProperty ORC_STRIPE_SIZE = ConfigProperty .key("hoodie.orc.stripe.size") @@ -75,12 +75,12 @@ public class HoodieStorageConfig extends HoodieConfig { public static final ConfigProperty HFILE_MAX_FILE_SIZE = ConfigProperty .key("hoodie.hfile.max.file.size") .defaultValue(String.valueOf(120 * 1024 * 1024)) - .withDocumentation("Target file size for HFile base files."); + .withDocumentation("Target file size in bytes for HFile base files."); public static final ConfigProperty HFILE_BLOCK_SIZE = ConfigProperty .key("hoodie.hfile.block.size") .defaultValue(String.valueOf(1024 * 1024)) - .withDocumentation("Lower values increase the size of metadata tracked within HFile, but can offer potentially " + .withDocumentation("Block size in bytes for HFile. Lower values increase the size of metadata tracked within HFile, but can offer potentially " + "faster lookup times."); public static final ConfigProperty LOGFILE_DATA_BLOCK_FORMAT = ConfigProperty @@ -91,13 +91,13 @@ public class HoodieStorageConfig extends HoodieConfig { public static final ConfigProperty LOGFILE_MAX_SIZE = ConfigProperty .key("hoodie.logfile.max.size") .defaultValue(String.valueOf(1024 * 1024 * 1024)) // 1 GB - .withDocumentation("LogFile max size. This is the maximum size allowed for a log file " + .withDocumentation("LogFile max size in bytes. 
This is the maximum size allowed for a log file " + "before it is rolled over to the next version."); public static final ConfigProperty LOGFILE_DATA_BLOCK_MAX_SIZE = ConfigProperty .key("hoodie.logfile.data.block.max.size") .defaultValue(String.valueOf(256 * 1024 * 1024)) - .withDocumentation("LogFile Data block max size. This is the maximum size allowed for a single data block " + .withDocumentation("LogFile Data block max size in bytes. This is the maximum size allowed for a single data block " + "to be appended to a log file. This helps to make sure the data appended to the log file is broken up " + "into sizable blocks to prevent from OOM errors. This size should be greater than the JVM memory."); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java index ca201816cb045..700413c9c3820 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java @@ -150,7 +150,7 @@ public class HoodieWriteConfig extends HoodieConfig { .key("hoodie.table.base.file.format") .defaultValue(HoodieFileFormat.PARQUET) .withAlternatives("hoodie.table.ro.file.format") - .withDocumentation(""); + .withDocumentation("Base file format to store all the base file data."); public static final ConfigProperty BASE_PATH = ConfigProperty .key("hoodie.base.path") diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java index e2342edc3a351..63f10855bad84 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java @@ -78,7 +78,7 @@ public class FileSystemViewStorageConfig extends HoodieConfig { public static final ConfigProperty SPILLABLE_MEMORY = ConfigProperty .key("hoodie.filesystem.view.spillable.mem") .defaultValue(100 * 1024 * 1024L) // 100 MB - .withDocumentation("Amount of memory to be used for holding file system view, before spilling to disk."); + .withDocumentation("Amount of memory to be used in bytes for holding file system view, before spilling to disk."); public static final ConfigProperty SPILLABLE_COMPACTION_MEM_FRACTION = ConfigProperty .key("hoodie.filesystem.view.spillable.compaction.mem.fraction") diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java index 8e202c692383d..88cb8c2f4e355 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java @@ -103,7 +103,7 @@ private FlinkOptions() { .key("metadata.compaction.delta_commits") .intType() .defaultValue(10) - .withDescription("Max delta commits for metadata table to trigger compaction, default 24"); + .withDescription("Max delta commits for metadata table to trigger compaction, default 10"); // ------------------------------------------------------------------------ // Index Options @@ -137,7 +137,7 @@ private FlinkOptions() { .key("index.partition.regex") .stringType() .defaultValue(".*") - 
.withDescription("Whether to load partitions in state if partition path matching, default *"); + .withDescription("Whether to load partitions in state if the partition path matches the regex pattern, default `.*`"); // ------------------------------------------------------------------------ // Read Options // ------------------------------------------------------------------------ @@ -542,7 +542,7 @@ private FlinkOptions() { .key("compaction.target_io") .longType() .defaultValue(500 * 1024L) // default 500 GB - .withDescription("Target IO per compaction (both read and write), default 500 GB"); + .withDescription("Target IO in MB per compaction (both read and write), default 500 GB"); public static final ConfigOption CLEAN_ASYNC_ENABLED = ConfigOptions .key("clean.async.enabled")
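
Note on units for the size-related options touched above: the values are plain byte counts (compaction.target_io is the exception and is expressed in MB). Below is a minimal, hypothetical sketch of assembling a few of these settings; it only builds a generic string-keyed options map with the documented keys and byte values, and how that map is handed to the writer (Spark datasource options, HoodieWriteConfig properties, or Flink table options) is engine-specific and not shown here.

import java.util.LinkedHashMap;
import java.util.Map;

public class HudiByteSizedOptionsSketch {
  public static void main(String[] args) {
    // Generic option map; the keys are the config keys documented in the patch above.
    Map<String, String> options = new LinkedHashMap<>();
    // Target parquet base file size: 120 MB expressed in bytes (the documented default).
    options.put("hoodie.parquet.max.file.size", String.valueOf(120L * 1024 * 1024));
    // Memory budget for merge operations before spilling to local storage, in bytes (1 GB here).
    options.put("hoodie.memory.merge.max.size", String.valueOf(1024L * 1024 * 1024));
    // Maximum log file size before it is rolled over to the next version, in bytes (1 GB, the default).
    options.put("hoodie.logfile.max.size", String.valueOf(1024L * 1024 * 1024));
    // Memory for holding the file system view before spilling to disk, in bytes (100 MB, the default).
    options.put("hoodie.filesystem.view.spillable.mem", String.valueOf(100L * 1024 * 1024));
    options.forEach((k, v) -> System.out.println(k + "=" + v));
  }
}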