[HUDI-3943] Some description fixes for 0.10.1 docs (#5447)
CodeCooker17 authored Apr 28, 2022
1 parent 52953c8 commit 4e928a6
Showing 6 changed files with 16 additions and 16 deletions.
HoodieClusteringConfig.java
@@ -87,7 +87,7 @@ public class HoodieClusteringConfig extends HoodieConfig {
.key(CLUSTERING_STRATEGY_PARAM_PREFIX + "small.file.limit")
.defaultValue(String.valueOf(300 * 1024 * 1024L))
.sinceVersion("0.7.0")
.withDocumentation("Files smaller than the size specified here are candidates for clustering");
.withDocumentation("Files smaller than the size in bytes specified here are candidates for clustering");

public static final ConfigProperty<String> PARTITION_REGEX_PATTERN = ConfigProperty
.key(CLUSTERING_STRATEGY_PARAM_PREFIX + "partition.regex.pattern")
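As a usage sketch (not part of this commit), the clustering small-file limit can be passed straight through the Spark datasource writer. The table name, path, and Dataset below are placeholders, and the other options a real job needs (record key, precombine field, etc.) are omitted.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;

public class ClusteringOptsExample {
  // Writes df as a Hudi table with inline clustering enabled; files below
  // the small.file.limit threshold (value in bytes) become clustering candidates.
  public static void write(Dataset<Row> df) {
    df.write().format("hudi")
        .option("hoodie.table.name", "my_table")
        .option("hoodie.clustering.inline", "true")
        .option("hoodie.clustering.plan.strategy.small.file.limit",
            String.valueOf(300L * 1024 * 1024)) // 300 MB, in bytes
        .mode(SaveMode.Append)
        .save("/tmp/hudi/my_table");
  }
}
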
HoodieMemoryConfig.java
@@ -65,17 +65,17 @@ public class HoodieMemoryConfig extends HoodieConfig {
public static final ConfigProperty<Long> MAX_MEMORY_FOR_MERGE = ConfigProperty
.key("hoodie.memory.merge.max.size")
.defaultValue(DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES)
.withDocumentation("Maximum amount of memory used for merge operations, before spilling to local storage.");
.withDocumentation("Maximum amount of memory used in bytes for merge operations, before spilling to local storage.");

public static final ConfigProperty<String> MAX_MEMORY_FOR_COMPACTION = ConfigProperty
.key("hoodie.memory.compaction.max.size")
.noDefaultValue()
.withDocumentation("Maximum amount of memory used for compaction operations, before spilling to local storage.");
.withDocumentation("Maximum amount of memory used in bytes for compaction operations in bytes , before spilling to local storage.");

public static final ConfigProperty<Integer> MAX_DFS_STREAM_BUFFER_SIZE = ConfigProperty
.key("hoodie.memory.dfs.buffer.max.size")
.defaultValue(16 * 1024 * 1024)
.withDocumentation("Property to control the max memory for dfs input stream buffer size");
.withDocumentation("Property to control the max memory in bytes for dfs input stream buffer size");

public static final ConfigProperty<String> SPILLABLE_MAP_BASE_PATH = ConfigProperty
.key("hoodie.memory.spillable.map.path")
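A hedged sketch of how these memory caps could be supplied as writer options (all values in bytes; the numbers are illustrative, not recommendations):

import java.util.HashMap;
import java.util.Map;

public class MemoryOptsExample {
  // Builds an options map that can be handed to a Hudi writer,
  // e.g. df.write().format("hudi").options(memoryOpts()).
  public static Map<String, String> memoryOpts() {
    Map<String, String> opts = new HashMap<>();
    opts.put("hoodie.memory.merge.max.size", String.valueOf(1024L * 1024 * 1024));      // 1 GB for merges
    opts.put("hoodie.memory.compaction.max.size", String.valueOf(1024L * 1024 * 1024)); // 1 GB for compaction
    opts.put("hoodie.memory.dfs.buffer.max.size", String.valueOf(16 * 1024 * 1024));    // 16 MB stream buffer
    return opts;
  }
}
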
HoodieStorageConfig.java
@@ -42,25 +42,25 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> PARQUET_MAX_FILE_SIZE = ConfigProperty
.key("hoodie.parquet.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target size for parquet files produced by Hudi write phases. "
.withDocumentation("Target size in bytes for parquet files produced by Hudi write phases. "
+ "For DFS, this needs to be aligned with the underlying filesystem block size for optimal performance.");

public static final ConfigProperty<String> PARQUET_BLOCK_SIZE = ConfigProperty
.key("hoodie.parquet.block.size")
.defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Parquet RowGroup size. It's recommended to make this large enough that scan costs can be"
.withDocumentation("Parquet RowGroup size in bytes. It's recommended to make this large enough that scan costs can be"
+ " amortized by packing enough column values into a single row group.");

public static final ConfigProperty<String> PARQUET_PAGE_SIZE = ConfigProperty
.key("hoodie.parquet.page.size")
.defaultValue(String.valueOf(1 * 1024 * 1024))
.withDocumentation("Parquet page size. Page is the unit of read within a parquet file. "
.withDocumentation("Parquet page size in bytes. Page is the unit of read within a parquet file. "
+ "Within a block, pages are compressed separately.");

public static final ConfigProperty<String> ORC_FILE_MAX_SIZE = ConfigProperty
.key("hoodie.orc.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target file size for ORC base files.");
.withDocumentation("Target file size in bytes for ORC base files.");

public static final ConfigProperty<String> ORC_STRIPE_SIZE = ConfigProperty
.key("hoodie.orc.stripe.size")
@@ -75,12 +75,12 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> HFILE_MAX_FILE_SIZE = ConfigProperty
.key("hoodie.hfile.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target file size for HFile base files.");
.withDocumentation("Target file size in bytes for HFile base files.");

public static final ConfigProperty<String> HFILE_BLOCK_SIZE = ConfigProperty
.key("hoodie.hfile.block.size")
.defaultValue(String.valueOf(1024 * 1024))
.withDocumentation("Lower values increase the size of metadata tracked within HFile, but can offer potentially "
.withDocumentation("Lower values increase the size in bytes of metadata tracked within HFile, but can offer potentially "
+ "faster lookup times.");

public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_FORMAT = ConfigProperty
@@ -91,13 +91,13 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> LOGFILE_MAX_SIZE = ConfigProperty
.key("hoodie.logfile.max.size")
.defaultValue(String.valueOf(1024 * 1024 * 1024)) // 1 GB
.withDocumentation("LogFile max size. This is the maximum size allowed for a log file "
.withDocumentation("LogFile max size in bytes. This is the maximum size allowed for a log file "
+ "before it is rolled over to the next version.");

public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_MAX_SIZE = ConfigProperty
.key("hoodie.logfile.data.block.max.size")
.defaultValue(String.valueOf(256 * 1024 * 1024))
.withDocumentation("LogFile Data block max size. This is the maximum size allowed for a single data block "
.withDocumentation("LogFile Data block max size in bytes. This is the maximum size allowed for a single data block "
+ "to be appended to a log file. This helps to make sure the data appended to the log file is broken up "
+ "into sizable blocks to prevent from OOM errors. This size should be greater than the JVM memory.");

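The parquet sizing knobs above nest: pages make up a RowGroup (block), and RowGroups make up a base file. A small arithmetic sketch mirroring the 0.10.1 defaults (illustrative only):

public class ParquetSizingExample {
  public static void main(String[] args) {
    long pageSize  = 1L * 1024 * 1024;   // hoodie.parquet.page.size: unit of read/compression
    long blockSize = 120L * 1024 * 1024; // hoodie.parquet.block.size: one RowGroup
    long fileSize  = 120L * 1024 * 1024; // hoodie.parquet.max.file.size: target base file size
    // block == file means roughly one RowGroup per file; on DFS the file
    // size should also line up with the filesystem block size.
    System.out.printf("pages per RowGroup=%d, RowGroups per file=%d%n",
        blockSize / pageSize, fileSize / blockSize);
  }
}
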
HoodieWriteConfig.java
@@ -150,7 +150,7 @@ public class HoodieWriteConfig extends HoodieConfig {
.key("hoodie.table.base.file.format")
.defaultValue(HoodieFileFormat.PARQUET)
.withAlternatives("hoodie.table.ro.file.format")
.withDocumentation("");
.withDocumentation("Base file format to store all the base file data.");

public static final ConfigProperty<String> BASE_PATH = ConfigProperty
.key("hoodie.base.path")
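Since the commit fills in the documentation for hoodie.table.base.file.format, here is a hedged sketch of choosing a non-default format at table creation (the ORC choice and table name are placeholders; PARQUET is the default):

import java.util.HashMap;
import java.util.Map;
import org.apache.hudi.common.model.HoodieFileFormat;

public class BaseFormatExample {
  public static Map<String, String> tableOpts() {
    Map<String, String> opts = new HashMap<>();
    opts.put("hoodie.table.name", "my_table");                               // placeholder
    opts.put("hoodie.table.base.file.format", HoodieFileFormat.ORC.name()); // default: PARQUET
    return opts;
  }
}
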
FileSystemViewStorageConfig.java
@@ -78,7 +78,7 @@ public class FileSystemViewStorageConfig extends HoodieConfig {
public static final ConfigProperty<Long> SPILLABLE_MEMORY = ConfigProperty
.key("hoodie.filesystem.view.spillable.mem")
.defaultValue(100 * 1024 * 1024L) // 100 MB
.withDocumentation("Amount of memory to be used for holding file system view, before spilling to disk.");
.withDocumentation("Amount of memory to be used in bytes for holding file system view, before spilling to disk.");

public static final ConfigProperty<Double> SPILLABLE_COMPACTION_MEM_FRACTION = ConfigProperty
.key("hoodie.filesystem.view.spillable.compaction.mem.fraction")
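A minimal sketch of raising this budget, assuming the property is passed through to the writer as a plain key/value (the 200 MB figure is arbitrary):

import java.util.Properties;

public class ViewMemExample {
  public static Properties viewProps() {
    Properties props = new Properties();
    // Value in bytes; the default is 100 * 1024 * 1024 (100 MB). Beyond
    // this budget the file system view spills to disk.
    props.setProperty("hoodie.filesystem.view.spillable.mem",
        String.valueOf(200L * 1024 * 1024));
    return props;
  }
}
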
FlinkOptions.java
@@ -137,7 +137,7 @@ private FlinkOptions() {
.key("index.partition.regex")
.stringType()
.defaultValue(".*")
.withDescription("Whether to load partitions in state if partition path matching, default *");
.withDescription("Whether to load partitions in state if partition path matching, default `*`");

// ------------------------------------------------------------------------
// Read Options
@@ -542,7 +542,7 @@ private FlinkOptions() {
.key("compaction.target_io")
.longType()
.defaultValue(500 * 1024L) // default 500 GB
.withDescription("Target IO per compaction (both read and write), default 500 GB");
.withDescription("Target IO in MB for per compaction (both read and write), default 500 GB");

public static final ConfigOption<Boolean> CLEAN_ASYNC_ENABLED = ConfigOptions
.key("clean.async.enabled")
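On the Flink side these options are usually set through a Flink Configuration or the WITH clause of a SQL DDL. A hedged sketch with placeholder values; note that compaction.target_io is expressed in MB, so the 500 GB default corresponds to 500 * 1024:

import org.apache.flink.configuration.Configuration;

public class FlinkCompactionExample {
  public static Configuration compactionConf() {
    Configuration conf = new Configuration();
    conf.setString("index.partition.regex", ".*");                       // load every partition into state
    conf.setString("compaction.target_io", String.valueOf(500L * 1024)); // MB, i.e. 500 GB
    conf.setString("clean.async.enabled", "true");
    return conf;
  }
}
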
