[MINOR][DOC] Correct some document description errors #23162

Closed
@@ -281,7 +281,7 @@ package object config {
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must not be negative")
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)

private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
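A note on why this first hunk is more than cosmetic: 0 satisfies "must not be negative" yet fails the `_ > 0` predicate, so the old message described a weaker condition than the code enforces. Below is a minimal, self-contained sketch of the pattern; this is an illustrative stand-in, not Spark's actual ConfigBuilder, and `CheckedConf` and its `set` method are hypothetical names.

```scala
// Illustrative stand-in for a validated config entry (not Spark's API):
// the value is rejected with the given message when the predicate fails,
// the way checkValue(_ > 0, msg) does in the hunk above.
final case class CheckedConf[T](key: String, isValid: T => Boolean, errorMsg: String) {
  def set(value: T): T = {
    if (!isValid(value)) throw new IllegalArgumentException(s"$key: $errorMsg")
    value
  }
}

val capacity = CheckedConf[Int](
  "spark.scheduler.listenerbus.eventqueue.capacity",
  _ > 0,
  "The capacity of listener bus event queue must be positive")

capacity.set(10000) // accepted (the default above)
// capacity.set(0)  // would throw: 0 is "not negative", but it is not positive either
```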
@@ -430,8 +430,8 @@ package object config {
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
"The chunk size during writing out the bytes of" +
" ChunkedByteBuffer should not larger than Int.MaxValue - 15.")
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)

private[spark] val CHECKPOINT_COMPRESS =
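For context on the interpolated bound: Spark defines `ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH` as `Integer.MAX_VALUE - 15`, so the new message prints the same limit the old hard-coded text described, and the message can no longer drift if the constant changes. A quick check of the arithmetic:

```scala
// How ByteArrayMethods defines the constant, and the value the new message prints:
val maxRoundedArrayLength = Int.MaxValue - 15
assert(maxRoundedArrayLength == 2147483632) // Int.MaxValue is 2147483647
```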
@@ -503,7 +503,7 @@ package object config {
"made in creating intermediate shuffle files.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The file buffer size must be greater than 0 and less than" +
s"The file buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")

@@ -513,7 +513,7 @@
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The buffer size must be greater than 0 and less than" +
s"The buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")

@@ -326,7 +326,7 @@ object SQLConf {
"factor as the estimated data size, in case the data is compressed in the file and lead to" +
" a heavily underestimated result.")
.doubleConf
.checkValue(_ > 0, "the value of fileDataSizeFactor must be larger than 0")
.checkValue(_ > 0, "the value of fileDataSizeFactor must be greater than 0")
.createWithDefault(1.0)

val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema")
@@ -673,7 +673,7 @@ object SQLConf {
val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets")
.doc("The maximum number of buckets allowed. Defaults to 100000")
.intConf
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be larger than 0")
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0")
.createWithDefault(100000)

val CROSS_JOINS_ENABLED = buildConf("spark.sql.crossJoin.enabled")
@@ -1154,7 +1154,7 @@ object SQLConf {
.internal()
.doc("The number of bins when generating histograms.")
.intConf
- .checkValue(num => num > 1, "The number of bins must be larger than 1.")
+ .checkValue(num => num > 1, "The number of bins must be greater than 1.")
.createWithDefault(254)

val PERCENTILE_ACCURACY =
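To see the corrected wording end to end, here is a hypothetical use of the `CheckedConf` sketch from the first hunk, mirroring the histogram-bins check above (the key name `numBins` is illustrative; the real config key is not shown in this diff):

```scala
// Hypothetical usage of the CheckedConf sketch above, not Spark's API.
val numBins = CheckedConf[Int](
  "numBins", // illustrative key; the actual config key is not part of this hunk
  _ > 1,
  "The number of bins must be greater than 1.")

numBins.set(254) // accepted (the default above)
// numBins.set(1) // would throw IllegalArgumentException with the corrected message
```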