Skip to content

Commit f124ae6

Browse files
10110346 (Jackey Lee)
authored and committed
[MINOR][DOC] Correct some document description errors
## What changes were proposed in this pull request?

Correct some document description errors.

## How was this patch tested?

N/A

Closes apache#23162 from 10110346/docerror.

Authored-by: liuxian <[email protected]>
Signed-off-by: Sean Owen <[email protected]>
1 parent a3da568 commit f124ae6

File tree

2 files changed

+8
-8
lines changed
  • core/src/main/scala/org/apache/spark/internal/config
  • sql/catalyst/src/main/scala/org/apache/spark/sql/internal

2 files changed

+8
-8
lines changed

core/src/main/scala/org/apache/spark/internal/config/package.scala

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,7 @@ package object config {
281281
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
282282
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
283283
.intConf
284-
.checkValue(_ > 0, "The capacity of listener bus event queue must not be negative")
284+
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
285285
.createWithDefault(10000)
286286

287287
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
@@ -430,8 +430,8 @@ package object config {
430430
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
431431
.bytesConf(ByteUnit.BYTE)
432432
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
433-
"The chunk size during writing out the bytes of" +
434-
" ChunkedByteBuffer should not larger than Int.MaxValue - 15.")
433+
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
434+
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
435435
.createWithDefault(64 * 1024 * 1024)
436436

437437
private[spark] val CHECKPOINT_COMPRESS =
@@ -503,7 +503,7 @@ package object config {
503503
"made in creating intermediate shuffle files.")
504504
.bytesConf(ByteUnit.KiB)
505505
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
506-
s"The file buffer size must be greater than 0 and less than" +
506+
s"The file buffer size must be positive and less than or equal to" +
507507
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
508508
.createWithDefaultString("32k")
509509

@@ -513,7 +513,7 @@ package object config {
513513
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
514514
.bytesConf(ByteUnit.KiB)
515515
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
516-
s"The buffer size must be greater than 0 and less than" +
516+
s"The buffer size must be positive and less than or equal to" +
517517
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
518518
.createWithDefaultString("32k")
519519

sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -326,7 +326,7 @@ object SQLConf {
326326
"factor as the estimated data size, in case the data is compressed in the file and lead to" +
327327
" a heavily underestimated result.")
328328
.doubleConf
329-
.checkValue(_ > 0, "the value of fileDataSizeFactor must be larger than 0")
329+
.checkValue(_ > 0, "the value of fileDataSizeFactor must be greater than 0")
330330
.createWithDefault(1.0)
331331

332332
val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema")
@@ -673,7 +673,7 @@ object SQLConf {
673673
val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets")
674674
.doc("The maximum number of buckets allowed. Defaults to 100000")
675675
.intConf
676-
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be larger than 0")
676+
.checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0")
677677
.createWithDefault(100000)
678678

679679
val CROSS_JOINS_ENABLED = buildConf("spark.sql.crossJoin.enabled")
@@ -1154,7 +1154,7 @@ object SQLConf {
11541154
.internal()
11551155
.doc("The number of bins when generating histograms.")
11561156
.intConf
1157-
.checkValue(num => num > 1, "The number of bins must be larger than 1.")
1157+
.checkValue(num => num > 1, "The number of bins must be greater than 1.")
11581158
.createWithDefault(254)
11591159

11601160
val PERCENTILE_ACCURACY =

0 commit comments

Comments (0)