@@ -524,9 +524,9 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
| --proxy-user NAME User to impersonate when submitting the application.
| This argument does not work with --principal / --keytab.
|
- | --help, -h Show this help message and exit
- | --verbose, -v Print additional debug output
- | --version, Print the version of current Spark
+ | --help, -h Show this help message and exit.
+ | --verbose, -v Print additional debug output.
+ | --version, Print the version of current Spark.
|
| Spark standalone with cluster deploy mode only:
| --driver-cores NUM Cores for driver (Default: 1).
sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala (24 changes: 12 additions & 12 deletions)
@@ -210,11 +210,11 @@ object SQLConf

val ALLOW_MULTIPLE_CONTEXTS = booleanConf("spark.sql.allowMultipleContexts",
defaultValue = Some(true),
- doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed." +
+ doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed. " +
"When set to false, only one SQLContext/HiveContext is allowed to be created " +
"through the constructor (new SQLContexts/HiveContexts created through newSession " +
- "method is allowed). Please note that this conf needs to be set in Spark Conf. Once" +
- "a SQLContext/HiveContext has been created, changing the value of this conf will not" +
+ "method is allowed). Please note that this conf needs to be set in Spark Conf. Once " +
+ "a SQLContext/HiveContext has been created, changing the value of this conf will not " +
"have effect.",
isPublic = true)
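To illustrate the behaviour this doc string describes, here is a minimal sketch (assuming a Spark 1.6/2.0-era SQLContext API, a local master, and illustrative app/value names): with `spark.sql.allowMultipleContexts=false` set in the SparkConf up front, a second SQLContext built through the constructor is rejected, while `newSession()` on the existing context is still allowed.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// The flag has to be set in the SparkConf before any SQLContext exists;
// as the doc above notes, changing it after a context has been created has no effect.
val conf = new SparkConf()
  .setMaster("local[*]")
  .setAppName("allow-multiple-contexts-sketch")
  .set("spark.sql.allowMultipleContexts", "false")

val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)    // first context via the constructor: allowed
val session = sqlContext.newSession()  // extra session off the existing context: still allowed
// val another = new SQLContext(sc)    // a second constructor call would be rejected here
```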

@@ -238,7 +238,7 @@ object SQLConf

val PREFER_SORTMERGEJOIN = booleanConf("spark.sql.join.preferSortMergeJoin",
defaultValue = Some(true),
- doc = "When true, prefer sort merge join over shuffle hash join",
+ doc = "When true, prefer sort merge join over shuffle hash join.",
isPublic = false)

val AUTO_BROADCASTJOIN_THRESHOLD = intConf("spark.sql.autoBroadcastJoinThreshold",
@@ -252,8 +252,8 @@ object SQLConf
"spark.sql.defaultSizeInBytes",
doc = "The default table size used in query planning. By default, it is set to a larger " +
"value than `spark.sql.autoBroadcastJoinThreshold` to be more conservative. That is to say " +
- "by default the optimizer will not choose to broadcast a table unless it knows for sure its" +
- "size is small enough.",
+ "by default the optimizer will not choose to broadcast a table unless it knows for sure " +
+ "its size is small enough.",
isPublic = false)

val SHUFFLE_PARTITIONS = intConf("spark.sql.shuffle.partitions",
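The interplay between `spark.sql.autoBroadcastJoinThreshold` and `spark.sql.defaultSizeInBytes` described above can be sketched as follows (a hypothetical simplification, not the planner's actual code; the numeric values are illustrative): a table is broadcast only when its estimated size stays under the threshold, and because the fallback estimate is deliberately large, tables of unknown size are never broadcast.

```scala
// Simplified stand-ins for the two settings; the real values live in SQLConf.
val autoBroadcastJoinThreshold = 10L * 1024 * 1024  // e.g. 10 MB
val defaultSizeInBytes = Long.MaxValue              // conservative fallback estimate

// A toy version of the planner's decision: broadcast only when the size is known
// (or estimated) to be below the threshold.
def wouldBroadcast(estimatedSize: Option[Long]): Boolean =
  estimatedSize.getOrElse(defaultSizeInBytes) <= autoBroadcastJoinThreshold

assert(wouldBroadcast(Some(1L * 1024 * 1024)))  // a known 1 MB table would be broadcast
assert(!wouldBroadcast(None))                   // unknown size falls back to the large default: no broadcast
```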
@@ -275,7 +275,7 @@ object SQLConf
doc = "The advisory minimal number of post-shuffle partitions provided to " +
"ExchangeCoordinator. This setting is used in our test to make sure we " +
"have enough parallelism to expose issues that will not be exposed with a " +
- "single partition. When the value is a non-positive value, this setting will" +
+ "single partition. When the value is a non-positive value, this setting will " +
"not be provided to ExchangeCoordinator.",
isPublic = false)

@@ -391,7 +391,7 @@ object SQLConf

// This is only used for the thriftserver
val THRIFTSERVER_POOL = stringConf("spark.sql.thriftserver.scheduler.pool",
- doc = "Set a Fair Scheduler pool for a JDBC client session")
+ doc = "Set a Fair Scheduler pool for a JDBC client session.")

val THRIFTSERVER_UI_STATEMENT_LIMIT = intConf("spark.sql.thriftserver.ui.retainedStatements",
defaultValue = Some(200),
@@ -433,7 +433,7 @@ object SQLConf

val BUCKETING_ENABLED = booleanConf("spark.sql.sources.bucketing.enabled",
defaultValue = Some(true),
- doc = "When false, we will treat bucketed table as normal table")
+ doc = "When false, we will treat bucketed table as normal table.")

// The output committer class used by HadoopFsRelation. The specified class needs to be a
// subclass of org.apache.hadoop.mapreduce.OutputCommitter.
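For context on the `spark.sql.sources.bucketing.enabled` flag above, a hedged sketch of writing a bucketed table (assuming a Spark 2.x-style `DataFrameWriter`; `df`, the table name, and the column name are illustrative): with the flag set to false, the planner ignores the bucketing metadata and reads the table back as a normal one.

```scala
// df is assumed to be an existing DataFrame with a user_id column; names are illustrative.
df.write
  .bucketBy(8, "user_id")          // hash rows into 8 buckets by user_id
  .sortBy("user_id")               // keep each bucket sorted
  .saveAsTable("events_bucketed")
```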
@@ -482,7 +482,7 @@ object SQLConf
val RUN_SQL_ON_FILES = booleanConf("spark.sql.runSQLOnFiles",
defaultValue = Some(true),
isPublic = false,
- doc = "When true, we could use `datasource`.`path` as table in SQL query"
+ doc = "When true, we could use `datasource`.`path` as table in SQL query."
)

val PARSER_SUPPORT_QUOTEDID = booleanConf("spark.sql.parser.supportQuotedIdentifiers",
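The `spark.sql.runSQLOnFiles` doc above refers to querying a file directly with the `datasource`.`path` table syntax; a minimal example (the path and the existing `sqlContext` are assumed for illustration):

```scala
// With spark.sql.runSQLOnFiles=true, a data source plus a path can stand in for a table name.
val events = sqlContext.sql("SELECT * FROM parquet.`/tmp/events.parquet`")
events.show()
```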
@@ -501,7 +501,7 @@ object SQLConf
val WHOLESTAGE_CODEGEN_ENABLED = booleanConf("spark.sql.codegen.wholeStage",
defaultValue = Some(true),
doc = "When true, the whole stage (of multiple operators) will be compiled into single java" +
- " method",
+ " method.",
isPublic = false)

val FILES_MAX_PARTITION_BYTES = longConf("spark.sql.files.maxPartitionBytes",
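To make the `spark.sql.codegen.wholeStage` description concrete, a small hedged sketch (assuming an existing `sqlContext`; the query is illustrative): with the flag on, a chain of operators is fused into a single generated Java method, which is visible in the physical plan.

```scala
sqlContext.setConf("spark.sql.codegen.wholeStage", "true")
val df = sqlContext.range(0, 1000000).filter("id % 2 = 0")
df.explain()  // with the flag on, the plan marks the operators fused into one generated method
```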
@@ -511,7 +511,7 @@ object SQLConf

val EXCHANGE_REUSE_ENABLED = booleanConf("spark.sql.exchange.reuse",
defaultValue = Some(true),
- doc = "When true, the planner will try to find out duplicated exchanges and re-use them",
+ doc = "When true, the planner will try to find out duplicated exchanges and re-use them.",
isPublic = false)

object Deprecated {
@@ -104,7 +104,7 @@ package object config
/* Cluster-mode launcher configuration. */

private[spark] val WAIT_FOR_APP_COMPLETION = ConfigBuilder("spark.yarn.submit.waitAppCompletion")
- .doc("In cluster mode, whether to wait for the application to finishe before exiting the " +
+ .doc("In cluster mode, whether to wait for the application to finish before exiting the " +
"launcher process.")
.booleanConf
.withDefault(true)
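A hedged sketch of the fire-and-forget behaviour that `spark.yarn.submit.waitAppCompletion` controls (the conf key is the one defined above; everything else is illustrative): with the flag set to false, the launcher process exits as soon as the application is submitted instead of polling until it finishes.

```scala
import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("spark.yarn.submit.waitAppCompletion", "false")  // submit and return immediately
// The same setting can also be passed at submit time, e.g.
//   --conf spark.yarn.submit.waitAppCompletion=false
```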