-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala  |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala     | 24
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala           |  2
3 files changed, 16 insertions, 16 deletions
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index a62096d771..ec6d48485f 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -524,9 +524,9 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
| --proxy-user NAME User to impersonate when submitting the application.
| This argument does not work with --principal / --keytab.
|
- | --help, -h Show this help message and exit
- | --verbose, -v Print additional debug output
- | --version, Print the version of current Spark
+ | --help, -h Show this help message and exit.
+ | --verbose, -v Print additional debug output.
+ | --version, Print the version of current Spark.
|
| Spark standalone with cluster deploy mode only:
| --driver-cores NUM Cores for driver (Default: 1).
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 473cde56fd..9e0878a514 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -210,11 +210,11 @@ object SQLConf {
val ALLOW_MULTIPLE_CONTEXTS = booleanConf("spark.sql.allowMultipleContexts",
defaultValue = Some(true),
- doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed." +
+ doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed. " +
"When set to false, only one SQLContext/HiveContext is allowed to be created " +
"through the constructor (new SQLContexts/HiveContexts created through newSession " +
- "method is allowed). Please note that this conf needs to be set in Spark Conf. Once" +
- "a SQLContext/HiveContext has been created, changing the value of this conf will not" +
+ "method is allowed). Please note that this conf needs to be set in Spark Conf. Once " +
+ "a SQLContext/HiveContext has been created, changing the value of this conf will not " +
"have effect.",
isPublic = true)
@@ -238,7 +238,7 @@ object SQLConf {
val PREFER_SORTMERGEJOIN = booleanConf("spark.sql.join.preferSortMergeJoin",
defaultValue = Some(true),
- doc = "When true, prefer sort merge join over shuffle hash join",
+ doc = "When true, prefer sort merge join over shuffle hash join.",
isPublic = false)
val AUTO_BROADCASTJOIN_THRESHOLD = intConf("spark.sql.autoBroadcastJoinThreshold",
@@ -252,8 +252,8 @@ object SQLConf {
"spark.sql.defaultSizeInBytes",
doc = "The default table size used in query planning. By default, it is set to a larger " +
"value than `spark.sql.autoBroadcastJoinThreshold` to be more conservative. That is to say " +
- "by default the optimizer will not choose to broadcast a table unless it knows for sure its" +
- "size is small enough.",
+ "by default the optimizer will not choose to broadcast a table unless it knows for sure " +
+ "its size is small enough.",
isPublic = false)
val SHUFFLE_PARTITIONS = intConf("spark.sql.shuffle.partitions",
@@ -275,7 +275,7 @@ object SQLConf {
doc = "The advisory minimal number of post-shuffle partitions provided to " +
"ExchangeCoordinator. This setting is used in our test to make sure we " +
"have enough parallelism to expose issues that will not be exposed with a " +
- "single partition. When the value is a non-positive value, this setting will" +
+ "single partition. When the value is a non-positive value, this setting will " +
"not be provided to ExchangeCoordinator.",
isPublic = false)
@@ -391,7 +391,7 @@ object SQLConf {
// This is only used for the thriftserver
val THRIFTSERVER_POOL = stringConf("spark.sql.thriftserver.scheduler.pool",
- doc = "Set a Fair Scheduler pool for a JDBC client session")
+ doc = "Set a Fair Scheduler pool for a JDBC client session.")
val THRIFTSERVER_UI_STATEMENT_LIMIT = intConf("spark.sql.thriftserver.ui.retainedStatements",
defaultValue = Some(200),
@@ -433,7 +433,7 @@ object SQLConf {
val BUCKETING_ENABLED = booleanConf("spark.sql.sources.bucketing.enabled",
defaultValue = Some(true),
- doc = "When false, we will treat bucketed table as normal table")
+ doc = "When false, we will treat bucketed table as normal table.")
// The output committer class used by HadoopFsRelation. The specified class needs to be a
// subclass of org.apache.hadoop.mapreduce.OutputCommitter.
@@ -482,7 +482,7 @@ object SQLConf {
val RUN_SQL_ON_FILES = booleanConf("spark.sql.runSQLOnFiles",
defaultValue = Some(true),
isPublic = false,
- doc = "When true, we could use `datasource`.`path` as table in SQL query"
+ doc = "When true, we could use `datasource`.`path` as table in SQL query."
)
val PARSER_SUPPORT_QUOTEDID = booleanConf("spark.sql.parser.supportQuotedIdentifiers",
@@ -501,7 +501,7 @@ object SQLConf {
val WHOLESTAGE_CODEGEN_ENABLED = booleanConf("spark.sql.codegen.wholeStage",
defaultValue = Some(true),
doc = "When true, the whole stage (of multiple operators) will be compiled into single java" +
- " method",
+ " method.",
isPublic = false)
val FILES_MAX_PARTITION_BYTES = longConf("spark.sql.files.maxPartitionBytes",
@@ -511,7 +511,7 @@ object SQLConf {
val EXCHANGE_REUSE_ENABLED = booleanConf("spark.sql.exchange.reuse",
defaultValue = Some(true),
- doc = "When true, the planner will try to find out duplicated exchanges and re-use them",
+ doc = "When true, the planner will try to find out duplicated exchanges and re-use them.",
isPublic = false)
object Deprecated {
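
The bulk of the SQLConf changes simply add the trailing space that multi-line string concatenation needs. A minimal, self-contained Scala sketch (not Spark code) of the failure mode being fixed:

```scala
// Minimal sketch (not Spark code): doc strings built from several fragments
// joined with `+` need an explicit trailing space on each fragment, or the
// words at the seam run together.
object DocSpacingDemo extends App {
  val broken = "this setting will" +
    "not be provided to ExchangeCoordinator."
  val fixed = "this setting will " +
    "not be provided to ExchangeCoordinator."

  println(broken) // ...this setting willnot be provided...
  println(fixed)  // ...this setting will not be provided...
}
```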
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
index 10cd6d00b0..0789567ae6 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
@@ -104,7 +104,7 @@ package object config {
/* Cluster-mode launcher configuration. */
private[spark] val WAIT_FOR_APP_COMPLETION = ConfigBuilder("spark.yarn.submit.waitAppCompletion")
- .doc("In cluster mode, whether to wait for the application to finishe before exiting the " +
+ .doc("In cluster mode, whether to wait for the application to finish before exiting the " +
"launcher process.")
.booleanConf
.withDefault(true)
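
Taken together these are cosmetic doc-string fixes, but the text does surface to users. A hedged sketch of one place it can show up, assuming a SQL session where `SET -v` lists each configuration key together with its doc text:

```scala
// Hedged sketch: listing configurations and their doc strings from a SQL
// session. Assumes the `SET -v` output includes the doc ("meaning") column,
// where a missing trailing space would appear as e.g. "willnot".
sqlContext.sql("SET -v").show(100, false)
```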