author	Dongjoon Hyun <dongjoon@apache.org>	2016-03-21 08:00:09 +0000
committer	Sean Owen <sowen@cloudera.com>	2016-03-21 08:00:09 +0000
commit	761c2d1b6ee0482e2ba15664c4938eb121dda070 (patch)
tree	bdfd8a2e8ff518210fcb145d6fb39de27732f780 /sql
parent	20fd254101553cb5a4c932c8d03064899112bee6 (diff)
[MINOR][DOCS] Add proper periods and spaces for CLI help messages and `config` doc.
## What changes were proposed in this pull request?

This PR adds proper periods and spaces to Spark CLI help messages and SQL/YARN conf docs for consistency.

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11848 from dongjoon-hyun/add_proper_period_and_space.
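Every hunk below follows the same pattern: a long `doc` string is split across concatenated string literals, and a literal that ends without a trailing space makes the adjacent words run together in the rendered help text (e.g. `will` + `not` becomes `willnot`). A minimal standalone Scala sketch of the pitfall, not Spark's actual `SQLConf` builder:

```scala
object DocConcatDemo {
  def main(args: Array[String]): Unit = {
    // Broken: the first literal ends without a space, so the
    // concatenated doc reads "...willnot be provided...".
    val broken = "When the value is a non-positive value, this setting will" +
      "not be provided to ExchangeCoordinator."

    // Fixed (as in this patch): end the literal with a trailing space.
    val fixed = "When the value is a non-positive value, this setting will " +
      "not be provided to ExchangeCoordinator."

    println(broken) // ...this setting willnot be provided to ExchangeCoordinator.
    println(fixed)  // ...this setting will not be provided to ExchangeCoordinator.
  }
}
```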
Diffstat (limited to 'sql')
-rw-r--r--	sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 473cde56fd..9e0878a514 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -210,11 +210,11 @@ object SQLConf {
val ALLOW_MULTIPLE_CONTEXTS = booleanConf("spark.sql.allowMultipleContexts",
defaultValue = Some(true),
- doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed." +
+ doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed. " +
"When set to false, only one SQLContext/HiveContext is allowed to be created " +
"through the constructor (new SQLContexts/HiveContexts created through newSession " +
- "method is allowed). Please note that this conf needs to be set in Spark Conf. Once" +
- "a SQLContext/HiveContext has been created, changing the value of this conf will not" +
+ "method is allowed). Please note that this conf needs to be set in Spark Conf. Once " +
+ "a SQLContext/HiveContext has been created, changing the value of this conf will not " +
"have effect.",
isPublic = true)
@@ -238,7 +238,7 @@ object SQLConf {
val PREFER_SORTMERGEJOIN = booleanConf("spark.sql.join.preferSortMergeJoin",
defaultValue = Some(true),
- doc = "When true, prefer sort merge join over shuffle hash join",
+ doc = "When true, prefer sort merge join over shuffle hash join.",
isPublic = false)
val AUTO_BROADCASTJOIN_THRESHOLD = intConf("spark.sql.autoBroadcastJoinThreshold",
@@ -252,8 +252,8 @@ object SQLConf {
"spark.sql.defaultSizeInBytes",
doc = "The default table size used in query planning. By default, it is set to a larger " +
"value than `spark.sql.autoBroadcastJoinThreshold` to be more conservative. That is to say " +
- "by default the optimizer will not choose to broadcast a table unless it knows for sure its" +
- "size is small enough.",
+ "by default the optimizer will not choose to broadcast a table unless it knows for sure " +
+ "its size is small enough.",
isPublic = false)
val SHUFFLE_PARTITIONS = intConf("spark.sql.shuffle.partitions",
@@ -275,7 +275,7 @@ object SQLConf {
doc = "The advisory minimal number of post-shuffle partitions provided to " +
"ExchangeCoordinator. This setting is used in our test to make sure we " +
"have enough parallelism to expose issues that will not be exposed with a " +
- "single partition. When the value is a non-positive value, this setting will" +
+ "single partition. When the value is a non-positive value, this setting will " +
"not be provided to ExchangeCoordinator.",
isPublic = false)
@@ -391,7 +391,7 @@ object SQLConf {
// This is only used for the thriftserver
val THRIFTSERVER_POOL = stringConf("spark.sql.thriftserver.scheduler.pool",
- doc = "Set a Fair Scheduler pool for a JDBC client session")
+ doc = "Set a Fair Scheduler pool for a JDBC client session.")
val THRIFTSERVER_UI_STATEMENT_LIMIT = intConf("spark.sql.thriftserver.ui.retainedStatements",
defaultValue = Some(200),
@@ -433,7 +433,7 @@ object SQLConf {
val BUCKETING_ENABLED = booleanConf("spark.sql.sources.bucketing.enabled",
defaultValue = Some(true),
- doc = "When false, we will treat bucketed table as normal table")
+ doc = "When false, we will treat bucketed table as normal table.")
// The output committer class used by HadoopFsRelation. The specified class needs to be a
// subclass of org.apache.hadoop.mapreduce.OutputCommitter.
@@ -482,7 +482,7 @@ object SQLConf {
val RUN_SQL_ON_FILES = booleanConf("spark.sql.runSQLOnFiles",
defaultValue = Some(true),
isPublic = false,
- doc = "When true, we could use `datasource`.`path` as table in SQL query"
+ doc = "When true, we could use `datasource`.`path` as table in SQL query."
)
val PARSER_SUPPORT_QUOTEDID = booleanConf("spark.sql.parser.supportQuotedIdentifiers",
@@ -501,7 +501,7 @@ object SQLConf {
val WHOLESTAGE_CODEGEN_ENABLED = booleanConf("spark.sql.codegen.wholeStage",
defaultValue = Some(true),
doc = "When true, the whole stage (of multiple operators) will be compiled into single java" +
- " method",
+ " method.",
isPublic = false)
val FILES_MAX_PARTITION_BYTES = longConf("spark.sql.files.maxPartitionBytes",
@@ -511,7 +511,7 @@ object SQLConf {
val EXCHANGE_REUSE_ENABLED = booleanConf("spark.sql.exchange.reuse",
defaultValue = Some(true),
- doc = "When true, the planner will try to find out duplicated exchanges and re-use them",
+ doc = "When true, the planner will try to find out duplicated exchanges and re-use them.",
isPublic = false)
object Deprecated {