author     Dongjoon Hyun <dongjoon@apache.org>    2017-01-23 01:21:44 -0800
committer  gatorsmile <gatorsmile@gmail.com>      2017-01-23 01:21:44 -0800
commit     c4a6519c44f29950ef3d706a4f79e006ec8bc6b5 (patch)
tree       3c4e0f91fef01902d2e7265eb5f27104d93e92f4 /sql/core/src/main
parent     f067acefabebf04939d03a639a2aaa654e1bc8f9 (diff)
[SPARK-19218][SQL] Fix SET command to show a result correctly and in a sorted order
## What changes were proposed in this pull request?

This PR aims to fix the following two things.

1. `sql("SET -v").collect()` or `sql("SET -v").show()` raises the following exception for a String configuration whose default value is `null`. For the test, please see the [Jenkins result](https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/71539/testReport/) and https://github.com/apache/spark/commit/60953bf1f1ba144e709fdae3903a390ff9479fd0 in #16624.

```
sbt.ForkMain$ForkError: java.lang.RuntimeException: Error while decoding: java.lang.NullPointerException
createexternalrow(input[0, string, false].toString, input[1, string, false].toString, input[2, string, false].toString, StructField(key,StringType,false), StructField(value,StringType,false), StructField(meaning,StringType,false))
:- input[0, string, false].toString
:  +- input[0, string, false]
:- input[1, string, false].toString
:  +- input[1, string, false]
+- input[2, string, false].toString
   +- input[2, string, false]
```

2. Currently, the `SET` and `SET -v` commands show unsorted results. We should show a sorted result for a better user experience; this also matches Hive's behavior.

**BEFORE**

```
scala> sql("set").show(false)
...
|spark.driver.host              |10.22.16.140                       |
|spark.driver.port              |63893                              |
|spark.repl.class.uri           |spark://10.22.16.140:63893/classes |
...
|spark.app.name                 |Spark shell                        |
|spark.driver.memory            |4G                                 |
|spark.executor.id              |driver                             |
|spark.submit.deployMode        |client                             |
|spark.master                   |local[*]                           |
|spark.home                     |/Users/dhyun/spark                 |
|spark.sql.catalogImplementation|hive                               |
|spark.app.id                   |local-1484333618945                |
```

**AFTER**

```
scala> sql("set").show(false)
...
|spark.app.id                   |local-1484333925649                |
|spark.app.name                 |Spark shell                        |
|spark.driver.host              |10.22.16.140                       |
|spark.driver.memory            |4G                                 |
|spark.driver.port              |64994                              |
|spark.executor.id              |driver                             |
|spark.jars                     |                                   |
|spark.master                   |local[*]                           |
|spark.repl.class.uri           |spark://10.22.16.140:64994/classes |
|spark.sql.catalogImplementation|hive                               |
|spark.submit.deployMode        |client                             |
```

## How was this patch tested?

Jenkins with a new test case.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #16579 from dongjoon-hyun/SPARK-19218.
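For readers who want to reproduce the first failure outside the `SET -v` code path, here is a minimal sketch of the decode problem: a row holding `null` in a column the schema declares non-nullable. The object name, config key, and doc string below are illustrative, not taken from the patch.

```
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object NullDefaultRepro {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()

    // Same shape as the SET -v output: every field declared non-nullable.
    val schema = StructType(Seq(
      StructField("key", StringType, nullable = false),
      StructField("value", StringType, nullable = false),
      StructField("meaning", StringType, nullable = false)))

    // A String conf with a null default yields a Row like this one.
    val rows = Seq(Row("spark.some.string.conf", null, "some doc"))
    val df = spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)

    // Decoding calls .toString on each field without a null check, because
    // the schema promised non-null values, so this throws the NPE above.
    df.collect()
  }
}
```

The patch avoids this decode path entirely by never putting `null` into the row: missing defaults are rendered as `<undefined>`.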
Diffstat (limited to 'sql/core/src/main')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala  7
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala              5
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala
index dc8d97594c..7afa4e78a3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala
@@ -79,7 +79,7 @@ case class SetCommand(kv: Option[(String, Option[String])]) extends RunnableCommand
     // Queries all key-value pairs that are set in the SQLConf of the sparkSession.
     case None =>
       val runFunc = (sparkSession: SparkSession) => {
-        sparkSession.conf.getAll.map { case (k, v) => Row(k, v) }.toSeq
+        sparkSession.conf.getAll.toSeq.sorted.map { case (k, v) => Row(k, v) }
       }
       (keyValueOutput, runFunc)
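A note on why the one-line change in this hunk is enough: Scala provides a default `Ordering` for tuples of ordered elements, so a `Seq[(String, String)]` sorts lexicographically by key first, then by value. A standalone sketch with made-up pairs:

```
// Tuples of ordered elements get a built-in Ordering: first by _1
// (the config key), then by _2 (the value).
val confs = Seq(
  ("spark.submit.deployMode", "client"),
  ("spark.app.name", "Spark shell"),
  ("spark.master", "local[*]"))

// Prints: List((spark.app.name,Spark shell), (spark.master,local[*]),
//              (spark.submit.deployMode,client))
println(confs.sorted)
```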
@@ -87,8 +87,9 @@ case class SetCommand(kv: Option[(String, Option[String])]) extends RunnableCommand
     // SQLConf of the sparkSession.
     case Some(("-v", None)) =>
       val runFunc = (sparkSession: SparkSession) => {
-        sparkSession.sessionState.conf.getAllDefinedConfs.map { case (key, defaultValue, doc) =>
-          Row(key, defaultValue, doc)
+        sparkSession.sessionState.conf.getAllDefinedConfs.sorted.map {
+          case (key, defaultValue, doc) =>
+            Row(key, Option(defaultValue).getOrElse("<undefined>"), doc)
         }
       }
       val schema = StructType(
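A note on the `Option(...)` idiom introduced above: wrapping a possibly-null reference in `Option` yields `None` for `null`, so `getOrElse` substitutes the placeholder before the value ever reaches the non-nullable `Row` column. A minimal sketch:

```
// Option(null) is None, so a null default surfaces as "<undefined>"
// instead of breaking the non-nullable string column during decoding.
def displayValue(defaultValue: String): String =
  Option(defaultValue).getOrElse("<undefined>")

println(displayValue(null))    // <undefined>
println(displayValue("4096"))  // 4096
```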
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 645b0fa13e..d0c86ffc27 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -50,6 +50,11 @@ object SQLConf {
     sqlConfEntries.put(entry.key, entry)
   }
 
+  // For testing only
+  private[sql] def unregister(entry: ConfigEntry[_]): Unit = sqlConfEntries.synchronized {
+    sqlConfEntries.remove(entry.key)
+  }
+
   private[sql] object SQLConfigBuilder {
     def apply(key: String): ConfigBuilder = new ConfigBuilder(key).onCreate(register)
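The `unregister` helper added above exists so a test can register a throwaway entry, such as a String conf with a `null` default, and remove it afterwards without polluting the global registry. A sketch of that pattern, assuming test code in the `org.apache.spark.sql` package with a `sql(...)` helper available (e.g. a shared-session test suite); the key and doc string are made up:

```
import org.apache.spark.sql.internal.SQLConf

// Register a throwaway entry; SQLConfigBuilder wires register() via onCreate.
val fakeEntry = SQLConf.SQLConfigBuilder("spark.test.fake.string.conf")
  .doc("a throwaway entry whose default is null")
  .stringConf
  .createWithDefault(null)
try {
  sql("SET -v").collect()  // should no longer throw for the null default
} finally {
  SQLConf.unregister(fakeEntry)  // keep the registry clean for other suites
}
```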