author    Daoyuan Wang <daoyuan.wang@intel.com>  2016-03-16 22:52:10 -0700
committer Reynold Xin <rxin@databricks.com>      2016-03-16 22:52:10 -0700
commit    d1c193a2f1a5e2b98f5df1b86d7a7ec0ced13668 (patch)
tree      94aef4269107c7dbb650980116c9bd2ca72566ba
parent    c890c359b1dfb64274d1d0067b1e16d834035f11 (diff)
[SPARK-12855][MINOR][SQL][DOC][TEST] remove spark.sql.dialect from doc and test
## What changes were proposed in this pull request?

Since the developer API for a pluggable parser was removed in #10801, the docs should be updated accordingly.

## How was this patch tested?

This patch does not affect any real code path.

Author: Daoyuan Wang <daoyuan.wang@intel.com>

Closes #11758 from adrian-wang/spark12855.
 docs/sql-programming-guide.md                                                    | 7 -------
 sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala                    | 2 +-
 sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala | 6 ------
 3 files changed, 1 insertion(+), 14 deletions(-)
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 3138fd5fb4..2fdc97f8a0 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -122,13 +122,6 @@ Spark build. If these dependencies are not a problem for your application then u
is recommended for the 1.3 release of Spark. Future releases will focus on bringing `SQLContext` up
to feature parity with a `HiveContext`.
-The specific variant of SQL that is used to parse queries can also be selected using the
-`spark.sql.dialect` option. This parameter can be changed using either the `setConf` method on
-a `SQLContext` or by using a `SET key=value` command in SQL. For a `SQLContext`, the only dialect
-available is "sql" which uses a simple SQL parser provided by Spark SQL. In a `HiveContext`, the
-default is "hiveql", though "sql" is also available. Since the HiveQL parser is much more complete,
-this is recommended for most use cases.
-
## Creating DataFrames
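
For reference, the deleted paragraph above described usage along these lines. A minimal sketch of the now-removed option, assuming a Spark 1.x HiveContext from before this change (the app name and master are illustrative); after #10801 the setting no longer has any effect:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.hive.HiveContext

    val sc = new SparkContext(new SparkConf().setAppName("dialect-demo").setMaster("local[*]"))
    val hiveContext = new HiveContext(sc)

    // Programmatic form, via setConf (the option this patch removes from the docs):
    hiveContext.setConf("spark.sql.dialect", "sql")    // simple SQL parser provided by Spark SQL
    hiveContext.setConf("spark.sql.dialect", "hiveql") // HiveQL parser, the HiveContext default

    // Equivalent SQL form, via a SET command:
    hiveContext.sql("SET spark.sql.dialect=hiveql")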
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index e4d9308692..0f0342ce85 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -80,7 +80,7 @@ class SQLContext private[sql](
def this(sparkContext: JavaSparkContext) = this(sparkContext.sc)
// If spark.sql.allowMultipleContexts is true, we will throw an exception if a user
- // wants to create a new root SQLContext (a SLQContext that is not created by newSession).
+ // wants to create a new root SQLContext (a SQLContext that is not created by newSession).
private val allowMultipleContexts =
sparkContext.conf.getBoolean(
SQLConf.ALLOW_MULTIPLE_CONTEXTS.key,
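
The corrected comment distinguishes a root SQLContext from sessions created by newSession. A minimal sketch of that distinction, assuming the SQLContext API of this era (names are illustrative):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext

    val sc = new SparkContext(new SparkConf().setAppName("sessions").setMaster("local[*]"))

    // A "root" SQLContext is constructed directly against the SparkContext.
    val root = new SQLContext(sc)

    // A child session shares the SparkContext and cached data with `root`
    // but keeps its own SQL configuration and temporary tables; it is not
    // subject to the allowMultipleContexts check.
    val child = root.newSession()

    // Constructing a second root context is what the check above guards:
    // with spark.sql.allowMultipleContexts set to false, this fails instead.
    // val secondRoot = new SQLContext(sc)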
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index d905f0cd68..ab4047df1e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -270,12 +270,6 @@ class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
"SELECT 11 % 10, IF((101.1 % 100.0) BETWEEN 1.01 AND 1.11, \"true\", \"false\"), " +
"(101 / 2) % 10 FROM src LIMIT 1")
- test("Query expressed in SQL") {
- setConf("spark.sql.dialect", "sql")
- assert(sql("SELECT 1").collect() === Array(Row(1)))
- setConf("spark.sql.dialect", "hiveql")
- }
-
test("Query expressed in HiveQL") {
sql("FROM src SELECT key").collect()
}
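
The deleted test existed only to flip spark.sql.dialect around a plain SQL query. A hypothetical sketch, not added by this patch, of the same assertion with no dialect toggling, relying on the suite's existing sql helper and ScalaTest's test method:

    // Hypothetical, not part of this patch: the deleted test's assertion
    // still holds under the single remaining parser, so no setConf
    // dialect toggling is needed around it.
    test("Query expressed in SQL, no dialect switch") {
      assert(sql("SELECT 1").collect() === Array(Row(1)))
    }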