author    Reynold Xin <rxin@databricks.com>    2016-04-30 01:32:00 -0700
committer Yin Huai <yhuai@databricks.com>      2016-04-30 01:32:00 -0700
commit    8dc3987d095ae01ad80c89b8f052f231e0807990 (patch)
tree      0ce2b3cd50fa778cf28cc51eb27e19b3caeeed55 /sql
parent    b3ea579314945dc1fcb4a260fbc7af8479d139f2 (diff)
[SPARK-15028][SQL] Remove HiveSessionState.setDefaultOverrideConfs
## What changes were proposed in this pull request?

This patch removes some code that is no longer relevant -- mainly HiveSessionState.setDefaultOverrideConfs.

## How was this patch tested?

N/A

Author: Reynold Xin <rxin@databricks.com>

Closes #12806 from rxin/SPARK-15028.
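For orientation, the SQLContext.getConf(ConfigEntry) overloads removed below are superseded by the string-keyed RuntimeConfig API on SparkSession, which the updated call sites in this diff use. A minimal sketch of that replacement (the conf key value and app name here are illustrative):

```scala
import org.apache.spark.sql.SparkSession

object RuntimeConfExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("runtime-conf-example") // illustrative name
      .getOrCreate()

    // Set a SQL conf by its string key, as the patched call sites do.
    spark.conf.set("spark.sql.caseSensitive", "false")

    // Read it back; the second argument is a default for unset keys,
    // covering the use case of the removed getConf(entry, defaultValue).
    val caseSensitive = spark.conf.get("spark.sql.caseSensitive", "true")
    println(s"spark.sql.caseSensitive = $caseSensitive")

    spark.stop()
  }
}
```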
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala              | 23
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala |  6
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala          | 15
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala           |  6
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala   | 10
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala      |  3
6 files changed, 8 insertions(+), 55 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index ff633cf837..168ac7e04b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -44,8 +44,10 @@ import org.apache.spark.sql.types._
import org.apache.spark.sql.util.ExecutionListenerManager
/**
- * The entry point for working with structured data (rows and columns) in Spark. Allows the
- * creation of [[DataFrame]] objects as well as the execution of SQL queries.
+ * The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
+ *
+ * As of Spark 2.0, this is replaced by [[SparkSession]]. However, we are keeping the class here
+ * for backward compatibility.
*
* @groupname basic Basic Operations
* @groupname ddl_ops Persistent Catalog DDL
@@ -167,23 +169,6 @@ class SQLContext private[sql](
/**
* Return the value of Spark SQL configuration property for the given key. If the key is not set
- * yet, return `defaultValue` in [[ConfigEntry]].
- */
- private[sql] def getConf[T](entry: ConfigEntry[T]): T = {
- sparkSession.conf.get(entry)
- }
-
- /**
- * Return the value of Spark SQL configuration property for the given key. If the key is not set
- * yet, return `defaultValue`. This is useful when `defaultValue` in ConfigEntry is not the
- * desired one.
- */
- private[sql] def getConf[T](entry: ConfigEntry[T], defaultValue: T): T = {
- sparkSession.conf.get(entry, defaultValue)
- }
-
- /**
- * Return the value of Spark SQL configuration property for the given key. If the key is not set
* yet, return `defaultValue`.
*
* @group config
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
index 5b96ab10c9..c77c889a1b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
@@ -68,11 +68,11 @@ package object debug {
}
/**
- * Augments [[SQLContext]] with debug methods.
+ * Augments [[SparkSession]] with debug methods.
*/
- implicit class DebugSQLContext(sqlContext: SQLContext) {
+ implicit class DebugSQLContext(sparkSession: SparkSession) {
def debug(): Unit = {
- sqlContext.setConf(SQLConf.DATAFRAME_EAGER_ANALYSIS, false)
+ sparkSession.conf.set(SQLConf.DATAFRAME_EAGER_ANALYSIS.key, false)
}
}
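As a usage sketch, assuming the post-patch state of this file: the implicit now wraps SparkSession rather than SQLContext, so enabling debug mode looks like this (the app name is illustrative):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.debug._

object DebugModeExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("debug-mode-example") // illustrative name
      .getOrCreate()

    // The implicit DebugSQLContext picks up the SparkSession and turns
    // off eager DataFrame analysis via the string-keyed conf API.
    spark.debug()

    spark.stop()
  }
}
```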
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index f10d8372ed..80a93ee6d4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -66,21 +66,6 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
Row(1, 1) :: Nil)
}
- ignore("invalid plan toString, debug mode") {
- // Turn on debug mode so we can see invalid query plans.
- import org.apache.spark.sql.execution.debug._
-
- withSQLConf(SQLConf.DATAFRAME_EAGER_ANALYSIS.key -> "true") {
- sqlContext.debug()
-
- val badPlan = testData.select('badColumn)
-
- assert(badPlan.toString contains badPlan.queryExecution.toString,
- "toString on bad query plans should include the query execution but was:\n" +
- badPlan.toString)
- }
- }
-
test("access complex data") {
assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 5065e5b80b..ec5163b658 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -1495,15 +1495,11 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("SPARK-4699 case sensitivity SQL query") {
- val orig = sqlContext.getConf(SQLConf.CASE_SENSITIVE)
- try {
- sqlContext.setConf(SQLConf.CASE_SENSITIVE, false)
+ withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val data = TestData(1, "val_1") :: TestData(2, "val_2") :: Nil
val rdd = sparkContext.parallelize((0 to 1).map(i => data(i)))
rdd.toDF().registerTempTable("testTable1")
checkAnswer(sql("SELECT VALUE FROM TESTTABLE1 where KEY = 1"), Row("val_1"))
- } finally {
- sqlContext.setConf(SQLConf.CASE_SENSITIVE, orig)
}
}
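The test above moves from a manual try/finally around setConf to the withSQLConf test helper. A minimal standalone sketch of the save-set-restore pattern that helper encapsulates (this reimplementation is illustrative, not Spark's actual test utility):

```scala
import org.apache.spark.sql.SparkSession

object WithSQLConfSketch {
  // Temporarily apply SQL conf pairs, restoring prior values afterwards.
  def withSQLConf(spark: SparkSession)(pairs: (String, String)*)(f: => Unit): Unit = {
    // Remember which keys were set, and to what, before overriding.
    val originals = pairs.map { case (k, _) => k -> spark.conf.getOption(k) }
    pairs.foreach { case (k, v) => spark.conf.set(k, v) }
    try f finally {
      originals.foreach {
        case (k, Some(v)) => spark.conf.set(k, v) // restore prior value
        case (k, None)    => spark.conf.unset(k)  // key was unset before
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("conf-sketch") // illustrative name
      .getOrCreate()

    withSQLConf(spark)("spark.sql.caseSensitive" -> "false") {
      println(spark.conf.get("spark.sql.caseSensitive")) // "false" inside the block
    }

    spark.stop()
  }
}
```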
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index f3076912cb..57aa4b2931 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -44,8 +44,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
*/
lazy val metadataHive: HiveClient = sharedState.metadataHive.newSession()
- setDefaultOverrideConfs()
-
/**
* Internal catalog for managing table and database states.
*/
@@ -108,14 +106,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
// Helper methods, partially leftover from pre-2.0 days
// ------------------------------------------------------
- /**
- * Overrides default Hive configurations to avoid breaking changes to Spark SQL users.
- * - allow SQL11 keywords to be used as identifiers
- */
- def setDefaultOverrideConfs(): Unit = {
- conf.setConfString(ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS.varname, "false")
- }
-
override def addJar(path: String): Unit = {
metadataHive.addJar(path)
super.addJar(path)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 93646a45a2..b41d882ffa 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -432,9 +432,6 @@ private[hive] class TestHiveSparkSession(
// Lots of tests fail if we do not change the partition whitelist from the default.
sessionState.metadataHive.runSqlHive("set hive.metastore.partition.name.whitelist.pattern=.*")
- // In case a test changed any of these values, restore all the original ones here.
- sessionState.setDefaultOverrideConfs()
-
sessionState.catalog.setCurrentDatabase("default")
} catch {
case e: Exception =>