aboutsummaryrefslogtreecommitdiff
path: root/sql/core/src/test/scala
diff options
context:
space:
mode:
authorHerman van Hovell <hvanhovell@databricks.com>2017-03-28 10:07:24 +0800
committerWenchen Fan <wenchen@databricks.com>2017-03-28 10:07:24 +0800
commitea361165e1ddce4d8aa0242ae3e878d7b39f1de2 (patch)
treef3014ba709d54b48172a399708074480a6ed9661 /sql/core/src/test/scala
parent8a6f33f0483dcee81467e6374a796b5dbd53ea30 (diff)
downloadspark-ea361165e1ddce4d8aa0242ae3e878d7b39f1de2.tar.gz
spark-ea361165e1ddce4d8aa0242ae3e878d7b39f1de2.tar.bz2
spark-ea361165e1ddce4d8aa0242ae3e878d7b39f1de2.zip
[SPARK-20100][SQL] Refactor SessionState initialization
## What changes were proposed in this pull request? The current SessionState initialization code path is quite complex. A part of the creation is done in the SessionState companion objects, a part of the creation is done inside the SessionState class, and a part is done by passing functions. This PR refactors this code path, and consolidates SessionState initialization into a builder class. This SessionState will not do any initialization and just becomes a placeholder for the various Spark SQL internals. This also lays the groundwork for two future improvements: 1. This provides us with a start for removing the `HiveSessionState`. Removing the `HiveSessionState` would also require us to move resource loading into a separate class, and to (re)move metadata hive. 2. This makes it easier to customize the Spark Session. Currently you will need to create a custom version of the builder. I have added hooks to facilitate this. A future step will be to create a semi-stable API on top of this. ## How was this patch tested? Existing tests. Author: Herman van Hovell <hvanhovell@databricks.com> Closes #17433 from hvanhovell/SPARK-20100.
Diffstat (limited to 'sql/core/src/test/scala')
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala23
1 file changed, 12 insertions, 11 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
index 898a2fb4f3..b01977a238 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.test
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.internal.{SessionState, SQLConf}
+import org.apache.spark.sql.internal.{SessionState, SessionStateBuilder, SQLConf, WithTestConf}
/**
* A special [[SparkSession]] prepared for testing.
@@ -35,16 +35,9 @@ private[sql] class TestSparkSession(sc: SparkContext) extends SparkSession(sc) {
}
@transient
- override lazy val sessionState: SessionState = SessionState(
- this,
- new SQLConf {
- clear()
- override def clear(): Unit = {
- super.clear()
- // Make sure we start with the default test configs even after clear
- TestSQLContext.overrideConfs.foreach { case (key, value) => setConfString(key, value) }
- }
- })
+ override lazy val sessionState: SessionState = {
+ new TestSQLSessionStateBuilder(this, None).build()
+ }
// Needed for Java tests
def loadTestData(): Unit = {
@@ -67,3 +60,11 @@ private[sql] object TestSQLContext {
// Fewer shuffle partitions to speed up testing.
SQLConf.SHUFFLE_PARTITIONS.key -> "5")
}
+
+private[sql] class TestSQLSessionStateBuilder(
+ session: SparkSession,
+ state: Option[SessionState])
+ extends SessionStateBuilder(session, state) with WithTestConf {
+ override def overrideConfs: Map[String, String] = TestSQLContext.overrideConfs
+ override def newBuilder: NewBuilder = new TestSQLSessionStateBuilder(_, _)
+}