path: root/project/SparkBuild.scala
author    Andrew Or <andrew@databricks.com>    2015-08-13 17:42:01 -0700
committer Reynold Xin <rxin@databricks.com>    2015-08-13 17:42:01 -0700
commit    8187b3ae477e2b2987ae9acc5368d57b1d5653b2 (patch)
tree      e80b71bbbfbf39b0fdca5a5bfca567ae8e0ca6a3 /project/SparkBuild.scala
parent    c50f97dafd2d5bf5a8351efcc1c8d3e2b87efc72 (diff)
[SPARK-9580] [SQL] Replace singletons in SQL tests
A fundamental limitation of the existing SQL tests is that *there is simply no way to create your own `SparkContext`*. This is a serious limitation because the user may wish to use a different master or config. As a case in point, `BroadcastJoinSuite` is entirely commented out because there is no way to make it pass with the existing infrastructure.

This patch removes the singletons `TestSQLContext` and `TestData`, and instead introduces a `SharedSQLContext` that starts a context per suite. Unfortunately the singletons were so ingrained in the SQL tests that this patch necessarily needed to touch *all* the SQL test files.

Review on Reviewable: https://reviewable.io/reviews/apache/spark/8111

Author: Andrew Or <andrew@databricks.com>

Closes #8111 from andrewor14/sql-tests-refactor.
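The per-suite setup means a refactored test can mix in the new trait instead of importing from the removed `TestSQLContext` singleton. Below is a minimal illustrative sketch, assuming the trait lives at `org.apache.spark.sql.test.SharedSQLContext` and exposes the suite's context and implicits as `sqlContext` and `testImplicits`; these names are assumptions for illustration, not taken from this diff:

```scala
// Hypothetical example of a suite using a per-suite context instead of a global singleton.
import org.apache.spark.sql.{QueryTest, Row}
import org.apache.spark.sql.test.SharedSQLContext   // assumed location of the new trait

class ExampleQuerySuite extends QueryTest with SharedSQLContext {
  // Implicits (toDF, $"col", ...) are scoped to this suite's context, not a shared singleton.
  import testImplicits._

  test("filter on a small DataFrame") {
    val df = Seq((1, "a"), (2, "b")).toDF("id", "name")
    // checkAnswer (from QueryTest) compares the query result against the expected rows.
    checkAnswer(df.filter($"id" > 1), Row(2, "b") :: Nil)
  }
}
```

Because each suite now owns its context, a suite such as `BroadcastJoinSuite` can construct a `SparkContext` with whatever master or config it needs rather than being forced onto the shared one.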
Diffstat (limited to 'project/SparkBuild.scala')
-rw-r--r--  project/SparkBuild.scala  16
1 file changed, 11 insertions, 5 deletions
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 74f815f941..04e0d49b17 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -319,6 +319,8 @@ object SQL {
lazy val settings = Seq(
initialCommands in console :=
"""
+ |import org.apache.spark.SparkContext
+ |import org.apache.spark.sql.SQLContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
@@ -328,9 +330,14 @@ object SQL {
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
- |import org.apache.spark.sql.test.TestSQLContext._
- |import org.apache.spark.sql.types._""".stripMargin,
- cleanupCommands in console := "sparkContext.stop()"
+ |import org.apache.spark.sql.types._
+ |
+ |val sc = new SparkContext("local[*]", "dev-shell")
+ |val sqlContext = new SQLContext(sc)
+ |import sqlContext.implicits._
+ |import sqlContext._
+ """.stripMargin,
+ cleanupCommands in console := "sc.stop()"
)
}
@@ -340,8 +347,6 @@ object Hive {
javaOptions += "-XX:MaxPermSize=256m",
// Specially disable assertions since some Hive tests fail them
javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"),
- // Multiple queries rely on the TestHive singleton. See comments there for more details.
- parallelExecution in Test := false,
// Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings
// only for this subproject.
scalacOptions <<= scalacOptions map { currentOpts: Seq[String] =>
@@ -349,6 +354,7 @@ object Hive {
},
initialCommands in console :=
"""
+ |import org.apache.spark.SparkContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._