path: root/sql/hive/src/test
author    Andrew Or <andrew@databricks.com>    2015-08-13 17:42:01 -0700
committer Reynold Xin <rxin@databricks.com>    2015-08-13 17:42:01 -0700
commit    8187b3ae477e2b2987ae9acc5368d57b1d5653b2 (patch)
tree      e80b71bbbfbf39b0fdca5a5bfca567ae8e0ca6a3 /sql/hive/src/test
parent    c50f97dafd2d5bf5a8351efcc1c8d3e2b87efc72 (diff)
[SPARK-9580] [SQL] Replace singletons in SQL tests
A fundamental limitation of the existing SQL tests is that *there is simply no way to create your own `SparkContext`*. This is a serious limitation because the user may wish to use a different master or config. As a case in point, `BroadcastJoinSuite` is entirely commented out because there is no way to make it pass with the existing infrastructure.

This patch removes the singletons `TestSQLContext` and `TestData`, and instead introduces a `SharedSQLContext` that starts a context per suite. Unfortunately the singletons were so ingrained in the SQL tests that this patch necessarily needed to touch *all* the SQL test files.

Author: Andrew Or <andrew@databricks.com>

Closes #8111 from andrewor14/sql-tests-refactor.
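The `SharedSQLContext` trait itself lives in the core SQL test sources, outside the `sql/hive/src/test` subtree shown below, so it does not appear in these hunks. As a minimal illustrative sketch of the per-suite pattern it enables — the trait name aside, all names and the config hook here are assumptions, not the patch's actual code:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    import org.scalatest.{BeforeAndAfterAll, Suite}

    // Sketch only: one SQLContext per suite instead of a global singleton.
    trait PerSuiteSQLContextSketch extends BeforeAndAfterAll { self: Suite =>

      // Suites may override this to use a different master or config,
      // which the old TestSQLContext singleton made impossible.
      protected def sparkConf: SparkConf =
        new SparkConf().setMaster("local[2]").setAppName(getClass.getSimpleName)

      private var _ctx: SQLContext = _
      protected def sqlContext: SQLContext = _ctx

      override protected def beforeAll(): Unit = {
        super.beforeAll()
        // Start a fresh context before the suite runs.
        _ctx = new SQLContext(new SparkContext(sparkConf))
      }

      override protected def afterAll(): Unit = {
        try {
          // Stop the context so the next suite can start its own.
          if (_ctx != null) {
            _ctx.sparkContext.stop()
            _ctx = null
          }
        } finally {
          super.afterAll()
        }
      }
    }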
Diffstat (limited to 'sql/hive/src/test')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala            |  5
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala                    | 11
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala           |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala                  |  5
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala       |  3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala                            |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala     |  9
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala          |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala             |  3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala |  3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala                         |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala                       |  3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/CommitFailureTestRelationSuite.scala   |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala           |  5
14 files changed, 36 insertions(+), 33 deletions(-)
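Every hunk below applies the same mechanical change: suites stop binding the concrete `sqlContext` member to the `TestHive` singleton directly and instead override an `_sqlContext` hook, keeping a local alias so test bodies and implicits still work. A condensed sketch of the pattern — the `SQLTestUtils` signature shown is inferred from these hunks, not copied from its source, and compiling the example assumes the Hive test classes are on the classpath:

    import org.apache.spark.sql.SQLContext

    // Inferred shape of the hook the shared test traits now expose; the
    // real SQLTestUtils carries many helpers omitted here.
    trait SQLTestUtilsSketch {
      protected def _sqlContext: SQLContext
    }

    // Before: `override val sqlContext = TestHive` pinned the singleton.
    // After: implement the hook, then alias it to a stable identifier so
    // `import sqlContext.implicits._` keeps resolving.
    class ExampleHiveSuite extends SQLTestUtilsSketch {
      override def _sqlContext: SQLContext = org.apache.spark.sql.hive.test.TestHive
      private val sqlContext = _sqlContext
      import sqlContext.implicits._
    }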
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index 59e65ff97b..574624d501 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql.hive.test.TestHive.implicits._
import org.apache.spark.sql.sources.DataSourceTest
import org.apache.spark.sql.test.{ExamplePointUDT, SQLTestUtils}
import org.apache.spark.sql.types.{DecimalType, StringType, StructType}
-import org.apache.spark.sql.{Row, SaveMode}
+import org.apache.spark.sql.{Row, SaveMode, SQLContext}
import org.apache.spark.{Logging, SparkFunSuite}
@@ -53,7 +53,8 @@ class HiveMetastoreCatalogSuite extends SparkFunSuite with Logging {
}
class DataSourceWithHiveMetastoreCatalogSuite extends DataSourceTest with SQLTestUtils {
- override val sqlContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ import testImplicits._
private val testDF = range(1, 3).select(
('id + 0.1) cast DecimalType(10, 3) as 'd1,
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
index 1fa005d5f9..fe0db5228d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala
@@ -19,14 +19,13 @@ package org.apache.spark.sql.hive
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.execution.datasources.parquet.ParquetTest
-import org.apache.spark.sql.{QueryTest, Row}
+import org.apache.spark.sql.{QueryTest, Row, SQLContext}
case class Cases(lower: String, UPPER: String)
class HiveParquetSuite extends QueryTest with ParquetTest {
- val sqlContext = TestHive
-
- import sqlContext._
+ private val ctx = TestHive
+ override def _sqlContext: SQLContext = ctx
test("Case insensitive attribute names") {
withParquetTable((1 to 4).map(i => Cases(i.toString, i.toString)), "cases") {
@@ -54,7 +53,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest {
test("Converting Hive to Parquet Table via saveAsParquetFile") {
withTempPath { dir =>
sql("SELECT * FROM src").write.parquet(dir.getCanonicalPath)
- read.parquet(dir.getCanonicalPath).registerTempTable("p")
+ ctx.read.parquet(dir.getCanonicalPath).registerTempTable("p")
withTempTable("p") {
checkAnswer(
sql("SELECT * FROM src ORDER BY key"),
@@ -67,7 +66,7 @@ class HiveParquetSuite extends QueryTest with ParquetTest {
withParquetTable((1 to 10).map(i => (i, s"val_$i")), "t") {
withTempPath { file =>
sql("SELECT * FROM t LIMIT 1").write.parquet(file.getCanonicalPath)
- read.parquet(file.getCanonicalPath).registerTempTable("p")
+ ctx.read.parquet(file.getCanonicalPath).registerTempTable("p")
withTempTable("p") {
// let's do three overwrites for good measure
sql("INSERT OVERWRITE TABLE p SELECT * FROM t")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index 7f36a483a3..20a50586d5 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -22,7 +22,6 @@ import java.io.{IOException, File}
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.Path
-import org.apache.hadoop.mapred.InvalidInputException
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.Logging
@@ -42,7 +41,8 @@ import org.apache.spark.util.Utils
*/
class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll
with Logging {
- override val sqlContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
var jsonFilePath: String = _
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
index 73852f13ad..417e8b0791 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
@@ -22,9 +22,8 @@ import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.{QueryTest, SQLContext, SaveMode}
class MultiDatabaseSuite extends QueryTest with SQLTestUtils {
- override val sqlContext: SQLContext = TestHive
-
- import sqlContext.sql
+ override val _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
private val df = sqlContext.range(10).coalesce(1)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
index 251e0324bf..13452e71a1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala
@@ -26,7 +26,8 @@ import org.apache.spark.sql.{Row, SQLConf, SQLContext}
class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest {
import ParquetCompatibilityTest.makeNullable
- override val sqlContext: SQLContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
/**
* Set the staging directory (and hence path to ignore Parquet files under)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
index 9b3ede43ee..7ee1c8d13a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/UDFSuite.scala
@@ -17,14 +17,12 @@
package org.apache.spark.sql.hive
-import org.apache.spark.sql.{Row, QueryTest}
+import org.apache.spark.sql.QueryTest
case class FunctionResult(f1: String, f2: String)
class UDFSuite extends QueryTest {
-
private lazy val ctx = org.apache.spark.sql.hive.test.TestHive
- import ctx.implicits._
test("UDF case insensitive") {
ctx.udf.register("random0", () => { Math.random() })
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
index 7b5aa4763f..a312f84958 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
@@ -17,17 +17,18 @@
package org.apache.spark.sql.hive.execution
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.spark.sql._
import org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
-import org.apache.spark.sql._
-import org.scalatest.BeforeAndAfterAll
import _root_.test.org.apache.spark.sql.hive.aggregate.{MyDoubleAvg, MyDoubleSum}
abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
-
- override val sqlContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ protected val sqlContext = _sqlContext
import sqlContext.implicits._
var originalUseAggregate2: Boolean = _
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
index 44c5b80392..11d7a872df 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
@@ -26,8 +26,8 @@ import org.apache.spark.sql.test.SQLTestUtils
* A set of tests that validates support for Hive Explain command.
*/
class HiveExplainSuite extends QueryTest with SQLTestUtils {
-
- def sqlContext: SQLContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
test("explain extended command") {
checkExistence(sql(" explain select * from src where key=123 "), true,
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 79a136ae6f..8b8f520776 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -66,7 +66,8 @@ class MyDialect extends DefaultParserDialect
* valid, but Hive currently cannot execute it.
*/
class SQLQuerySuite extends QueryTest with SQLTestUtils {
- override def sqlContext: SQLContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
test("UDTF") {
sql(s"ADD JAR ${TestHive.getHiveFile("TestUDTF.jar").getCanonicalPath()}")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
index 0875232aed..9aca40f15a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ScriptTransformationSuite.scala
@@ -31,7 +31,8 @@ import org.apache.spark.sql.types.StringType
class ScriptTransformationSuite extends SparkPlanTest {
- override def sqlContext: SQLContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
private val noSerdeIOSchema = HiveScriptIOSchema(
inputRowFormat = Seq.empty,
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
index 145965388d..f7ba20ff41 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
@@ -27,8 +27,8 @@ import org.apache.spark.sql._
import org.apache.spark.sql.test.SQLTestUtils
private[sql] trait OrcTest extends SQLTestUtils { this: SparkFunSuite =>
- lazy val sqlContext = org.apache.spark.sql.hive.test.TestHive
-
+ protected override def _sqlContext: SQLContext = org.apache.spark.sql.hive.test.TestHive
+ protected val sqlContext = _sqlContext
import sqlContext.implicits._
import sqlContext.sparkContext
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index 50f02432da..34d3434569 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -685,7 +685,8 @@ class ParquetSourceSuite extends ParquetPartitioningTest {
* A collection of tests for parquet data with various forms of partitioning.
*/
abstract class ParquetPartitioningTest extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
- override def sqlContext: SQLContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ protected val sqlContext = _sqlContext
var partitionedTableDir: File = null
var normalTableDir: File = null
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/CommitFailureTestRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/CommitFailureTestRelationSuite.scala
index e976125b37..b4640b1616 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/CommitFailureTestRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/CommitFailureTestRelationSuite.scala
@@ -18,14 +18,16 @@
package org.apache.spark.sql.sources
import org.apache.hadoop.fs.Path
-import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{SparkException, SparkFunSuite}
+import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.test.SQLTestUtils
class CommitFailureTestRelationSuite extends SparkFunSuite with SQLTestUtils {
- override val sqlContext = TestHive
+ override def _sqlContext: SQLContext = TestHive
+ private val sqlContext = _sqlContext
// When committing a task, `CommitFailureTestSource` throws an exception for testing purpose.
val dataSourceName: String = classOf[CommitFailureTestSource].getCanonicalName
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index 2a69d331b6..af445626fb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -34,9 +34,8 @@ import org.apache.spark.sql.types._
abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils {
- override lazy val sqlContext: SQLContext = TestHive
-
- import sqlContext.sql
+ override def _sqlContext: SQLContext = TestHive
+ protected val sqlContext = _sqlContext
import sqlContext.implicits._
val dataSourceName: String