author    Cheng Lian <lian.cs.zju@gmail.com>        2014-09-12 20:14:09 -0700
committer Michael Armbrust <michael@databricks.com> 2014-09-12 20:14:09 -0700
commit    6d887db7891be643f0131b136e82191b5f6eb407 (patch)
tree      2cee12cb5d5ac67290863df4d9cf5d44e9d464f6 /sql
parent    885d1621bc06bc1f009c9707c3452eac26baf828 (diff)
[SPARK-3515][SQL] Moves test suite setup code to beforeAll rather than in constructor
Please refer to the JIRA ticket for details.

**NOTE** We should check all test suites that do similar initialization-like side effects in their constructors. This PR only fixes `ParquetMetastoreSuite` because it breaks our Jenkins Maven build.

Author: Cheng Lian <lian.cs.zju@gmail.com>

Closes #2375 from liancheng/say-no-to-constructor and squashes the following commits:

0ceb75b [Cheng Lian] Moves test suite setup code to beforeAll rather than in constructor
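For context, here is a minimal ScalaTest sketch of the pattern this patch adopts (the suite name `ExampleSuite` and the temp-directory resource are hypothetical, not taken from the patch). Statements in a suite's class body run in its constructor, i.e. as soon as the framework instantiates the suite, which can happen during test discovery even when none of its tests are selected to run; `beforeAll` instead runs once, just before the suite's first test.

```scala
import java.nio.file.{Files, Path}

import org.scalatest.{BeforeAndAfterAll, FunSuite}

// Hypothetical suite illustrating the beforeAll pattern. FunSuite is the
// ScalaTest 2.x trait Spark used at the time; newer ScalaTest releases
// moved it to org.scalatest.funsuite.AnyFunSuite.
class ExampleSuite extends FunSuite with BeforeAndAfterAll {
  // BAD (the pre-patch shape): a statement placed here lives in the
  // constructor and fires as soon as the suite object is created:
  //   val tempDir: Path = Files.createTempDirectory("example")

  // GOOD: declare the field, defer the side effect to beforeAll.
  private var tempDir: Path = _

  override def beforeAll(): Unit = {
    tempDir = Files.createTempDirectory("example")
  }

  override def afterAll(): Unit = {
    // Release the resource once the whole suite has finished, even if
    // individual tests failed.
    Files.deleteIfExists(tempDir)
  }

  test("the resource is available to tests") {
    assert(Files.exists(tempDir))
  }
}
```

The `afterAll` override mirrors the patch below, which resets `spark.sql.hive.convertMetastoreParquet` there so the setting does not leak into other suites.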
Diffstat (limited to 'sql')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala | 53
1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
index 0723be7298..e380280f30 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/parquet/ParquetMetastoreSuite.scala
@@ -20,14 +20,10 @@ package org.apache.spark.sql.parquet
import java.io.File
-import org.apache.spark.sql.hive.execution.HiveTableScan
import org.scalatest.BeforeAndAfterAll
-import scala.reflect.ClassTag
-
-import org.apache.spark.sql.{SQLConf, QueryTest}
-import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
-import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.QueryTest
+import org.apache.spark.sql.hive.execution.HiveTableScan
import org.apache.spark.sql.hive.test.TestHive._
case class ParquetData(intField: Int, stringField: String)
@@ -36,27 +32,19 @@ case class ParquetData(intField: Int, stringField: String)
* Tests for our SerDe -> Native parquet scan conversion.
*/
class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
-
override def beforeAll(): Unit = {
- setConf("spark.sql.hive.convertMetastoreParquet", "true")
- }
-
- override def afterAll(): Unit = {
- setConf("spark.sql.hive.convertMetastoreParquet", "false")
- }
-
- val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
- partitionedTableDir.delete()
- partitionedTableDir.mkdir()
-
- (1 to 10).foreach { p =>
- val partDir = new File(partitionedTableDir, s"p=$p")
- sparkContext.makeRDD(1 to 10)
- .map(i => ParquetData(i, s"part-$p"))
- .saveAsParquetFile(partDir.getCanonicalPath)
- }
-
- sql(s"""
+ val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
+ partitionedTableDir.delete()
+ partitionedTableDir.mkdir()
+
+ (1 to 10).foreach { p =>
+ val partDir = new File(partitionedTableDir, s"p=$p")
+ sparkContext.makeRDD(1 to 10)
+ .map(i => ParquetData(i, s"part-$p"))
+ .saveAsParquetFile(partDir.getCanonicalPath)
+ }
+
+ sql(s"""
create external table partitioned_parquet
(
intField INT,
@@ -70,7 +58,7 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
location '${partitionedTableDir.getCanonicalPath}'
""")
- sql(s"""
+ sql(s"""
create external table normal_parquet
(
intField INT,
@@ -83,8 +71,15 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
location '${new File(partitionedTableDir, "p=1").getCanonicalPath}'
""")
- (1 to 10).foreach { p =>
- sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+ (1 to 10).foreach { p =>
+ sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+ }
+
+ setConf("spark.sql.hive.convertMetastoreParquet", "true")
+ }
+
+ override def afterAll(): Unit = {
+ setConf("spark.sql.hive.convertMetastoreParquet", "false")
}
test("project the partitioning column") {