aboutsummaryrefslogtreecommitdiff
path: root/project/SparkBuild.scala
diff options
context:
space:
mode:
authorOopsOutOfMemory <victorshengli@126.com>2015-03-15 20:44:45 +0800
committerCheng Lian <lian@databricks.com>2015-03-15 20:44:45 +0800
commit62ede5383f64b69570a66d46939638f4bf38d1b1 (patch)
tree78b19468b6304f3caca57494c9af61e0eb5b7863 /project/SparkBuild.scala
parentc49d156624624a719c0d1262a58933ea3e346963 (diff)
downloadspark-62ede5383f64b69570a66d46939638f4bf38d1b1.tar.gz
spark-62ede5383f64b69570a66d46939638f4bf38d1b1.tar.bz2
spark-62ede5383f64b69570a66d46939638f4bf38d1b1.zip
[SPARK-6285][SQL] Remove ParquetTestData in SparkBuild.scala and in README.md
This is a follow-up clean-up PR for #5010. This will resolve issues when launching `hive/console` like below: ``` <console>:20: error: object ParquetTestData is not a member of package org.apache.spark.sql.parquet import org.apache.spark.sql.parquet.ParquetTestData ``` Author: OopsOutOfMemory <victorshengli@126.com> Closes #5032 from OopsOutOfMemory/SPARK-6285 and squashes the following commits: 2996aeb [OopsOutOfMemory] remove ParquetTestData
Diffstat (limited to 'project/SparkBuild.scala')
-rw-r--r--project/SparkBuild.scala6
1 file changed, 2 insertions, 4 deletions
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index f4c74c4051..ac37c605de 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -272,8 +272,7 @@ object SQL {
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.test.TestSQLContext._
- |import org.apache.spark.sql.types._
- |import org.apache.spark.sql.parquet.ParquetTestData""".stripMargin,
+ |import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()"
)
}
@@ -304,8 +303,7 @@ object Hive {
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.hive._
|import org.apache.spark.sql.hive.test.TestHive._
- |import org.apache.spark.sql.types._
- |import org.apache.spark.sql.parquet.ParquetTestData""".stripMargin,
+ |import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()",
// Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
// in order to generate golden files. This is only required for developers who are adding new