author    Davies Liu <davies.liu@gmail.com>  2016-02-25 11:53:48 -0800
committer Davies Liu <davies.liu@gmail.com>  2016-02-25 11:53:48 -0800
commit    751724b1320d38fd94186df3d8f1ca887f21947a (patch)
tree      ef365e952284a7ec26aee882caa429a729223c9d /sql/core/src/test
parent    46f6e79316b72afea0c9b1559ea662dd3e95e57b (diff)
Revert "[SPARK-13457][SQL] Removes DataFrame RDD operations"
This reverts commit 157fe64f3ecbd13b7286560286e50235eecfe30e.
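
For context: SPARK-13457 removed the RDD-style operations that DataFrame forwarded to its underlying RDD[Row] (map, foreach, and similar), so callers had to go through an explicit .rdd hop. This revert restores the forwarding methods, which is why every test hunk below drops .rdd. Below is a minimal sketch of the delegation pattern being restored; the class name DataFrameWithRddOps and the exact signatures are illustrative assumptions, not Spark's actual source.

// Hypothetical sketch (not Spark's actual source): the restored DataFrame
// operations simply forward to the underlying RDD[Row], so df.map(f) and
// df.rdd.map(f) compute the same result.
import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row

class DataFrameWithRddOps(val rdd: RDD[Row]) {
  // map requires a ClassTag because RDD.map does; each call is a plain
  // delegation to the wrapped RDD[Row].
  def map[R: ClassTag](f: Row => R): RDD[R] = rdd.map(f)

  def foreach(f: Row => Unit): Unit = rdd.foreach(f)

  def count(): Long = rdd.count()
}

With forwarders like these in place, test code such as testData2.map(_ => 1).count() type-checks again without the intermediate .rdd call.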
Diffstat (limited to 'sql/core/src/test')
 sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala                          | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala     | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala                    | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
index 7d96ef6fe0..f54bff9f18 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
@@ -257,7 +257,7 @@ class DataFrameAggregateSuite extends QueryTest with SharedSQLContext {
   }
 
   test("count") {
-    assert(testData2.count() === testData2.rdd.map(_ => 1).count())
+    assert(testData2.count() === testData2.map(_ => 1).count())
 
     checkAnswer(
       testData2.agg(count('a), sumDistinct('a)), // non-partial
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
index bd51154c58..fbffe867e4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
@@ -101,7 +101,7 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContext
       (implicit df: DataFrame): Unit = {
     def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
       assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).sorted) {
-        df.rdd.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
+        df.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
       }
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index c85eeddc2c..3c74464d57 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -599,7 +599,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
   test("null and non-null strings") {
     // Create a dataset where the first values are NULL and then some non-null values. The
     // number of non-nulls needs to be bigger than the ParquetReader batch size.
-    val data = sqlContext.range(200).rdd.map { i =>
+    val data = sqlContext.range(200).map { i =>
       if (i.getLong(0) < 150) Row(None)
       else Row("a")
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
index 39920d8cc6..085e4a49a5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala
@@ -330,7 +330,7 @@ class SQLListenerSuite extends SparkFunSuite with SharedSQLContext {
     // listener should ignore the non SQL stage
     assert(sqlContext.listener.stageIdToStageMetrics.size == previousStageNumber)
 
-    sqlContext.sparkContext.parallelize(1 to 10).toDF().rdd.foreach(i => ())
+    sqlContext.sparkContext.parallelize(1 to 10).toDF().foreach(i => ())
     sqlContext.sparkContext.listenerBus.waitUntilEmpty(10000)
     // listener should save the SQL stage
     assert(sqlContext.listener.stageIdToStageMetrics.size == previousStageNumber + 1)
@@ -398,7 +398,7 @@ class SQLListenerMemoryLeakSuite extends SparkFunSuite {
           ).toDF()
           df.collect()
           try {
-            df.rdd.foreach(_ => throw new RuntimeException("Oops"))
+            df.foreach(_ => throw new RuntimeException("Oops"))
           } catch {
             case e: SparkException => // This is expected for a failed job
           }