about summary refs log tree commit diff
path: root/sql/core/src/test
diff options
context:
space:
mode:
author    Reynold Xin <rxin@databricks.com>  2016-03-24 22:34:55 -0700
committer Reynold Xin <rxin@databricks.com>  2016-03-24 22:34:55 -0700
commit3619fec1ec395a66ad5ae1f614ce67fe173cf159 (patch)
tree5d123e603aacc49b553df038b78cabe8557923cb /sql/core/src/test
parent13cbb2de709d0ec2707eebf36c5c97f7d44fb84f (diff)
downloadspark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.tar.gz
spark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.tar.bz2
spark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.zip
[SPARK-14142][SQL] Replace internal use of unionAll with union
## What changes were proposed in this pull request?

unionAll has been deprecated in SPARK-14088.

## How was this patch tested?

Should be covered by all existing tests.

Author: Reynold Xin <rxin@databricks.com>

Closes #11946 from rxin/SPARK-14142.
Diffstat (limited to 'sql/core/src/test')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala                     |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameStatSuite.scala                    |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala                        | 12
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala                             |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala                         |  4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeCoordinatorSuite.scala    |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala         |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala                 |  2
8 files changed, 14 insertions, 14 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index efa2eeaf4d..82b79c791d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -363,7 +363,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with SharedSQLContext
}
test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
- val table3x = testData.unionAll(testData).unionAll(testData)
+ val table3x = testData.union(testData).union(testData)
table3x.registerTempTable("testData3x")
sql("SELECT key, value FROM testData3x ORDER BY key").registerTempTable("orderedTable")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameStatSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameStatSuite.scala
index fe12aa8099..0ea7727e45 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameStatSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameStatSuite.scala
@@ -57,7 +57,7 @@ class DataFrameStatSuite extends QueryTest with SharedSQLContext {
val splits = data.randomSplit(Array[Double](1, 2, 3), seed)
assert(splits.length == 3, "wrong number of splits")
- assert(splits.reduce((a, b) => a.unionAll(b)).sort("id").collect().toList ==
+ assert(splits.reduce((a, b) => a.union(b)).sort("id").collect().toList ==
data.collect().toList, "incomplete or wrong split")
val s = splits.map(_.count())
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index ec4e7b2042..86c6405522 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -94,8 +94,8 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
}
test("union all") {
- val unionDF = testData.unionAll(testData).unionAll(testData)
- .unionAll(testData).unionAll(testData)
+ val unionDF = testData.union(testData).union(testData)
+ .union(testData).union(testData)
// Before optimizer, Union should be combined.
assert(unionDF.queryExecution.analyzed.collect {
@@ -107,7 +107,7 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
)
}
- test("unionAll should union DataFrames with UDTs (SPARK-13410)") {
+ test("union should union DataFrames with UDTs (SPARK-13410)") {
val rowRDD1 = sparkContext.parallelize(Seq(Row(1, new ExamplePoint(1.0, 2.0))))
val schema1 = StructType(Array(StructField("label", IntegerType, false),
StructField("point", new ExamplePointUDT(), false)))
@@ -118,7 +118,7 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
val df2 = sqlContext.createDataFrame(rowRDD2, schema2)
checkAnswer(
- df1.unionAll(df2).orderBy("label"),
+ df1.union(df2).orderBy("label"),
Seq(Row(1, new ExamplePoint(1.0, 2.0)), Row(2, new ExamplePoint(3.0, 4.0)))
)
}
@@ -636,7 +636,7 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
val jsonDF = sqlContext.read.json(jsonDir)
assert(parquetDF.inputFiles.nonEmpty)
- val unioned = jsonDF.unionAll(parquetDF).inputFiles.sorted
+ val unioned = jsonDF.union(parquetDF).inputFiles.sorted
val allFiles = (jsonDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted
assert(unioned === allFiles)
}
@@ -1104,7 +1104,7 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
}
}
- val union = df1.unionAll(df2)
+ val union = df1.union(df2)
checkAnswer(
union.filter('i < rand(7) * 10),
expected(union)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
index dfffa4bc8b..5af1a4fcd7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
@@ -184,7 +184,7 @@ class JoinSuite extends QueryTest with SharedSQLContext {
}
test("big inner join, 4 matches per row") {
- val bigData = testData.unionAll(testData).unionAll(testData).unionAll(testData)
+ val bigData = testData.union(testData).union(testData).union(testData)
val bigDataX = bigData.as("x")
val bigDataY = bigData.as("y")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 61358fda76..077e579931 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -251,8 +251,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("aggregation with codegen") {
// Prepare a table that we can group some rows.
sqlContext.table("testData")
- .unionAll(sqlContext.table("testData"))
- .unionAll(sqlContext.table("testData"))
+ .union(sqlContext.table("testData"))
+ .union(sqlContext.table("testData"))
.registerTempTable("testData3x")
try {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeCoordinatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeCoordinatorSuite.scala
index 4f01e46633..01d485ce2d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeCoordinatorSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeCoordinatorSuite.scala
@@ -342,7 +342,7 @@ class ExchangeCoordinatorSuite extends SparkFunSuite with BeforeAndAfterAll {
sqlContext
.range(0, 1000)
.selectExpr("id % 500 as key", "id as value")
- .unionAll(sqlContext.range(0, 1000).selectExpr("id % 500 as key", "id as value"))
+ .union(sqlContext.range(0, 1000).selectExpr("id % 500 as key", "id as value"))
checkAnswer(
join,
expectedAnswer.collect())
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
index 27b02d6e1a..a9b1970a7c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
@@ -44,7 +44,7 @@ class PartitionedWriteSuite extends QueryTest with SharedSQLContext {
path.delete()
val base = sqlContext.range(100)
- val df = base.unionAll(base).select($"id", lit(1).as("data"))
+ val df = base.union(base).select($"id", lit(1).as("data"))
df.write.partitionBy("id").save(path.getCanonicalPath)
checkAnswer(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
index 588f6e268f..bb2c54aa64 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/SaveLoadSuite.scala
@@ -122,7 +122,7 @@ class SaveLoadSuite extends DataSourceTest with SharedSQLContext with BeforeAndA
// verify the append mode
df.write.mode(SaveMode.Append).json(path.toString)
- val df2 = df.unionAll(df)
+ val df2 = df.union(df)
df2.registerTempTable("jsonTable2")
checkLoad(df2, "jsonTable2")