author     Reynold Xin <rxin@databricks.com>    2016-03-24 22:34:55 -0700
committer  Reynold Xin <rxin@databricks.com>    2016-03-24 22:34:55 -0700
commit     3619fec1ec395a66ad5ae1f614ce67fe173cf159 (patch)
tree       5d123e603aacc49b553df038b78cabe8557923cb /sql/hive
parent     13cbb2de709d0ec2707eebf36c5c97f7d44fb84f (diff)
[SPARK-14142][SQL] Replace internal use of unionAll with union
## What changes were proposed in this pull request?

unionAll has been deprecated in SPARK-14088.

## How was this patch tested?

Should be covered by all existing tests.

Author: Reynold Xin <rxin@databricks.com>

Closes #11946 from rxin/SPARK-14142.
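For context, SPARK-14088 renamed `unionAll` to `union` on `Dataset`, keeping `unionAll` only as a deprecated alias; the semantics are unchanged and match SQL's UNION ALL (duplicate rows are kept, so call `.distinct()` afterwards if you need set union). A minimal sketch of the migration, assuming a `SQLContext` named `sqlContext` as in the test suites touched below:

```scala
// Hypothetical migration sketch, not part of this commit.
// Assumes an existing SQLContext named `sqlContext`.
val df = sqlContext.range(0, 100).toDF("value")

val before = df.unionAll(df) // deprecated alias since SPARK-14088
val after  = df.union(df)    // drop-in replacement, identical semantics

// Both behave like SQL UNION ALL: duplicates survive, so each has 200 rows.
assert(before.count() == 200)
assert(after.count() == 200)
```

Because the rename is a pure alias change, the patch only swaps call sites in the test suites; no behavior or expected answers change.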
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java                 |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala                 |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala               | 14
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala  |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala        | 12
5 files changed, 17 insertions, 17 deletions
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
index 63fb4b7cf7..397421ae92 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/JavaDataFrameSuite.java
@@ -82,7 +82,7 @@ public class JavaDataFrameSuite {
@Test
public void testUDAF() {
- Dataset<Row> df = hc.range(0, 100).unionAll(hc.range(0, 100)).select(col("id").as("value"));
+ Dataset<Row> df = hc.range(0, 100).union(hc.range(0, 100)).select(col("id").as("value"));
UserDefinedAggregateFunction udaf = new MyDoubleSum();
UserDefinedAggregateFunction registeredUDAF = hc.udf().register("mydoublesum", udaf);
// Create Columns for the UDAF. For now, callUDF does not take an argument to specific if
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 656c1317c1..11384a0275 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -186,7 +186,7 @@ class CachedTableSuite extends QueryTest with TestHiveSingleton {
assertCached(table("refreshTable"))
checkAnswer(
table("refreshTable"),
- table("src").unionAll(table("src")).collect())
+ table("src").union(table("src")).collect())
// Drop the table and create it again.
sql("DROP TABLE refreshTable")
@@ -198,7 +198,7 @@ class CachedTableSuite extends QueryTest with TestHiveSingleton {
sql("REFRESH TABLE refreshTable")
checkAnswer(
table("refreshTable"),
- table("src").unionAll(table("src")).collect())
+ table("src").union(table("src")).collect())
// It is not cached.
assert(!isCached("refreshTable"), "refreshTable should not be cached.")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
index d275190744..f3af60a018 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MultiDatabaseSuite.scala
@@ -113,11 +113,11 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
df.write.mode(SaveMode.Overwrite).saveAsTable("t")
df.write.mode(SaveMode.Append).saveAsTable("t")
assert(sqlContext.tableNames().contains("t"))
- checkAnswer(sqlContext.table("t"), df.unionAll(df))
+ checkAnswer(sqlContext.table("t"), df.union(df))
}
assert(sqlContext.tableNames(db).contains("t"))
- checkAnswer(sqlContext.table(s"$db.t"), df.unionAll(df))
+ checkAnswer(sqlContext.table(s"$db.t"), df.union(df))
checkTablePath(db, "t")
}
@@ -128,7 +128,7 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
df.write.mode(SaveMode.Overwrite).saveAsTable(s"$db.t")
df.write.mode(SaveMode.Append).saveAsTable(s"$db.t")
assert(sqlContext.tableNames(db).contains("t"))
- checkAnswer(sqlContext.table(s"$db.t"), df.unionAll(df))
+ checkAnswer(sqlContext.table(s"$db.t"), df.union(df))
checkTablePath(db, "t")
}
@@ -141,7 +141,7 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
assert(sqlContext.tableNames().contains("t"))
df.write.insertInto(s"$db.t")
- checkAnswer(sqlContext.table(s"$db.t"), df.unionAll(df))
+ checkAnswer(sqlContext.table(s"$db.t"), df.union(df))
}
}
}
@@ -156,7 +156,7 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
assert(sqlContext.tableNames(db).contains("t"))
df.write.insertInto(s"$db.t")
- checkAnswer(sqlContext.table(s"$db.t"), df.unionAll(df))
+ checkAnswer(sqlContext.table(s"$db.t"), df.union(df))
}
}
@@ -220,7 +220,7 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
hiveContext.refreshTable("t")
checkAnswer(
sqlContext.table("t"),
- df.withColumn("p", lit(1)).unionAll(df.withColumn("p", lit(2))))
+ df.withColumn("p", lit(1)).union(df.withColumn("p", lit(2))))
}
}
}
@@ -252,7 +252,7 @@ class MultiDatabaseSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
hiveContext.refreshTable(s"$db.t")
checkAnswer(
sqlContext.table(s"$db.t"),
- df.withColumn("p", lit(1)).unionAll(df.withColumn("p", lit(2))))
+ df.withColumn("p", lit(1)).union(df.withColumn("p", lit(2))))
}
}
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
index 1e5dbd991e..a15bd227a9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/ParquetHadoopFsRelationSuite.scala
@@ -137,7 +137,7 @@ class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
fs.delete(commonSummaryPath, true)
df.write.mode(SaveMode.Append).parquet(path)
- checkAnswer(sqlContext.read.parquet(path), df.unionAll(df))
+ checkAnswer(sqlContext.read.parquet(path), df.union(df))
assert(fs.exists(summaryPath))
assert(fs.exists(commonSummaryPath))
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index e842caf5be..ea7e905742 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -60,7 +60,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
p2 <- Seq("foo", "bar")
} yield (i, s"val_$i", 2, p2)).toDF("a", "b", "p1", "p2")
- lazy val partitionedTestDF = partitionedTestDF1.unionAll(partitionedTestDF2)
+ lazy val partitionedTestDF = partitionedTestDF1.union(partitionedTestDF2)
def checkQueries(df: DataFrame): Unit = {
// Selects everything
@@ -191,7 +191,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
sqlContext.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath).orderBy("a"),
- testDF.unionAll(testDF).orderBy("a").collect())
+ testDF.union(testDF).orderBy("a").collect())
}
}
@@ -268,7 +268,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
sqlContext.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath),
- partitionedTestDF.unionAll(partitionedTestDF).collect())
+ partitionedTestDF.union(partitionedTestDF).collect())
}
}
@@ -332,7 +332,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
testDF.write.format(dataSourceName).mode(SaveMode.Append).saveAsTable("t")
withTable("t") {
- checkAnswer(sqlContext.table("t"), testDF.unionAll(testDF).orderBy("a").collect())
+ checkAnswer(sqlContext.table("t"), testDF.union(testDF).orderBy("a").collect())
}
}
@@ -415,7 +415,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
.saveAsTable("t")
withTable("t") {
- checkAnswer(sqlContext.table("t"), partitionedTestDF.unionAll(partitionedTestDF).collect())
+ checkAnswer(sqlContext.table("t"), partitionedTestDF.union(partitionedTestDF).collect())
}
}
@@ -625,7 +625,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
.format(dataSourceName)
.option("dataSchema", df.schema.json)
.load(dir.getCanonicalPath),
- df.unionAll(df))
+ df.union(df))
// This will fail because AlwaysFailOutputCommitter is used when we do append.
intercept[Exception] {