diff options
author | Reynold Xin <rxin@databricks.com> | 2016-03-24 22:34:55 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2016-03-24 22:34:55 -0700 |
commit | 3619fec1ec395a66ad5ae1f614ce67fe173cf159 (patch) | |
tree | 5d123e603aacc49b553df038b78cabe8557923cb /python/pyspark/sql/dataframe.py | |
parent | 13cbb2de709d0ec2707eebf36c5c97f7d44fb84f (diff) | |
download | spark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.tar.gz spark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.tar.bz2 spark-3619fec1ec395a66ad5ae1f614ce67fe173cf159.zip |
[SPARK-14142][SQL] Replace internal use of unionAll with union
## What changes were proposed in this pull request?
unionAll has been deprecated in SPARK-14088.
## How was this patch tested?
Should be covered by all existing tests.
Author: Reynold Xin <rxin@databricks.com>
Closes #11946 from rxin/SPARK-14142.
Diffstat (limited to 'python/pyspark/sql/dataframe.py')
-rw-r--r-- | python/pyspark/sql/dataframe.py | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py index 5cfc348a69..7a69c4c70c 100644 --- a/python/pyspark/sql/dataframe.py +++ b/python/pyspark/sql/dataframe.py @@ -360,7 +360,7 @@ class DataFrame(object): >>> df.repartition(10).rdd.getNumPartitions() 10 - >>> data = df.unionAll(df).repartition("age") + >>> data = df.union(df).repartition("age") >>> data.show() +---+-----+ |age| name| @@ -919,7 +919,7 @@ class DataFrame(object): This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by a distinct. """ - return DataFrame(self._jdf.unionAll(other._jdf), self.sql_ctx) + return DataFrame(self._jdf.union(other._jdf), self.sql_ctx) @since(1.3) def unionAll(self, other): |