about summary refs log tree commit diff
path: root/python/pyspark/sql
diff options
context:
space:
mode:
author: Davies Liu <davies@databricks.com> 2015-08-19 13:56:40 -0700
committer: Michael Armbrust <michael@databricks.com> 2015-08-19 13:56:40 -0700
commit 08887369c890e0dd87eb8b34e8c32bb03307bf24 (patch)
tree bd5b86d521b1da6809fa1266bf3e0eb6f1bef366 /python/pyspark/sql
parent e05da5cb5ea253e6372f648fc8203204f2a8df8d (diff)
download spark-08887369c890e0dd87eb8b34e8c32bb03307bf24.tar.gz
spark-08887369c890e0dd87eb8b34e8c32bb03307bf24.tar.bz2
spark-08887369c890e0dd87eb8b34e8c32bb03307bf24.zip
[SPARK-10073] [SQL] Python withColumn should replace the old column
DataFrame.withColumn in Python should be consistent with the Scala one (replacing the existing column that has the same name).

cc marmbrus

Author: Davies Liu <davies@databricks.com>

Closes #8300 from davies/with_column.
Diffstat (limited to 'python/pyspark/sql')
-rw-r--r-- python/pyspark/sql/dataframe.py | 12
-rw-r--r-- python/pyspark/sql/tests.py     |  4
2 files changed, 10 insertions, 6 deletions
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index da742d7ce7..025811f519 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -1202,7 +1202,9 @@ class DataFrame(object):
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
- """Returns a new :class:`DataFrame` by adding a column.
+ """
+ Returns a new :class:`DataFrame` by adding a column or replacing the
+ existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
@@ -1210,7 +1212,8 @@ class DataFrame(object):
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
- return self.select('*', col.alias(colName))
+ assert isinstance(col, Column), "col should be Column"
+ return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
@@ -1223,10 +1226,7 @@ class DataFrame(object):
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
- cols = [Column(_to_java_column(c)).alias(new)
- if c == existing else c
- for c in self.columns]
- return self.select(*cols)
+ return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 13cf647b66..aacfb34c77 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -1035,6 +1035,10 @@ class SQLTests(ReusedPySparkTestCase):
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
+ def test_with_column_with_existing_name(self):
+ keys = self.df.withColumn("key", self.df.key).select("key").collect()
+ self.assertEqual([r.key for r in keys], list(range(100)))
+
class HiveContextSQLTests(ReusedPySparkTestCase):