author    0x0FFF <programmerag@gmail.com>    2015-09-02 13:36:36 -0700
committer Davies Liu <davies.liu@gmail.com>  2015-09-02 13:36:36 -0700
commit    6cd98c1878a9c5c6475ed5974643021ab27862a7 (patch)
tree      662254b085711c660660d1df9e95f07c421870d2 /python
parent    2da3a9e98e5d129d4507b5db01bba5ee9558d28e (diff)
[SPARK-10417] [SQL] Iterating through Column results in infinite loop
The `pyspark.sql.column.Column` object has a `__getitem__` method, which makes it iterable in Python. It defines `__getitem__` so that, when a column holds a list or dict, you can access individual elements of it through the DataFrame API. Being iterable is just a side effect of that, and it can confuse people getting familiar with Spark DataFrames, since you can iterate this way over a pandas DataFrame, for instance.

Issue reproduction:

```
df = sqlContext.jsonRDD(sc.parallelize(['{"name": "El Magnifico"}']))
for i in df["name"]:
    print i
```

Author: 0x0FFF <programmerag@gmail.com>

Closes #8574 from 0x0FFF/SPARK-10417.
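For background, the loop never terminates because of Python's legacy sequence-iteration protocol: when a class defines `__getitem__` but no `__iter__`, `for` falls back to calling `__getitem__(0)`, `__getitem__(1)`, and so on until `IndexError` is raised, and `Column.__getitem__` never raises it. A minimal sketch of that mechanism, using a hypothetical stand-in class rather than Spark's actual `Column`:

```
# Hypothetical stand-in for Column: __getitem__ accepts any key
# and never raises IndexError.
class FieldAccessor(object):
    def __getitem__(self, key):
        return "field[%s]" % key

# With no __iter__, Python's legacy protocol makes this iterable:
# for calls __getitem__(0), __getitem__(1), ... and only IndexError
# would stop it, so a bare loop over it never terminates.
for i, item in zip(range(3), FieldAccessor()):  # zip bounds the demo
    print(item)  # field[0]  field[1]  field[2]
```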
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/sql/column.py   3
-rw-r--r--  python/pyspark/sql/tests.py    9
2 files changed, 12 insertions, 0 deletions
diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 0948f9b27c..56e75e8cae 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -226,6 +226,9 @@ class Column(object):
             raise AttributeError(item)
         return self.getField(item)
 
+    def __iter__(self):
+        raise TypeError("Column is not iterable")
+
     # string methods
     rlike = _bin_op("rlike")
     like = _bin_op("like")
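The fix works because an explicit `__iter__` takes precedence over the `__getitem__` fallback, so iteration now fails fast with a clear error instead of looping forever. The same idea outside Spark, again with a hypothetical class:

```
# Same hypothetical class, with the fix applied: defining __iter__
# disables the legacy __getitem__ iteration fallback.
class FieldAccessor(object):
    def __getitem__(self, key):
        return "field[%s]" % key

    def __iter__(self):
        raise TypeError("FieldAccessor is not iterable")

try:
    for item in FieldAccessor():
        pass
except TypeError as e:
    print(e)  # FieldAccessor is not iterable
```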
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index fc778631d9..eb449e8679 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -1066,6 +1066,15 @@ class SQLTests(ReusedPySparkTestCase):
         keys = self.df.withColumn("key", self.df.key).select("key").collect()
         self.assertEqual([r.key for r in keys], list(range(100)))
 
+    # regression test for SPARK-10417
+    def test_column_iterator(self):
+
+        def foo():
+            for x in self.df.key:
+                break
+
+        self.assertRaises(TypeError, foo)
+
 
 class HiveContextSQLTests(ReusedPySparkTestCase):
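A note on the test's shape: `assertRaises(TypeError, foo)` invokes `foo()` itself and passes only if a `TypeError` escapes, which is why the iteration is wrapped in a nested helper rather than written inline. A self-contained sketch of the same pattern (the class and test names here are illustrative, not Spark's):

```
import unittest

class NotIterable(object):
    def __iter__(self):
        raise TypeError("NotIterable is not iterable")

class IterationTest(unittest.TestCase):
    def test_iteration_raises(self):
        def foo():
            for _ in NotIterable():
                break
        # assertRaises calls foo() and passes only if TypeError is raised
        self.assertRaises(TypeError, foo)

if __name__ == "__main__":
    unittest.main()
```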