aboutsummaryrefslogtreecommitdiff
path: root/python
diff options
context:
space:
mode:
authorMichael Nazario <mnazario@palantir.com>2015-01-28 12:47:12 -0800
committerJosh Rosen <joshrosen@databricks.com>2015-01-28 12:47:12 -0800
commit456c11f15aec809044d8bdbdcce0ae05533fb44b (patch)
treefc9b57b8d87a4ebf30bda5dd0fda859e64ccf7ba /python
parent9b18009b835c784e9716594713f3d27d8e48d86c (diff)
downloadspark-456c11f15aec809044d8bdbdcce0ae05533fb44b.tar.gz
spark-456c11f15aec809044d8bdbdcce0ae05533fb44b.tar.bz2
spark-456c11f15aec809044d8bdbdcce0ae05533fb44b.zip
[SPARK-5440][pyspark] Add toLocalIterator to pyspark rdd
Since Java and Scala both have access to iterate over partitions via the "toLocalIterator" function, python should also have that same ability. Author: Michael Nazario <mnazario@palantir.com> Closes #4237 from mnazario/feature/toLocalIterator and squashes the following commits: 1c58526 [Michael Nazario] Fix documentation off by one error 0cdc8f8 [Michael Nazario] Add toLocalIterator to PySpark
Diffstat (limited to 'python')
-rw-r--r--python/pyspark/rdd.py14
1 file changed, 14 insertions, 0 deletions
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index efd2f35912..014c0aa889 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -2059,6 +2059,20 @@ class RDD(object):
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
+ def toLocalIterator(self):
+ """
+ Return an iterator that contains all of the elements in this RDD.
+ The iterator will consume as much memory as the largest partition in this RDD.
+ >>> rdd = sc.parallelize(range(10))
+ >>> [x for x in rdd.toLocalIterator()]
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ """
+ partitions = xrange(self.getNumPartitions())
+ for partition in partitions:
+ rows = self.context.runJob(self, lambda x: x, [partition])
+ for row in rows:
+ yield row
+
class PipelinedRDD(RDD):