about summary refs log tree commit diff
path: root/python/pyspark/rdd.py
diff options
context:
space:
mode:
author Josh Rosen <joshrosen@apache.org> 2013-11-10 17:48:27 -0800
committer Josh Rosen <joshrosen@apache.org> 2013-11-10 17:53:25 -0800
commit 13122ceb8c74dc0c4ad37902a3d1b30bf273cc6a (patch)
tree 5c41f195c2c989b0c90770ac2c33960d10266c4f /python/pyspark/rdd.py
parent ffa5bedf46fbc89ad5c5658f3b423dfff49b70f0 (diff)
downloadspark-13122ceb8c74dc0c4ad37902a3d1b30bf273cc6a.tar.gz
spark-13122ceb8c74dc0c4ad37902a3d1b30bf273cc6a.tar.bz2
spark-13122ceb8c74dc0c4ad37902a3d1b30bf273cc6a.zip
FramedSerializer: _dumps => dumps, _loads => loads.
Diffstat (limited to 'python/pyspark/rdd.py')
-rw-r--r-- python/pyspark/rdd.py | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index 062f44f81e..957f3f89c0 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -751,7 +751,7 @@ class RDD(object):
buckets[partitionFunc(k) % numPartitions].append((k, v))
for (split, items) in buckets.iteritems():
yield pack_long(split)
- yield outputSerializer._dumps(items)
+ yield outputSerializer.dumps(items)
keyed = PipelinedRDD(self, add_shuffle_key)
keyed._bypass_serializer = True
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
@@ -970,7 +970,7 @@ class PipelinedRDD(RDD):
else:
serializer = self.ctx.serializer
command = (self.func, self._prev_jrdd_deserializer, serializer)
- pickled_command = CloudPickleSerializer()._dumps(command)
+ pickled_command = CloudPickleSerializer().dumps(command)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
self.ctx._gateway._gateway_client)