author    Kan Zhang <kzhang@apache.org>    2014-06-14 13:17:22 -0700
committer Reynold Xin <rxin@apache.org>    2014-06-14 13:17:22 -0700
commit    2550533a28382664f8fd294b2caa494d12bfc7c1 (patch)
tree      6b68fa24234486ca8b9d130d39d515a5b64e5d5f /python
parent    891968509105d8d8cf5a608ad9473aeeed747089 (diff)
[SPARK-2079] Support batching when serializing SchemaRDD to Python
Added batching with default batch size 10 in SchemaRDD.javaToPython.

Author: Kan Zhang <kzhang@apache.org>

Closes #1023 from kanzhang/SPARK-2079 and squashes the following commits:

2d1915e [Kan Zhang] [SPARK-2079] Add batching in SchemaRDD.javaToPython
19b0c09 [Kan Zhang] [SPARK-2079] Removing unnecessary wrapping in SchemaRDD.javaToPython
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/sql.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index 960d0a8244..e344610b1f 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -16,6 +16,7 @@
#
from pyspark.rdd import RDD
+from pyspark.serializers import BatchedSerializer, PickleSerializer
from py4j.protocol import Py4JError
@@ -346,7 +347,8 @@ class SchemaRDD(RDD):
# TODO: This is inefficient, we should construct the Python Row object
# in Java land in the javaToPython function. May require a custom
# pickle serializer in Pyrolite
- return RDD(jrdd, self._sc, self._sc.serializer).map(lambda d: Row(d))
+ return RDD(jrdd, self._sc, BatchedSerializer(
+ PickleSerializer())).map(lambda d: Row(d))
# We override the default cache/persist/checkpoint behavior as we want to cache the underlying
# SchemaRDD object in the JVM, not the PythonRDD checkpointed by the super class
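For context, a minimal sketch of what BatchedSerializer does: objects are pickled
in groups rather than one at a time, cutting per-object framing overhead when rows
are shipped from the JVM to Python. In the patch itself the grouping (default size
10) happens in SchemaRDD.javaToPython on the Scala side, so the Python side only
needs BatchedSerializer to unbatch; the explicit batch size, the in-memory buffer,
and the sample values below are illustrative stand-ins, not part of the patch.

# Minimal sketch, assuming a local pyspark installation; io.BytesIO stands in
# for the JVM-to-Python socket stream.
import io

from pyspark.serializers import BatchedSerializer, PickleSerializer

# Pickle objects in groups of 10, mirroring the JVM-side default batching.
ser = BatchedSerializer(PickleSerializer(), 10)

buf = io.BytesIO()
ser.dump_stream(range(25), buf)    # written as pickled batches of 10, 10, 5
buf.seek(0)

# load_stream transparently unbatches, yielding the individual objects back.
assert list(ser.load_stream(buf)) == list(range(25))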