Diffstat (limited to 'python/pyspark/context.py')
-rw-r--r--  python/pyspark/context.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index 6a743ac8bd..b006120eb2 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -23,8 +23,6 @@ import sys
 from threading import Lock
 from tempfile import NamedTemporaryFile
 
-from py4j.java_collections import ListConverter
-
 from pyspark import accumulators
 from pyspark.accumulators import Accumulator
 from pyspark.broadcast import Broadcast
@@ -643,7 +641,6 @@ class SparkContext(object):
         rdds = [x._reserialize() for x in rdds]
         first = rdds[0]._jrdd
         rest = [x._jrdd for x in rdds[1:]]
-        rest = ListConverter().convert(rest, self._gateway._gateway_client)
         return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
 
     def broadcast(self, value):
@@ -846,13 +843,12 @@ class SparkContext(object):
         """
         if partitions is None:
             partitions = range(rdd._jrdd.partitions().size())
-        javaPartitions = ListConverter().convert(partitions, self._gateway._gateway_client)
 
         # Implementation note: This is implemented as a mapPartitions followed
         # by runJob() in order to avoid having to pass a Python lambda into
         # SparkContext#runJob.
         mappedRDD = rdd.mapPartitions(partitionFunc)
-        port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, javaPartitions,
+        port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions,
                                           allowLocal)
         return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
 
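Note: all hunks drop the same pattern. Lists that previously had to be run through py4j's ListConverter are now handed to JVM methods (union, PythonRDD.runJob) as plain Python lists. This works when the py4j gateway is created with auto-conversion enabled, which turns a Python list into a java.util.List as it crosses the bridge. Below is a minimal standalone sketch of that py4j behavior, not Spark's own gateway setup; it assumes py4j >= 0.9 (where GatewayParameters exists) and a JVM-side GatewayServer already listening on the default port.

# Sketch of the py4j auto-conversion this cleanup relies on (illustrative,
# not Spark code). Requires a running JVM GatewayServer on the default port.
from py4j.java_gateway import JavaGateway, GatewayParameters

# auto_convert=True makes py4j convert Python lists/sets/dicts to their
# java.util counterparts automatically. (Older py4j used
# JavaGateway(auto_convert=True) directly.)
gateway = JavaGateway(gateway_parameters=GatewayParameters(auto_convert=True))

# Before: an explicit conversion was required, as in the removed lines above.
#   from py4j.java_collections import ListConverter
#   jlist = ListConverter().convert([1, 2, 3], gateway._gateway_client)
#   gateway.jvm.java.util.Collections.max(jlist)

# After: a plain Python list is auto-converted to java.util.List when passed
# to a JVM method expecting a collection.
print(gateway.jvm.java.util.Collections.max([1, 2, 3]))  # -> 3

Without auto-conversion the final call would fail, since py4j has no built-in mapping for a raw Python list; the explicit ListConverter step removed in this commit was the manual workaround for exactly that.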