Diffstat (limited to 'python/pyspark/ml/common.py')
 python/pyspark/ml/common.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/python/pyspark/ml/common.py b/python/pyspark/ml/common.py
index 256e91e141..7d449aaccb 100644
--- a/python/pyspark/ml/common.py
+++ b/python/pyspark/ml/common.py
@@ -63,7 +63,7 @@ def _to_java_object_rdd(rdd):
     RDD is serialized in batch or not.
     """
     rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
-    return rdd.ctx._jvm.MLSerDe.pythonToJava(rdd._jrdd, True)
+    return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
 
 
 def _py2java(sc, obj):
@@ -82,7 +82,7 @@ def _py2java(sc, obj):
         pass
     else:
         data = bytearray(PickleSerializer().dumps(obj))
-        obj = sc._jvm.MLSerDe.loads(data)
+        obj = sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(data)
     return obj
 
 
@@ -95,17 +95,17 @@ def _java2py(sc, r, encoding="bytes"):
             clsName = 'JavaRDD'
 
         if clsName == 'JavaRDD':
-            jrdd = sc._jvm.MLSerDe.javaToPython(r)
+            jrdd = sc._jvm.org.apache.spark.ml.python.MLSerDe.javaToPython(r)
             return RDD(jrdd, sc)
 
         if clsName == 'Dataset':
             return DataFrame(r, SQLContext.getOrCreate(sc))
 
         if clsName in _picklable_classes:
-            r = sc._jvm.MLSerDe.dumps(r)
+            r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
         elif isinstance(r, (JavaArray, JavaList)):
             try:
-                r = sc._jvm.MLSerDe.dumps(r)
+                r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
             except Py4JJavaError:
                 pass  # not pickable
 
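For context, the helpers touched by this diff shuttle objects between PySpark and the JVM: _py2java pickles a Python object and hands the bytes to org.apache.spark.ml.python.MLSerDe.loads, while _java2py asks MLSerDe.dumps for pickled bytes and unpickles them back in Python. The following is a minimal round-trip sketch, not part of the change itself; it assumes a local SparkSession and calls the module's private helpers purely for illustration.

# Minimal sketch (assumption: a local SparkSession is available; _py2java and
# _java2py are private helpers, used here only to illustrate the MLSerDe round trip).
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.common import _py2java, _java2py

spark = SparkSession.builder.master("local[1]").appName("mlserde-roundtrip").getOrCreate()
sc = spark.sparkContext

v = Vectors.dense([1.0, 2.0, 3.0])
jv = _py2java(sc, v)     # pickled in Python, rebuilt on the JVM via MLSerDe.loads
back = _java2py(sc, jv)  # dumped on the JVM via MLSerDe.dumps, unpickled in Python
assert back == v         # the vector survives the Python -> JVM -> Python round trip

spark.stop()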