From 695d9a0fd461070ee2684b2210fb69d0b6ed1a95 Mon Sep 17 00:00:00 2001
From: Liang-Chi Hsieh
Date: Tue, 24 May 2016 10:10:41 -0700
Subject: [SPARK-15433] [PYSPARK] PySpark core test should not use SerDe from PythonMLLibAPI

## What changes were proposed in this pull request?

Currently the PySpark core test uses the `SerDe` from `PythonMLLibAPI`, which pulls in many MLlib dependencies. It should use `SerDeUtil` instead.

## How was this patch tested?

Existing tests.

Author: Liang-Chi Hsieh

Closes #13214 from viirya/pycore-use-serdeutil.
---
 python/pyspark/tests.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 97ea39dde0..222c5ca5f4 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -960,13 +960,13 @@ class RDDTests(ReusedPySparkTestCase):
         ]
         data_rdd = self.sc.parallelize(data)
         data_java_rdd = data_rdd._to_java_object_rdd()
-        data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
+        data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
         converted_rdd = RDD(data_python_rdd, self.sc)
         self.assertEqual(2, converted_rdd.count())
 
         # conversion between python and java RDD threw exceptions
         data_java_rdd = converted_rdd._to_java_object_rdd()
-        data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
+        data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
         converted_rdd = RDD(data_python_rdd, self.sc)
         self.assertEqual(2, converted_rdd.count())
--
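
For context, the round trip the test exercises can be reproduced outside the test suite with a minimal standalone sketch like the one below (not part of the patch). The sample data and app name are illustrative, and `_to_java_object_rdd()` is an internal PySpark helper; the sketch assumes a local Spark installation where PySpark's Java gateway makes `org.apache.spark.api.python.SerDeUtil` reachable as `sc._jvm.SerDeUtil`, which is the same resolution the updated test relies on.

```python
# Minimal sketch of the Python <-> Java RDD round trip, using the core
# SerDeUtil helper rather than the MLlib-specific SerDe from PythonMLLibAPI.
# Sample data and app name are made up for illustration.
from pyspark import SparkContext
from pyspark.rdd import RDD

sc = SparkContext("local[2]", "serdeutil-sketch")

data = [(u'1', {u'director': u'David Lean'}),
        (u'2', {u'director': u'Andrew Dominik'})]
rdd = sc.parallelize(data)

# Pickle the Python RDD into a JavaRDD of plain JVM objects (internal helper)...
java_rdd = rdd._to_java_object_rdd()
# ...then convert it back into a pickled RDD readable from Python,
# going through the core SerDeUtil instead of the MLlib SerDe.
python_rdd = sc._jvm.SerDeUtil.javaToPython(java_rdd)
roundtripped = RDD(python_rdd, sc)

assert roundtripped.count() == 2
sc.stop()
```

Using `SerDeUtil` keeps this code path inside Spark core, so the core test no longer depends on MLlib's Python API being present just to exercise RDD serialization.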