path: root/python/pyspark/sql/context.py
author    Davies Liu <davies@databricks.com>    2015-07-09 14:43:38 -0700
committer Davies Liu <davies.liu@gmail.com>    2015-07-09 14:43:38 -0700
commit    c9e2ef52bb54f35a904427389dc492d61f29b018 (patch)
tree      90887ae7055aa78751561119083bd09ac099e0f4  /python/pyspark/sql/context.py
parent    3ccebf36c5abe04702d4cf223552a94034d980fb (diff)
[SPARK-7902] [SPARK-6289] [SPARK-8685] [SQL] [PYSPARK] Refactor of serialization for Python DataFrame
This PR fixes the long-standing issue of serialization between Python RDDs and DataFrames. It switches to a customized Pickler for InternalRow, enabling customized unpickling (type conversion, especially for UDTs), so UDTs are now supported in UDFs, cc mengxr. There is no generated `Row` anymore.

Author: Davies Liu <davies@databricks.com>

Closes #7301 from davies/sql_ser and squashes the following commits:

81bef71 [Davies Liu] address comments
e9217bd [Davies Liu] add regression tests
db34167 [Davies Liu] Refactor of serialization for Python DataFrame
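A minimal sketch of the new conversion path (the schema and record below are illustrative, not from this commit): StructType.toInternal maps each Python value to Spark's internal representation, the role previously played by the removed _python_to_sql_converter helper.

from pyspark.sql.types import StructType, StructField, StringType, IntegerType

# Hypothetical schema, for illustration only.
schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", IntegerType(), True),
])

# Each field value is converted via its type's toInternal method; this is
# the path createDataFrame now takes instead of _python_to_sql_converter.
internal = schema.toInternal(("Alice", 1))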
Diffstat (limited to 'python/pyspark/sql/context.py')
-rw-r--r--  python/pyspark/sql/context.py  5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 309c11faf9..c93a15bada 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -30,7 +30,7 @@ from pyspark.rdd import RDD, _prepare_for_python_RDD, ignore_unicode_prefix
 from pyspark.serializers import AutoBatchedSerializer, PickleSerializer
 from pyspark.sql import since
 from pyspark.sql.types import Row, StringType, StructType, _verify_type, \
-    _infer_schema, _has_nulltype, _merge_type, _create_converter, _python_to_sql_converter
+    _infer_schema, _has_nulltype, _merge_type, _create_converter
 from pyspark.sql.dataframe import DataFrame
 from pyspark.sql.readwriter import DataFrameReader
 from pyspark.sql.utils import install_exception_handler
@@ -388,8 +388,7 @@ class SQLContext(object):
             raise TypeError("schema should be StructType or list or None")
 
         # convert python objects to sql data
-        converter = _python_to_sql_converter(schema)
-        rdd = rdd.map(converter)
+        rdd = rdd.map(schema.toInternal)
 
         jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
         df = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
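A hedged usage example of the changed code path (assuming an existing SparkContext `sc` and SQLContext `sqlContext` in the session; the data is illustrative): createDataFrame on an RDD with an explicit schema now maps rows through schema.toInternal before handing them to the JVM.

from pyspark.sql.types import StructType, StructField, StringType, IntegerType

# Assumed to exist already: sc (SparkContext) and sqlContext (SQLContext).
schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", IntegerType(), True),
])
rdd = sc.parallelize([("Alice", 1), ("Bob", 2)])

# Internally this now runs rdd.map(schema.toInternal) before calling
# applySchemaToPythonRDD on the JVM side, per the diff above.
df = sqlContext.createDataFrame(rdd, schema)
df.show()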