From f92f334ca47c03b980b06cf300aa652d0ffa1880 Mon Sep 17 00:00:00 2001
From: Jason White
Date: Mon, 2 Nov 2015 10:49:06 -0800
Subject: [SPARK-11437] [PYSPARK] Don't .take when converting RDD to DataFrame
 with provided schema

When creating a DataFrame from an RDD in PySpark, `createDataFrame` calls
`.take(10)` to verify that the first 10 rows of the RDD match the provided
schema. This is similar to https://issues.apache.org/jira/browse/SPARK-8070,
but that issue affected the case where a schema was not provided.

Verifying the first 10 rows is of limited utility and causes the DAG to be
executed non-lazily. If verification is necessary, I believe it should be
done lazily on all rows. However, since the caller is providing a schema to
follow, I think it's acceptable to simply fail if the schema is incorrect.

marmbrus We chatted about this at SparkSummitEU. davies you made a similar
change for the infer-schema path in https://github.com/apache/spark/pull/6606

Author: Jason White

Closes #9392 from JasonMWhite/createDataFrame_without_take.
---
 python/pyspark/sql/context.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 79453658a1..924bb6433d 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -318,13 +318,7 @@ class SQLContext(object):
                     struct.names[i] = name
             schema = struct
 
-        elif isinstance(schema, StructType):
-            # take the first few rows to verify schema
-            rows = rdd.take(10)
-            for row in rows:
-                _verify_type(row, schema)
-
-        else:
+        elif not isinstance(schema, StructType):
             raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
 
         # convert python objects to sql data
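A minimal sketch (not part of the patch) of the behavior this change
addresses, assuming a running local PySpark from this era; the app name,
schema, and RDD contents below are illustrative only:

```python
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import LongType, StructField, StructType

# Illustrative setup; app name and data are hypothetical.
sc = SparkContext(appName="createDataFrame_without_take")
sqlContext = SQLContext(sc)

schema = StructType([StructField("value", LongType(), True)])
rdd = sc.parallelize(range(100)).map(lambda x: (x,))

# Before this patch, the StructType branch called rdd.take(10) here,
# running a Spark job eagerly just to type-check ten rows. After the
# patch, no job runs at this point; a mismatched schema surfaces only
# once the DataFrame is actually evaluated.
df = sqlContext.createDataFrame(rdd, schema)

df.count()  # the RDD's DAG executes here, at the first action
```

The trade-off the commit message describes is visible above: schema
errors are no longer caught at `createDataFrame` time, but constructing
a DataFrame with an explicit schema stays lazy, matching the
infer-schema path changed in apache/spark#6606.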