Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala  9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
index 821ac850ac..89eaba2d19 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -347,16 +347,11 @@ class SchemaRDD(
val pickle = new Pickler
iter.map { row =>
val map: JMap[String, Any] = new java.util.HashMap
- // TODO: We place the map in an ArrayList so that the object is pickled to a List[Dict].
- // Ideally we should be able to pickle an object directly into a Python collection so we
- // don't have to create an ArrayList every time.
- val arr: java.util.ArrayList[Any] = new java.util.ArrayList
row.zip(fieldNames).foreach { case (obj, name) =>
map.put(name, obj)
}
- arr.add(map)
- pickle.dumps(arr)
- }
+ map
+ }.grouped(10).map(batched => pickle.dumps(batched.toArray))
}
}
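
The change above drops the per-row ArrayList wrapper and instead pickles rows in batches of 10, so each serialized chunk arrives on the Python side as a list of dicts. Below is a minimal standalone sketch of that batching pattern, assuming the Pickler in question is Pyrolite's net.razorvine.pickle.Pickler; the field names and rows are made up for illustration and do not come from the patch.

import java.util.{HashMap => JHashMap, Map => JMap}
import net.razorvine.pickle.Pickler

object BatchedPickleSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical field names and rows standing in for one SchemaRDD partition.
    val fieldNames = Seq("id", "name")
    val rows: Iterator[Seq[Any]] = Iterator(Seq(1, "a"), Seq(2, "b"), Seq(3, "c"))

    val pickle = new Pickler

    // Turn each row into a Java map keyed by field name, then pickle the maps
    // in groups of 10 so every byte array decodes to a Python list of dicts.
    val pickled: Iterator[Array[Byte]] = rows.map { row =>
      val map: JMap[String, Any] = new JHashMap
      row.zip(fieldNames).foreach { case (obj, name) => map.put(name, obj) }
      map
    }.grouped(10).map(batch => pickle.dumps(batch.toArray))

    pickled.foreach(bytes => println(s"pickled batch of ${bytes.length} bytes"))
  }
}

Batching amortizes the cost of each dumps call over several rows rather than paying for a wrapper list per row, which is the point of the replaced TODO comment.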