aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBryan Cutler <cutlerb@gmail.com>2016-10-11 08:29:52 +0200
committerSean Owen <sowen@cloudera.com>2016-10-11 08:29:52 +0200
commit658c7147f5bf637f36e8c66b9207d94b1e7c74c5 (patch)
tree82507f1f189d75e3c816967bdaca513c5aa4518f
parent19401a203b441e3355f0d3fc3fd062b6d5bdee1f (diff)
downloadspark-658c7147f5bf637f36e8c66b9207d94b1e7c74c5.tar.gz
spark-658c7147f5bf637f36e8c66b9207d94b1e7c74c5.tar.bz2
spark-658c7147f5bf637f36e8c66b9207d94b1e7c74c5.zip
[SPARK-17808][PYSPARK] Upgraded version of Pyrolite to 4.13
## What changes were proposed in this pull request?

Upgraded to a newer version of Pyrolite which supports serialization of a BinaryType StructField for PySpark.SQL.

## How was this patch tested?

Added a unit test which fails with a raised ValueError when using the previous version of Pyrolite 4.9 and Python 3.

Author: Bryan Cutler <cutlerb@gmail.com>

Closes #15386 from BryanCutler/pyrolite-upgrade-SPARK-17808.
-rw-r--r--core/pom.xml2
-rw-r--r--dev/deps/spark-deps-hadoop-2.22
-rw-r--r--dev/deps/spark-deps-hadoop-2.32
-rw-r--r--dev/deps/spark-deps-hadoop-2.42
-rw-r--r--dev/deps/spark-deps-hadoop-2.62
-rw-r--r--dev/deps/spark-deps-hadoop-2.72
-rw-r--r--python/pyspark/sql/tests.py8
7 files changed, 14 insertions, 6 deletions
diff --git a/core/pom.xml b/core/pom.xml
index 9a4f234953..205bbc588b 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -320,7 +320,7 @@
<dependency>
<groupId>net.razorvine</groupId>
<artifactId>pyrolite</artifactId>
- <version>4.9</version>
+ <version>4.13</version>
<exclusions>
<exclusion>
<groupId>net.razorvine</groupId>
diff --git a/dev/deps/spark-deps-hadoop-2.2 b/dev/deps/spark-deps-hadoop-2.2
index f4f92c6d20..b30f8c347c 100644
--- a/dev/deps/spark-deps-hadoop-2.2
+++ b/dev/deps/spark-deps-hadoop-2.2
@@ -141,7 +141,7 @@ pmml-model-1.2.15.jar
pmml-schema-1.2.15.jar
protobuf-java-2.5.0.jar
py4j-0.10.3.jar
-pyrolite-4.9.jar
+pyrolite-4.13.jar
scala-compiler-2.11.8.jar
scala-library-2.11.8.jar
scala-parser-combinators_2.11-1.0.4.jar
diff --git a/dev/deps/spark-deps-hadoop-2.3 b/dev/deps/spark-deps-hadoop-2.3
index 3db013f1a7..5b3a7651dd 100644
--- a/dev/deps/spark-deps-hadoop-2.3
+++ b/dev/deps/spark-deps-hadoop-2.3
@@ -148,7 +148,7 @@ pmml-model-1.2.15.jar
pmml-schema-1.2.15.jar
protobuf-java-2.5.0.jar
py4j-0.10.3.jar
-pyrolite-4.9.jar
+pyrolite-4.13.jar
scala-compiler-2.11.8.jar
scala-library-2.11.8.jar
scala-parser-combinators_2.11-1.0.4.jar
diff --git a/dev/deps/spark-deps-hadoop-2.4 b/dev/deps/spark-deps-hadoop-2.4
index 71710109a1..e323efe30f 100644
--- a/dev/deps/spark-deps-hadoop-2.4
+++ b/dev/deps/spark-deps-hadoop-2.4
@@ -148,7 +148,7 @@ pmml-model-1.2.15.jar
pmml-schema-1.2.15.jar
protobuf-java-2.5.0.jar
py4j-0.10.3.jar
-pyrolite-4.9.jar
+pyrolite-4.13.jar
scala-compiler-2.11.8.jar
scala-library-2.11.8.jar
scala-parser-combinators_2.11-1.0.4.jar
diff --git a/dev/deps/spark-deps-hadoop-2.6 b/dev/deps/spark-deps-hadoop-2.6
index cb30fda253..77d97e5365 100644
--- a/dev/deps/spark-deps-hadoop-2.6
+++ b/dev/deps/spark-deps-hadoop-2.6
@@ -156,7 +156,7 @@ pmml-model-1.2.15.jar
pmml-schema-1.2.15.jar
protobuf-java-2.5.0.jar
py4j-0.10.3.jar
-pyrolite-4.9.jar
+pyrolite-4.13.jar
scala-compiler-2.11.8.jar
scala-library-2.11.8.jar
scala-parser-combinators_2.11-1.0.4.jar
diff --git a/dev/deps/spark-deps-hadoop-2.7 b/dev/deps/spark-deps-hadoop-2.7
index 9008aa80bc..572edfa0cc 100644
--- a/dev/deps/spark-deps-hadoop-2.7
+++ b/dev/deps/spark-deps-hadoop-2.7
@@ -157,7 +157,7 @@ pmml-model-1.2.15.jar
pmml-schema-1.2.15.jar
protobuf-java-2.5.0.jar
py4j-0.10.3.jar
-pyrolite-4.9.jar
+pyrolite-4.13.jar
scala-compiler-2.11.8.jar
scala-library-2.11.8.jar
scala-parser-combinators_2.11-1.0.4.jar
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 7b6f9f0ef1..86c590dae3 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -1708,6 +1708,14 @@ class SQLTests(ReusedPySparkTestCase):
count = df.count()
self.assertEquals(count, 4)
+ def test_BinaryType_serialization(self):
+ # Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
+ schema = StructType([StructField('mybytes', BinaryType())])
+ data = [[bytearray(b'here is my data')],
+ [bytearray(b'and here is some more')]]
+ df = self.spark.createDataFrame(data, schema=schema)
+ df.collect()
+
class HiveSparkSubmitTests(SparkSubmitTests):