author    WeichenXu <WeichenXu123@outlook.com>    2016-09-12 12:23:16 +0100
committer Sean Owen <sowen@cloudera.com>          2016-09-12 12:23:16 +0100
commit    8087ecf8daad1587d0ce9040991b14320628a65e
tree      1b407fb0de526dec2e1619287d440ee0c171f54f
parent    b3c22912284c2a010a4af3c43dc5e6fd53c68f8c
[SPARK CORE][MINOR] fix "default partitioner cannot partition array keys" error message in PairRDDFunctions
## What changes were proposed in this pull request?
The error message `Default partitioner cannot partition array keys.` thrown in `PairRDDFunctions` is confusing: it is raised even when the user passed a partitioner explicitly, so no "default" partitioner is involved. This patch updates each of the five check sites (`partitionBy`, the three `cogroup` overloads, and the combine path) to throw `HashPartitioner cannot partition array keys.`, naming the partitioner class that is actually rejected. A short background sketch follows.
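For background, the check exists because JVM arrays use identity `hashCode`/`equals`, so two arrays with equal contents would generally be routed to different partitions by a `HashPartitioner`, silently breaking key-grouping semantics. A minimal sketch illustrating this (plain Scala with `spark-core` on the classpath; not part of this patch):

```scala
import org.apache.spark.HashPartitioner

object ArrayKeyDemo {
  def main(args: Array[String]): Unit = {
    val a = Array(1, 2, 3)
    val b = Array(1, 2, 3)                  // same contents, different object
    println(a.sameElements(b))              // true: element-wise equal
    println(a.hashCode == b.hashCode)       // generally false: identity hashes

    val p = new HashPartitioner(8)
    // Equal keys may land in different partitions, so groupByKey/cogroup
    // results would be wrong -- hence Spark throws instead of allowing it.
    println(p.getPartition(a) == p.getPartition(b))
  }
}
```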
## How was this patch tested?
N/A
Author: WeichenXu <WeichenXu123@outlook.com>
Closes #15045 from WeichenXu123/fix_partitionBy_error_message.
core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
```diff
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 7d6a8805bc..068f4ed8ad 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -83,7 +83,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
         throw new SparkException("Cannot use map-side combining with array keys.")
       }
       if (partitioner.isInstanceOf[HashPartitioner]) {
-        throw new SparkException("Default partitioner cannot partition array keys.")
+        throw new SparkException("HashPartitioner cannot partition array keys.")
       }
     }
     val aggregator = new Aggregator[K, V, C](
@@ -530,7 +530,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
    */
   def partitionBy(partitioner: Partitioner): RDD[(K, V)] = self.withScope {
     if (keyClass.isArray && partitioner.isInstanceOf[HashPartitioner]) {
-      throw new SparkException("Default partitioner cannot partition array keys.")
+      throw new SparkException("HashPartitioner cannot partition array keys.")
     }
     if (self.partitioner == Some(partitioner)) {
       self
@@ -784,7 +784,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
       partitioner: Partitioner)
       : RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope {
     if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
-      throw new SparkException("Default partitioner cannot partition array keys.")
+      throw new SparkException("HashPartitioner cannot partition array keys.")
     }
     val cg = new CoGroupedRDD[K](Seq(self, other1, other2, other3), partitioner)
     cg.mapValues { case Array(vs, w1s, w2s, w3s) =>
@@ -802,7 +802,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
   def cogroup[W](other: RDD[(K, W)], partitioner: Partitioner)
       : RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope {
     if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
-      throw new SparkException("Default partitioner cannot partition array keys.")
+      throw new SparkException("HashPartitioner cannot partition array keys.")
     }
     val cg = new CoGroupedRDD[K](Seq(self, other), partitioner)
     cg.mapValues { case Array(vs, w1s) =>
@@ -817,7 +817,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
   def cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], partitioner: Partitioner)
       : RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope {
     if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
-      throw new SparkException("Default partitioner cannot partition array keys.")
+      throw new SparkException("HashPartitioner cannot partition array keys.")
     }
     val cg = new CoGroupedRDD[K](Seq(self, other1, other2), partitioner)
     cg.mapValues { case Array(vs, w1s, w2s) =>
```
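For illustration, a hedged `spark-shell` sketch of how the renamed message surfaces (assumes a running shell with its built-in `sc`; the stack trace is elided):

```scala
import org.apache.spark.HashPartitioner

// An RDD with array keys; keyClass.isArray is true here.
val pairs = sc.parallelize(Seq((Array(1, 2), "a"), (Array(3, 4), "b")))

// partitionBy rejects the array-keyed RDD up front; after this patch the
// exception names the partitioner class instead of blaming a "default"
// partitioner that was never involved.
pairs.partitionBy(new HashPartitioner(2))
// org.apache.spark.SparkException: HashPartitioner cannot partition array keys.
```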