author    Christopher Nguyen <ctn@adatao.com>    2013-06-16 01:23:48 -0700
committer Christopher Nguyen <ctn@adatao.com>    2013-06-16 01:23:48 -0700
commit    5c886194e458c64fcf24066af351bde47dd8bf12 (patch)
tree      50a2790424f7e9e9f92fa3e2b442fefda819c097 /core/src/test/scala
parent    479442a9b913b08a64da4bd5848111d950105336 (diff)
Move zero-length partition testing from JavaAPISuite.java to PartitioningSuite.scala
Diffstat (limited to 'core/src/test/scala')
-rw-r--r--  core/src/test/scala/spark/JavaAPISuite.java       | 22
-rw-r--r--  core/src/test/scala/spark/PartitioningSuite.scala  | 21
2 files changed, 19 insertions(+), 24 deletions(-)
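
The scenario both versions of the test exercise is an RDD split into eight single-element partitions, where the filter leaves only two adjacent partitions nonempty; partitions 0-3 and 6-7 end up with zero length. The sketch below is a simplified, hypothetical stand-in for spark.util.StatCounter (the real class merges running means with a Welford-style update rather than raw sums of squares) meant only to illustrate why the per-partition aggregation survives empty partitions: an empty partition folds to the identity element, so merging it changes nothing.

// Hypothetical, simplified stand-in for spark.util.StatCounter -- illustration only.
final case class Stats(n: Long, sum: Double, sumSq: Double) {
  def merge(x: Double): Stats = Stats(n + 1, sum + x, sumSq + x * x)
  def merge(other: Stats): Stats = Stats(n + other.n, sum + other.sum, sumSq + other.sumSq)
  def mean: Double = if (n == 0) Double.NaN else sum / n
  // Population variance (divide by n), matching what the tests assert.
  def variance: Double = if (n == 0) Double.NaN else sumSq / n - mean * mean
}

object ZeroLengthPartitionsSketch extends App {
  // The eight one-element partitions after filter(_ >= 0.0): most are empty,
  // including a consecutive run at the front.
  val partitions: Seq[Seq[Double]] =
    Seq(Seq(), Seq(), Seq(), Seq(), Seq(2.0), Seq(4.0), Seq(), Seq())

  // One aggregate per partition, then a merge across partitions -- the same
  // shape as rdd.stats(). Empty partitions contribute the identity Stats(0, 0, 0).
  val empty = Stats(0, 0.0, 0.0)
  val merged = partitions.map(_.foldLeft(empty)(_ merge _)).foldLeft(empty)(_ merge _)

  assert(merged.sum == 6.0)      // 2.0 + 4.0
  assert(merged.mean == 3.0)     // 6.0 / 2
  assert(merged.variance == 1.0) // ((2-3)^2 + (4-3)^2) / 2
}
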
diff --git a/core/src/test/scala/spark/JavaAPISuite.java b/core/src/test/scala/spark/JavaAPISuite.java
index 3190a43e73..93bb69b41c 100644
--- a/core/src/test/scala/spark/JavaAPISuite.java
+++ b/core/src/test/scala/spark/JavaAPISuite.java
@@ -315,28 +315,6 @@ public class JavaAPISuite implements Serializable {
   }
 
   @Test
-  public void zeroLengthPartitions() {
-    // Create RDD with some consecutive empty partitions (including the "first" one)
-    JavaDoubleRDD rdd = sc
-        .parallelizeDoubles(Arrays.asList(-1.0, -1.0, -1.0, -1.0, 2.0, 4.0, -1.0, -1.0), 8)
-        .filter(new Function<Double, Boolean>() {
-          @Override
-          public Boolean call(Double x) {
-            return x > 0.0;
-          }
-        });
-
-    // Run the partitions, including the consecutive empty ones, through StatCounter
-    StatCounter stats = rdd.stats();
-    Assert.assertEquals(6.0, stats.sum(), 0.01);
-    Assert.assertEquals(6.0/2, rdd.mean(), 0.01);
-    Assert.assertEquals(1.0, rdd.variance(), 0.01);
-    Assert.assertEquals(1.0, rdd.stdev(), 0.01);
-
-    // Add other tests here for classes that should be able to handle empty partitions correctly
-  }
-
-  @Test
   public void map() {
     JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
     JavaDoubleRDD doubles = rdd.map(new DoubleFunction<Integer>() {
diff --git a/core/src/test/scala/spark/PartitioningSuite.scala b/core/src/test/scala/spark/PartitioningSuite.scala
index 60db759c25..e5745c81b3 100644
--- a/core/src/test/scala/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/spark/PartitioningSuite.scala
@@ -1,10 +1,10 @@
 package spark
 
 import org.scalatest.FunSuite
-
 import scala.collection.mutable.ArrayBuffer
-
 import SparkContext._
+import spark.util.StatCounter
+import scala.math._
 
 
 class PartitioningSuite extends FunSuite with LocalSparkContext {
@@ -120,4 +120,21 @@ class PartitioningSuite extends FunSuite with LocalSparkContext {
     assert(intercept[SparkException]{ arrPairs.reduceByKeyLocally(_ + _) }.getMessage.contains("array"))
     assert(intercept[SparkException]{ arrPairs.reduceByKey(_ + _) }.getMessage.contains("array"))
   }
+
+  test("Zero-length partitions should be correctly handled") {
+    // Create RDD with some consecutive empty partitions (including the "first" one)
+    sc = new SparkContext("local", "test")
+    val rdd: RDD[Double] = sc
+      .parallelize(Array(-1.0, -1.0, -1.0, -1.0, 2.0, 4.0, -1.0, -1.0), 8)
+      .filter(_ >= 0.0)
+
+    // Run the partitions, including the consecutive empty ones, through StatCounter
+    val stats: StatCounter = rdd.stats()
+    assert(abs(6.0 - stats.sum) < 0.01)
+    assert(abs(6.0/2 - rdd.mean) < 0.01)
+    assert(abs(1.0 - rdd.variance) < 0.01)
+    assert(abs(1.0 - rdd.stdev) < 0.01)
+
+    // Add other tests here for classes that should be able to handle empty partitions correctly
+  }
 }
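
As a sanity check on the asserted constants (assuming, as StatCounter does, population variance -- dividing by n rather than n - 1), the two elements that survive the filter are 2.0 and 4.0:

val xs = Seq(2.0, 4.0)          // the elements that pass _ >= 0.0
val mean = xs.sum / xs.size     // (2.0 + 4.0) / 2 = 3.0
val variance =
  xs.map(x => (x - mean) * (x - mean)).sum / xs.size  // (1.0 + 1.0) / 2 = 1.0
val stdev = math.sqrt(variance) // sqrt(1.0) = 1.0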