author     Sean Owen <sowen@cloudera.com>        2014-02-27 11:12:21 -0800
committer  Patrick Wendell <pwendell@gmail.com>  2014-02-27 11:12:21 -0800
commit     12bbca20657c17d5ebfceaacb37dddc851772675 (patch)
tree       53d717c10b2b7ede275608b1829e6f44389daa2d /core
parent     aace2c097ed2ca8bca33a3a3f07fb8bf772b3c50 (diff)
SPARK 1084.1 (resubmitted)
(Ported from https://github.com/apache/incubator-spark/pull/637)

Author: Sean Owen <sowen@cloudera.com>

Closes #31 from srowen/SPARK-1084.1 and squashes the following commits:

6c4a32c [Sean Owen] Suppress warnings about legitimate unchecked array creations, or change code to avoid it
f35b833 [Sean Owen] Fix two misc javadoc problems
254e8ef [Sean Owen] Fix one new style error introduced in scaladoc warning commit
5b2fce2 [Sean Owen] Fix scaladoc invocation warning, and enable javac warnings properly, with plugin config updates
007762b [Sean Owen] Remove dead scaladoc links
b8ff8cb [Sean Owen] Replace deprecated Ant <tasks> with <target>
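Most of the Java changes below add @SuppressWarnings("unchecked") to test methods. As background, a minimal self-contained sketch of the warning being suppressed (the class and data are illustrative, not taken from the patch): passing parameterized arguments such as Tuple2<String, String> through a varargs method like Arrays.asList makes javac create a generic array behind the scenes, which is reported as an unchecked generic array creation once javac warnings are properly enabled.

    import java.util.Arrays;
    import java.util.List;

    import scala.Tuple2;

    public class UncheckedVarargsExample {
      // Without the annotation, javac -Xlint reports: "[unchecked] unchecked
      // generic array creation for varargs parameter of type Tuple2<String,String>[]".
      // The array creation is legitimate here, so the warning is suppressed.
      @SuppressWarnings("unchecked")
      static List<Tuple2<String, String>> categories() {
        return Arrays.asList(
          new Tuple2<String, String>("Apples", "Fruit"),
          new Tuple2<String, String>("Oranges", "Fruit"));
      }

      public static void main(String[] args) {
        System.out.println(categories());
      }
    }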
Diffstat (limited to 'core')
-rw-r--r--  core/pom.xml                                                                |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala                     |  2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala              |  2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala   |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/util/StatCounter.scala                 |  6
-rw-r--r--  core/src/main/scala/org/apache/spark/util/Vector.scala                      |  2
-rw-r--r--  core/src/test/java/org/apache/spark/JavaAPISuite.java                       | 35
7 files changed, 35 insertions(+), 20 deletions(-)
diff --git a/core/pom.xml b/core/pom.xml
index ebc178a105..a333bff28c 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -225,7 +225,7 @@
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
- <tasks>
+ <target>
<property name="spark.classpath" refid="maven.test.classpath" />
<property environment="env" />
<fail message="Please set the SCALA_HOME (or SCALA_LIBRARY_PATH if scala is on the path) environment variables and retry.">
@@ -238,7 +238,7 @@
</not>
</condition>
</fail>
- </tasks>
+ </target>
</configuration>
</execution>
</executions>
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 1f5334f3db..da778aa851 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -351,7 +351,7 @@ class SparkContext(
* using the older MapReduce API (`org.apache.hadoop.mapred`).
*
* @param conf JobConf for setting up the dataset
- * @param inputFormatClass Class of the [[InputFormat]]
+ * @param inputFormatClass Class of the InputFormat
* @param keyClass Class of the keys
* @param valueClass Class of the values
* @param minSplits Minimum number of Hadoop Splits to generate.
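For context on the parameter list in this doc comment, a minimal Java sketch of calling hadoopRDD through JavaSparkContext (the input path and minSplits value are illustrative assumptions):

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.spark.api.java.JavaPairRDD;
    import org.apache.spark.api.java.JavaSparkContext;

    public class HadoopRDDExample {
      public static void main(String[] args) {
        JavaSparkContext sc = new JavaSparkContext("local", "HadoopRDDExample");
        JobConf conf = new JobConf();
        FileInputFormat.setInputPaths(conf, "/tmp/input");  // illustrative path
        // conf, inputFormatClass, keyClass, valueClass, minSplits, in the
        // order documented above.
        JavaPairRDD<LongWritable, Text> lines =
          sc.hadoopRDD(conf, TextInputFormat.class, LongWritable.class, Text.class, 2);
        System.out.println(lines.count());
        sc.stop();
      }
    }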
diff --git a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
index 9d75d7c4ad..006e2a3335 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/JobLogger.scala
@@ -81,7 +81,7 @@ class JobLogger(val user: String, val logDirName: String)
/**
* Create a log file for one job
* @param jobID ID of the job
- * @exception FileNotFoundException Fail to create log file
+ * @throws FileNotFoundException Fail to create log file
*/
protected def createLogWriter(jobID: Int) {
try {
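@exception is a legacy alias of @throws, and the patch switches to the preferred tag. A minimal javadoc sketch of the same convention (the helper below is illustrative, not JobLogger's actual implementation):

    import java.io.FileNotFoundException;
    import java.io.PrintWriter;

    public class LogWriterExample {
      /**
       * Create a log file for one job.
       * @param jobId ID of the job
       * @throws FileNotFoundException if the log file cannot be created
       */
      static PrintWriter createLogWriter(int jobId) throws FileNotFoundException {
        return new PrintWriter("job-" + jobId + ".log");
      }
    }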
diff --git a/core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala b/core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala
index bf71882ef7..c539d2f708 100644
--- a/core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala
+++ b/core/src/main/scala/org/apache/spark/util/IndestructibleActorSystem.scala
@@ -23,9 +23,9 @@ import scala.util.control.{ControlThrowable, NonFatal}
import com.typesafe.config.Config
/**
- * An [[akka.actor.ActorSystem]] which refuses to shut down in the event of a fatal exception.
+ * An akka.actor.ActorSystem which refuses to shut down in the event of a fatal exception
* This is necessary as Spark Executors are allowed to recover from fatal exceptions
- * (see [[org.apache.spark.executor.Executor]]).
+ * (see org.apache.spark.executor.Executor)
*/
object IndestructibleActorSystem {
def apply(name: String, config: Config): ActorSystem =
diff --git a/core/src/main/scala/org/apache/spark/util/StatCounter.scala b/core/src/main/scala/org/apache/spark/util/StatCounter.scala
index 5b0d2c3651..f837dc7ccc 100644
--- a/core/src/main/scala/org/apache/spark/util/StatCounter.scala
+++ b/core/src/main/scala/org/apache/spark/util/StatCounter.scala
@@ -19,9 +19,9 @@ package org.apache.spark.util
/**
* A class for tracking the statistics of a set of numbers (count, mean and variance) in a
- * numerically robust way. Includes support for merging two StatCounters. Based on
- * [[http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
- * Welford and Chan's algorithms for running variance]].
+ * numerically robust way. Includes support for merging two StatCounters. Based on Welford
+ * and Chan's [[http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance algorithms]]
+ * for running variance.
*
* @constructor Initialize the StatCounter with the given values.
*/
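As background on the reference linked in this doc comment, a minimal Java sketch of Welford's one-pass update for running mean and variance, the numerically robust idea StatCounter builds on (an illustrative sketch, not Spark's StatCounter, which additionally supports merging two counters):

    public class RunningStats {
      private long n = 0;
      private double mean = 0.0;
      private double m2 = 0.0;  // running sum of squared deviations from the mean

      // Welford's update: one pass, numerically stable even when the variance
      // is small relative to the mean.
      public void add(double x) {
        n += 1;
        double delta = x - mean;
        mean += delta / n;
        m2 += delta * (x - mean);
      }

      public double mean() { return mean; }
      public double variance() { return n > 0 ? m2 / n : Double.NaN; }
    }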
diff --git a/core/src/main/scala/org/apache/spark/util/Vector.scala b/core/src/main/scala/org/apache/spark/util/Vector.scala
index d437c055f3..dc4b8f253f 100644
--- a/core/src/main/scala/org/apache/spark/util/Vector.scala
+++ b/core/src/main/scala/org/apache/spark/util/Vector.scala
@@ -136,7 +136,7 @@ object Vector {
/**
* Creates this [[org.apache.spark.util.Vector]] of given length containing random numbers
- * between 0.0 and 1.0. Optional [[scala.util.Random]] number generator can be provided.
+ * between 0.0 and 1.0. Optional scala.util.Random number generator can be provided.
*/
def random(length: Int, random: Random = new XORShiftRandom()) =
Vector(length, _ => random.nextDouble())
diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index 20232e9fbb..aa5079c159 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -75,8 +75,9 @@ public class JavaAPISuite implements Serializable {
else if (a < b) return 1;
else return 0;
}
- };
+ }
+ @SuppressWarnings("unchecked")
@Test
public void sparkContextUnion() {
// Union of non-specialized JavaRDDs
@@ -148,6 +149,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(2, foreachCalls);
}
+ @SuppressWarnings("unchecked")
@Test
public void lookup() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
@@ -179,6 +181,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(5, oddsAndEvens.lookup(false).get(0).size()); // Odds
}
+ @SuppressWarnings("unchecked")
@Test
public void cogroup() {
JavaPairRDD<String, String> categories = sc.parallelizePairs(Arrays.asList(
@@ -197,6 +200,7 @@ public class JavaAPISuite implements Serializable {
cogrouped.collect();
}
+ @SuppressWarnings("unchecked")
@Test
public void leftOuterJoin() {
JavaPairRDD<Integer, Integer> rdd1 = sc.parallelizePairs(Arrays.asList(
@@ -243,6 +247,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(33, sum);
}
+ @SuppressWarnings("unchecked")
@Test
public void foldByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
@@ -265,6 +270,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(3, sums.lookup(3).get(0).intValue());
}
+ @SuppressWarnings("unchecked")
@Test
public void reduceByKey() {
List<Tuple2<Integer, Integer>> pairs = Arrays.asList(
@@ -320,8 +326,8 @@ public class JavaAPISuite implements Serializable {
public void take() {
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
Assert.assertEquals(1, rdd.first().intValue());
- List<Integer> firstTwo = rdd.take(2);
- List<Integer> sample = rdd.takeSample(false, 2, 42);
+ rdd.take(2);
+ rdd.takeSample(false, 2, 42);
}
@Test
@@ -359,8 +365,8 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(2.49444, rdd.stdev(), 0.01);
Assert.assertEquals(2.73252, rdd.sampleStdev(), 0.01);
- Double first = rdd.first();
- List<Double> take = rdd.take(5);
+ rdd.first();
+ rdd.take(5);
}
@Test
@@ -438,11 +444,11 @@ public class JavaAPISuite implements Serializable {
return lengths;
}
});
- Double x = doubles.first();
- Assert.assertEquals(5.0, doubles.first().doubleValue(), 0.01);
+ Assert.assertEquals(5.0, doubles.first(), 0.01);
Assert.assertEquals(11, pairs.count());
}
+ @SuppressWarnings("unchecked")
@Test
public void mapsFromPairsToPairs() {
List<Tuple2<Integer, String>> pairs = Arrays.asList(
@@ -509,6 +515,7 @@ public class JavaAPISuite implements Serializable {
}
}
+ @SuppressWarnings("unchecked")
@Test
public void persist() {
JavaDoubleRDD doubleRDD = sc.parallelizeDoubles(Arrays.asList(1.0, 1.0, 2.0, 3.0, 5.0, 8.0));
@@ -573,6 +580,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(expected, readRDD.collect());
}
+ @SuppressWarnings("unchecked")
@Test
public void sequenceFile() {
File tempDir = Files.createTempDir();
@@ -602,6 +610,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(pairs, readRDD.collect());
}
+ @SuppressWarnings("unchecked")
@Test
public void writeWithNewAPIHadoopFile() {
File tempDir = Files.createTempDir();
@@ -632,6 +641,7 @@ public class JavaAPISuite implements Serializable {
}).collect().toString());
}
+ @SuppressWarnings("unchecked")
@Test
public void readWithNewAPIHadoopFile() throws IOException {
File tempDir = Files.createTempDir();
@@ -674,6 +684,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(expected, readRDD.collect());
}
+ @SuppressWarnings("unchecked")
@Test
public void objectFilesOfComplexTypes() {
File tempDir = Files.createTempDir();
@@ -690,6 +701,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(pairs, readRDD.collect());
}
+ @SuppressWarnings("unchecked")
@Test
public void hadoopFile() {
File tempDir = Files.createTempDir();
@@ -719,6 +731,7 @@ public class JavaAPISuite implements Serializable {
}).collect().toString());
}
+ @SuppressWarnings("unchecked")
@Test
public void hadoopFileCompressed() {
File tempDir = Files.createTempDir();
@@ -824,7 +837,7 @@ public class JavaAPISuite implements Serializable {
}
};
- final Accumulator<Float> floatAccum = sc.accumulator((Float) 10.0f, floatAccumulatorParam);
+ final Accumulator<Float> floatAccum = sc.accumulator(10.0f, floatAccumulatorParam);
rdd.foreach(new VoidFunction<Integer>() {
public void call(Integer x) {
floatAccum.add((float) x);
@@ -876,6 +889,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(Arrays.asList(1, 2, 3, 4, 5), recovered.collect());
}
+ @SuppressWarnings("unchecked")
@Test
public void mapOnPairRDD() {
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1,2,3,4));
@@ -900,6 +914,7 @@ public class JavaAPISuite implements Serializable {
}
+ @SuppressWarnings("unchecked")
@Test
public void collectPartitions() {
JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7), 3);
@@ -968,7 +983,7 @@ public class JavaAPISuite implements Serializable {
@Test
public void collectAsMapWithIntArrayValues() {
// Regression test for SPARK-1040
- JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(new Integer[] { 1 }));
+ JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1));
JavaPairRDD<Integer, int[]> pairRDD = rdd.map(new PairFunction<Integer, Integer, int[]>() {
@Override
public Tuple2<Integer, int[]> call(Integer x) throws Exception {
@@ -976,6 +991,6 @@ public class JavaAPISuite implements Serializable {
}
});
pairRDD.collect(); // Works fine
- Map<Integer, int[]> map = pairRDD.collectAsMap(); // Used to crash with ClassCastException
+ pairRDD.collectAsMap(); // Used to crash with ClassCastException
}
}