author    Sean Owen <sowen@cloudera.com>        2014-12-24 13:32:51 -0800
committer Josh Rosen <joshrosen@databricks.com> 2014-12-24 13:32:51 -0800
commit    29fabb1b528e60b2f65132a9ab64f2fd95b729ba (patch)
tree      00028a1f4ea48e77ede7e8f9bdfaa0cb324b74e7 /core
parent    199e59aacd540e17b31f38e0e32a3618870e9055 (diff)
SPARK-4297 [BUILD] Build warning fixes omnibus
There are a number of warnings generated in a normal, successful build right now. They're mostly Java unchecked cast warnings, which can be suppressed. But there's a grab bag of other Scala language warnings and so on that can all be easily fixed. The forthcoming PR fixes about 90% of the build warnings I see now.

Author: Sean Owen <sowen@cloudera.com>

Closes #3157 from srowen/SPARK-4297 and squashes the following commits:

8c9e469 [Sean Owen] Suppress unchecked cast warnings, and several other build warning fixes
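The Java-side suppressions below use @SuppressWarnings("unchecked"); the closest Scala analogue, sketched here with hypothetical names (not code from this commit), is annotating the erased type argument of a pattern with @unchecked:

// Hedged sketch: matching on a generic type is "unchecked" because the
// element type is erased at runtime; the @unchecked annotation tells
// scalac the cast is deliberate and silences the warning.
def firstInt(x: Any): Option[Int] = x match {
  case xs: List[Int @unchecked] => xs.headOption
  case _ => None
}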
Diffstat (limited to 'core')
-rw-r--r--  core/pom.xml                                                                | 4 ++--
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala       | 1 +
-rw-r--r--  core/src/test/java/org/apache/spark/JavaAPISuite.java                       | 4 +++-
-rw-r--r--  core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala  | 4 ++--
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala      | 2 +-
5 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/core/pom.xml b/core/pom.xml
index 1feb00b3a7..c5c41b2b5d 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -352,9 +352,9 @@
</execution>
</executions>
<configuration>
- <tasks>
+ <target>
<unzip src="../python/lib/py4j-0.8.2.1-src.zip" dest="../python/build" />
- </tasks>
+ </target>
</configuration>
</plugin>
<plugin>
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
index 819b51e12a..4896ec845b 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
@@ -19,6 +19,7 @@ package org.apache.spark.scheduler
import java.nio.ByteBuffer
+import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.spark._
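A note on the new import: scalac emits a feature warning when it infers an existential type that cannot be written with plain wildcards, and importing scala.language.existentials acknowledges the feature. A minimal sketch (hypothetical code, not from TaskResultGetter) that would otherwise trigger the warning:

import scala.language.existentials

// Both tuple components share one unknown element type, so the inferred
// result is (List[_$1], List[_$1]) forSome { type _$1 } -- an existential
// that independent wildcards cannot express. Without the import above,
// scalac reports a feature warning for this definition (exact wording
// varies by compiler version).
def pairUp(x: Any) = x match {
  case xs: List[_] => (xs, xs)
  case _ => (Nil, Nil)
}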
diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index e5bdad6bda..5ce299d058 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -184,6 +184,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(new Tuple2<Integer, Integer>(3, 2), sortedPairs.get(2));
}
+ @SuppressWarnings("unchecked")
@Test
public void repartitionAndSortWithinPartitions() {
List<Tuple2<Integer, Integer>> pairs = new ArrayList<Tuple2<Integer, Integer>>();
@@ -491,6 +492,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(33, sum);
}
+ @SuppressWarnings("unchecked")
@Test
public void aggregateByKey() {
JavaPairRDD<Integer, Integer> pairs = sc.parallelizePairs(
@@ -1556,7 +1558,7 @@ public class JavaAPISuite implements Serializable {
@Test
public void testRegisterKryoClasses() {
SparkConf conf = new SparkConf();
- conf.registerKryoClasses(new Class[]{ Class1.class, Class2.class });
+ conf.registerKryoClasses(new Class<?>[]{ Class1.class, Class2.class });
Assert.assertEquals(
Class1.class.getName() + "," + Class2.class.getName(),
conf.get("spark.kryo.classesToRegister"));
diff --git a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
index ca226fd4e6..f8bcde12a3 100644
--- a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
@@ -24,14 +24,14 @@ import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.{SparkListenerTaskEnd, SparkListener}
import org.scalatest.FunSuite
-import org.scalatest.matchers.ShouldMatchers
+import org.scalatest.Matchers
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import scala.collection.mutable.ArrayBuffer
-class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with ShouldMatchers {
+class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with Matchers {
test("input metrics when reading text file with single split") {
val file = new File(getClass.getSimpleName + ".txt")
val pw = new PrintWriter(new FileWriter(file))
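ShouldMatchers was deprecated in ScalaTest and later removed; Matchers provides the same should-style assertions. A minimal sketch of the replacement trait in use (hypothetical suite, not from this file):

import org.scalatest.{FunSuite, Matchers}

// Minimal sketch: Matchers supplies the same `should` DSL that the
// deprecated ShouldMatchers provided.
class MatchersSketchSuite extends FunSuite with Matchers {
  test("should syntax still works") {
    val doubled = Seq(1, 2, 3).map(_ * 2)
    doubled should be (Seq(2, 4, 6))
    doubled.sum should be > 5
  }
}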
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index 436eea4f1f..d6ec9e129c 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -739,7 +739,7 @@ class DAGSchedulerSuite extends TestKit(ActorSystem("DAGSchedulerSuite")) with F
test("accumulator not calculated for resubmitted result stage") {
//just for register
- val accum = new Accumulator[Int](0, SparkContext.IntAccumulatorParam)
+ val accum = new Accumulator[Int](0, AccumulatorParam.IntAccumulatorParam)
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
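The last hunk replaces a reference that had been deprecated in favor of AccumulatorParam.IntAccumulatorParam, silencing a deprecation warning. A hedged sketch of both non-deprecated ways to obtain an Int accumulator in Spark 1.x (names and local-mode setup assumed, not from the suite):

import org.apache.spark.{Accumulator, AccumulatorParam, SparkConf, SparkContext}

object AccumulatorSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("sketch"))

    // Explicit param, mirroring the test above; construction also
    // registers the accumulator, which is all the test needs.
    val accum = new Accumulator[Int](0, AccumulatorParam.IntAccumulatorParam)
    assert(accum.value == 0)

    // Idiomatic alternative: the implicit AccumulatorParam[Int] in
    // scope is resolved automatically.
    val accum2 = sc.accumulator(0)
    sc.parallelize(1 to 4).foreach(i => accum2 += i)
    assert(accum2.value == 10)
    sc.stop()
  }
}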