authorSean Owen <sowen@cloudera.com>2015-03-11 13:15:19 +0000
committerSean Owen <sowen@cloudera.com>2015-03-11 13:15:19 +0000
commit6e94c4eadf443ac3d34eaae4c334c8386fdec960 (patch)
treef55b474a450b1c3cd085b63dd3cd9291d812fa0f
parentec30c17822329e6d2b8c85625b31ba8bd8679fcf (diff)
SPARK-6225 [CORE] [SQL] [STREAMING] Resolve most build warnings, 1.3.0 edition
Resolve javac, scalac warnings of various types -- deprecations, Scala lang, unchecked cast, etc.

Author: Sean Owen <sowen@cloudera.com>

Closes #4950 from srowen/SPARK-6225 and squashes the following commits:

3080972 [Sean Owen] Ordered imports: Java, Scala, 3rd party, Spark
c67985b [Sean Owen] Resolve javac, scalac warnings of various types -- deprecations, Scala lang, unchecked cast, etc.
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala | 2
-rw-r--r--  core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala | 3
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java | 1
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala | 4
-rw-r--r--  external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java | 21
-rw-r--r--  external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java | 21
-rw-r--r--  external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java | 14
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala | 1
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala | 2
-rw-r--r--  streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java | 4
15 files changed, 40 insertions, 49 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 1a0bee4e3a..8121aab3b0 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1104,7 +1104,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
if (!fs.exists(hadoopPath)) {
throw new FileNotFoundException(s"Added file $hadoopPath does not exist.")
}
- val isDir = fs.isDirectory(hadoopPath)
+ val isDir = fs.getFileStatus(hadoopPath).isDir
if (!isLocal && scheme == "file" && isDir) {
throw new SparkException(s"addFile does not support local directories when not running " +
"local mode.")
diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
index 2091a9fe8d..34fa6d27c3 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala
@@ -95,7 +95,7 @@ private[spark] class EventLoggingListener(
* Creates the log file in the configured log directory.
*/
def start() {
- if (!fileSystem.isDirectory(new Path(logBaseDir))) {
+ if (!fileSystem.getFileStatus(new Path(logBaseDir)).isDir) {
throw new IllegalArgumentException(s"Log directory $logBaseDir does not exist.")
}
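[Editor's note, not part of the patch] The two hunks above replace the deprecated FileSystem.isDirectory(Path) overload with a FileStatus lookup. A minimal standalone sketch of the same pattern (illustrative names, not Spark code); newer Hadoop releases deprecate FileStatus.isDir in favor of FileStatus.isDirectory, but isDir was the portable choice across the Hadoop versions Spark 1.3 supported:

// Check whether a path is a directory without FileSystem.isDirectory(Path).
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

object DirCheckSketch {
  def isDir(pathStr: String): Boolean = {
    val path = new Path(pathStr)
    val fs = path.getFileSystem(new Configuration())
    // One metadata lookup; throws FileNotFoundException if the path is absent.
    fs.getFileStatus(path).isDir
  }
}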
diff --git a/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala b/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
index d9c7103b2f..1e0ba5c287 100644
--- a/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
+++ b/core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
@@ -23,8 +23,6 @@ import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._
-import org.apache.spark.util.ParentClassLoader
-
/**
* URL class loader that exposes the `addURL` and `getURLs` methods in URLClassLoader.
*/
diff --git a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
index 3cc860caa1..c8c9578562 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala
@@ -153,7 +153,7 @@ class OutputCommitCoordinatorSuite extends FunSuite with BeforeAndAfter {
def resultHandler(x: Int, y: Unit): Unit = {}
val futureAction: SimpleFutureAction[Unit] = sc.submitJob[Int, Unit, Unit](rdd,
OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully,
- 0 until rdd.partitions.size, resultHandler, 0)
+ 0 until rdd.partitions.size, resultHandler, () => Unit)
// It's an error if the job completes successfully even though no committer was authorized,
// so throw an exception if the job was allowed to complete.
intercept[TimeoutException] {
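[Editor's note, not part of the patch] For context on the change above: SparkContext.submitJob takes a per-partition function, the partition ids to run, a result handler, and a final by-name resultFunc whose value becomes the job's overall result R; in this test R is Unit, so the last argument only needs to typecheck as a discarded Unit-typed value. A hedged sketch of the call shape (illustrative names, assuming the submitJob signature of that era):

import org.apache.spark.{SimpleFutureAction, SparkConf, SparkContext}

object SubmitJobSketch {
  def run(): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("sketch"))
    val rdd = sc.parallelize(1 to 10, 2)
    val future: SimpleFutureAction[Unit] = sc.submitJob[Int, Unit, Unit](
      rdd,
      (it: Iterator[Int]) => it.foreach(_ => ()),  // work per partition
      0 until rdd.partitions.size,                 // which partitions to run
      (index: Int, result: Unit) => (),            // per-partition result handler
      ()                                           // resultFunc: the overall (discarded) result
    )
    sc.stop()
  }
}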
diff --git a/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala b/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
index 31e3b7e7bb..87de90bb0d 100644
--- a/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala
@@ -21,8 +21,7 @@ import java.net.URLClassLoader
import org.scalatest.FunSuite
-import org.apache.spark.{LocalSparkContext, SparkContext, SparkException, TestUtils}
-import org.apache.spark.util.Utils
+import org.apache.spark.{SparkContext, SparkException, TestUtils}
class MutableURLClassLoaderSuite extends FunSuite {
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
index d46c7107c7..dbf2ef02d7 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
@@ -82,6 +82,7 @@ public class JavaStatefulNetworkWordCount {
ssc.checkpoint(".");
// Initial RDD input to updateStateByKey
+ @SuppressWarnings("unchecked")
List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<String, Integer>("hello", 1),
new Tuple2<String, Integer>("world", 1));
JavaPairRDD<String, Integer> initialRDD = ssc.sc().parallelizePairs(tuples);
diff --git a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
index 822673347b..f4684b42b5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
@@ -18,7 +18,7 @@
package org.apache.spark.examples
import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor}
+import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark._
@@ -36,7 +36,7 @@ object HBaseTest {
// Initialize hBase table if necessary
val admin = new HBaseAdmin(conf)
if (!admin.isTableAvailable(args(0))) {
- val tableDesc = new HTableDescriptor(args(0))
+ val tableDesc = new HTableDescriptor(TableName.valueOf(args(0)))
admin.createTable(tableDesc)
}
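[Editor's note, not part of the patch] The hunk above swaps the deprecated HTableDescriptor(String) constructor for the TableName-based one. A self-contained variant of the same snippet, sketched against the HBase client API of that era; TableName.valueOf validates the table name and carries the namespace, which a bare String could not:

import org.apache.hadoop.hbase.{HBaseConfiguration, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.HBaseAdmin

object HBaseTableSketch {
  def ensureTable(tableName: String): Unit = {
    val admin = new HBaseAdmin(HBaseConfiguration.create())
    if (!admin.isTableAvailable(tableName)) {
      admin.createTable(new HTableDescriptor(TableName.valueOf(tableName)))
    }
    admin.close()
  }
}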
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
index 1334cc8fd1..d6ca6d58b5 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaDirectKafkaStreamSuite.java
@@ -20,32 +20,27 @@ package org.apache.spark.streaming.kafka;
import java.io.Serializable;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Random;
import java.util.Arrays;
-import org.apache.spark.SparkConf;
-
import scala.Tuple2;
-import junit.framework.Assert;
-
import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
-import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.Durations;
+import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
public class JavaDirectKafkaStreamSuite implements Serializable {
private transient JavaStreamingContext ssc = null;
- private transient Random random = new Random();
private transient KafkaStreamSuiteBase suiteBase = null;
@Before
@@ -93,7 +88,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
).map(
new Function<Tuple2<String, String>, String>() {
@Override
- public String call(scala.Tuple2<String, String> kv) throws Exception {
+ public String call(Tuple2<String, String> kv) throws Exception {
return kv._2();
}
}
@@ -121,7 +116,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
unifiedStream.foreachRDD(
new Function<JavaRDD<String>, Void>() {
@Override
- public Void call(org.apache.spark.api.java.JavaRDD<String> rdd) throws Exception {
+ public Void call(JavaRDD<String> rdd) throws Exception {
result.addAll(rdd.collect());
return null;
}
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
index 9d2e1705c6..4477b81827 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaRDDSuite.java
@@ -19,27 +19,22 @@ package org.apache.spark.streaming.kafka;
import java.io.Serializable;
import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Arrays;
-
-import org.apache.spark.SparkConf;
import scala.Tuple2;
-import junit.framework.Assert;
-
import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
public class JavaKafkaRDDSuite implements Serializable {
private transient JavaSparkContext sc = null;
private transient KafkaStreamSuiteBase suiteBase = null;
@@ -78,8 +73,8 @@ public class JavaKafkaRDDSuite implements Serializable {
OffsetRange.create(topic2, 0, 0, 1)
};
- HashMap<TopicAndPartition, Broker> emptyLeaders = new HashMap();
- HashMap<TopicAndPartition, Broker> leaders = new HashMap();
+ HashMap<TopicAndPartition, Broker> emptyLeaders = new HashMap<TopicAndPartition, Broker>();
+ HashMap<TopicAndPartition, Broker> leaders = new HashMap<TopicAndPartition, Broker>();
String[] hostAndPort = suiteBase.brokerAddress().split(":");
Broker broker = Broker.create(hostAndPort[0], Integer.parseInt(hostAndPort[1]));
leaders.put(new TopicAndPartition(topic1, 0), broker);
@@ -96,7 +91,7 @@ public class JavaKafkaRDDSuite implements Serializable {
).map(
new Function<Tuple2<String, String>, String>() {
@Override
- public String call(scala.Tuple2<String, String> kv) throws Exception {
+ public String call(Tuple2<String, String> kv) throws Exception {
return kv._2();
}
}
diff --git a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
index 208cc51b29..bad0a93eb2 100644
--- a/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
+++ b/external/kafka/src/test/java/org/apache/spark/streaming/kafka/JavaKafkaStreamSuite.java
@@ -22,27 +22,25 @@ import java.util.HashMap;
import java.util.List;
import java.util.Random;
-import org.apache.spark.SparkConf;
-import org.apache.spark.streaming.Duration;
import scala.Predef;
import scala.Tuple2;
import scala.collection.JavaConverters;
-import junit.framework.Assert;
-
import kafka.serializer.StringDecoder;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.storage.StorageLevel;
+import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
-import org.junit.Test;
-import org.junit.After;
-import org.junit.Before;
-
public class JavaKafkaStreamSuite implements Serializable {
private transient JavaStreamingContext ssc = null;
private transient Random random = new Random();
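[Editor's note, not part of the patch] The three Kafka test diffs above are mostly import cleanup: the deprecated junit.framework.Assert is replaced by org.junit.Assert, unused imports are dropped, and the remaining imports are regrouped per the convention named in the commit message (Java, then Scala, then third-party, then Spark). An illustrative Scala example of that grouping:

import java.util.Properties

import scala.collection.mutable.ArrayBuffer

import org.apache.hadoop.fs.Path

import org.apache.spark.SparkConf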
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
index c399496568..5f5a996a87 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
@@ -199,12 +199,12 @@ object MatrixFactorizationModel extends Loader[MatrixFactorizationModel] {
assert(formatVersion == thisFormatVersion)
val rank = (metadata \ "rank").extract[Int]
val userFeatures = sqlContext.parquetFile(userPath(path))
- .map { case Row(id: Int, features: Seq[Double]) =>
- (id, features.toArray)
+ .map { case Row(id: Int, features: Seq[_]) =>
+ (id, features.asInstanceOf[Seq[Double]].toArray)
}
val productFeatures = sqlContext.parquetFile(productPath(path))
- .map { case Row(id: Int, features: Seq[Double]) =>
- (id, features.toArray)
+ .map { case Row(id: Int, features: Seq[_]) =>
+ (id, features.asInstanceOf[Seq[Double]].toArray)
}
new MatrixFactorizationModel(rank, userFeatures, productFeatures)
}
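[Editor's note, not part of the patch] The hunk above fixes an unchecked-cast warning caused by type erasure: a pattern like `features: Seq[Double]` cannot actually verify the element type at runtime, so scalac flags it. Matching on Seq[_] and casting states the assumption explicitly. A minimal standalone sketch in plain Scala:

object ErasureSketch {
  def sumOfDoubles(value: Any): Double = value match {
    case xs: Seq[_] =>                  // runtime check: it is some Seq
      xs.asInstanceOf[Seq[Double]].sum  // documented assumption about the elements
    case _ => 0.0
  }
}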
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
index 5020689f7a..76754a6ce4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/ddl.scala
@@ -17,6 +17,7 @@
package org.apache.spark.sql.sources
+import scala.language.existentials
import scala.language.implicitConversions
import org.apache.spark.Logging
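[Editor's note, not part of the patch] The hunk above enables the existential-types language feature: under -feature, scalac warns when an existential type (forSome) is written or inferred unless scala.language.existentials is in scope. A hedged sketch of a type that needs the flag:

import scala.language.existentials

object ExistentialSketch {
  // Both members share one unknown-but-equal type parameter; this cannot be
  // expressed with wildcards alone, which is what makes it existential.
  type Keyed = (Class[T], T) forSome { type T }

  def describe(k: Keyed): String = k._1.getName
}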
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
index 23df6e7eac..17e923ca48 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ScalaReflectionRelationSuite.scala
@@ -86,7 +86,7 @@ class ScalaReflectionRelationSuite extends FunSuite {
assert(sql("SELECT * FROM reflectData").collect().head ===
Row("a", 1, 1L, 1.toFloat, 1.toDouble, 1.toShort, 1.toByte, true,
- new java.math.BigDecimal(1), new Date(70, 0, 1), // This is 1970-01-01
+ new java.math.BigDecimal(1), Date.valueOf("1970-01-01"),
new Timestamp(12345), Seq(1,2,3)))
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
index 09bbd5c867..3181cfe400 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
@@ -75,7 +75,7 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
Literal(0.asInstanceOf[Float]) ::
Literal(0.asInstanceOf[Double]) ::
Literal("0") ::
- Literal(new java.sql.Date(114, 8, 23)) ::
+ Literal(java.sql.Date.valueOf("2014-09-23")) ::
Literal(Decimal(BigDecimal(123.123))) ::
Literal(new java.sql.Timestamp(123123)) ::
Literal(Array[Byte](1,2,3)) ::
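[Editor's note, not part of the patch] The two SQL test changes above replace the deprecated java.sql.Date(int, int, int) constructor, which counts years from 1900 and months from 0, with Date.valueOf and an unambiguous yyyy-[m]m-[d]d string. A small sketch showing the equivalence:

import java.sql.Date

object DateSketch {
  val viaDeprecatedCtor: Date = new Date(114, 8, 23)      // 2014-09-23, but deprecated
  val viaValueOf: Date        = Date.valueOf("2014-09-23") // equivalent, no warning
}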
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
index 57302ff407..90340753a4 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
@@ -316,6 +316,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
testReduceByWindow(false);
}
+ @SuppressWarnings("unchecked")
private void testReduceByWindow(boolean withInverse) {
List<List<Integer>> inputData = Arrays.asList(
Arrays.asList(1,2,3),
@@ -684,6 +685,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
JavaDStream<Long> transformed1 = ssc.transform(
listOfDStreams1,
new Function2<List<JavaRDD<?>>, Time, JavaRDD<Long>>() {
+ @Override
public JavaRDD<Long> call(List<JavaRDD<?>> listOfRDDs, Time time) {
Assert.assertEquals(2, listOfRDDs.size());
return null;
@@ -697,6 +699,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
JavaPairDStream<Integer, Tuple2<Integer, String>> transformed2 = ssc.transformToPair(
listOfDStreams2,
new Function2<List<JavaRDD<?>>, Time, JavaPairRDD<Integer, Tuple2<Integer, String>>>() {
+ @Override
public JavaPairRDD<Integer, Tuple2<Integer, String>> call(List<JavaRDD<?>> listOfRDDs, Time time) {
Assert.assertEquals(3, listOfRDDs.size());
JavaRDD<Integer> rdd1 = (JavaRDD<Integer>)listOfRDDs.get(0);
@@ -1829,6 +1832,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
return expected;
}
+ @SuppressWarnings("unchecked")
// SPARK-5795: no logic assertions, just testing that intended API invocations compile
private void compileSaveAsJavaAPI(JavaPairDStream<LongWritable,Text> pds) {
pds.saveAsNewAPIHadoopFiles(