-rw-r--r--  core/pom.xml                                                                      |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala            |  1
-rw-r--r--  core/src/test/java/org/apache/spark/JavaAPISuite.java                            |  4
-rw-r--r--  core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala       |  4
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala           |  2
-rw-r--r--  mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java           |  2
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java        |  1
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala          |  4
-rw-r--r--  sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java           |  1
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala                 |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala     | 14
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala |  2
-rw-r--r--  sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java   | 20
-rw-r--r--  streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java             | 16
14 files changed, 45 insertions, 32 deletions
diff --git a/core/pom.xml b/core/pom.xml
index 1feb00b3a7..c5c41b2b5d 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -352,9 +352,9 @@
</execution>
</executions>
<configuration>
- <tasks>
+ <target>
<unzip src="../python/lib/py4j-0.8.2.1-src.zip" dest="../python/build" />
- </tasks>
+ </target>
</configuration>
</plugin>
<plugin>
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
index 819b51e12a..4896ec845b 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskResultGetter.scala
@@ -19,6 +19,7 @@ package org.apache.spark.scheduler
import java.nio.ByteBuffer
+import scala.language.existentials
import scala.util.control.NonFatal
import org.apache.spark._
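
A note on the hunk above: under -feature, the Scala 2.10 compiler warns when
existential types are used (the explicit forSome form, or certain inferred
existentials like the one this file produces) unless scala.language.existentials
is imported or -language:existentials is passed. A minimal, self-contained sketch
of the explicit forSome form that this import enables, not taken from the patch:

    import scala.language.existentials

    // An existential type: a Class paired with an instance of that same
    // (statically unknown) type T. Writing forSome requires the language
    // import to compile warning-free under -feature.
    type ClassAndInstance = (Class[T], T) forSome { type T }

    val pair: ClassAndInstance = (classOf[String], "hello")
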
diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index e5bdad6bda..5ce299d058 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -184,6 +184,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(new Tuple2<Integer, Integer>(3, 2), sortedPairs.get(2));
}
+ @SuppressWarnings("unchecked")
@Test
public void repartitionAndSortWithinPartitions() {
List<Tuple2<Integer, Integer>> pairs = new ArrayList<Tuple2<Integer, Integer>>();
@@ -491,6 +492,7 @@ public class JavaAPISuite implements Serializable {
Assert.assertEquals(33, sum);
}
+ @SuppressWarnings("unchecked")
@Test
public void aggregateByKey() {
JavaPairRDD<Integer, Integer> pairs = sc.parallelizePairs(
@@ -1556,7 +1558,7 @@ public class JavaAPISuite implements Serializable {
@Test
public void testRegisterKryoClasses() {
SparkConf conf = new SparkConf();
- conf.registerKryoClasses(new Class[]{ Class1.class, Class2.class });
+ conf.registerKryoClasses(new Class<?>[]{ Class1.class, Class2.class });
Assert.assertEquals(
Class1.class.getName() + "," + Class2.class.getName(),
conf.get("spark.kryo.classesToRegister"));
diff --git a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
index ca226fd4e6..f8bcde12a3 100644
--- a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala
@@ -24,14 +24,14 @@ import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.{SparkListenerTaskEnd, SparkListener}
import org.scalatest.FunSuite
-import org.scalatest.matchers.ShouldMatchers
+import org.scalatest.Matchers
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import scala.collection.mutable.ArrayBuffer
-class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with ShouldMatchers {
+class InputOutputMetricsSuite extends FunSuite with SharedSparkContext with Matchers {
test("input metrics when reading text file with single split") {
val file = new File(getClass.getSimpleName + ".txt")
val pw = new PrintWriter(new FileWriter(file))
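
For context on the hunk above: ScalaTest 2.0 deprecated
org.scalatest.matchers.ShouldMatchers in favor of org.scalatest.Matchers, which
provides the same should-style DSL. A minimal sketch, assuming ScalaTest 2.x on
the classpath:

    import org.scalatest.{FunSuite, Matchers}

    class ExampleSuite extends FunSuite with Matchers {
      test("should-style assertions") {
        // Same DSL previously supplied by the deprecated ShouldMatchers trait.
        (1 + 1) should be (2)
        List(1, 2, 3) should have length 3
      }
    }
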
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index 436eea4f1f..d6ec9e129c 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -739,7 +739,7 @@ class DAGSchedulerSuite extends TestKit(ActorSystem("DAGSchedulerSuite")) with F
test("accumulator not calculated for resubmitted result stage") {
//just for register
- val accum = new Accumulator[Int](0, SparkContext.IntAccumulatorParam)
+ val accum = new Accumulator[Int](0, AccumulatorParam.IntAccumulatorParam)
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
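
Aside on the hunk above: the accumulator is now constructed against
AccumulatorParam.IntAccumulatorParam directly rather than the alias on
SparkContext. For reference, an AccumulatorParam supplies the zero element and
the merge operation; a hypothetical custom instance under the Spark 1.x
accumulator API would look like:

    import org.apache.spark.AccumulatorParam

    // Illustrative only: a Long accumulator param defined by its zero
    // element and in-place addition.
    object LongAccumulatorParam extends AccumulatorParam[Long] {
      def zero(initialValue: Long): Long = 0L
      def addInPlace(t1: Long, t2: Long): Long = t1 + t2
    }
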
diff --git a/mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java b/mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java
index 064263e02c..fbc26167ce 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/feature/JavaTfIdfSuite.java
@@ -49,6 +49,7 @@ public class JavaTfIdfSuite implements Serializable {
public void tfIdf() {
// The tests are to check Java compatibility.
HashingTF tf = new HashingTF();
+ @SuppressWarnings("unchecked")
JavaRDD<ArrayList<String>> documents = sc.parallelize(Lists.newArrayList(
Lists.newArrayList("this is a sentence".split(" ")),
Lists.newArrayList("this is another sentence".split(" ")),
@@ -68,6 +69,7 @@ public class JavaTfIdfSuite implements Serializable {
public void tfIdfMinimumDocumentFrequency() {
// The tests are to check Java compatibility.
HashingTF tf = new HashingTF();
+ @SuppressWarnings("unchecked")
JavaRDD<ArrayList<String>> documents = sc.parallelize(Lists.newArrayList(
Lists.newArrayList("this is a sentence".split(" ")),
Lists.newArrayList("this is another sentence".split(" ")),
diff --git a/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java b/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
index b751847b46..f0d079d25b 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
@@ -35,6 +35,7 @@ public abstract class UserDefinedType<UserType> extends DataType implements Seri
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
+ @SuppressWarnings("unchecked")
UserDefinedType<UserType> that = (UserDefinedType<UserType>) o;
return this.sqlType().equals(that.sqlType());
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
index 0e6fb57d57..97447871a1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
@@ -24,8 +24,8 @@ import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.Job
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
+import parquet.format.converter.ParquetMetadataConverter
import parquet.hadoop.{ParquetFileReader, Footer, ParquetFileWriter}
import parquet.hadoop.metadata.{ParquetMetadata, FileMetaData}
import parquet.hadoop.util.ContextUtil
@@ -458,7 +458,7 @@ private[parquet] object ParquetTypesConverter extends Logging {
// ... and fallback to "_metadata" if no such file exists (which implies the Parquet file is
// empty, thus normally the "_metadata" file is expected to be fairly small).
.orElse(children.find(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE))
- .map(ParquetFileReader.readFooter(conf, _))
+ .map(ParquetFileReader.readFooter(conf, _, ParquetMetadataConverter.NO_FILTER))
.getOrElse(
throw new IllegalArgumentException(s"Could not find Parquet metadata at path $path"))
}
diff --git a/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java b/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
index bc5cd66482..2b5812159d 100644
--- a/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
+++ b/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
@@ -141,6 +141,7 @@ public class JavaRowSuite {
doubleValue, stringValue, timestampValue, null);
// Complex array
+ @SuppressWarnings("unchecked")
List<Map<String, Long>> arrayOfMaps = Arrays.asList(simpleMap);
List<Row> arrayOfRows = Arrays.asList(simpleStruct);
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
index e40d034ce4..691c4b3828 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
@@ -24,6 +24,8 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.dsl._
import org.apache.spark.sql.test.TestSQLContext._
+import scala.language.postfixOps
+
class DslQuerySuite extends QueryTest {
import org.apache.spark.sql.TestData._
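
On the import added above: scala.language.postfixOps gates postfix operator
notation, i.e. invoking an argument-less method with no dot and no parentheses.
A minimal sketch of what the import permits:

    import scala.language.postfixOps

    // Postfix notation: same as List(1, 2, 3).length, but warning-free
    // under -feature only with the language import in scope.
    val n = List(1, 2, 3) length
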
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
index 074855389d..a5fe2e8da2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
@@ -17,6 +17,8 @@
package org.apache.spark.sql.parquet
+import scala.reflect.ClassTag
+
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.Job
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}
@@ -459,11 +461,17 @@ class ParquetQuerySuite extends QueryTest with FunSuiteLike with BeforeAndAfterA
}
test("make RecordFilter for simple predicates") {
- def checkFilter[T <: FilterPredicate](predicate: Expression, defined: Boolean = true): Unit = {
+ def checkFilter[T <: FilterPredicate : ClassTag](
+ predicate: Expression,
+ defined: Boolean = true): Unit = {
val filter = ParquetFilters.createFilter(predicate)
if (defined) {
assert(filter.isDefined)
- assert(filter.get.isInstanceOf[T])
+ val tClass = implicitly[ClassTag[T]].runtimeClass
+ val filterGet = filter.get
+ assert(
+ tClass.isInstance(filterGet),
+ s"$filterGet of type ${filterGet.getClass} is not an instance of $tClass")
} else {
assert(filter.isEmpty)
}
@@ -484,7 +492,7 @@ class ParquetQuerySuite extends QueryTest with FunSuiteLike with BeforeAndAfterA
checkFilter[Operators.And]('a.int === 1 && 'a.int < 4)
checkFilter[Operators.Or]('a.int === 1 || 'a.int < 4)
- checkFilter[Operators.Not](!('a.int === 1))
+ checkFilter[Operators.NotEq[Integer]](!('a.int === 1))
checkFilter('a.int > 'b.int, defined = false)
checkFilter(('a.int > 'b.int) && ('a.int > 'b.int), defined = false)
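
About the checkFilter change above: isInstanceOf[T] on a bare type parameter can
only test T's erased upper bound (the compiler flags T as unchecked), so the old
assertion passed for any FilterPredicate. Requiring a ClassTag materializes T's
runtime class for a real check. The same pattern in isolation, as a standalone
sketch rather than Spark code:

    import scala.reflect.ClassTag

    // Materialize the erased type parameter T so it can be checked at runtime.
    def assertInstanceOf[T: ClassTag](value: Any): Unit = {
      val tClass = implicitly[ClassTag[T]].runtimeClass
      assert(tClass.isInstance(value),
        s"$value of type ${value.getClass} is not an instance of $tClass")
    }

    assertInstanceOf[String]("hello")  // passes
    // assertInstanceOf[java.lang.Integer]("hello") would fail with a clear message
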
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
index abed299cd9..2a16c9d1a2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.Writable
* when "spark.sql.hive.convertMetastoreParquet" is set to true.
*/
@deprecated("No code should depend on FakeParquetHiveSerDe as it is only intended as a " +
- "placeholder in the Hive MetaStore")
+ "placeholder in the Hive MetaStore", "1.2.0")
class FakeParquetSerDe extends SerDe {
override def getObjectInspector: ObjectInspector = new ObjectInspector {
override def getCategory: Category = Category.PRIMITIVE
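
On the hunk above: Scala's @deprecated annotation takes a message and the version
in which the symbol was first deprecated; the patch supplies the missing second
argument. A hypothetical example of the two-argument form:

    // The second argument records the "since" version shown in warnings and docs.
    @deprecated("use the new serde instead", "1.2.0")
    class OldSerDe
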
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
index d2d39a8c4d..808e2986d3 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
@@ -23,25 +23,21 @@ import java.util.List;
public class UDFListListInt extends UDF {
/**
- *
* @param obj
- * SQL schema: array<struct<x: int, y: int, z: int>>
- * Java Type: List<List<Integer>>
- * @return
+ * SQL schema: array&lt;struct&lt;x: int, y: int, z: int&gt;&gt;
+ * Java Type: List&lt;List&lt;Integer&gt;&gt;
*/
+ @SuppressWarnings("unchecked")
public long evaluate(Object obj) {
if (obj == null) {
- return 0l;
+ return 0L;
}
- List<List> listList = (List<List>) obj;
+ List<List<?>> listList = (List<List<?>>) obj;
long retVal = 0;
- for (List aList : listList) {
- @SuppressWarnings("unchecked")
- List<Object> list = (List<Object>) aList;
- @SuppressWarnings("unchecked")
- Integer someInt = (Integer) list.get(1);
+ for (List<?> aList : listList) {
+ Number someInt = (Number) aList.get(1);
try {
- retVal += (long) (someInt.intValue());
+ retVal += someInt.longValue();
} catch (NullPointerException e) {
System.out.println(e);
}
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
index ce645fccba..12cc0de750 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
@@ -57,7 +57,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
@Test
public void testInitialization() {
- Assert.assertNotNull(ssc.sc());
+ Assert.assertNotNull(ssc.sparkContext());
}
@SuppressWarnings("unchecked")
@@ -662,7 +662,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
listOfDStreams1,
new Function2<List<JavaRDD<?>>, Time, JavaRDD<Long>>() {
public JavaRDD<Long> call(List<JavaRDD<?>> listOfRDDs, Time time) {
- assert(listOfRDDs.size() == 2);
+ Assert.assertEquals(2, listOfRDDs.size());
return null;
}
}
@@ -675,7 +675,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
listOfDStreams2,
new Function2<List<JavaRDD<?>>, Time, JavaPairRDD<Integer, Tuple2<Integer, String>>>() {
public JavaPairRDD<Integer, Tuple2<Integer, String>> call(List<JavaRDD<?>> listOfRDDs, Time time) {
- assert(listOfRDDs.size() == 3);
+ Assert.assertEquals(3, listOfRDDs.size());
JavaRDD<Integer> rdd1 = (JavaRDD<Integer>)listOfRDDs.get(0);
JavaRDD<Integer> rdd2 = (JavaRDD<Integer>)listOfRDDs.get(1);
JavaRDD<Tuple2<Integer, String>> rdd3 = (JavaRDD<Tuple2<Integer, String>>)listOfRDDs.get(2);
@@ -969,7 +969,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
});
JavaTestUtils.attachTestOutputStream(reversed);
- List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
+ List<List<Integer>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
@@ -1012,7 +1012,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
}
});
JavaTestUtils.attachTestOutputStream(flatMapped);
- List<List<Tuple2<String, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
+ List<List<Tuple2<Integer, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}
@@ -1163,9 +1163,9 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
JavaTestUtils.attachTestOutputStream(groupWindowed);
List<List<Tuple2<String, List<Integer>>>> result = JavaTestUtils.runStreams(ssc, 3, 3);
- assert(result.size() == expected.size());
+ Assert.assertEquals(expected.size(), result.size());
for (int i = 0; i < result.size(); i++) {
- assert(convert(result.get(i)).equals(convert(expected.get(i))));
+ Assert.assertEquals(convert(expected.get(i)), convert(result.get(i)));
}
}
@@ -1383,7 +1383,7 @@ public class JavaAPISuite extends LocalJavaStreamingContext implements Serializa
});
JavaTestUtils.attachTestOutputStream(sorted);
- List<List<Tuple2<String, String>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
+ List<List<Tuple2<Integer, Integer>>> result = JavaTestUtils.runStreams(ssc, 2, 2);
Assert.assertEquals(expected, result);
}