author     Sean Owen <sowen@cloudera.com>  2016-01-08 17:47:44 +0000
committer  Sean Owen <sowen@cloudera.com>  2016-01-08 17:47:44 +0000
commit     b9c835337880f57fe8b953962913bcc524162348 (patch)
tree       5dc476b1a65d513210d1124db144aaa3c5f66679 /sql
parent     794ea553bd0fcfece15b610b47ee86d6644134c9 (diff)
[SPARK-12618][CORE][STREAMING][SQL] Clean up build warnings: 2.0.0 edition
Fix most build warnings: mostly deprecated API usages. I'll annotate some of the changes below. CC rxin who is leading the charge to remove the deprecated APIs.

Author: Sean Owen <sowen@cloudera.com>

Closes #10570 from srowen/SPARK-12618.
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala    8
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala             3
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java   19
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala                            4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala                                        5
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnarTestUtils.scala             1
6 files changed, 20 insertions, 20 deletions
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
index f869a96edb..e028d22a54 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
@@ -57,8 +57,8 @@ trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks {
(result, expected) match {
case (result: Array[Byte], expected: Array[Byte]) =>
java.util.Arrays.equals(result, expected)
- case (result: Double, expected: Spread[Double]) =>
- expected.isWithin(result)
+ case (result: Double, expected: Spread[Double @unchecked]) =>
+ expected.asInstanceOf[Spread[Double]].isWithin(result)
case _ => result == expected
}
}
@@ -275,8 +275,8 @@ trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks {
(result, expected) match {
case (result: Array[Byte], expected: Array[Byte]) =>
java.util.Arrays.equals(result, expected)
- case (result: Double, expected: Spread[Double]) =>
- expected.isWithin(result)
+ case (result: Double, expected: Spread[Double @unchecked]) =>
+ expected.asInstanceOf[Spread[Double]].isWithin(result)
case (result: Double, expected: Double) if result.isNaN && expected.isNaN =>
true
case (result: Float, expected: Float) if result.isNaN && expected.isNaN =>
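Annotation: the change above addresses the "unchecked" warning that arises because type arguments are erased at runtime, so a pattern like Spread[Double] can only test the Spread class, not the Double argument. Marking the argument @unchecked acknowledges this. A minimal, self-contained sketch of the same pattern, using a hypothetical Box class rather than Scalactic's Spread:

    case class Box[T](value: T)

    def doubleIn(expected: Any): Option[Double] = expected match {
      // Only the Box class can be checked at runtime; the Double argument is
      // erased, so @unchecked suppresses the warning. Callers must ensure that
      // only Box[Double] values reach this match, as the test helper does.
      case b: Box[Double @unchecked] => Some(b.value)
      case _ => None
    }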
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala
index d5f1c4d74e..6745b4b6c3 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala
@@ -384,9 +384,6 @@ class DateTimeUtilsSuite extends SparkFunSuite {
Timestamp.valueOf("1700-02-28 12:14:50.123456")).foreach { t =>
val us = fromJavaTimestamp(t)
assert(toJavaTimestamp(us) === t)
- assert(getHours(us) === t.getHours)
- assert(getMinutes(us) === t.getMinutes)
- assert(getSeconds(us) === t.getSeconds)
}
}
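Annotation: the removed assertions used Timestamp.getHours, getMinutes and getSeconds, which are inherited from java.util.Date and have long been deprecated in favor of java.util.Calendar. A rough sketch of the non-deprecated way to read those fields (not part of the patch, which simply drops the assertions):

    import java.sql.Timestamp
    import java.util.Calendar

    def hourMinuteSecond(t: Timestamp): (Int, Int, Int) = {
      val cal = Calendar.getInstance()   // uses the default time zone
      cal.setTimeInMillis(t.getTime)
      (cal.get(Calendar.HOUR_OF_DAY), cal.get(Calendar.MINUTE), cal.get(Calendar.SECOND))
    }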
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
index f8e32d60a4..6bcd155ccd 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
@@ -21,6 +21,7 @@ package org.apache.spark.sql.execution.datasources.parquet;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -62,7 +63,7 @@ import org.apache.parquet.schema.Types;
import org.apache.spark.sql.types.StructType;
/**
- * Base class for custom RecordReaaders for Parquet that directly materialize to `T`.
+ * Base class for custom RecordReaders for Parquet that directly materialize to `T`.
* This class handles computing row groups, filtering on them, setting up the column readers,
* etc.
* This is heavily based on parquet-mr's RecordReader.
@@ -83,6 +84,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
protected ParquetFileReader reader;
+ @Override
public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
throws IOException, InterruptedException {
Configuration configuration = taskAttemptContext.getConfiguration();
@@ -131,8 +133,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
}
this.fileSchema = footer.getFileMetaData().getSchema();
Map<String, String> fileMetadata = footer.getFileMetaData().getKeyValueMetaData();
- ReadSupport<T> readSupport = getReadSupportInstance(
- (Class<? extends ReadSupport<T>>) getReadSupportClass(configuration));
+ ReadSupport<T> readSupport = getReadSupportInstance(getReadSupportClass(configuration));
ReadSupport.ReadContext readContext = readSupport.init(new InitContext(
taskAttemptContext.getConfiguration(), toSetMultiMap(fileMetadata), fileSchema));
this.requestedSchema = readContext.getRequestedSchema();
@@ -282,8 +283,9 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
return Collections.unmodifiableMap(setMultiMap);
}
- private static Class<?> getReadSupportClass(Configuration configuration) {
- return ConfigurationUtil.getClassFromConfig(configuration,
+ @SuppressWarnings("unchecked")
+ private Class<? extends ReadSupport<T>> getReadSupportClass(Configuration configuration) {
+ return (Class<? extends ReadSupport<T>>) ConfigurationUtil.getClassFromConfig(configuration,
ParquetInputFormat.READ_SUPPORT_CLASS, ReadSupport.class);
}
@@ -294,10 +296,9 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
private static <T> ReadSupport<T> getReadSupportInstance(
Class<? extends ReadSupport<T>> readSupportClass){
try {
- return readSupportClass.newInstance();
- } catch (InstantiationException e) {
- throw new BadConfigurationException("could not instantiate read support class", e);
- } catch (IllegalAccessException e) {
+ return readSupportClass.getConstructor().newInstance();
+ } catch (InstantiationException | IllegalAccessException |
+ NoSuchMethodException | InvocationTargetException e) {
throw new BadConfigurationException("could not instantiate read support class", e);
}
}
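Annotation: Class.newInstance() is discouraged (and deprecated in later JDKs) because it propagates any checked exception the no-arg constructor throws; getConstructor().newInstance() instead wraps such failures in InvocationTargetException, which is why the handled exception list above grows. A hedged Scala sketch of the same idiom with a hypothetical Plugin trait (the patched file itself is Java):

    import java.lang.reflect.InvocationTargetException

    trait Plugin { def name: String }

    def instantiate(cls: Class[_ <: Plugin]): Plugin =
      try {
        // Unlike Class.newInstance(), this wraps constructor failures in
        // InvocationTargetException instead of rethrowing them unchecked.
        cls.getConstructor().newInstance()
      } catch {
        case e @ (_: InstantiationException | _: IllegalAccessException |
                  _: NoSuchMethodException | _: InvocationTargetException) =>
          throw new IllegalArgumentException(s"could not instantiate ${cls.getName}", e)
      }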
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index 076db0c08d..eb4efcd1d4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -580,7 +580,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
)
}
- test("sparkPartitionId") {
+ test("spark_partition_id") {
// Make sure we have 2 partitions, each with 2 records.
val df = sparkContext.parallelize(Seq[Int](), 2).mapPartitions { _ =>
Iterator(Tuple1(1), Tuple1(2))
@@ -591,7 +591,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
)
}
- test("InputFileName") {
+ test("input_file_name") {
withTempPath { dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
data.write.parquet(dir.getCanonicalPath)
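Annotation: the renamed tests follow the public API, where the camel-cased sparkPartitionId and inputFileName functions were deprecated in favor of the SQL-style names spark_partition_id and input_file_name in org.apache.spark.sql.functions. A short usage sketch (assumes a sqlContext is in scope; the path is hypothetical):

    import org.apache.spark.sql.functions.{input_file_name, spark_partition_id}

    val withMeta = sqlContext.read.parquet("/tmp/example")   // hypothetical path
      .select(input_file_name().as("file"), spark_partition_id().as("pid"))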
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index 0e60573dc6..fac26bd0c0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql
import java.util.{Locale, TimeZone}
import scala.collection.JavaConverters._
+import scala.util.control.NonFatal
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.ImperativeAggregate
@@ -206,7 +207,7 @@ abstract class QueryTest extends PlanTest {
val jsonString = try {
logicalPlan.toJSON
} catch {
- case e =>
+ case NonFatal(e) =>
fail(
s"""
|Failed to parse logical plan to JSON:
@@ -231,7 +232,7 @@ abstract class QueryTest extends PlanTest {
val jsonBackPlan = try {
TreeNode.fromJSON[LogicalPlan](jsonString, sqlContext.sparkContext)
} catch {
- case e =>
+ case NonFatal(e) =>
fail(
s"""
|Failed to rebuild the logical plan from JSON:
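Annotation: a bare `case e =>` in a catch block matches every Throwable, including fatal JVM errors, and the compiler warns about it. scala.util.control.NonFatal (imported at the top of the file in this patch) matches only ordinary exceptions and lets fatal errors such as OutOfMemoryError propagate. A minimal sketch of the idiom:

    import scala.util.control.NonFatal

    def parseOrElse(s: String, default: Int): Int =
      try s.trim.toInt
      catch {
        // NonFatal does not match OutOfMemoryError, ThreadDeath,
        // control-flow throwables, etc., so those still propagate.
        case NonFatal(_) => default
      }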
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnarTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnarTestUtils.scala
index 97cba1e349..1529313dfb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnarTestUtils.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnarTestUtils.scala
@@ -60,6 +60,7 @@ object ColumnarTestUtils {
case MAP(_) =>
ArrayBasedMapData(
Map(Random.nextInt() -> UTF8String.fromString(Random.nextString(Random.nextInt(32)))))
+ case _ => throw new IllegalArgumentException(s"Unknown column type $columnType")
}).asInstanceOf[JvmType]
}