author     Sean Owen <sowen@cloudera.com>          2014-12-24 13:32:51 -0800
committer  Josh Rosen <joshrosen@databricks.com>   2014-12-24 13:32:51 -0800
commit     29fabb1b528e60b2f65132a9ab64f2fd95b729ba
tree       00028a1f4ea48e77ede7e8f9bdfaa0cb324b74e7 /sql
parent     199e59aacd540e17b31f38e0e32a3618870e9055
SPARK-4297 [BUILD] Build warning fixes omnibus
There are a number of warnings generated in a normal, successful build right now. They're mostly Java unchecked cast warnings, which can be suppressed. But there's a grab bag of other Scala language warnings and so on that can all be easily fixed. The forthcoming PR fixes about 90% of the build warnings I see now.

Author: Sean Owen <sowen@cloudera.com>

Closes #3157 from srowen/SPARK-4297 and squashes the following commits:

8c9e469 [Sean Owen] Suppress unchecked cast warnings, and several other build warning fixes
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java        |  1
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala          |  4
-rw-r--r--  sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java           |  1
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala                 |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala     | 14
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala |  2
-rw-r--r--  sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java   | 20
7 files changed, 26 insertions(+), 18 deletions(-)
diff --git a/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java b/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
index b751847b46..f0d079d25b 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/api/java/UserDefinedType.java
@@ -35,6 +35,7 @@ public abstract class UserDefinedType<UserType> extends DataType implements Seri
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
+ @SuppressWarnings("unchecked")
UserDefinedType<UserType> that = (UserDefinedType<UserType>) o;
return this.sqlType().equals(that.sqlType());
}
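Scoping @SuppressWarnings("unchecked") to the single local-variable declaration, as above, silences only the one unavoidable cast while the rest of equals() stays checked. Scala raises the equivalent warning when a pattern match tests a type argument that erasure removes, and offers a similarly narrow fix; a minimal sketch of that Scala-side idiom (the method and types are illustrative, not from this patch):

    // Warns under -unchecked: the String type argument is eliminated by
    // erasure, so only the List part of the test is checked at runtime.
    def headOrEmpty(x: Any): String = x match {
      case xs: List[String] => xs.headOption.getOrElse("")
      case _ => ""
    }

    // Narrow fix: annotate just the erased type argument with @unchecked,
    // analogous to suppressing the single Java cast above.
    def headOrEmptyQuiet(x: Any): String = x match {
      case xs: List[String @unchecked] => xs.headOption.getOrElse("")
      case _ => ""
    }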
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
index 0e6fb57d57..97447871a1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTypes.scala
@@ -24,8 +24,8 @@ import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.Job
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
+import parquet.format.converter.ParquetMetadataConverter
import parquet.hadoop.{ParquetFileReader, Footer, ParquetFileWriter}
import parquet.hadoop.metadata.{ParquetMetadata, FileMetaData}
import parquet.hadoop.util.ContextUtil
@@ -458,7 +458,7 @@ private[parquet] object ParquetTypesConverter extends Logging {
// ... and fallback to "_metadata" if no such file exists (which implies the Parquet file is
// empty, thus normally the "_metadata" file is expected to be fairly small).
.orElse(children.find(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE))
- .map(ParquetFileReader.readFooter(conf, _))
+ .map(ParquetFileReader.readFooter(conf, _, ParquetMetadataConverter.NO_FILTER))
.getOrElse(
throw new IllegalArgumentException(s"Could not find Parquet metadata at path $path"))
}
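The two-argument ParquetFileReader.readFooter overload is deprecated in parquet-mr in favor of one that takes an explicit MetadataFilter, which is the warning this hunk addresses; ParquetMetadataConverter.NO_FILTER keeps the old behavior of reading the complete footer. A minimal sketch of the replacement call (the helper name readParquetFooter is illustrative):

    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.Path
    import parquet.format.converter.ParquetMetadataConverter
    import parquet.hadoop.ParquetFileReader
    import parquet.hadoop.metadata.ParquetMetadata

    // NO_FILTER requests the complete footer, matching what the deprecated
    // two-argument overload did implicitly.
    def readParquetFooter(conf: Configuration, file: Path): ParquetMetadata =
      ParquetFileReader.readFooter(conf, file, ParquetMetadataConverter.NO_FILTER)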
diff --git a/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java b/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
index bc5cd66482..2b5812159d 100644
--- a/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
+++ b/sql/core/src/test/java/org/apache/spark/sql/api/java/JavaRowSuite.java
@@ -141,6 +141,7 @@ public class JavaRowSuite {
doubleValue, stringValue, timestampValue, null);
// Complex array
+ @SuppressWarnings("unchecked")
List<Map<String, Long>> arrayOfMaps = Arrays.asList(simpleMap);
List<Row> arrayOfRows = Arrays.asList(simpleStruct);
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
index e40d034ce4..691c4b3828 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DslQuerySuite.scala
@@ -24,6 +24,8 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.dsl._
import org.apache.spark.sql.test.TestSQLContext._
+import scala.language.postfixOps
+
class DslQuerySuite extends QueryTest {
import org.apache.spark.sql.TestData._
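Since Scala 2.10, using an operator in postfix position triggers a feature warning unless scala.language.postfixOps is visible, which is why the import is added here. A minimal sketch of the construct this enables (the expression is illustrative, not taken from the suite):

    import scala.language.postfixOps

    // Without the import above, compiling with -feature warns that the
    // postfix operator toList "should be enabled by making the implicit
    // value scala.language.postfixOps visible".
    val evens = 2 to 10 by 2 toList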
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
index 074855389d..a5fe2e8da2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala
@@ -17,6 +17,8 @@
package org.apache.spark.sql.parquet
+import scala.reflect.ClassTag
+
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.Job
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}
@@ -459,11 +461,17 @@ class ParquetQuerySuite extends QueryTest with FunSuiteLike with BeforeAndAfterA
}
test("make RecordFilter for simple predicates") {
- def checkFilter[T <: FilterPredicate](predicate: Expression, defined: Boolean = true): Unit = {
+ def checkFilter[T <: FilterPredicate : ClassTag](
+ predicate: Expression,
+ defined: Boolean = true): Unit = {
val filter = ParquetFilters.createFilter(predicate)
if (defined) {
assert(filter.isDefined)
- assert(filter.get.isInstanceOf[T])
+ val tClass = implicitly[ClassTag[T]].runtimeClass
+ val filterGet = filter.get
+ assert(
+ tClass.isInstance(filterGet),
+ s"$filterGet of type ${filterGet.getClass} is not an instance of $tClass")
} else {
assert(filter.isEmpty)
}
@@ -484,7 +492,7 @@ class ParquetQuerySuite extends QueryTest with FunSuiteLike with BeforeAndAfterA
checkFilter[Operators.And]('a.int === 1 && 'a.int < 4)
checkFilter[Operators.Or]('a.int === 1 || 'a.int < 4)
- checkFilter[Operators.Not](!('a.int === 1))
+ checkFilter[Operators.NotEq[Integer]](!('a.int === 1))
checkFilter('a.int > 'b.int, defined = false)
checkFilter(('a.int > 'b.int) && ('a.int > 'b.int), defined = false)
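The original assertion used filter.get.isInstanceOf[T] with an unconstrained T, which erasure reduces to a check against Object: it compiles with an unchecked warning and passes for any defined filter. Binding T to a ClassTag keeps its runtime class available, so the assertion actually discriminates. A minimal sketch of the difference (function names are illustrative):

    import scala.reflect.ClassTag

    // T is erased: warns "abstract type T is unchecked" and returns true
    // for any non-null argument.
    def erasedCheck[T](x: Any): Boolean = x.isInstanceOf[T]

    // The ClassTag context bound captures T's runtime class, making the
    // test real.
    def taggedCheck[T: ClassTag](x: Any): Boolean =
      implicitly[ClassTag[T]].runtimeClass.isInstance(x)

    erasedCheck[String](42) // true, misleadingly
    taggedCheck[String](42) // false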
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
index abed299cd9..2a16c9d1a2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.Writable
* when "spark.sql.hive.convertMetastoreParquet" is set to true.
*/
@deprecated("No code should depend on FakeParquetHiveSerDe as it is only intended as a " +
- "placeholder in the Hive MetaStore")
+ "placeholder in the Hive MetaStore", "1.2.0")
class FakeParquetSerDe extends SerDe {
override def getObjectInspector: ObjectInspector = new ObjectInspector {
override def getCategory: Category = Category.PRIMITIVE
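Scala's @deprecated annotation takes both a message and a `since` version, and the compiler warns when the version argument is omitted, which is the build warning this change silences; "1.2.0" records the Spark version that deprecated the class. A minimal sketch of the two-argument form (class names are illustrative):

    // Use sites now see the version in the diagnostic, e.g.
    // "class OldSerDe is deprecated (since 1.2.0): use NewSerDe instead".
    @deprecated("use NewSerDe instead", "1.2.0")
    class OldSerDe

    class NewSerDe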
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
index d2d39a8c4d..808e2986d3 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/execution/UDFListListInt.java
@@ -23,25 +23,21 @@ import java.util.List;
public class UDFListListInt extends UDF {
/**
- *
* @param obj
- * SQL schema: array<struct<x: int, y: int, z: int>>
- * Java Type: List<List<Integer>>
- * @return
+ * SQL schema: array&lt;struct&lt;x: int, y: int, z: int&gt;&gt;
+ * Java Type: List&lt;List&lt;Integer&gt;&gt;
*/
+ @SuppressWarnings("unchecked")
public long evaluate(Object obj) {
if (obj == null) {
- return 0l;
+ return 0L;
}
- List<List> listList = (List<List>) obj;
+ List<List<?>> listList = (List<List<?>>) obj;
long retVal = 0;
- for (List aList : listList) {
- @SuppressWarnings("unchecked")
- List<Object> list = (List<Object>) aList;
- @SuppressWarnings("unchecked")
- Integer someInt = (Integer) list.get(1);
+ for (List<?> aList : listList) {
+ Number someInt = (Number) aList.get(1);
try {
- retVal += (long) (someInt.intValue());
+ retVal += someInt.longValue();
} catch (NullPointerException e) {
System.out.println(e);
}
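Besides the uppercase long-literal suffix (0l is easily misread as 01), the rewrite replaces two per-element suppressed casts with wildcard element types and a single widening through Number.longValue(). For comparison, a hedged Scala rendering of the same traversal, assuming rows arrive as nested java.util.Lists with the value of interest at index 1:

    import scala.collection.JavaConverters._

    // Sum the second column, widening through Number instead of casting
    // each element to Integer.
    def sumSecondColumn(rows: java.util.List[java.util.List[_]]): Long =
      rows.asScala.foldLeft(0L) { (acc, row) =>
        row.get(1) match {
          case n: java.lang.Number => acc + n.longValue()
          case _ => acc // skip nulls, like the original's catch block
        }
      }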