author    Reynold Xin <rxin@databricks.com>    2015-07-22 21:02:19 -0700
committer Reynold Xin <rxin@databricks.com>    2015-07-22 21:02:19 -0700
commit d71a13f475df2d05a7db9e25738d1353cbc8cfc7 (patch)
tree   1727247ac19f259b88c20633ef569d37277f4486 /sql/hive
parent a721ee52705100dbd7852f80f92cde4375517e48 (diff)
[SPARK-9262][build] Treat Scala compiler warnings as errors
I've seen a few cases in the past few weeks where the compiler throws warnings that are caused by legitimate bugs. This patch upgrades warnings to errors, except deprecation warnings. Ideally we would mark deprecation warnings as errors as well; however, since the Scala compiler provides no way to suppress individual warning messages, we cannot do that (we do need to access deprecated APIs in Hadoop). Most of the work was done by ericl.

Author: Reynold Xin <rxin@databricks.com>
Author: Eric Liang <ekl@databricks.com>

Closes #7598 from rxin/warnings and squashes the following commits:

beb311b [Reynold Xin] Fixed tests.
542c031 [Reynold Xin] Fixed one more warning.
87c354a [Reynold Xin] Fixed all non-deprecation warnings.
78660ac [Eric Liang] first effort to fix warnings
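For reference only, not part of this patch: on Scala 2.13+ the policy described above (all warnings fatal except deprecations) can be expressed directly with scalac's -Wconf flag, which postdates this 2015 change. A minimal sbt sketch:

// build.sbt -- a hedged sketch, not the settings this patch used.
// -Wconf was introduced in Scala 2.13; the first matching filter wins,
// so deprecations stay warnings (Hadoop's deprecated APIs are still
// needed) while every other warning category escalates to an error.
scalacOptions += "-Wconf:cat=deprecation:w,any:e"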
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala             6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala  6
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala
index 250e73a4db..ddd5d24717 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala
@@ -41,10 +41,10 @@ private[orc] object OrcFilters extends Logging {
   private def buildSearchArgument(expression: Filter, builder: Builder): Option[Builder] = {
     def newBuilder = SearchArgument.FACTORY.newBuilder()
 
-    def isSearchableLiteral(value: Any) = value match {
+    def isSearchableLiteral(value: Any): Boolean = value match {
       // These are types recognized by the `SearchArgumentImpl.BuilderImpl.boxLiteral()` method.
-      case _: String | _: Long | _: Double | _: DateWritable | _: HiveDecimal | _: HiveChar |
-           _: HiveVarchar | _: Byte | _: Short | _: Integer | _: Float => true
+      case _: String | _: Long | _: Double | _: Byte | _: Short | _: Integer | _: Float => true
+      case _: DateWritable | _: HiveDecimal | _: HiveChar | _: HiveVarchar => true
       case _ => false
     }
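For illustration, a self-contained sketch of the whitelist-style type test above, restricted to plain JVM types (the Hive writable types are omitted so the snippet runs without Hive on the classpath). Note that primitive patterns match their boxed forms when the scrutinee is Any:

// A minimal sketch of the type-test pattern used in OrcFilters above.
def isSearchableLiteral(value: Any): Boolean = value match {
  case _: String | _: Long | _: Double | _: Byte | _: Short | _: Integer | _: Float => true
  case _ => false
}

assert(isSearchableLiteral("abc"))    // String is in the whitelist
assert(isSearchableLiteral(3.14))     // Double boxes to java.lang.Double and matches
assert(!isSearchableLiteral(Seq(1)))  // collections are not searchable literals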
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index 1cef83fd5e..2a8748d913 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -134,7 +134,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils {
   test("save()/load() - non-partitioned table - ErrorIfExists") {
     withTempDir { file =>
-      intercept[RuntimeException] {
+      intercept[AnalysisException] {
         testDF.write.format(dataSourceName).mode(SaveMode.ErrorIfExists).save(file.getCanonicalPath)
       }
     }
@@ -233,7 +233,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils {
   test("save()/load() - partitioned table - ErrorIfExists") {
     withTempDir { file =>
-      intercept[RuntimeException] {
+      intercept[AnalysisException] {
         partitionedTestDF.write
           .format(dataSourceName)
           .mode(SaveMode.ErrorIfExists)
@@ -696,7 +696,7 @@ class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
       // This should only complain that the destination directory already exists, rather than that
       // file "empty" is not a Parquet file.
       assert {
-        intercept[RuntimeException] {
+        intercept[AnalysisException] {
           df.write.format("parquet").mode(SaveMode.ErrorIfExists).save(path)
         }.getMessage.contains("already exists")
       }
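The three test changes above all rely on the same ScalaTest idiom: intercept runs a block, fails the test unless an exception of the expected type is thrown, and returns the exception for inspection. A standalone sketch of that pattern (the suite name, exception, and message below are invented for illustration):

import org.scalatest.funsuite.AnyFunSuite

// A minimal illustration of the intercept-and-inspect pattern; AnyFunSuite
// is the ScalaTest 3.1+ name for what these Spark tests use as FunSuite.
class InterceptSketch extends AnyFunSuite {
  test("intercept returns the thrown exception for inspection") {
    val e = intercept[IllegalStateException] {
      throw new IllegalStateException("path /tmp/example already exists")
    }
    assert(e.getMessage.contains("already exists"))
  }
}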