author    Sean Owen <sowen@cloudera.com>  2015-08-04 12:02:26 +0100
committer Sean Owen <sowen@cloudera.com>  2015-08-04 12:02:26 +0100
commit    76d74090d60f74412bd45487e8db6aff2e8343a2 (patch)
tree      df06579d8c0ab184fe17e1e1c611e01fcf4242a0 /sql
parent    9e952ecbce670e9b532a1c664a4d03b66e404112 (diff)
[SPARK-9534] [BUILD] Enable javac lint for scalac parity; fix a lot of build warnings, 1.5.0 edition
Enable most javac lint warnings; fix a lot of build warnings. In a few cases, touch up surrounding code in the process. I'll explain several of the changes inline in comments.

Author: Sean Owen <sowen@cloudera.com>

Closes #7862 from srowen/SPARK-9534 and squashes the following commits:

ea51618 [Sean Owen] Enable most javac lint warnings; fix a lot of build warnings. In a few cases, touch up surrounding code in the process.
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java           |  1
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala          | 16
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala         |  2
-rw-r--r--  sql/hive/src/test/java/test/org/apache/spark/sql/hive/JavaDataFrameSuite.java      |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala    |  4
6 files changed, 13 insertions, 14 deletions
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
index 2c669bb59a..7302361ab9 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
@@ -167,6 +167,7 @@ public class JavaDataFrameSuite {
for (int i = 0; i < result.length(); i++) {
Assert.assertEquals(bean.getB()[i], result.apply(i));
}
+ @SuppressWarnings("unchecked")
Seq<Integer> outputBuffer = (Seq<Integer>) first.getJavaMap(2).get("hello");
Assert.assertArrayEquals(
bean.getC().get("hello"),
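
Note on the change above: the Object returned by first.getJavaMap(2).get("hello") is cast to a generic type, which cannot be verified at runtime because of erasure, so javac's -Xlint:unchecked flags it; annotating the local declaration suppresses exactly that one cast. A minimal self-contained sketch with hypothetical names:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class UncheckedCastSketch {
      public static void main(String[] args) {
        Map<String, Object> row = new HashMap<>();
        row.put("hello", Arrays.asList(1, 2, 3));

        // Without the annotation, -Xlint:unchecked warns on this cast:
        // the Object -> List<Integer> conversion cannot be checked at runtime.
        @SuppressWarnings("unchecked")
        List<Integer> values = (List<Integer>) row.get("hello");

        System.out.println(values); // [1, 2, 3]
      }
    }

Scoping the annotation to the single declaration, rather than the method or class, keeps the rest of the code covered by the lint check.
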
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
index cfb03ff485..e34e0956d1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala
@@ -17,14 +17,12 @@
package org.apache.spark.sql.sources
+import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
-import org.apache.spark.unsafe.types.UTF8String
class DefaultSource extends SimpleScanSource
@@ -73,7 +71,7 @@ case class AllDataTypesScan(
sqlContext.sparkContext.parallelize(from to to).map { i =>
Row(
s"str_$i",
- s"str_$i".getBytes(),
+ s"str_$i".getBytes(StandardCharsets.UTF_8),
i % 2 == 0,
i.toByte,
i.toShort,
@@ -83,7 +81,7 @@ case class AllDataTypesScan(
i.toDouble,
new java.math.BigDecimal(i),
new java.math.BigDecimal(i),
- new Date(1970, 1, 1),
+ Date.valueOf("1970-01-01"),
new Timestamp(20000 + i),
s"varchar_$i",
Seq(i, i + 1),
@@ -92,7 +90,7 @@ case class AllDataTypesScan(
Map(Map(s"str_$i" -> i.toFloat) -> Row(i.toLong)),
Row(i, i.toString),
Row(Seq(s"str_$i", s"str_${i + 1}"),
- Row(Seq(new Date(1970, 1, i + 1)))))
+ Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))))
}
}
}
@@ -113,7 +111,7 @@ class TableScanSuite extends DataSourceTest {
i.toDouble,
new java.math.BigDecimal(i),
new java.math.BigDecimal(i),
- new Date(1970, 1, 1),
+ Date.valueOf("1970-01-01"),
new Timestamp(20000 + i),
s"varchar_$i",
Seq(i, i + 1),
@@ -121,7 +119,7 @@ class TableScanSuite extends DataSourceTest {
Map(i -> i.toString),
Map(Map(s"str_$i" -> i.toFloat) -> Row(i.toLong)),
Row(i, i.toString),
- Row(Seq(s"str_$i", s"str_${i + 1}"), Row(Seq(new Date(1970, 1, i + 1)))))
+ Row(Seq(s"str_$i", s"str_${i + 1}"), Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))))
}.toSeq
before {
@@ -280,7 +278,7 @@ class TableScanSuite extends DataSourceTest {
sqlTest(
"SELECT structFieldComplex.Value.`value_(2)` FROM tableWithSchema",
- (1 to 10).map(i => Row(Seq(new Date(1970, 1, i + 1)))).toSeq)
+ (1 to 10).map(i => Row(Seq(Date.valueOf(s"1970-01-${i + 1}")))).toSeq)
test("Caching") {
// Cached Query Execution
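
Note on the two substitutions above: String.getBytes() with no argument encodes using the platform default charset, so results could differ across JVMs; the StandardCharsets.UTF_8 overload is deterministic. Separately, the deprecated java.sql.Date(int, int, int) constructor counts years from 1900 and months from 0, so new Date(1970, 1, 1) actually denotes 3870-02-01; Date.valueOf("1970-01-01") means what it says. A small sketch using only the JDK:

    import java.nio.charset.StandardCharsets;
    import java.sql.Date;

    public class DateCharsetSketch {
      public static void main(String[] args) {
        // Explicit charset: identical bytes on every platform.
        byte[] utf8 = "str_1".getBytes(StandardCharsets.UTF_8);
        System.out.println(utf8.length); // 5

        // Deprecated constructor is (year - 1900, 0-based month, day):
        // new Date(1970, 1, 1) would mean 3870-02-01, not 1970-01-01.
        Date epochDay = Date.valueOf("1970-01-01");
        System.out.println(epochDay); // 1970-01-01
      }
    }
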
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
index f58bc7d7a0..a7d5a99194 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
@@ -77,7 +77,7 @@ private[hive] object IsolatedClientLoader {
// TODO: Remove copy logic.
val tempDir = Utils.createTempDir(namePrefix = s"hive-${version}")
allFiles.foreach(f => FileUtils.copyFileToDirectory(f, tempDir))
- tempDir.listFiles().map(_.toURL)
+ tempDir.listFiles().map(_.toURI.toURL)
}
private def resolvedVersions = new scala.collection.mutable.HashMap[HiveVersion, Seq[URL]]
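
Note on the change above: File.toURL() is deprecated because it does not escape characters that are illegal in URLs (spaces and the like); File.toURI().toURL() performs the escaping first. The same one-line fix appears again in AddJar below. A minimal sketch with a hypothetical path:

    import java.io.File;
    import java.net.URL;

    public class FileUrlSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical path; the file need not exist for URL conversion.
        File f = new File("/tmp/dir with spaces/hive-client.jar");
        // Deprecated f.toURL() would leave the spaces unescaped;
        // going through toURI() percent-encodes them.
        URL url = f.toURI().toURL();
        System.out.println(url); // file:/tmp/dir%20with%20spaces/hive-client.jar
      }
    }
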
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
index a47f9a4feb..05a78930af 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
@@ -88,7 +88,7 @@ case class AddJar(path: String) extends RunnableCommand {
val currentClassLoader = Utils.getContextOrSparkClassLoader
// Add jar to current context
- val jarURL = new java.io.File(path).toURL
+ val jarURL = new java.io.File(path).toURI.toURL
val newClassLoader = new java.net.URLClassLoader(Array(jarURL), currentClassLoader)
Thread.currentThread.setContextClassLoader(newClassLoader)
// We need to explicitly set the class loader associated with the conf in executionHive's
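
Note: the fix here is the same as in IsolatedClientLoader, but the URL feeds a classloader. A self-contained sketch of the surrounding pattern, with a hypothetical jar path (URLClassLoader does not verify that the jar exists at construction time):

    import java.io.File;
    import java.net.URL;
    import java.net.URLClassLoader;

    public class AddJarSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical path; the space would be mangled by the deprecated toURL().
        URL jarURL = new File("/tmp/my udfs.jar").toURI().toURL();
        ClassLoader current = Thread.currentThread().getContextClassLoader();
        // Layer a new loader over the current one and install it on the thread.
        URLClassLoader newLoader = new URLClassLoader(new URL[] { jarURL }, current);
        Thread.currentThread().setContextClassLoader(newLoader);
        System.out.println(newLoader.getURLs()[0]); // file:/tmp/my%20udfs.jar
      }
    }
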
diff --git a/sql/hive/src/test/java/test/org/apache/spark/sql/hive/JavaDataFrameSuite.java b/sql/hive/src/test/java/test/org/apache/spark/sql/hive/JavaDataFrameSuite.java
index 741a3cd31c..613b2bcc80 100644
--- a/sql/hive/src/test/java/test/org/apache/spark/sql/hive/JavaDataFrameSuite.java
+++ b/sql/hive/src/test/java/test/org/apache/spark/sql/hive/JavaDataFrameSuite.java
@@ -54,7 +54,7 @@ public class JavaDataFrameSuite {
for (int i = 0; i < 10; i++) {
jsonObjects.add("{\"key\":" + i + ", \"value\":\"str" + i + "\"}");
}
- df = hc.jsonRDD(sc.parallelize(jsonObjects));
+ df = hc.read().json(sc.parallelize(jsonObjects));
df.registerTempTable("window_table");
}
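
Note on the change above: SQLContext.jsonRDD was deprecated in Spark 1.4 in favor of the DataFrameReader obtained from read(); the two Scala jsonRDD call sites in SQLQuerySuite below get the same substitution. A hedged sketch of the replacement pattern against the 1.5-era Java API (the local master and table name are illustrative, not from the commit):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.sql.DataFrame;
    import org.apache.spark.sql.SQLContext;

    public class ReadJsonSketch {
      public static void main(String[] args) {
        JavaSparkContext sc = new JavaSparkContext("local", "ReadJsonSketch");
        SQLContext sqlContext = new SQLContext(sc);

        List<String> jsonObjects = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
          jsonObjects.add("{\"key\":" + i + ", \"value\":\"str" + i + "\"}");
        }

        // Deprecated since 1.4: sqlContext.jsonRDD(sc.parallelize(jsonObjects))
        DataFrame df = sqlContext.read().json(sc.parallelize(jsonObjects));
        df.registerTempTable("json_table");
        sqlContext.sql("SELECT key, value FROM json_table").show();

        sc.stop();
      }
    }
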
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 95c1da6e97..fb41451803 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -660,7 +660,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils {
test("resolve udtf in projection #2") {
val rdd = sparkContext.makeRDD((1 to 2).map(i => s"""{"a":[$i, ${i + 1}]}"""))
- jsonRDD(rdd).registerTempTable("data")
+ read.json(rdd).registerTempTable("data")
checkAnswer(sql("SELECT explode(map(1, 1)) FROM data LIMIT 1"), Row(1, 1) :: Nil)
checkAnswer(sql("SELECT explode(map(1, 1)) as (k1, k2) FROM data LIMIT 1"), Row(1, 1) :: Nil)
intercept[AnalysisException] {
@@ -675,7 +675,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils {
// TGF with non-TGF in project is allowed in Spark SQL, but not in Hive
test("TGF with non-TGF in projection") {
val rdd = sparkContext.makeRDD( """{"a": "1", "b":"1"}""" :: Nil)
- jsonRDD(rdd).registerTempTable("data")
+ read.json(rdd).registerTempTable("data")
checkAnswer(
sql("SELECT explode(map(a, b)) as (k1, k2), a, b FROM data"),
Row("1", "1", "1", "1") :: Nil)