author    Sean Owen <sowen@cloudera.com>  2015-08-25 12:33:13 +0100
committer Sean Owen <sowen@cloudera.com>  2015-08-25 12:33:13 +0100
commit    69c9c177160e32a2fbc9b36ecc52156077fca6fc
tree      57345aaf19c3149038bfca5c4ddccf33d41bdd5b  /sql/hive/src/test
parent    7f1e507bf7e82bff323c5dec3c1ee044687c4173
[SPARK-9613] [CORE] Ban use of JavaConversions and migrate all existing uses to JavaConverters
Replace `JavaConversions` implicits with `JavaConverters`. Most occurrences I've seen so far are necessary conversions; a few have been avoidable. None are in critical code as far as I can see, yet.

Author: Sean Owen <sowen@cloudera.com>

Closes #8033 from srowen/SPARK-9613.
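For readers unfamiliar with the shape of this migration, here is a minimal, self-contained sketch (not taken from this commit) of the pattern it follows: JavaConversions converted collections silently via implicits, while JavaConverters makes every Java/Scala boundary crossing an explicit .asScala or .asJava call.

import scala.collection.JavaConverters._

object ConvertersSketch {
  def main(args: Array[String]): Unit = {
    val javaList = new java.util.ArrayList[String]()
    javaList.add("a")
    javaList.add("b")

    // Explicit, visible conversion at the Java/Scala boundary.
    val scalaSeq: Seq[String] = javaList.asScala
    println(scalaSeq.mkString(","))          // a,b

    // And the reverse, when a Java API expects a java.util.List.
    val backToJava: java.util.List[String] = scalaSeq.asJava
    println(backToJava.size())               // 2
  }
}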
Diffstat (limited to 'sql/hive/src/test')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala        |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala     | 29
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala     |  7
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala    |  4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala  |  8
5 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
index 0efcf80bd4..5e7b93d457 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala
@@ -17,7 +17,7 @@
package org.apache.spark.sql.hive.client
-import scala.collection.JavaConversions._
+import java.util.Collections
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.serde.serdeConstants
@@ -38,7 +38,7 @@ class FiltersSuite extends SparkFunSuite with Logging {
private val varCharCol = new FieldSchema()
varCharCol.setName("varchar")
varCharCol.setType(serdeConstants.VARCHAR_TYPE_NAME)
- testTable.setPartCols(varCharCol :: Nil)
+ testTable.setPartCols(Collections.singletonList(varCharCol))
filterTest("string filter",
(a("stringcol", StringType) > Literal("test")) :: Nil,
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
index b03a351323..9c10ffe111 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala
@@ -18,8 +18,7 @@
package org.apache.spark.sql.hive.execution
import java.io.{DataInput, DataOutput}
-import java.util
-import java.util.Properties
+import java.util.{ArrayList, Arrays, Properties}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.udf.generic.{GenericUDAFAverage, GenericUDF}
@@ -33,8 +32,6 @@ import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.util.Utils
-import scala.collection.JavaConversions._
-
case class Fields(f1: Int, f2: Int, f3: Int, f4: Int, f5: Int)
// Case classes for the custom UDF's.
@@ -326,11 +323,11 @@ class PairSerDe extends AbstractSerDe {
override def getObjectInspector: ObjectInspector = {
ObjectInspectorFactory
.getStandardStructObjectInspector(
- Seq("pair"),
- Seq(ObjectInspectorFactory.getStandardStructObjectInspector(
- Seq("id", "value"),
- Seq(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
- PrimitiveObjectInspectorFactory.javaIntObjectInspector))
+ Arrays.asList("pair"),
+ Arrays.asList(ObjectInspectorFactory.getStandardStructObjectInspector(
+ Arrays.asList("id", "value"),
+ Arrays.asList(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+ PrimitiveObjectInspectorFactory.javaIntObjectInspector))
))
}
@@ -343,10 +340,10 @@ class PairSerDe extends AbstractSerDe {
override def deserialize(value: Writable): AnyRef = {
val pair = value.asInstanceOf[TestPair]
- val row = new util.ArrayList[util.ArrayList[AnyRef]]
- row.add(new util.ArrayList[AnyRef](2))
- row(0).add(Integer.valueOf(pair.entry._1))
- row(0).add(Integer.valueOf(pair.entry._2))
+ val row = new ArrayList[ArrayList[AnyRef]]
+ row.add(new ArrayList[AnyRef](2))
+ row.get(0).add(Integer.valueOf(pair.entry._1))
+ row.get(0).add(Integer.valueOf(pair.entry._2))
row
}
@@ -355,9 +352,9 @@ class PairSerDe extends AbstractSerDe {
class PairUDF extends GenericUDF {
override def initialize(p1: Array[ObjectInspector]): ObjectInspector =
ObjectInspectorFactory.getStandardStructObjectInspector(
- Seq("id", "value"),
- Seq(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
- PrimitiveObjectInspectorFactory.javaIntObjectInspector)
+ Arrays.asList("id", "value"),
+ Arrays.asList(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+ PrimitiveObjectInspectorFactory.javaIntObjectInspector)
)
override def evaluate(args: Array[DeferredObject]): AnyRef = {
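A small, hypothetical sketch of the row(0) to row.get(0) change above: the implicit wrappers that let a java.util.ArrayList answer Scala's apply are gone, so element access goes through the Java get method.

import java.util.ArrayList

object ArrayListAccessSketch {
  def main(args: Array[String]): Unit = {
    val row = new ArrayList[ArrayList[AnyRef]]
    row.add(new ArrayList[AnyRef](2))

    // row(0) compiled only because JavaConversions wrapped the list implicitly;
    // with JavaConverters the Java API is used directly.
    row.get(0).add(Integer.valueOf(1))
    row.get(0).add(Integer.valueOf(2))
    println(row)   // [[1, 2]]
  }
}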
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
index 3bf8f3ac20..210d566745 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
@@ -17,13 +17,12 @@
package org.apache.spark.sql.hive.execution
+import scala.collection.JavaConverters._
+
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.hive.test.TestHive
-/* Implicit conversions */
-import scala.collection.JavaConversions._
-
/**
* A set of test cases that validate partition and column pruning.
*/
@@ -161,7 +160,7 @@ class PruningSuite extends HiveComparisonTest with BeforeAndAfter {
assert(actualOutputColumns === expectedOutputColumns, "Output columns mismatch")
assert(actualScannedColumns === expectedScannedColumns, "Scanned columns mismatch")
- val actualPartitions = actualPartValues.map(_.toSeq.mkString(",")).sorted
+ val actualPartitions = actualPartValues.map(_.asScala.mkString(",")).sorted
val expectedPartitions = expectedPartValues.map(_.mkString(",")).sorted
assert(actualPartitions === expectedPartitions, "Partitions selected do not match")
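A sketch of the pattern in the actualPartitions line, assuming the partition values arrive as java.util.List[String] instances (as they do from the metastore API); the explicit .asScala is what makes mkString available.

import java.util.Arrays
import scala.collection.JavaConverters._

object PartitionValuesSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical partition values, shaped like the metastore's List<String> results.
    val actualPartValues: Seq[java.util.List[String]] =
      Seq(Arrays.asList("2015", "09"), Arrays.asList("2015", "08"))

    val actualPartitions = actualPartValues.map(_.asScala.mkString(",")).sorted
    println(actualPartitions)   // List(2015,08, 2015,09)
  }
}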
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 55ecbd5b5f..1ff1d9a293 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.execution
import java.sql.{Date, Timestamp}
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.DefaultParserDialect
@@ -164,7 +164,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils {
test("show functions") {
val allFunctions =
(FunctionRegistry.builtin.listFunction().toSet[String] ++
- org.apache.hadoop.hive.ql.exec.FunctionRegistry.getFunctionNames).toList.sorted
+ org.apache.hadoop.hive.ql.exec.FunctionRegistry.getFunctionNames.asScala).toList.sorted
checkAnswer(sql("SHOW functions"), allFunctions.map(Row(_)))
checkAnswer(sql("SHOW functions abs"), Row("abs"))
checkAnswer(sql("SHOW functions 'abs'"), Row("abs"))
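A self-contained sketch of the getFunctionNames.asScala pattern above, assuming that call returns a Java Set[String] of Hive function names (modeled here with a java.util.HashSet):

import scala.collection.JavaConverters._

object FunctionNamesSketch {
  def main(args: Array[String]): Unit = {
    val builtin = Set("abs", "concat")

    // Stand-in for org.apache.hadoop.hive.ql.exec.FunctionRegistry.getFunctionNames.
    val hiveFunctionNames = new java.util.HashSet[String]()
    hiveFunctionNames.add("abs")
    hiveFunctionNames.add("ucase")

    // .asScala turns the Java set into a Scala one so ++ and toList apply.
    val allFunctions = (builtin ++ hiveFunctionNames.asScala).toList.sorted
    println(allFunctions)   // List(abs, concat, ucase)
  }
}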
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index 5bbca14bad..7966b43596 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -17,9 +17,7 @@
package org.apache.spark.sql.sources
-import java.sql.Date
-
-import scala.collection.JavaConversions._
+import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
@@ -552,7 +550,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils {
} finally {
// Hadoop 1 doesn't have `Configuration.unset`
configuration.clear()
- clonedConf.foreach(entry => configuration.set(entry.getKey, entry.getValue))
+ clonedConf.asScala.foreach(entry => configuration.set(entry.getKey, entry.getValue))
}
}
@@ -600,7 +598,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils {
} finally {
// Hadoop 1 doesn't have `Configuration.unset`
configuration.clear()
- clonedConf.foreach(entry => configuration.set(entry.getKey, entry.getValue))
+ clonedConf.asScala.foreach(entry => configuration.set(entry.getKey, entry.getValue))
sqlContext.sparkContext.conf.set("spark.speculation", speculationEnabled.toString)
}
}
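Finally, a hedged sketch of the configuration-restore pattern in the two hunks above. Hadoop's Configuration is iterable over Map.Entry[String, String], which is why .asScala is needed before foreach; a plain java.util.HashMap stands in here so the example runs without Hadoop on the classpath.

import scala.collection.JavaConverters._

object ConfRestoreSketch {
  def main(args: Array[String]): Unit = {
    // Snapshot taken before the test mutates the configuration.
    val clonedConf = new java.util.HashMap[String, String]()
    clonedConf.put("spark.speculation", "false")

    val configuration = new java.util.HashMap[String, String]()
    configuration.put("spark.speculation", "true")   // mutated during the test

    // Restore: clear, then copy the snapshot back, entry by entry.
    configuration.clear()
    clonedConf.entrySet().asScala.foreach(entry =>
      configuration.put(entry.getKey, entry.getValue))
    println(configuration)   // {spark.speculation=false}
  }
}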