| author | Reynold Xin <rxin@databricks.com> | 2015-01-13 17:16:41 -0800 |
|---|---|---|
| committer | Reynold Xin <rxin@databricks.com> | 2015-01-13 17:16:41 -0800 |
| commit | f9969098c8cb15e36c718b80c6cf5b534a6cf7c3 (patch) | |
| tree | f7335abaafcd8e044d09565f6f1f21f24d5bc544 /sql/hive/src/test | |
| parent | 14e3f114efb906937b2d7b7ac04484b2814a3b48 (diff) | |
[SPARK-5123][SQL] Reconcile Java/Scala API for data types.
Having two versions of the data type APIs (one for Java, one for Scala) requires downstream libraries to also maintain two versions of their own APIs if they want to support both Java and Scala. I took a look at the Scala version of the data type APIs; it actually works well for Java out of the box.
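As a rough illustration of why the unified API suffices (a minimal sketch; the schema and field names are made up for this example), the `org.apache.spark.sql.types` classes are ordinary JVM classes, so Java code can construct them directly:

```scala
import org.apache.spark.sql.types._

// Build a schema with the unified data type API. Java callers can
// instantiate these same classes (e.g. new StructField(...)) without
// a separate Java-only mirror of the type hierarchy.
val schema = StructType(Seq(
  StructField("name", StringType, nullable = true),
  StructField("age", IntegerType, nullable = false)))
```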
As part of the PR, I created a sql.types package and moved all type definitions there. I then removed the Java-specific data type API along with a lot of the conversion code.
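For most callers the migration is a one-line import change, as the test diffs below show:

```scala
// Before this patch (internal Catalyst package, Scala-oriented):
//   import org.apache.spark.sql.catalyst.types._
//   import org.apache.spark.sql.catalyst.types.decimal.Decimal
// After this patch (shared public package):
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.decimal.Decimal
```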
This subsumes https://github.com/apache/spark/pull/3925
Author: Reynold Xin <rxin@databricks.com>
Closes #3958 from rxin/SPARK-5123-datatype-2 and squashes the following commits:
66505cc [Reynold Xin] [SPARK-5123] Expose only one version of the data type APIs (i.e. remove the Java-specific API).
Diffstat (limited to 'sql/hive/src/test')
4 files changed, 12 insertions, 11 deletions
```diff
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
index f90d360791..dc23d9a101 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
@@ -17,22 +17,21 @@
 
 package org.apache.spark.sql.hive
 
-import java.sql.Date
 import java.util
+import java.sql.Date
 import java.util.{Locale, TimeZone}
 
-import org.apache.hadoop.hive.serde2.io.DoubleWritable
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
-import org.apache.spark.sql.catalyst.types._
-import org.apache.spark.sql.catalyst.types.decimal.Decimal
-import org.scalatest.FunSuite
-
 import org.apache.hadoop.hive.ql.udf.UDAFPercentile
-import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, StructObjectInspector, ObjectInspectorFactory}
+import org.apache.hadoop.hive.serde2.io.DoubleWritable
+import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory, StructObjectInspector}
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
 import org.apache.hadoop.io.LongWritable
+import org.scalatest.FunSuite
 
 import org.apache.spark.sql.catalyst.expressions.{Literal, Row}
+import org.apache.spark.sql.types._
+import org.apache.spark.sql.types.decimal.Decimal
 
 class HiveInspectorSuite extends FunSuite with HiveInspectors {
   test("Test wrap SettableStructObjectInspector") {
@@ -93,7 +92,6 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
     val row = data.map(_.eval(null))
     val dataTypes = data.map(_.dataType)
 
-    import scala.collection.JavaConversions._
     def toWritableInspector(dataType: DataType): ObjectInspector = dataType match {
       case ArrayType(tpe, _) =>
         ObjectInspectorFactory.getStandardListObjectInspector(toWritableInspector(tpe))
@@ -115,7 +113,8 @@ class HiveInspectorSuite extends FunSuite with HiveInspectors {
       case DecimalType() => PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector
       case StructType(fields) =>
         ObjectInspectorFactory.getStandardStructObjectInspector(
-          fields.map(f => f.name), fields.map(f => toWritableInspector(f.dataType)))
+          java.util.Arrays.asList(fields.map(f => f.name) :_*),
+          java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) :_*))
     }
 
     def checkDataType(dt1: Seq[DataType], dt2: Seq[DataType]): Unit = {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
index 041a36f129..fa6905f31f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala
@@ -19,9 +19,9 @@ package org.apache.spark.sql.hive
 
 import org.scalatest.FunSuite
 
-import org.apache.spark.sql.catalyst.types.StructType
 import org.apache.spark.sql.sources.DDLParser
 import org.apache.spark.sql.test.ExamplePointUDT
+import org.apache.spark.sql.types.StructType
 
 class HiveMetastoreCatalogSuite extends FunSuite {
 
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index fb481edc85..7cfb875e05 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -22,6 +22,7 @@ import java.io.File
 import com.google.common.io.Files
 import org.apache.spark.sql.{QueryTest, _}
 import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.types._
 
 /* Implicits */
 import org.apache.spark.sql.hive.test.TestHive._
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
index ec9ebb4a77..8ff833e0d6 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala
@@ -25,6 +25,7 @@ import org.apache.commons.io.FileUtils
 
 import org.apache.spark.sql._
 import org.apache.spark.util.Utils
+import org.apache.spark.sql.types._
 
 /* Implicits */
 import org.apache.spark.sql.hive.test.TestHive._
```
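Aside from the import moves, the HiveInspectorSuite change replaces the implicit `scala.collection.JavaConversions._` conversions with explicit ones. A minimal standalone sketch of the same pattern (the values here are illustrative):

```scala
import java.util

// java.util.Arrays.asList takes Java varargs, so a Scala Seq is
// splatted with `: _*` to pass its elements as individual arguments,
// yielding a java.util.List without any implicit conversion in scope.
val fieldNames: Seq[String] = Seq("id", "value")
val javaNames: util.List[String] = util.Arrays.asList(fieldNames: _*)
```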