From ff2fe56004209ffe8eb150a56cbd5dccfb8d774b Mon Sep 17 00:00:00 2001
From: Cheng Lian
Date: Mon, 17 Nov 2014 16:31:05 -0800
Subject: [SPARK-4309][SPARK-4407][SQL] Date type support for Thrift server, and fixes for complex types

This PR is exactly the same as #3178, except that it reverts the
`FileStatus.isDir` to `FileStatus.isDirectory` change, since the latter
doesn't compile with Hadoop 1.

[Review on Reviewable](https://reviewable.io/reviews/apache/spark/3298)

Author: Cheng Lian

Closes #3298 from liancheng/date-for-thriftserver and squashes the following commits:

866037e [Cheng Lian] Reverts isDirectory to isDir (it breaks the Hadoop 1 profile)
6f71d0b [Cheng Lian] Makes toHiveString static
26fa955 [Cheng Lian] Fixes complex type support in Hive 0.13.1 shim
a92882a [Cheng Lian] Updates HiveShim for 0.13.1
73f442b [Cheng Lian] Adds Date support for HiveThriftServer2 (Hive 0.12.0)

(cherry picked from commit 6b7f2f753d16ff038881772f1958e3f4fd5597a7)
Signed-off-by: Michael Armbrust
---
 .../spark/sql/hive/thriftserver/Shim13.scala | 29 ++++++++++------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index f2ceba8282..23b182dd61 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.sql.hive.thriftserver

 import java.security.PrivilegedExceptionAction
-import java.sql.Timestamp
+import java.sql.{Date, Timestamp}
 import java.util.concurrent.Future
 import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
@@ -113,7 +113,7 @@ private[hive] class SparkExecuteStatementOperation(
   def addNonNullColumnValue(from: SparkRow, to: ArrayBuffer[Any], ordinal: Int) {
     dataTypes(ordinal) match {
       case StringType =>
-        to += from.get(ordinal).asInstanceOf[String]
+        to += from.getString(ordinal)
       case IntegerType =>
         to += from.getInt(ordinal)
       case BooleanType =>
@@ -123,23 +123,20 @@
       case FloatType =>
         to += from.getFloat(ordinal)
       case DecimalType() =>
-        to += from.get(ordinal).asInstanceOf[BigDecimal].bigDecimal
+        to += from.getAs[BigDecimal](ordinal).bigDecimal
       case LongType =>
         to += from.getLong(ordinal)
       case ByteType =>
         to += from.getByte(ordinal)
       case ShortType =>
         to += from.getShort(ordinal)
+      case DateType =>
+        to += from.getAs[Date](ordinal)
       case TimestampType =>
-        to += from.get(ordinal).asInstanceOf[Timestamp]
-      case BinaryType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: ArrayType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: StructType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: MapType =>
-        to += from.get(ordinal).asInstanceOf[String]
+        to += from.getAs[Timestamp](ordinal)
+      case BinaryType | _: ArrayType | _: StructType | _: MapType =>
+        val hiveString = HiveContext.toHiveString((from.get(ordinal), dataTypes(ordinal)))
+        to += hiveString
     }
   }
@@ -147,9 +144,9 @@
     validateDefaultFetchOrientation(order)
     assertState(OperationState.FINISHED)
     setHasResultSet(true)
-    val reultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
+    val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
     if (!iter.hasNext) {
-      reultRowSet
+      resultRowSet
     } else {
       // maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
       val maxRows = maxRowsL.toInt
@@ -166,10 +163,10 @@
         }
         curCol += 1
       }
-      reultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
+      resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
       curRow += 1
     }
-    reultRowSet
+    resultRowSet
   }
 }
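The heart of this patch is the per-type dispatch in `addNonNullColumnValue`: primitive columns use typed `Row` getters, the new `DateType` and existing `TimestampType` cases come back as `java.sql` values via `getAs[T]` (replacing the unchecked `asInstanceOf` casts), and binary/complex columns are flattened to one Hive-formatted string, since a Thrift column cell carries a scalar, not a nested structure. The sketch below illustrates the same pattern outside the shim. It is not the shim's code: it assumes a modern Spark classpath where the type objects live in `org.apache.spark.sql.types`, and `toHiveLikeString` and `columnValue` are hypothetical names standing in for Spark's internal `HiveContext.toHiveString` and the shim's dispatch.

```scala
import java.sql.{Date, Timestamp}

import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

object ColumnValueSketch {
  // Hypothetical stand-in for Spark's internal HiveContext.toHiveString, which
  // renders a (value, dataType) pair the way Hive prints it (arrays as [...],
  // maps as {...}, "NULL" for null). This simplified version just delegates
  // to String.valueOf.
  private def toHiveLikeString(valueAndType: (Any, DataType)): String =
    String.valueOf(valueAndType._1)

  // Mirrors the patch's dispatch: typed getters for primitives, getAs for
  // Date/Timestamp, and a single Hive-formatted string for binary and
  // complex columns.
  def columnValue(row: Row, ordinal: Int, dataType: DataType): Any = dataType match {
    case StringType    => row.getString(ordinal)
    case IntegerType   => row.getInt(ordinal)
    case BooleanType   => row.getBoolean(ordinal)
    case DateType      => row.getAs[Date](ordinal)      // the case this patch adds
    case TimestampType => row.getAs[Timestamp](ordinal)
    case BinaryType | _: ArrayType | _: StructType | _: MapType =>
      toHiveLikeString((row.get(ordinal), dataType))
    case _ => row.get(ordinal)                          // remaining primitives
  }
}
```

With this shape, a client fetching a `DATE` column over the Thrift protocol receives a proper date value rather than a failed cast, and a `map<string,int>` column arrives as one printable string, which is also what the 0.13.1 shim's `RowSet` ultimately stores per cell.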