author    Cheng Lian <lian@databricks.com>  2014-11-17 16:31:05 -0800
committer Michael Armbrust <michael@databricks.com>  2014-11-17 16:31:15 -0800
commit    ff2fe56004209ffe8eb150a56cbd5dccfb8d774b (patch)
tree      1932580590dc974f3c878f5dfc0af4f4049c9cc4 /sql/hive-thriftserver/v0.13.1
parent    7d0442652ed090783af6f2614c37a9522c46dc95 (diff)
[SPARK-4309][SPARK-4407][SQL] Date type support for Thrift server, and fixes for complex types
This PR is exactly the same as #3178 except it reverts the `FileStatus.isDir` to `FileStatus.isDirectory` change, since `isDirectory` doesn't compile with Hadoop 1.

Author: Cheng Lian <lian@databricks.com>

Closes #3298 from liancheng/date-for-thriftserver and squashes the following commits:

866037e [Cheng Lian] Reverts isDirectory to isDir (it breaks the Hadoop 1 profile)
6f71d0b [Cheng Lian] Makes toHiveString static
26fa955 [Cheng Lian] Fixes complex type support in Hive 0.13.1 shim
a92882a [Cheng Lian] Updates HiveShim for 0.13.1
73f442b [Cheng Lian] Adds Date support for HiveThriftServer2 (Hive 0.12.0)

(cherry picked from commit 6b7f2f753d16ff038881772f1958e3f4fd5597a7)
Signed-off-by: Michael Armbrust <michael@databricks.com>
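To make the intent of the patch concrete, here is a minimal, self-contained Scala sketch of the conversion strategy it adopts in addNonNullColumnValue: primitive values pass through as-is, Date and Timestamp values are returned as their java.sql counterparts, and complex values (binary, arrays, structs, maps) are all routed through a single shared string formatter instead of per-type casts. The DataType hierarchy, toHiveString, and ThriftConversionSketch names below are simplified stand-ins for illustration, not the actual Spark sources.

import java.sql.{Date, Timestamp}

object ThriftConversionSketch {
  sealed trait DataType
  case object StringType    extends DataType
  case object IntegerType   extends DataType
  case object DateType      extends DataType
  case object TimestampType extends DataType
  case class  ArrayType(element: DataType) extends DataType

  // Stand-in for HiveContext.toHiveString: formats a (value, type) pair the
  // way the Hive CLI would print it, recursing into array elements.
  def toHiveString(pair: (Any, DataType)): String = pair match {
    case (seq: Seq[_], ArrayType(elem)) =>
      seq.map(v => toHiveString((v, elem))).mkString("[", ",", "]")
    case (other, _) => other.toString
  }

  def convertValue(value: Any, dataType: DataType): Any = dataType match {
    case StringType | IntegerType => value                          // primitives pass through
    case DateType                 => value.asInstanceOf[Date]       // new in this patch
    case TimestampType            => value.asInstanceOf[Timestamp]
    case complex                  => toHiveString((value, complex)) // complex types become strings
  }

  def main(args: Array[String]): Unit = {
    // Before this fix, an array value hit a bare asInstanceOf[String] cast
    // and failed at runtime; now it is rendered as "[1,2,3]".
    println(convertValue(Seq(1, 2, 3), ArrayType(IntegerType)))
    println(convertValue(Date.valueOf("2014-11-17"), DateType))
  }
}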
Diffstat (limited to 'sql/hive-thriftserver/v0.13.1')
-rw-r--r--  sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala  29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index f2ceba8282..23b182dd61 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.hive.thriftserver
import java.security.PrivilegedExceptionAction
-import java.sql.Timestamp
+import java.sql.{Date, Timestamp}
import java.util.concurrent.Future
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
@@ -113,7 +113,7 @@ private[hive] class SparkExecuteStatementOperation(
def addNonNullColumnValue(from: SparkRow, to: ArrayBuffer[Any], ordinal: Int) {
dataTypes(ordinal) match {
case StringType =>
- to += from.get(ordinal).asInstanceOf[String]
+ to += from.getString(ordinal)
case IntegerType =>
to += from.getInt(ordinal)
case BooleanType =>
@@ -123,23 +123,20 @@ private[hive] class SparkExecuteStatementOperation(
case FloatType =>
to += from.getFloat(ordinal)
case DecimalType() =>
- to += from.get(ordinal).asInstanceOf[BigDecimal].bigDecimal
+ to += from.getAs[BigDecimal](ordinal).bigDecimal
case LongType =>
to += from.getLong(ordinal)
case ByteType =>
to += from.getByte(ordinal)
case ShortType =>
to += from.getShort(ordinal)
+ case DateType =>
+ to += from.getAs[Date](ordinal)
case TimestampType =>
- to += from.get(ordinal).asInstanceOf[Timestamp]
- case BinaryType =>
- to += from.get(ordinal).asInstanceOf[String]
- case _: ArrayType =>
- to += from.get(ordinal).asInstanceOf[String]
- case _: StructType =>
- to += from.get(ordinal).asInstanceOf[String]
- case _: MapType =>
- to += from.get(ordinal).asInstanceOf[String]
+ to += from.getAs[Timestamp](ordinal)
+ case BinaryType | _: ArrayType | _: StructType | _: MapType =>
+ val hiveString = HiveContext.toHiveString((from.get(ordinal), dataTypes(ordinal)))
+ to += hiveString
}
}
@@ -147,9 +144,9 @@ private[hive] class SparkExecuteStatementOperation(
validateDefaultFetchOrientation(order)
assertState(OperationState.FINISHED)
setHasResultSet(true)
- val reultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
+ val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
if (!iter.hasNext) {
- reultRowSet
+ resultRowSet
} else {
// maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
val maxRows = maxRowsL.toInt
@@ -166,10 +163,10 @@ private[hive] class SparkExecuteStatementOperation(
}
curCol += 1
}
- reultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
+ resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
curRow += 1
}
- reultRowSet
+ resultRowSet
}
}