author     Cheng Lian <lian@databricks.com>          2014-11-16 14:26:41 -0800
committer  Michael Armbrust <michael@databricks.com> 2014-11-16 14:26:55 -0800
commit     8b83a34fa310f4e6802c5ef32dcc737f6fb4903f
tree       7b47a9eaa8bc18d74c39de386199183d95089e0f /sql/hive-thriftserver/v0.13.1/src
parent     2200de6352fdc1000908554003912303edc3d160
[SPARK-4309][SPARK-4407][SQL] Date type support for Thrift server, and fixes for complex types
SPARK-4407 was detected while working on SPARK-4309. Merged these two into a single PR since the 1.2.0 RC is approaching.

Author: Cheng Lian <lian@databricks.com>

Closes #3178 from liancheng/date-for-thriftserver and squashes the following commits:

6f71d0b [Cheng Lian] Makes toHiveString static
26fa955 [Cheng Lian] Fixes complex type support in Hive 0.13.1 shim
a92882a [Cheng Lian] Updates HiveShim for 0.13.1
73f442b [Cheng Lian] Adds Date support for HiveThriftServer2 (Hive 0.12.0)

(cherry picked from commit cb6bd83a91d9b4a227dc6467255231869c1820e2)
Signed-off-by: Michael Armbrust <michael@databricks.com>
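For context, the conversion rule this patch settles on can be sketched as a standalone helper: primitive SQL types go through typed Row accessors, Date and Timestamp pass through as java.sql values, and binary plus complex types are rendered to strings with HiveContext.toHiveString (made static by this patch and called with a (value, dataType) pair, as the call site in the diff below shows). The helper name formatColumnValue and the enclosing object are illustrative only, and the imports assume the 1.2-era package layout:

import java.sql.{Date, Timestamp}

import org.apache.spark.sql.catalyst.types._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.{Row => SparkRow}

// Hypothetical distillation of the patched addNonNullColumnValue logic.
object ColumnValueSketch {
  def formatColumnValue(row: SparkRow, dataType: DataType, ordinal: Int): Any =
    dataType match {
      case StringType    => row.getString(ordinal)         // typed accessor, no cast
      case IntegerType   => row.getInt(ordinal)
      case DateType      => row.getAs[Date](ordinal)       // new in this patch
      case TimestampType => row.getAs[Timestamp](ordinal)
      case BinaryType | _: ArrayType | _: StructType | _: MapType =>
        // Complex values reach the Thrift client as Hive-formatted strings.
        HiveContext.toHiveString((row.get(ordinal), dataType))
      case _             => row.get(ordinal)               // remaining primitives
    }
}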
Diffstat (limited to 'sql/hive-thriftserver/v0.13.1/src')
-rw-r--r--  sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala | 29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index f2ceba8282..23b182dd61 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.hive.thriftserver

import java.security.PrivilegedExceptionAction
-import java.sql.Timestamp
+import java.sql.{Date, Timestamp}
import java.util.concurrent.Future
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
@@ -113,7 +113,7 @@ private[hive] class SparkExecuteStatementOperation(
  def addNonNullColumnValue(from: SparkRow, to: ArrayBuffer[Any], ordinal: Int) {
    dataTypes(ordinal) match {
      case StringType =>
-        to += from.get(ordinal).asInstanceOf[String]
+        to += from.getString(ordinal)
      case IntegerType =>
        to += from.getInt(ordinal)
      case BooleanType =>
@@ -123,23 +123,20 @@ private[hive] class SparkExecuteStatementOperation(
      case FloatType =>
        to += from.getFloat(ordinal)
      case DecimalType() =>
-        to += from.get(ordinal).asInstanceOf[BigDecimal].bigDecimal
+        to += from.getAs[BigDecimal](ordinal).bigDecimal
      case LongType =>
        to += from.getLong(ordinal)
      case ByteType =>
        to += from.getByte(ordinal)
      case ShortType =>
        to += from.getShort(ordinal)
+      case DateType =>
+        to += from.getAs[Date](ordinal)
      case TimestampType =>
-        to += from.get(ordinal).asInstanceOf[Timestamp]
-      case BinaryType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: ArrayType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: StructType =>
-        to += from.get(ordinal).asInstanceOf[String]
-      case _: MapType =>
-        to += from.get(ordinal).asInstanceOf[String]
+        to += from.getAs[Timestamp](ordinal)
+      case BinaryType | _: ArrayType | _: StructType | _: MapType =>
+        val hiveString = HiveContext.toHiveString((from.get(ordinal), dataTypes(ordinal)))
+        to += hiveString
    }
  }
@@ -147,9 +144,9 @@ private[hive] class SparkExecuteStatementOperation(
    validateDefaultFetchOrientation(order)
    assertState(OperationState.FINISHED)
    setHasResultSet(true)
-    val reultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
+    val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
    if (!iter.hasNext) {
-      reultRowSet
+      resultRowSet
    } else {
      // maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
      val maxRows = maxRowsL.toInt
@@ -166,10 +163,10 @@ private[hive] class SparkExecuteStatementOperation(
          }
          curCol += 1
        }
-        reultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
+        resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
        curRow += 1
      }
-      reultRowSet
+      resultRowSet
    }
  }
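With this change in place, a DATE column fetched through HiveThriftServer2 reaches JDBC clients as a real java.sql.Date instead of falling through the type match. A minimal smoke-test sketch against a locally running Thrift server, assuming the Hive JDBC driver is on the classpath (the URL and the literal query are placeholders):

import java.sql.DriverManager

object DateFetchSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder URL; assumes HiveThriftServer2 on the default port 10000.
    val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")
    try {
      val stmt = conn.createStatement()
      val rs = stmt.executeQuery("SELECT CAST('2014-11-16' AS DATE)")
      while (rs.next()) {
        // Arrives via the DateType arm added in addNonNullColumnValue.
        println(rs.getDate(1))
      }
    } finally {
      conn.close()
    }
  }
}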