author     Michael Armbrust <michael@databricks.com>    2014-11-16 15:05:04 -0800
committer  Michael Armbrust <michael@databricks.com>    2014-11-16 15:05:08 -0800
commit     45ce3273cb618d14ec4d20c4c95699634b951086 (patch)
tree       100aa5c6368cd63bb896e36f3bfca5ed5e29279b /sql/hive-thriftserver/v0.13.1
parent     cb6bd83a91d9b4a227dc6467255231869c1820e2 (diff)
Revert "[SPARK-4309][SPARK-4407][SQL] Date type support for Thrift server, and fixes for complex types"
Author: Michael Armbrust <michael@databricks.com>

Closes #3292 from marmbrus/revert4309 and squashes the following commits:

808e96e [Michael Armbrust] Revert "[SPARK-4309][SPARK-4407][SQL] Date type support for Thrift server, and fixes for complex types"
Diffstat (limited to 'sql/hive-thriftserver/v0.13.1')
-rw-r--r--  sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala  29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
index 3c7f62af45..a642478d08 100644
--- a/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
+++ b/sql/hive-thriftserver/v0.13.1/src/main/scala/org/apache/spark/sql/hive/thriftserver/Shim13.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.hive.thriftserver
import java.security.PrivilegedExceptionAction
-import java.sql.{Date, Timestamp}
+import java.sql.Timestamp
import java.util.concurrent.Future
import java.util.{ArrayList => JArrayList, List => JList, Map => JMap}
@@ -113,7 +113,7 @@ private[hive] class SparkExecuteStatementOperation(
def addNonNullColumnValue(from: SparkRow, to: ArrayBuffer[Any], ordinal: Int) {
dataTypes(ordinal) match {
case StringType =>
- to += from.getString(ordinal)
+ to += from.get(ordinal).asInstanceOf[String]
case IntegerType =>
to += from.getInt(ordinal)
case BooleanType =>
@@ -123,20 +123,23 @@ private[hive] class SparkExecuteStatementOperation(
case FloatType =>
to += from.getFloat(ordinal)
case DecimalType() =>
- to += from.getAs[BigDecimal](ordinal).bigDecimal
+ to += from.get(ordinal).asInstanceOf[BigDecimal].bigDecimal
case LongType =>
to += from.getLong(ordinal)
case ByteType =>
to += from.getByte(ordinal)
case ShortType =>
to += from.getShort(ordinal)
- case DateType =>
- to += from.getAs[Date](ordinal)
case TimestampType =>
- to += from.getAs[Timestamp](ordinal)
- case BinaryType | _: ArrayType | _: StructType | _: MapType =>
- val hiveString = HiveContext.toHiveString((from.get(ordinal), dataTypes(ordinal)))
- to += hiveString
+ to += from.get(ordinal).asInstanceOf[Timestamp]
+ case BinaryType =>
+ to += from.get(ordinal).asInstanceOf[String]
+ case _: ArrayType =>
+ to += from.get(ordinal).asInstanceOf[String]
+ case _: StructType =>
+ to += from.get(ordinal).asInstanceOf[String]
+ case _: MapType =>
+ to += from.get(ordinal).asInstanceOf[String]
}
}
@@ -144,9 +147,9 @@ private[hive] class SparkExecuteStatementOperation(
validateDefaultFetchOrientation(order)
assertState(OperationState.FINISHED)
setHasResultSet(true)
- val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
+ val reultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion)
if (!iter.hasNext) {
- resultRowSet
+ reultRowSet
} else {
// maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int
val maxRows = maxRowsL.toInt
@@ -163,10 +166,10 @@ private[hive] class SparkExecuteStatementOperation(
}
curCol += 1
}
- resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
+ reultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]])
curRow += 1
}
- resultRowSet
+ reultRowSet
}
}
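
A note on the pattern this revert restores: addNonNullColumnValue dispatches on the column's declared data type and coerces each value to the plain Java object that HiveServer2's RowSet expects, casting with asInstanceOf rather than using typed getters, and treating complex types (arrays, structs, maps) as values already rendered to strings. The standalone sketch below illustrates that per-type dispatch; SimpleRow and ColType are hypothetical stand-ins for Spark's Row and Catalyst's DataType hierarchy, so this is an illustration of the technique, not Spark's actual implementation.

import java.sql.Timestamp
import scala.collection.mutable.ArrayBuffer

// Hypothetical stand-in for Catalyst's DataType hierarchy.
sealed trait ColType
case object StrType extends ColType
case object IntType extends ColType
case object TsType extends ColType
case object ComplexType extends ColType // array/struct/map, assumed pre-rendered as a string

// Hypothetical stand-in for Spark's Row: values held as Any, read via get(ordinal).
final case class SimpleRow(values: IndexedSeq[Any]) {
  def get(ordinal: Int): Any = values(ordinal)
}

// Mirrors the reverted code's shape: match on the column type and cast the
// untyped value with asInstanceOf, appending the coerced result to the buffer.
def addNonNullColumnValue(types: IndexedSeq[ColType],
                          from: SimpleRow,
                          to: ArrayBuffer[Any],
                          ordinal: Int): Unit = types(ordinal) match {
  case StrType     => to += from.get(ordinal).asInstanceOf[String]
  case IntType     => to += from.get(ordinal).asInstanceOf[Int]
  case TsType      => to += from.get(ordinal).asInstanceOf[Timestamp]
  case ComplexType => to += from.get(ordinal).asInstanceOf[String]
}

// Usage: coerce one row's columns into the plain objects a RowSet-style
// consumer would accept.
val colTypes = Vector[ColType](StrType, IntType, TsType, ComplexType)
val row = SimpleRow(Vector("alice", 42, new Timestamp(0L), "[1,2,3]"))
val out = ArrayBuffer.empty[Any]
colTypes.indices.foreach(i => addNonNullColumnValue(colTypes, row, out, i))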