author     DoingDone9 <799203320@qq.com>               2015-03-25 11:11:52 -0700
committer  Michael Armbrust <michael@databricks.com>   2015-03-25 11:11:52 -0700
commit     968408b345a0e26f7ee9105a6a0c3456cf10576a (patch)
tree       2e9c076e234b952ce04ba5c5819b736b8fdd0c3a /sql/hive
parent     982952f4aebb474823dd886dd2b18f4277bd7c30 (diff)
[SPARK-6409][SQL] It is not necessary to avoid the old Hive interface, because doing so prevents some UDAFs from working.
Spark avoids the old Hive interface, which breaks some UDAFs such as "org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage".

Author: DoingDone9 <799203320@qq.com>

Closes #5131 from DoingDone9/udaf and squashes the following commits:

9de08d0 [DoingDone9] Update HiveUdfSuite.scala
49c62dc [DoingDone9] Update hiveUdfs.scala
98b134f [DoingDone9] Merge pull request #5 from apache/master
161cae3 [DoingDone9] Merge pull request #4 from apache/master
c87e8b6 [DoingDone9] Merge pull request #3 from apache/master
cb1852d [DoingDone9] Merge pull request #2 from apache/master
c3f046f [DoingDone9] Merge pull request #1 from apache/master
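As a rough standalone illustration of the scenario this commit fixes (not part of the patch; the object name, the test_avg function name, and the use of the Spark 1.3-era TestHive context are assumptions for the sketch), registering GenericUDAFAverage as a temporary Hive function through Spark SQL looks roughly like this:

import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage
import org.apache.spark.sql.hive.test.TestHive

object UdafAverageSketch {
  def main(args: Array[String]): Unit = {
    // Register the Hive UDAF class as a temporary function, then call it from HiveQL.
    TestHive.sql(
      s"CREATE TEMPORARY FUNCTION test_avg AS '${classOf[GenericUDAFAverage].getName}'")
    // Before this patch such a query failed at runtime; with the fix it returns
    // the average of the key column of the src test table.
    TestHive.sql("SELECT test_avg(key) FROM src").collect().foreach(println)
    TestHive.sql("DROP TEMPORARY FUNCTION IF EXISTS test_avg")
  }
}

The new test added to HiveUdfSuite.scala below exercises essentially the same flow.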
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala                3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala  11
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index bfe43373d9..47305571e5 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -375,9 +375,8 @@ private[hive] case class HiveUdafFunction(
private val returnInspector = function.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors)
- // Cast required to avoid type inference selecting a deprecated Hive API.
private val buffer =
- function.getNewAggregationBuffer.asInstanceOf[GenericUDAFEvaluator.AbstractAggregationBuffer]
+ function.getNewAggregationBuffer
override def eval(input: Row): Any = unwrap(function.evaluate(buffer), returnInspector)
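The hunk above is the core of the fix: the explicit cast to the newer AbstractAggregationBuffer is dropped so that evaluators which still return the legacy AggregationBuffer interface keep working. A minimal sketch of the type relationship, assuming the Hive GenericUDAFEvaluator API bundled with Spark 1.3 (the helper object and method name are illustrative only):

import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer

object BufferCastSketch {
  // getNewAggregationBuffer is declared to return the old AggregationBuffer interface.
  // GenericUDAFAverage's evaluator hands back a buffer that only implements that
  // interface, so forcing it into the newer abstract class, as the removed line did
  // (buffer.asInstanceOf[GenericUDAFEvaluator.AbstractAggregationBuffer]), fails with
  // a ClassCastException at runtime. Keeping the declared interface type accepts both
  // old- and new-style buffers.
  def newBuffer(evaluator: GenericUDAFEvaluator): AggregationBuffer =
    evaluator.getNewAggregationBuffer
}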
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
index cb405f56bf..d7c5d1a25a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
@@ -22,7 +22,7 @@ import java.util
import java.util.Properties
import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF
+import org.apache.hadoop.hive.ql.udf.generic.{GenericUDAFAverage, GenericUDF}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory}
@@ -93,6 +93,15 @@ class HiveUdfSuite extends QueryTest {
sql("DROP TEMPORARY FUNCTION IF EXISTS testUdf")
}
+ test("SPARK-6409 UDAFAverage test") {
+ sql(s"CREATE TEMPORARY FUNCTION test_avg AS '${classOf[GenericUDAFAverage].getName}'")
+ checkAnswer(
+ sql("SELECT test_avg(1), test_avg(substr(value,5)) FROM src"),
+ Seq(Row(1.0, 260.182)))
+ sql("DROP TEMPORARY FUNCTION IF EXISTS test_avg")
+ TestHive.reset()
+ }
+
test("SPARK-2693 udaf aggregates test") {
checkAnswer(sql("SELECT percentile(key, 1) FROM src LIMIT 1"),
sql("SELECT max(key) FROM src").collect().toSeq)