Diffstat (limited to 'sql/hive/src')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala                    |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala  |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala                       |  6
-rw-r--r--  sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d   |  1
-rw-r--r--  sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2      |  1
-rw-r--r--  sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73      |  1
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala       | 11
8 files changed, 21 insertions, 5 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 84d43eaeea..f0a61270da 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -231,7 +231,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
HiveTableScans,
DataSinks,
Scripts,
- PartialAggregation,
+ HashAggregation,
LeftSemiJoin,
HashJoin,
BasicOperators,
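
[Editor's note: the hunk above swaps the PartialAggregation planner strategy for the newer HashAggregation strategy in HiveContext's list of query planning strategies. For orientation, a Catalyst strategy of this era is a rule that pattern-matches a logical plan and proposes candidate physical operators. A minimal sketch of that shape, with an illustrative name and match body rather than the actual HashAggregation implementation:

  object ExampleAggregation extends Strategy {
    // Match a logical Aggregate and plan it as a single hash-based
    // physical Aggregate; return Nil when the rule does not apply.
    def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
      case logical.Aggregate(groupingExprs, aggregateExprs, child) =>
        execution.Aggregate(
          partial = false, groupingExprs, aggregateExprs, planLater(child)) :: Nil
      case _ => Nil
    }
  }
]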
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index c2b0b00aa5..39033bdeac 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -131,7 +131,7 @@ case class InsertIntoHiveTable(
conf,
SparkHiveHadoopWriter.createPathFromString(fileSinkConf.getDirName, conf))
- logger.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
+ log.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
val writer = new SparkHiveHadoopWriter(conf, fileSinkConf)
writer.preSetup()
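
[Editor's note: the logger-to-log rename above follows Spark's Logging trait, which exposes the underlying logger through a log method. A minimal sketch of the convention; ExampleWriter is a hypothetical class, not from this commit:

  import org.apache.spark.Logging

  class ExampleWriter extends Logging {
    def save(valueClass: Class[_]): Unit =
      log.debug("Saving as hadoop file of type " + valueClass.getSimpleName)
  }
]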
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index 8258ee5fef..0c8f676e9c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -67,7 +67,7 @@ case class ScriptTransformation(
}
}
readerThread.start()
- val outputProjection = new Projection(input)
+ val outputProjection = new InterpretedProjection(input, child.output)
iter
.map(outputProjection)
// TODO: Use SerDe
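
[Editor's note: here the old Projection(input) becomes an InterpretedProjection that also receives child.output, so attribute references in the projection expressions are bound against the child operator's schema before row-by-row evaluation. A rough, self-contained sketch of the two-argument form; the attribute name and data are made up for illustration:

  import org.apache.spark.sql.catalyst.expressions._
  import org.apache.spark.sql.catalyst.types.StringType

  // One hypothetical input column named "line".
  val inputSchema = Seq(AttributeReference("line", StringType)())
  // Binding against inputSchema happens inside the projection's constructor.
  val projection = new InterpretedProjection(inputSchema, inputSchema)
  val projected = projection(new GenericRow(Array[Any]("hello")))
]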
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 057eb60a02..7582b4743d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -251,8 +251,10 @@ private[hive] case class HiveGenericUdtf(
@transient
protected lazy val function: GenericUDTF = createFunction()
+ @transient
protected lazy val inputInspectors = children.map(_.dataType).map(toInspector)
+ @transient
protected lazy val outputInspectors = {
val structInspector = function.initialize(inputInspectors.toArray)
structInspector.getAllStructFieldRefs.map(_.getFieldObjectInspector)
@@ -278,7 +280,7 @@ private[hive] case class HiveGenericUdtf(
override def eval(input: Row): TraversableOnce[Row] = {
outputInspectors // Make sure initialized.
- val inputProjection = new Projection(children)
+ val inputProjection = new InterpretedProjection(children)
val collector = new UDTFCollector
function.setCollector(collector)
@@ -332,7 +334,7 @@ private[hive] case class HiveUdafFunction(
override def eval(input: Row): Any = unwrapData(function.evaluate(buffer), returnInspector)
@transient
- val inputProjection = new Projection(exprs)
+ val inputProjection = new InterpretedProjection(exprs)
def update(input: Row): Unit = {
val inputs = inputProjection(input).asInstanceOf[Seq[AnyRef]].toArray
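
[Editor's note: two themes in this file. The inspector fields gain @transient so the lazily built Hive ObjectInspectors are excluded from the serialized task closure and rebuilt on first use on each executor, and the remaining Projection call sites move to InterpretedProjection. The @transient lazy val pattern in isolation, as a generic sketch rather than Spark code:

  // The field is skipped during Java serialization and recomputed
  // lazily after deserialization, so non-serializable state is safe
  // to hold in an otherwise Serializable class.
  class Holder(pattern: String) extends Serializable {
    @transient lazy val compiled: java.util.regex.Pattern =
      java.util.regex.Pattern.compile(pattern)  // stand-in for an ObjectInspector
  }
]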
diff --git a/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d b/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d
new file mode 100644
index 0000000000..00750edc07
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/case else null-0-8ef2f741400830ef889a9dd0c817fe3d
@@ -0,0 +1 @@
+3
diff --git a/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2 b/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2
new file mode 100644
index 0000000000..00750edc07
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/double case-0-f513687d17dcb18546fefa75000a52f2
@@ -0,0 +1 @@
+3
diff --git a/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73 b/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/sql/hive/src/test/resources/golden/single case-0-c264e319c52f1840a32959d552b99e73
@@ -0,0 +1 @@
+1
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index aadfd2e900..89cc589fb8 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -19,6 +19,8 @@ package org.apache.spark.sql.hive.execution
import scala.util.Try
+import org.apache.spark.sql.{SchemaRDD, Row}
+import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.hive.test.TestHive._
import org.apache.spark.sql.{Row, SchemaRDD}
@@ -30,6 +32,15 @@ case class TestData(a: Int, b: String)
*/
class HiveQuerySuite extends HiveComparisonTest {
+ createQueryTest("single case",
+ """SELECT case when true then 1 else 2 end FROM src LIMIT 1""")
+
+ createQueryTest("double case",
+ """SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else 2 end FROM src LIMIT 1""")
+
+ createQueryTest("case else null",
+ """SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else null end FROM src LIMIT 1""")
+
createQueryTest("having no references",
"SELECT key FROM src GROUP BY key HAVING COUNT(*) > 1")