author    hyukjinkwon <gurwls223@gmail.com>    2016-03-09 10:48:53 -0800
committer Michael Armbrust <michael@databricks.com>    2016-03-09 10:48:53 -0800
commit    cad29a40b24a8e89f2d906e263866546f8ab6071 (patch)
tree      1b6ec01ce39ccb331bc6fd8b15c849d045f3ba37 /sql
parent    23369c3bd2c6a6d7a2b9d1396d6962022676cee7 (diff)
[SPARK-13728][SQL] Fix ORC PPD test so that pushed filters can be checked.
## What changes were proposed in this pull request?

https://issues.apache.org/jira/browse/SPARK-13728

https://github.com/apache/spark/pull/11509 changed this test to write only a single ORC file where it previously wrote 10, so the pushed-down filters could no longer skip any stripes in ORC. This PR simply repartitions the data into 10 partitions so that several files are written again and the test can pass.

## How was this patch tested?

The unit test itself, and `./dev/run_tests` for the code style check.

Author: hyukjinkwon <gurwls223@gmail.com>

Closes #11593 from HyukjinKwon/SPARK-13728.
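For reference, here is a minimal spark-shell style sketch of the idea behind the change (not the test code itself; the output path, the conf-setting call, and the sample data are illustrative assumptions): repartitioning before the ORC write yields several output files, so a pushed-down filter actually has stripes it can skip.

```scala
// Minimal sketch, assuming an existing HiveContext named `sqlContext`.
import sqlContext.implicits._

val path = "/tmp/orc-ppd-example"  // hypothetical output location

// Enable ORC predicate pushdown (the test does this via withSQLConf).
sqlContext.setConf("spark.sql.orc.filterPushdown", "true")

// Illustrative data shaped like the test's (Option[Int], Option[String]) rows.
val data = (0 until 10).map { i =>
  val maybeInt = if (i % 2 == 0) None else Some(i)
  val nullValue: Option[String] = None
  (maybeInt, nullValue)
}

// A plain write now produces a single ORC file (effectively a single stripe),
// leaving the pushed-down filter nothing to skip; repartition(10) restores
// several output files so stripe skipping is exercised again.
sqlContext.createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)

// With pushdown enabled, this predicate can be evaluated against stripe
// statistics, and the matching rows are returned as usual.
val df = sqlContext.read.orc(path)
df.where($"a" === 5).show()
```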
Diffstat (limited to 'sql')
-rw-r--r--    sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala    6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index 3c05266532..9ca07e96eb 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -348,7 +348,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
}
}
- ignore("SPARK-10623 Enable ORC PPD") {
+ test("SPARK-10623 Enable ORC PPD") {
withTempPath { dir =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
import testImplicits._
@@ -363,7 +363,9 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
val nullValue: Option[String] = None
(maybeInt, nullValue)
}
- createDataFrame(data).toDF("a", "b").write.orc(path)
+ // It needs to repartition data so that we can have several ORC files
+ // in order to skip stripes in ORC.
+ createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)
val df = sqlContext.read.orc(path)
def checkPredicate(pred: Column, answer: Seq[Row]): Unit = {