author    Reynold Xin <rxin@databricks.com>    2016-07-07 18:09:18 +0800
committer Cheng Lian <lian@databricks.com>    2016-07-07 18:09:18 +0800
commit    986b2514013ed9ebab526f2cf3dc714cc9e480bf (patch)
tree      2c1fbd3515c18a50702bb67399339163ecd42196 /sql/core/src/test/scala/org
parent    ab05db0b48f395543cd7d91e2ad9dd760516868b (diff)
[SPARK-16400][SQL] Remove InSet filter pushdown from Parquet
## What changes were proposed in this pull request?

This patch removes InSet filter pushdown from the Parquet data source, since row-based pushdown is not beneficial to Spark and brings extra complexity to the code base.

## How was this patch tested?

N/A

Author: Reynold Xin <rxin@databricks.com>

Closes #14076 from rxin/SPARK-16400.
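For context, here is a minimal sketch (not part of this commit; the output path, object name, and local master are illustrative) of the kind of query the removed test exercised: an IN predicate over a Parquet table. After this change the predicate is no longer translated into a Parquet row-level InSet filter, but Spark's own Filter operator still evaluates it, so query results are unchanged.

```scala
import org.apache.spark.sql.SparkSession

object InPredicateExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical standalone driver; in the deleted test this ran inside
    // ParquetFilterSuite with SharedSQLContext instead.
    val spark = SparkSession.builder()
      .appName("in-predicate-example")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    val path = "/tmp/in_predicate_example" // illustrative path
    (1 to 5).map(i => (i.toFloat, i % 3)).toDF("a", "b")
      .write.mode("overwrite").parquet(path)

    // The IN predicate still filters correctly; it is simply evaluated by
    // Spark rather than pushed into the Parquet reader as an InSet predicate.
    val df = spark.read.parquet(path).where("b in (0, 2)")
    assert(df.count() == 3)

    spark.stop()
  }
}
```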
Diffstat (limited to 'sql/core/src/test/scala/org')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala | 30
1 file changed, 0 insertions, 30 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
index 84fdcfea3c..f59d474d00 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
@@ -514,36 +514,6 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
}
}
- test("SPARK-11164: test the parquet filter in") {
- import testImplicits._
- withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
- withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
- withTempPath { dir =>
- val path = s"${dir.getCanonicalPath}/table1"
- (1 to 5).map(i => (i.toFloat, i%3)).toDF("a", "b").write.parquet(path)
-
- // When a filter is pushed to Parquet, Parquet can apply it to every row.
- // So, we can check the number of rows returned from the Parquet
- // to make sure our filter pushdown work.
- val df = spark.read.parquet(path).where("b in (0,2)")
- assert(stripSparkFilter(df).count == 3)
-
- val df1 = spark.read.parquet(path).where("not (b in (1))")
- assert(stripSparkFilter(df1).count == 3)
-
- val df2 = spark.read.parquet(path).where("not (b in (1,3) or a <= 2)")
- assert(stripSparkFilter(df2).count == 2)
-
- val df3 = spark.read.parquet(path).where("not (b in (1,3) and a <= 2)")
- assert(stripSparkFilter(df3).count == 4)
-
- val df4 = spark.read.parquet(path).where("not (a <= 2)")
- assert(stripSparkFilter(df4).count == 3)
- }
- }
- }
- }
-
test("SPARK-16371 Do not push down filters when inner name and outer name are the same") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Tuple1(i)))) { implicit df =>
// Here the schema becomes as below: