author    Cheng Lian <lian@databricks.com>    2015-05-26 20:48:56 -0700
committer Yin Huai <yhuai@databricks.com>    2015-05-26 20:49:05 -0700
commit d0bd68ff8a1dcfbff8e6d40573ca049d208ab2de (patch)
tree   d7bf1cfe91c3e3859fc1d574249e8ecada3fb782 /sql/hive/src
parent faadbd4d99c51eabf22277430b5e3939b1606cdb (diff)
[SPARK-7868] [SQL] Ignores _temporary directories in HadoopFsRelation
So that potential partial/corrupted data files left by failed tasks/jobs won't affect normal data scans.

Author: Cheng Lian <lian@databricks.com>

Closes #6411 from liancheng/spark-7868 and squashes the following commits:

273ea36 [Cheng Lian] Ignores _temporary directories

(cherry picked from commit b463e6d618e69c535297e51f41eca4f91bd33cc8)
Signed-off-by: Yin Huai <yhuai@databricks.com>
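The filtering itself happens in HadoopFsRelation's file listing, which lives outside the files shown in this diff. As a rough, hypothetical sketch of the idea only (the object name TemporaryPathFilter, the helper shouldIgnore, and the recursive listLeafFiles below are illustrative assumptions, not Spark's actual code):

    import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}

    // Hypothetical sketch: prune "_temporary" subtrees while listing data files.
    object TemporaryPathFilter {
      // Output committers stage in-flight task/job output under "_temporary",
      // so anything beneath such a directory may be partial or corrupted.
      private def shouldIgnore(path: Path): Boolean = path.getName == "_temporary"

      // Recursively collect leaf data files, skipping "_temporary" directories.
      def listLeafFiles(fs: FileSystem, dir: Path): Seq[FileStatus] =
        fs.listStatus(dir).toSeq
          .filterNot(status => shouldIgnore(status.getPath))
          .flatMap {
            case status if status.isDirectory => listLeafFiles(fs, status.getPath)
            case status => Seq(status)
          }
    }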
Diffstat (limited to 'sql/hive/src')
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala | 16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
index 7c02d563f8..cf5ae88dc4 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/hadoopFsRelationSuites.scala
@@ -548,4 +548,20 @@ class ParquetHadoopFsRelationSuite extends HadoopFsRelationTest {
checkAnswer(table("t"), df.select('b, 'c, 'a).collect())
}
}
+
+ test("SPARK-7868: _temporary directories should be ignored") {
+ withTempPath { dir =>
+ val df = Seq("a", "b", "c").zipWithIndex.toDF()
+
+ df.write
+ .format("parquet")
+ .save(dir.getCanonicalPath)
+
+ df.write
+ .format("parquet")
+ .save(s"${dir.getCanonicalPath}/_temporary")
+
+ checkAnswer(read.format("parquet").load(dir.getCanonicalPath), df.collect())
+ }
+ }
}
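The test exercises this from the read side: the same DataFrame is written once at the top level of the directory and once under its _temporary subdirectory. If the scan did not ignore _temporary, the load would pick up both copies and checkAnswer would see every row twice; the assertion passing confirms the staged copy is skipped.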