author    | Tathagata Das <tathagata.das1565@gmail.com> | 2016-05-06 15:04:16 -0700
committer | Yin Huai <yhuai@databricks.com>             | 2016-05-06 15:04:16 -0700
commit    | f7b7ef41662d7d02fc4f834f3c6c4ee8802e949c
tree      | 715c731c578d7ebe519ae3b0473882164a418a20 /sql/hive/src/test
parent    | e20cd9f4ce977739ce80a2c39f8ebae5e53f72f6
[SPARK-14997][SQL] Fixed FileCatalog to return correct set of files when there is no partitioning scheme in the given paths
## What changes were proposed in this pull request?
Let's say there are JSON files in the following directory structure:
```
xyz/file0.json
xyz/subdir1/file1.json
xyz/subdir2/file2.json
xyz/subdir1/subsubdir1/file3.json
```
`sqlContext.read.json("xyz")` should read only `file0.json`, matching the behavior of Spark 1.6.1. In the current master, however, all four files are read.
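For concreteness, a minimal sketch of the intended semantics (assuming an active `sqlContext` and the layout above; the `inputFiles` assertion is illustrative and not part of this patch):
```
// Reading the top-level directory, which has no partitioning scheme.
val df = sqlContext.read.json("xyz")

// Expected (Spark 1.6.1) behavior: only xyz/file0.json is read.
// In current master, all 4 files, including those in subdirs, are read.
assert(df.inputFiles.length == 1)
```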
The fix is to make `FileCatalog` return only the child files of the given path when no partitioning is detected, instead of the full recursive file listing.
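Roughly, the idea behind the fix looks like the following hypothetical helper (an illustrative sketch only; the actual `FileCatalog` change is outside the test diff below, and these names are invented):
```
import org.apache.hadoop.fs.{FileStatus, Path}

// Given all leaf files grouped by their parent directory, expose every file
// only when a partitioning scheme was detected; otherwise expose just the
// direct children of the queried path.
def filesToRead(
    path: Path,
    leafDirToChildrenFiles: Map[Path, Seq[FileStatus]],
    partitioningDetected: Boolean): Seq[FileStatus] = {
  if (partitioningDetected) {
    // Partitioned layout: files in all (sub)directories participate.
    leafDirToChildrenFiles.values.flatten.toSeq
  } else {
    // No partitioning: only files directly under 'path' are returned.
    leafDirToChildrenFiles.getOrElse(path, Seq.empty)
  }
}
```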
Closes #12774
## How was this patch tested?
Unit tests.
Author: Tathagata Das <tathagata.das1565@gmail.com>
Closes #12856 from tdas/SPARK-14997.
Diffstat (limited to 'sql/hive/src/test')
-rw-r--r-- | sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala | 232
1 file changed, 208 insertions, 24 deletions
```
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
index 67b403a9bd..20c5f72ff1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.sources
 
+import java.io.File
+
 import scala.util.Random
 
 import org.apache.hadoop.fs.Path
@@ -486,40 +488,222 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
     }
   }
 
-  test("Hadoop style globbing") {
+  test("load() - with directory of unpartitioned data in nested subdirs") {
+    withTempPath { dir =>
+      val subdir = new File(dir, "subdir")
+
+      val dataInDir = Seq(1, 2, 3).toDF("value")
+      val dataInSubdir = Seq(4, 5, 6).toDF("value")
+
+      /*
+        Directory structure to be generated
+
+        dir
+          |
+          |___ [ files of dataInDir ]
+          |
+          |___ subdir
+                |
+                |___ [ files of dataInSubdir ]
+      */
+
+      // Generate dataInSubdir only, not dataInDir
+      dataInSubdir.write
+        .format(dataSourceName)
+        .mode(SaveMode.Overwrite)
+        .save(subdir.getCanonicalPath)
+
+      // Inferring the schema should throw an error, as there is no file to infer it from
+      val e = intercept[Exception] {
+        sqlContext.read.format(dataSourceName).load(dir.getCanonicalPath)
+      }
+
+      e match {
+        case _: AnalysisException =>
+          assert(e.getMessage.contains("infer"))
+
+        case _: java.util.NoSuchElementException if e.getMessage.contains("dataSchema") =>
+          // Ignore the error: the source format requires the schema to be provided by the user.
+          // This is needed for SimpleTextHadoopFsRelationSuite, as SimpleTextSource needs a schema.
+
+        case _ =>
+          fail("Unexpected error trying to infer schema from empty dir", e)
+      }
+
+      /** Test whether the data read from the given path matches the expected answer */
+      def testWithPath(path: File, expectedAnswer: Seq[Row]): Unit = {
+        val df = sqlContext.read
+          .format(dataSourceName)
+          .schema(dataInDir.schema) // avoid schema inference for any format
+          .load(path.getCanonicalPath)
+        checkAnswer(df, expectedAnswer)
+      }
+
+      // Verify that reading by path 'dir/' gives empty results, as there are no files in 'dir'
+      // and it should not pick up files in 'dir/subdir'
+      require(subdir.exists)
+      require(subdir.listFiles().exists(!_.isDirectory))
+      testWithPath(dir, Seq.empty)
+
+      // Verify that if there is data in dir, then reading by path 'dir/' reads only dataInDir
+      dataInDir.write
+        .format(dataSourceName)
+        .mode(SaveMode.Append) // append to prevent subdir from being deleted
+        .save(dir.getCanonicalPath)
+      require(dir.listFiles().exists(!_.isDirectory))
+      require(subdir.exists())
+      require(subdir.listFiles().exists(!_.isDirectory))
+      testWithPath(dir, dataInDir.collect())
+    }
+  }
+
+  test("Hadoop style globbing - unpartitioned data") {
     withTempPath { file =>
+
+      val dir = file.getCanonicalPath
+      val subdir = new File(dir, "subdir")
+      val subsubdir = new File(subdir, "subsubdir")
+      val anotherSubsubdir =
+        new File(new File(dir, "another-subdir"), "another-subsubdir")
+
+      val dataInSubdir = Seq(1, 2, 3).toDF("value")
+      val dataInSubsubdir = Seq(4, 5, 6).toDF("value")
+      val dataInAnotherSubsubdir = Seq(7, 8, 9).toDF("value")
+
+      dataInSubdir.write
+        .format(dataSourceName)
+        .mode(SaveMode.Overwrite)
+        .save(subdir.getCanonicalPath)
+
+      dataInSubsubdir.write
+        .format(dataSourceName)
+        .mode(SaveMode.Overwrite)
+        .save(subsubdir.getCanonicalPath)
+
+      dataInAnotherSubsubdir.write
+        .format(dataSourceName)
+        .mode(SaveMode.Overwrite)
+        .save(anotherSubsubdir.getCanonicalPath)
+
+      require(subdir.exists)
+      require(subdir.listFiles().exists(!_.isDirectory))
+      require(subsubdir.exists)
+      require(subsubdir.listFiles().exists(!_.isDirectory))
+      require(anotherSubsubdir.exists)
+      require(anotherSubsubdir.listFiles().exists(!_.isDirectory))
+
+      /*
+        Directory structure generated
+
+        dir
+          |
+          |___ subdir
+          |     |
+          |     |___ [ files of dataInSubdir ]
+          |     |
+          |     |___ subsubdir
+          |           |
+          |           |___ [ files of dataInSubsubdir ]
+          |
+          |___ another-subdir
+                |
+                |___ another-subsubdir
+                      |
+                      |___ [ files of dataInAnotherSubsubdir ]
+      */
+
+      val schema = dataInSubdir.schema
+
+      /** Check whether the data read from the given path matches the expected answer */
+      def check(path: String, expectedDf: DataFrame): Unit = {
+        val df = sqlContext.read
+          .format(dataSourceName)
+          .schema(schema) // avoid schema inference for any format; all datasets share this schema
+          .load(path)
+        checkAnswer(df, expectedDf)
+      }
+
+      check(s"$dir/*/", dataInSubdir)
+      check(s"$dir/sub*/*", dataInSubdir.union(dataInSubsubdir))
+      check(s"$dir/another*/*", dataInAnotherSubsubdir)
+      check(s"$dir/*/another*", dataInAnotherSubsubdir)
+      check(s"$dir/*/*", dataInSubdir.union(dataInSubsubdir).union(dataInAnotherSubsubdir))
+    }
+  }
+
+  test("Hadoop style globbing - partitioned data with schema inference") {
+
+    // Tests the following on partitioned data:
+    // - partitions are not discovered with globbing and without the base path set
+    // - partitions are discovered with globbing and the base path set, though more detailed
+    //   tests for this are in ParquetPartitionDiscoverySuite
+
+    withTempPath { path =>
+      val dir = path.getCanonicalPath
       partitionedTestDF.write
         .format(dataSourceName)
         .mode(SaveMode.Overwrite)
         .partitionBy("p1", "p2")
-        .save(file.getCanonicalPath)
+        .save(dir)
+
+      def check(
+          path: String,
+          expectedResult: Either[DataFrame, String],
+          basePath: Option[String] = None): Unit = {
+        try {
+          val reader = sqlContext.read
+          basePath.foreach(reader.option("basePath", _))
+          val testDf = reader
+            .format(dataSourceName)
+            .load(path)
+          assert(expectedResult.isLeft, s"Error was expected with $path but result found")
+          checkAnswer(testDf, expectedResult.left.get)
+        } catch {
+          case e: java.util.NoSuchElementException if e.getMessage.contains("dataSchema") =>
+            // Ignore the error: the source format requires the schema to be provided by the
+            // user. This is needed for SimpleTextHadoopFsRelationSuite, as SimpleTextSource
+            // needs a schema.
+
+          case e: Throwable =>
+            assert(expectedResult.isRight, s"Was not expecting error with $path: " + e)
+            assert(
+              e.getMessage.contains(expectedResult.right.get),
+              s"Did not find expected error message with $path")
+        }
+      }
 
-      val df = sqlContext.read
-        .format(dataSourceName)
-        .option("dataSchema", dataSchema.json)
-        .option("basePath", file.getCanonicalPath)
-        .load(s"${file.getCanonicalPath}/p1=*/p2=???")
-
-      val expectedPaths = Set(
-        s"${file.getCanonicalFile}/p1=1/p2=foo",
-        s"${file.getCanonicalFile}/p1=2/p2=foo",
-        s"${file.getCanonicalFile}/p1=1/p2=bar",
-        s"${file.getCanonicalFile}/p1=2/p2=bar"
-      ).map { p =>
-        val path = new Path(p)
-        val fs = path.getFileSystem(sqlContext.sessionState.newHadoopConf())
-        path.makeQualified(fs.getUri, fs.getWorkingDirectory).toString
+      object Error {
+        def apply(msg: String): Either[DataFrame, String] = Right(msg)
       }
 
-      val actualPaths = df.queryExecution.analyzed.collectFirst {
-        case LogicalRelation(relation: HadoopFsRelation, _, _) =>
-          relation.location.paths.map(_.toString).toSet
-      }.getOrElse {
-        fail("Expect an FSBasedRelation, but none could be found")
+      object Result {
+        def apply(df: DataFrame): Either[DataFrame, String] = Left(df)
       }
 
-      assert(actualPaths === expectedPaths)
-      checkAnswer(df, partitionedTestDF.collect())
+      // ---- Without base path set ----
+      // Should find all the data with partitioning columns
+      check(s"$dir", Result(partitionedTestDF))
+
+      // Should fail, as globbing finds dirs without files, only subdirs in them
+      check(s"$dir/*/", Error("please set \"basePath\""))
+      check(s"$dir/p1=*/", Error("please set \"basePath\""))
+
+      // Should not find partition columns, as the globs resolve to p2 dirs
+      // with files in them
+      check(s"$dir/*/*", Result(partitionedTestDF.drop("p1", "p2")))
+      check(s"$dir/p1=*/p2=foo", Result(partitionedTestDF.filter("p2 = 'foo'").drop("p1", "p2")))
+      check(s"$dir/p1=1/p2=???", Result(partitionedTestDF.filter("p1 = 1").drop("p1", "p2")))
+
+      // Should find all data without the partitioning columns, as the globs resolve to the files
+      check(s"$dir/*/*/*", Result(partitionedTestDF.drop("p1", "p2")))
+
+      // ---- With base path set ----
+      val resultDf = partitionedTestDF.select("a", "b", "p1", "p2")
+      check(path = s"$dir/*", Result(resultDf), basePath = Some(dir))
+      check(path = s"$dir/*/*", Result(resultDf), basePath = Some(dir))
+      check(path = s"$dir/*/*/*", Result(resultDf), basePath = Some(dir))
     }
   }
```