aboutsummaryrefslogtreecommitdiff
path: root/sql/core/src/test
diff options
context:
space:
mode:
author    Wojtek Szymanski <wk.szymanski@gmail.com>    2017-03-06 13:19:36 -0800
committer Wenchen Fan <wenchen@databricks.com>    2017-03-06 13:19:36 -0800
commit  f6471dc0d5db2d98e48f9f1ae1dba0f174ed9648 (patch)
tree    d7b3fb6b966bb532b0e0ba0550f6a686242de56c /sql/core/src/test
parent  926543664f9d785e70f8314ed6ecc6ecda96d0f4 (diff)
downloadspark-f6471dc0d5db2d98e48f9f1ae1dba0f174ed9648.tar.gz
spark-f6471dc0d5db2d98e48f9f1ae1dba0f174ed9648.tar.bz2
spark-f6471dc0d5db2d98e48f9f1ae1dba0f174ed9648.zip
[SPARK-19709][SQL] Read empty file with CSV data source
## What changes were proposed in this pull request?

Bugfix for reading an empty file with the CSV data source. Instead of throwing `NoSuchElementException`, an empty data frame is returned.

## How was this patch tested?

Added a new unit test in `org.apache.spark.sql.execution.datasources.csv.CSVSuite`.

Author: Wojtek Szymanski <wk.szymanski@gmail.com>

Closes #17068 from wojtek-szymanski/SPARK-19709.
Diffstat (limited to 'sql/core/src/test')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala | 10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index 56071803f6..eaedede349 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -1077,14 +1077,12 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
}
}
- test("Empty file produces empty dataframe with empty schema - wholeFile option") {
- withTempPath { path =>
- path.createNewFile()
-
+ test("Empty file produces empty dataframe with empty schema") {
+ Seq(false, true).foreach { wholeFile =>
val df = spark.read.format("csv")
.option("header", true)
- .option("wholeFile", true)
- .load(path.getAbsolutePath)
+ .option("wholeFile", wholeFile)
+ .load(testFile(emptyFile))
assert(df.schema === spark.emptyDataFrame.schema)
checkAnswer(df, spark.emptyDataFrame)