author    | Dongjoon Hyun <dongjoon@apache.org> | 2016-06-29 15:00:41 -0700
committer | Reynold Xin <rxin@databricks.com>   | 2016-06-29 15:00:41 -0700
commit    | 9b1b3ae771babf127f64898d5dc110721597a760 (patch)
tree      | de98cf6489f46b4e77c0932acdd269e1519e68fd /sql
parent    | 8b5a8b25b9d29b7d0949d5663c7394b26154a836 (diff)
[SPARK-16006][SQL] Attempting to write an empty DataFrame with no fields throws a non-intuitive exception
## What changes were proposed in this pull request?
This PR allows `emptyDataFrame.write` to succeed, since in that case the user has not specified any partition columns.
**Before**
```scala
scala> spark.emptyDataFrame.write.parquet("/tmp/t1")
org.apache.spark.sql.AnalysisException: Cannot use all columns for partition columns;
scala> spark.emptyDataFrame.write.csv("/tmp/t1")
org.apache.spark.sql.AnalysisException: Cannot use all columns for partition columns;
```
After this PR, no exception occurs and the created directory contains only one file, `_SUCCESS`, as expected.
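For comparison, a sketch of the same calls after this patch (separate output paths are used here to avoid an unrelated "path already exists" error; the comments describe the expected result rather than captured REPL output):
```scala
scala> spark.emptyDataFrame.write.parquet("/tmp/t1")
// no exception; /tmp/t1 contains only the _SUCCESS marker

scala> spark.emptyDataFrame.write.csv("/tmp/t2")
// no exception; /tmp/t2 contains only the _SUCCESS marker
```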
## How was this patch tested?
Pass the Jenkins tests, including the updated test cases.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #13730 from dongjoon-hyun/SPARK-16006.
Diffstat (limited to 'sql')
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala | 2
-rw-r--r-- | sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala | 3
2 files changed, 3 insertions, 2 deletions
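The core of the fix is the guard in `PartitioningUtils`: the "cannot use all columns for partition columns" check should only fire when partition columns were actually requested. A standalone sketch of that condition follows (the method name `checkNotAllColumnsPartitioned` and the exception type are illustrative stand-ins; the real code in the diff below throws `AnalysisException`):
```scala
import org.apache.spark.sql.types.StructType

// Standalone sketch of the new guard: reject only when partition columns
// were actually requested AND they cover every field of the schema. With an
// empty schema and no partition columns, this is a no-op, which is what lets
// emptyDataFrame.write proceed.
def checkNotAllColumnsPartitioned(schema: StructType, partitionColumns: Seq[String]): Unit = {
  if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
    throw new IllegalArgumentException("Cannot use all columns for partition columns")
  }
}
```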
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 388df7002d..c3561099d6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -351,7 +351,7 @@ private[sql] object PartitioningUtils {
       }
     }
 
-    if (partitionColumns.size == schema.fields.size) {
+    if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
       throw new AnalysisException(s"Cannot use all columns for partition columns")
     }
   }

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
index 58b1d56358..d454100ccb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
@@ -246,8 +246,9 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
       spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path)
     }
     intercept[AnalysisException] {
-      spark.range(10).write.format("orc").mode("overwrite").partitionBy("id").save(path)
+      spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path)
     }
+    spark.emptyDataFrame.write.format("parquet").mode("overwrite").save(path)
   }
 }
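For illustration only (not part of the patch), a rough local check of the new behavior; it assumes an existing `spark` session, and the output path and file-name assertions are assumptions rather than anything the PR adds:
```scala
// Hypothetical verification: writing an empty, zero-column DataFrame should
// produce no data files, only the _SUCCESS marker (plus local checksum files).
import java.io.File

val path = "/tmp/t1"
spark.emptyDataFrame.write.mode("overwrite").parquet(path)

val names = new File(path).listFiles().map(_.getName).toSeq
assert(names.contains("_SUCCESS"))
assert(!names.exists(_.startsWith("part-")))
```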