| | | |
|---|---|---|
| author | Tejas Patil <tejasp@fb.com> | 2017-02-15 22:45:58 -0800 |
| committer | Wenchen Fan <wenchen@databricks.com> | 2017-02-15 22:45:58 -0800 |
| commit | f041e55eefe1d8a995fed321c66bccbd8b8e5255 | |
| tree | b82f5402f3a5abf38127322be8f5e5657654f7d7 /sql/hive | |
| parent | 8487902a98caf727ba3f9820452b01276d20ede3 | |
[SPARK-19618][SQL] Inconsistency wrt max. buckets allowed from Dataframe API vs SQL
## What changes were proposed in this pull request?
Jira: https://issues.apache.org/jira/browse/SPARK-19618
Moved the check that validates the number of buckets from `DataFrameWriter` into `BucketSpec` creation, so the DataFrame API and the SQL path both go through the same validation.
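For context, here is a minimal sketch of what the relocated validation can look like on `BucketSpec` creation. The field names and error text mirror the test below, but this is an illustration, not the verbatim patch (the actual change touches files outside `sql/hive`):

```scala
import org.apache.spark.sql.AnalysisException

// Sketch: validating numBuckets in the BucketSpec constructor means every
// code path that builds a BucketSpec -- DataFrameWriter.bucketBy as well as
// SQL's CLUSTERED BY ... INTO n BUCKETS -- hits the same check.
case class BucketSpec(
    numBuckets: Int,
    bucketColumnNames: Seq[String],
    sortColumnNames: Seq[String]) {
  if (numBuckets <= 0 || numBuckets >= 100000) {
    throw new AnalysisException(
      s"Number of buckets should be greater than 0 but less than 100000. Got `$numBuckets`")
  }
}
```

Putting the check in the constructor rather than in one caller is what removes the inconsistency: a `BucketSpec` with an out-of-range bucket count simply cannot be constructed, whichever API built it.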
## How was this patch tested?
- Added more unit tests
Author: Tejas Patil <tejasp@fb.com>
Closes #16948 from tejasapatil/SPARK-19618_max_buckets.
Diffstat (limited to 'sql/hive')
-rw-r--r-- | sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala | 10 |
1 file changed, 7 insertions, 3 deletions
```diff
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
index 8528dfc4ce..61cef2a800 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
@@ -38,10 +38,14 @@ class BucketedWriteSuite extends QueryTest with SQLTestUtils with TestHiveSingle
     intercept[AnalysisException](df.write.bucketBy(2, "k").saveAsTable("tt"))
   }

-  test("numBuckets not greater than 0 or less than 100000") {
+  test("numBuckets be greater than 0 but less than 100000") {
     val df = Seq(1 -> "a", 2 -> "b").toDF("i", "j")
-    intercept[IllegalArgumentException](df.write.bucketBy(0, "i").saveAsTable("tt"))
-    intercept[IllegalArgumentException](df.write.bucketBy(100000, "i").saveAsTable("tt"))
+
+    Seq(-1, 0, 100000).foreach(numBuckets => {
+      val e = intercept[AnalysisException](df.write.bucketBy(numBuckets, "i").saveAsTable("tt"))
+      assert(
+        e.getMessage.contains("Number of buckets should be greater than 0 but less than 100000"))
+    })
   }

   test("specify sorting columns without bucketing columns") {
```
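As a hypothetical spark-shell illustration (not part of the patch), both entry points now surface the same `AnalysisException` instead of the DataFrame path throwing `IllegalArgumentException`:

```scala
val df = Seq(1 -> "a", 2 -> "b").toDF("i", "j")

// DataFrame API path: fails when the BucketSpec is created.
df.write.bucketBy(0, "i").saveAsTable("t1")
// org.apache.spark.sql.AnalysisException: Number of buckets should be
// greater than 0 but less than 100000 ...

// SQL path: the parsed bucket spec goes through the same constructor.
spark.sql("CREATE TABLE t2(i INT, j STRING) USING parquet " +
  "CLUSTERED BY (i) INTO 100000 BUCKETS")
// org.apache.spark.sql.AnalysisException: Number of buckets should be
// greater than 0 but less than 100000 ...
```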