aboutsummaryrefslogtreecommitdiff
path: root/sql/core/src/main/scala/org
diff options
context:
space:
mode:
author: Tejas Patil <tejasp@fb.com>  2017-02-15 22:45:58 -0800
committer: Wenchen Fan <wenchen@databricks.com>  2017-02-15 22:45:58 -0800
commitf041e55eefe1d8a995fed321c66bccbd8b8e5255 (patch)
treeb82f5402f3a5abf38127322be8f5e5657654f7d7 /sql/core/src/main/scala/org
parent8487902a98caf727ba3f9820452b01276d20ede3 (diff)
downloadspark-f041e55eefe1d8a995fed321c66bccbd8b8e5255.tar.gz
spark-f041e55eefe1d8a995fed321c66bccbd8b8e5255.tar.bz2
spark-f041e55eefe1d8a995fed321c66bccbd8b8e5255.zip
[SPARK-19618][SQL] Inconsistency wrt max. buckets allowed from Dataframe API vs SQL
## What changes were proposed in this pull request?

Jira: https://issues.apache.org/jira/browse/SPARK-19618

Moved the check for validating number of buckets from `DataFrameWriter` to `BucketSpec` creation.

## How was this patch tested?

- Added more unit tests

Author: Tejas Patil <tejasp@fb.com>

Closes #16948 from tejasapatil/SPARK-19618_max_buckets.
Diffstat (limited to 'sql/core/src/main/scala/org')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala | 1 -
1 file changed, 0 insertions(+), 1 deletion(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 1d834b1821..cdae8ea458 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -275,7 +275,6 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
}
numBuckets.map { n =>
- require(n > 0 && n < 100000, "Bucket number must be greater than 0 and less than 100000.")
BucketSpec(n, bucketColumnNames.get, sortColumnNames.getOrElse(Nil))
}
}