author    zsxwing <zsxwing@gmail.com>  2015-06-19 11:58:07 +0200
committer Sean Owen <sowen@cloudera.com>  2015-06-19 11:58:07 +0200
commit    93360dc3cd6186e9d33c762d153a829a5882b72b (patch)
tree      c44938a8d24a8aab62fbb10416e5cc84ae4dde4f /core
parent    54557f353e588f5ff622ab8e67068bab408bce92 (diff)
[SPARK-7913] [CORE] Make AppendOnlyMap use the same growth strategy of OpenHashSet and consistent exception message
This is a follow up PR for #6456 to make AppendOnlyMap consistent with OpenHashSet.

/cc srowen andrewor14

Author: zsxwing <zsxwing@gmail.com>

Closes #6879 from zsxwing/append-only-map and squashes the following commits:

912c0ad [zsxwing] Fix the doc
dd4385b [zsxwing] Make AppendOnlyMap use the same growth strategy of OpenHashSet and consistent exception message
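For context, the growth strategy this patch converges on looks roughly like the following minimal sketch (not the Spark source itself; the LOAD_FACTOR constant and the standalone object are assumptions inferred from the diff and from the 0.7 factor in the updated doc comment):

object AppendOnlyMapGrowthSketch {
  // Largest power-of-two table size, matching the MAXIMUM_CAPACITY referenced in the diff.
  val MAXIMUM_CAPACITY: Int = 1 << 29            // 536870912
  // Assumed load factor; the doc comment's 0.7 * 2^29 implies this value.
  val LOAD_FACTOR: Double = 0.7

  var capacity: Int = 64
  var growThreshold: Int = (LOAD_FACTOR * capacity).toInt
  var curSize: Int = 0

  // Mirrors the patched incrementSize(): no explicit capacity check here any more.
  def incrementSize(): Unit = {
    curSize += 1
    if (curSize > growThreshold) growTable()
  }

  // Mirrors the patched growTable(): exceeding the cap is rejected via require, as in OpenHashSet.
  def growTable(): Unit = {
    val newCapacity = capacity * 2
    require(newCapacity <= MAXIMUM_CAPACITY, s"Can't contain more than $growThreshold elements")
    capacity = newCapacity
    growThreshold = (LOAD_FACTOR * capacity).toInt
    // ... the real implementation also rehashes every entry into the doubled backing array ...
  }
}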
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala  10
1 file changed, 4 insertions, 6 deletions
diff --git a/core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala
index d215ee43cb..4c1e161554 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/AppendOnlyMap.scala
@@ -32,7 +32,7 @@ import org.apache.spark.annotation.DeveloperApi
* size, which is guaranteed to explore all spaces for each key (see
* http://en.wikipedia.org/wiki/Quadratic_probing).
*
- * The map can support up to `536870912 (2 ^ 29)` elements.
+ * The map can support up to `375809638 (0.7 * 2 ^ 29)` elements.
*
* TODO: Cache the hash values of each key? java.util.HashMap does that.
*/
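The new figure in the doc comment is simply the growth threshold evaluated at the maximum table size: with the map's 0.7 load factor, 0.7 * 2^29 = 0.7 * 536870912 = 375809638.4, truncated to 375809638 elements. Once the map would need more elements than that, the next growTable() call would have to double past MAXIMUM_CAPACITY and therefore fails.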
@@ -199,11 +199,8 @@ class AppendOnlyMap[K, V](initialCapacity: Int = 64)
/** Increase table size by 1, rehashing if necessary */
private def incrementSize() {
- if (curSize == MAXIMUM_CAPACITY) {
- throw new IllegalStateException(s"Can't put more that ${MAXIMUM_CAPACITY} elements")
- }
curSize += 1
- if (curSize > growThreshold && capacity < MAXIMUM_CAPACITY) {
+ if (curSize > growThreshold) {
growTable()
}
}
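With this hunk the explicit MAXIMUM_CAPACITY guard disappears from incrementSize(), and the size limit is instead enforced in growTable() when doubling would exceed the cap (next hunk). One side effect, noted here as an observation from standard Scala semantics rather than anything stated in the diff: require throws IllegalArgumentException, so a full map now fails with that exception type and with the "Can't contain more than ... elements" wording this commit aligns with OpenHashSet, rather than with the IllegalStateException thrown by the removed check.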
@@ -216,7 +213,8 @@ class AppendOnlyMap[K, V](initialCapacity: Int = 64)
/** Double the table's size and re-hash everything */
protected def growTable() {
// capacity < MAXIMUM_CAPACITY (2 ^ 29) so capacity * 2 won't overflow
- val newCapacity = (capacity * 2).min(MAXIMUM_CAPACITY)
+ val newCapacity = capacity * 2
+ require(newCapacity <= MAXIMUM_CAPACITY, s"Can't contain more than ${growThreshold} elements")
val newData = new Array[AnyRef](2 * newCapacity)
val newMask = newCapacity - 1
// Insert all our old values into the new array. Note that because our old keys are