 core/src/main/scala/org/apache/spark/rdd/RDD.scala      | 7 ++++---
 core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala | 2 ++
 python/pyspark/rdd.py                                    | 2 --
 python/pyspark/tests.py                                  | 1 -
 4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 8baf199f21..7dad30ecbd 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -1161,8 +1161,8 @@ abstract class RDD[T: ClassTag](
*/
@Experimental
def countApproxDistinct(p: Int, sp: Int): Long = withScope {
- require(p >= 4, s"p ($p) must be at least 4")
- require(sp <= 32, s"sp ($sp) cannot be greater than 32")
+ require(p >= 4, s"p ($p) must be >= 4")
+ require(sp <= 32, s"sp ($sp) must be <= 32")
require(sp == 0 || p <= sp, s"p ($p) cannot be greater than sp ($sp)")
val zeroCounter = new HyperLogLogPlus(p, sp)
aggregate(zeroCounter)(
@@ -1187,8 +1187,9 @@ abstract class RDD[T: ClassTag](
* It must be greater than 0.000017.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = withScope {
+ require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
- countApproxDistinct(p, 0)
+ countApproxDistinct(if (p < 4) 4 else p, 0)
}

/**
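For context on the RDD.scala change above: the relativeSD overload converts the requested relative standard deviation into a HyperLogLog++ precision p via p = ceil(2 * log2(1.054 / relativeSD)). The added require() enforces the lower bound of 0.000017 already stated in the scaladoc, and the new `if (p < 4) 4 else p` clamp keeps very loose accuracies (e.g. relativeSD = 0.5, which maps to p = 3) from failing the `p >= 4` check in the other overload. A small standalone Scala sketch of that mapping, not part of the patch (object and method names below are illustrative only):

object PrecisionSketch {
  // Mirrors the mapping used by countApproxDistinct(relativeSD): relative
  // standard deviation -> HyperLogLog++ precision p, clamped to at least 4.
  def precisionFor(relativeSD: Double): Int = {
    require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
    val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
    if (p < 4) 4 else p
  }

  def main(args: Array[String]): Unit = {
    // 0.05 (the default) -> p = 9, 0.02 -> p = 12, 0.5 -> p = 3 clamped to 4
    Seq(0.05, 0.02, 0.5).foreach { sd =>
      println(s"relativeSD=$sd -> p=${precisionFor(sd)}")
    }
  }
}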
diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
index ef8c36a286..afc11bdc4d 100644
--- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -89,6 +89,8 @@ class RDDSuite extends FunSuite with SharedSparkContext {
val simpleRdd = sc.makeRDD(uniformDistro, 10)
assert(error(simpleRdd.countApproxDistinct(8, 0), size) < 0.2)
assert(error(simpleRdd.countApproxDistinct(12, 0), size) < 0.1)
+ assert(error(simpleRdd.countApproxDistinct(0.02), size) < 0.1)
+ assert(error(simpleRdd.countApproxDistinct(0.5), size) < 0.22)
}

test("SparkContext.union") {
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index d254deb527..545c5ad20c 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -2239,8 +2239,6 @@ class RDD(object):
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
- if relativeSD > 0.37:
- raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
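The PySpark method delegates to the JVM-side countApproxDistinct via _to_java_object_rdd(), so with the Scala-side clamp in place the separate 0.37 upper-bound check in rdd.py is no longer needed: loose accuracies are now handled instead of rejected. A minimal Scala usage sketch of the same behavior, assuming a running SparkContext named sc:

val rdd = sc.parallelize(1 to 1000)
// A very loose accuracy: 0.5 maps to p = 3 and is clamped to 4,
// so the call succeeds instead of failing the p >= 4 require().
val approx = rdd.countApproxDistinct(0.5)
println(approx)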
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index ea63a396da..09de4d159f 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -644,7 +644,6 @@ class RDDTests(ReusedPySparkTestCase):
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
- self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty