diff options
author | Maria Rydzy <majrydzy+gh@gmail.com> | 2016-11-02 09:09:16 +0000 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2016-11-02 09:09:16 +0000 |
commit | bcbe44440e6c871e217f06d2a4696fd41f1d2606 (patch) | |
tree | 9cc8b36446b564f704b2bd8f634420a2f7ed2098 /examples/src | |
parent | 2dc048081668665f85623839d5f663b402e42555 (diff) | |
download | spark-bcbe44440e6c871e217f06d2a4696fd41f1d2606.tar.gz spark-bcbe44440e6c871e217f06d2a4696fd41f1d2606.tar.bz2 spark-bcbe44440e6c871e217f06d2a4696fd41f1d2606.zip |
[MINOR] Use <= for clarity in Pi examples' Monte Carlo process
## What changes were proposed in this pull request?
If my understanding is correct, we should rather be looking at the closed disk than the open one.
## How was this patch tested?
Ran a simple comparison of the mean squared error of the approaches using the closed and the open disk.
https://gist.github.com/mrydzy/1cf0e5c316ef9d6fbd91426b91f1969f
The closed disk performed slightly better, but the tested sample wasn't very big, so I rely mostly on an understanding of the algorithm.
Author: Maria Rydzy <majrydzy+gh@gmail.com>
Closes #15687 from mrydzy/master.
Diffstat (limited to 'examples/src')
4 files changed, 4 insertions, 4 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java index 7df145e311..89855e81f1 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java @@ -54,7 +54,7 @@ public final class JavaSparkPi { public Integer call(Integer integer) { double x = Math.random() * 2 - 1; double y = Math.random() * 2 - 1; - return (x * x + y * y < 1) ? 1 : 0; + return (x * x + y * y <= 1) ? 1 : 0; } }).reduce(new Function2<Integer, Integer, Integer>() { @Override diff --git a/examples/src/main/python/pi.py b/examples/src/main/python/pi.py index e3f0c4aeef..37029b7679 100755 --- a/examples/src/main/python/pi.py +++ b/examples/src/main/python/pi.py @@ -38,7 +38,7 @@ if __name__ == "__main__": def f(_): x = random() * 2 - 1 y = random() * 2 - 1 - return 1 if x ** 2 + y ** 2 < 1 else 0 + return 1 if x ** 2 + y ** 2 <= 1 else 0 count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add) print("Pi is roughly %f" % (4.0 * count / n)) diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala index 720d92fb9d..121b768e41 100644 --- a/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala +++ b/examples/src/main/scala/org/apache/spark/examples/LocalPi.scala @@ -26,7 +26,7 @@ object LocalPi { for (i <- 1 to 100000) { val x = random * 2 - 1 val y = random * 2 - 1 - if (x*x + y*y < 1) count += 1 + if (x*x + y*y <= 1) count += 1 } println("Pi is roughly " + 4 * count / 100000.0) } diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala index 272c1a4fc2..a5cacf17a5 100644 --- a/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala +++ b/examples/src/main/scala/org/apache/spark/examples/SparkPi.scala @@ -34,7 
+34,7 @@ object SparkPi { val count = spark.sparkContext.parallelize(1 until n, slices).map { i => val x = random * 2 - 1 val y = random * 2 - 1 - if (x*x + y*y < 1) 1 else 0 + if (x*x + y*y <= 1) 1 else 0 }.reduce(_ + _) println("Pi is roughly " + 4.0 * count / (n - 1)) spark.stop() |