From a03e5b81e91d9d792b6a2e01d1505394ea303dd8 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Fri, 19 Sep 2014 14:35:22 -0700
Subject: [SPARK-1701] [PySpark] remove slice terminology from python examples

Author: Matthew Farrellee

Closes #2304 from mattf/SPARK-1701-partition-over-slice-for-python-examples and squashes the following commits:

928a581 [Matthew Farrellee] [SPARK-1701] [PySpark] remove slice terminology from python examples
---
 examples/src/main/python/pi.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/src/main/python/pi.py b/examples/src/main/python/pi.py
index ee9036adfa..a7c74e969c 100755
--- a/examples/src/main/python/pi.py
+++ b/examples/src/main/python/pi.py
@@ -24,18 +24,18 @@ from pyspark import SparkContext
 
 if __name__ == "__main__":
     """
-        Usage: pi [slices]
+        Usage: pi [partitions]
     """
     sc = SparkContext(appName="PythonPi")
-    slices = int(sys.argv[1]) if len(sys.argv) > 1 else 2
-    n = 100000 * slices
+    partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
+    n = 100000 * partitions
 
     def f(_):
         x = random() * 2 - 1
         y = random() * 2 - 1
         return 1 if x ** 2 + y ** 2 < 1 else 0
 
-    count = sc.parallelize(xrange(1, n + 1), slices).map(f).reduce(add)
+    count = sc.parallelize(xrange(1, n + 1), partitions).map(f).reduce(add)
     print "Pi is roughly %f" % (4.0 * count / n)
 
     sc.stop()
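
For reference, the full example as it reads after applying this patch, a sketch reassembled from the hunk above. The import block sits above the hunk and is not shown in the diff; it is assumed here from the identifiers the code uses (sys, random, add). The 2014-era file is Python 2, hence xrange and the print statement.

    # examples/src/main/python/pi.py, post-patch sketch (Python 2)
    import sys                  # assumed: needed for sys.argv
    from random import random   # assumed: needed for random()
    from operator import add    # assumed: needed for reduce(add)

    from pyspark import SparkContext

    if __name__ == "__main__":
        """
            Usage: pi [partitions]
        """
        sc = SparkContext(appName="PythonPi")
        partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
        n = 100000 * partitions

        def f(_):
            # Sample a point uniformly from the [-1, 1] x [-1, 1] square
            # and count it when it lands inside the unit circle.
            x = random() * 2 - 1
            y = random() * 2 - 1
            return 1 if x ** 2 + y ** 2 < 1 else 0

        # The circle covers pi/4 of the square, so 4 * hits / samples
        # estimates pi. The second argument to parallelize is the number
        # of partitions the samples are spread across.
        count = sc.parallelize(xrange(1, n + 1), partitions).map(f).reduce(add)
        print "Pi is roughly %f" % (4.0 * count / n)

        sc.stop()

It can be run with, for example, spark-submit examples/src/main/python/pi.py 10 to draw samples across 10 partitions.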