diff options
author | Niccolo Becchi <niccolo.becchi@gmail.com> | 2015-05-05 08:54:42 +0100 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2015-05-05 08:54:42 +0100 |
commit | da738cffa8f7e12545b47f31dcb051f2927e4149 (patch) | |
tree | 40beb380c618e6af5214f33863a6717be07923ea | |
parent | e9b16e67c636a8a91ab9fb0f4ef98146abbde1e9 (diff) | |
download | spark-da738cffa8f7e12545b47f31dcb051f2927e4149.tar.gz spark-da738cffa8f7e12545b47f31dcb051f2927e4149.tar.bz2 spark-da738cffa8f7e12545b47f31dcb051f2927e4149.zip |
[MINOR] Renamed variables in SparkKMeans.scala, LocalKMeans.scala and kmeans.py to simplify readability
With the previous syntax it could look as if the reduceByKey sums the abscissas and ordinates of some 2D points separately. Renaming the variables this way should make the example easier to understand, especially for those who, like me, are just getting started with functional programming.
Author: Niccolo Becchi <niccolo.becchi@gmail.com>
Author: pippobaudos <niccolo.becchi@gmail.com>
Closes #5875 from pippobaudos/patch-1 and squashes the following commits:
3bb3a47 [pippobaudos] renamed variables in LocalKMeans.scala and kmeans.py to simplify readability
2c2a7a2 [Niccolo Becchi] Update SparkKMeans.scala
3 files changed, 7 insertions, 7 deletions
diff --git a/examples/src/main/python/kmeans.py b/examples/src/main/python/kmeans.py index 1939150646..1456c87312 100755 --- a/examples/src/main/python/kmeans.py +++ b/examples/src/main/python/kmeans.py @@ -68,14 +68,14 @@ if __name__ == "__main__": closest = data.map( lambda p: (closestPoint(p, kPoints), (p, 1))) pointStats = closest.reduceByKey( - lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2)) + lambda (p1, c1), (p2, c2): (p1 + p2, c1 + c2)) newPoints = pointStats.map( - lambda xy: (xy[0], xy[1][0] / xy[1][1])).collect() + lambda st: (st[0], st[1][0] / st[1][1])).collect() - tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints) + tempDist = sum(np.sum((kPoints[iK] - p) ** 2) for (iK, p) in newPoints) - for (x, y) in newPoints: - kPoints[x] = y + for (iK, p) in newPoints: + kPoints[iK] = p print("Final centers: " + str(kPoints)) diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala index f73eac1e2b..04fc0a0330 100644 --- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala +++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala @@ -99,7 +99,7 @@ object LocalKMeans { var pointStats = mappings.map { pair => pair._2.reduceLeft [(Int, (Vector[Double], Int))] { - case ((id1, (x1, y1)), (id2, (x2, y2))) => (id1, (x1 + x2, y1 + y2)) + case ((id1, (p1, c1)), (id2, (p2, c2))) => (id1, (p1 + p2, c1 + c2)) } } diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala index 48e8d11cdf..b514d9123f 100644 --- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala +++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala @@ -79,7 +79,7 @@ object SparkKMeans { while(tempDist > convergeDist) { val closest = data.map (p => (closestPoint(p, kPoints), (p, 1))) - val pointStats = closest.reduceByKey{case ((x1, y1), 
(x2, y2)) => (x1 + x2, y1 + y2)} + val pointStats = closest.reduceByKey{case ((p1, c1), (p2, c2)) => (p1 + p2, c1 + c2)} val newPoints = pointStats.map {pair => (pair._1, pair._2._1 * (1.0 / pair._2._2))}.collectAsMap() |