author     CodingCat <zhunansjtu@gmail.com>        2014-03-12 17:43:12 -0700
committer  Aaron Davidson <aaron@databricks.com>   2014-03-12 17:43:12 -0700
commit     9032f7c0d5f1ae7985a20d54ca04c297201aae85 (patch)
tree       dff8324523fd8163ea369b524f73b1ef303605c0 /examples/src
parent     b8afe3052086547879ebf28d6e36207e0d370710 (diff)
SPARK-1160: Deprecate toArray in RDD
https://spark-project.atlassian.net/browse/SPARK-1160 reported by @mateiz:

"It's redundant with collect() and the name doesn't make sense in Java, where we return a List (we can't return an array due to the way Java generics work). It's also missing in Python."

In this patch, I deprecated the method and updated the source files that used it, replacing toArray with collect() directly.

Author: CodingCat <zhunansjtu@gmail.com>

Closes #105 from CodingCat/SPARK-1060 and squashes the following commits:

286f163 [CodingCat] deprecate in JavaRDDLike
ee17b4e [CodingCat] add message and since
2ff7319 [CodingCat] deprecate toArray in RDD
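For reference, Scala's @deprecated annotation takes a deprecation message and a since-version, which is what the "add message and since" commit above refers to. A minimal sketch of what the deprecation could look like on RDD[T]; the exact message string and version number here are illustrative assumptions, not copied from the patch:

// Sketch only: message and since-version are assumed, not taken from the commit
@deprecated("use collect()", "1.0.0")
def toArray(): Array[T] = collect()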
Diffstat (limited to 'examples/src')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkALS.scala       | 4 ++--
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
index 17bafc2218..ce4b3c8451 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
@@ -128,11 +128,11 @@ object SparkALS {
println("Iteration " + iter + ":")
ms = sc.parallelize(0 until M, slices)
.map(i => update(i, msb.value(i), usb.value, Rc.value))
- .toArray
+ .collect()
msb = sc.broadcast(ms) // Re-broadcast ms because it was updated
us = sc.parallelize(0 until U, slices)
.map(i => update(i, usb.value(i), msb.value, algebra.transpose(Rc.value)))
- .toArray
+ .collect()
usb = sc.broadcast(us) // Re-broadcast us because it was updated
println("RMSE = " + rmse(R, ms, us))
println()
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
index 19676fcc1a..ce2b133368 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
@@ -54,6 +54,6 @@ object SparkSVD {
     val s = decomposed.S.data
     val v = decomposed.V.data
 
-    println("singular values = " + s.toArray.mkString)
+    println("singular values = " + s.collect().mkString)
   }
 }
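As a usage note on the changes above, collect() is a drop-in replacement for toArray on an RDD: both return the RDD's elements as a local array on the driver. A minimal sketch, assuming a live SparkContext named sc:

// collect() materializes the distributed elements as a local Array[Int],
// the same result toArray used to give
val nums = sc.parallelize(1 to 5)
val asArray: Array[Int] = nums.collect()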