 examples/src/main/scala/org/apache/spark/examples/SparkSVD.scala | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkSVD.scala b/examples/src/main/scala/org/apache/spark/examples/SparkSVD.scala
index d9c672f140..ce7c1c48b5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkSVD.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkSVD.scala
@@ -29,12 +29,12 @@ import org.apache.spark.mllib.linalg.SparseMatrix
* Where i is the column, j the row, and value is the matrix entry
*
* For example input file, see:
- * mllib/data/als/test.data
+ * mllib/data/als/test.data (example is 4 x 4)
*/
object SparkSVD {
def main(args: Array[String]) {
- if (args.length != 2) {
- System.err.println("Usage: SparkSVD <master> <file>")
+ if (args.length != 4) {
+ System.err.println("Usage: SparkSVD <master> <file> m n")
System.exit(1)
}
val sc = new SparkContext(args(0), "SVD",
@@ -45,8 +45,8 @@ object SparkSVD {
val parts = line.split(',')
MatrixEntry(parts(0).toInt, parts(1).toInt, parts(2).toDouble)
}
- val m = 4
- val n = 4
+ val m = args(2).toInt
+ val n = args(3).toInt
// recover largest singular vector
val decomposed = SVD.sparseSVD(SparseMatrix(data, m, n), 1)
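
For reference, below is a minimal sketch of how the updated example wires the new m/n arguments into the decomposition, reconstructed only from the hunks above. The object name (SparkSVDSketch), the imports, the textFile call, and the two-argument SparkContext constructor are assumptions about the rest of the file, which the diff does not show; the MatrixEntry/SparseMatrix/SVD calls are taken directly from the diff.

// Sketch only: mirrors the hunks above; names outside the diff are assumptions.
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.{MatrixEntry, SparseMatrix, SVD}

object SparkSVDSketch {
  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println("Usage: SparkSVD <master> <file> m n")
      System.exit(1)
    }
    val sc = new SparkContext(args(0), "SVD")

    // Each input line is "i,j,value", where i is the column and j the row.
    val data = sc.textFile(args(1)).map { line =>
      val parts = line.split(',')
      MatrixEntry(parts(0).toInt, parts(1).toInt, parts(2).toDouble)
    }

    // Matrix dimensions now come from the command line instead of being hard-coded to 4 x 4.
    val m = args(2).toInt
    val n = args(3).toInt

    // Recover the largest singular vector, as in the example.
    val decomposed = SVD.sparseSVD(SparseMatrix(data, m, n), 1)

    sc.stop()
  }
}

With the bundled sample input, this would be invoked with m = 4 and n = 4, matching the "example is 4 x 4" note added to the file header.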