From 746148bc18d5e25ea93f5ff17a6cb4da9b671b75 Mon Sep 17 00:00:00 2001 From: Reza Zadeh Date: Sun, 5 Jan 2014 18:03:57 -0800 Subject: fix docs to use SparseMatrix --- docs/mllib-guide.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'docs/mllib-guide.md') diff --git a/docs/mllib-guide.md b/docs/mllib-guide.md index abeb55d081..653848b6d4 100644 --- a/docs/mllib-guide.md +++ b/docs/mllib-guide.md @@ -243,18 +243,21 @@ as tuples of the form ((i,j),value) all in RDDs. Below is example usage. import org.apache.spark.SparkContext import org.apache.spark.mllib.linalg.SVD +import org.apache.spark.mllib.linalg.SparseMatrix +import org.apache.spark.mllib.linalg.MatrixEntry // Load and parse the data file val data = sc.textFile("mllib/data/als/test.data").map { line => val parts = line.split(',') - ((parts(0).toInt, parts(1).toInt), parts(2).toDouble) + MatrixEntry(parts(0).toInt, parts(1).toInt, parts(2).toDouble) } val m = 4 val n = 4 val k = 1 // recover largest singular vector -val (u, s, v) = SVD.sparseSVD(data, m, n, 1) +val decomposed = SVD.sparseSVD(SparseMatrix(data, m, n), k) +val s = decomposed.S.data println("singular values = " + s.toArray.mkString) -- cgit v1.2.3