Diffstat (limited to 'examples/src')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/SparkPCA.scala | 51
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala |  4
2 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparkPCA.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkPCA.scala
new file mode 100644
index 0000000000..d4e08c5e12
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkPCA.scala
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.mllib
+
+import org.apache.spark.SparkContext
+import org.apache.spark.mllib.linalg.PCA
+import org.apache.spark.mllib.linalg.MatrixEntry
+import org.apache.spark.mllib.linalg.SparseMatrix
+import org.apache.spark.mllib.util._
+
+
+/**
+ * Compute PCA of an example matrix.
+ */
+object SparkPCA {
+ def main(args: Array[String]) {
+ if (args.length != 3) {
+      System.err.println("Usage: SparkPCA <master> <m> <n>")
+ System.exit(1)
+ }
+ val sc = new SparkContext(args(0), "PCA",
+ System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))
+
+    val m = args(1).toInt
+    val n = args(2).toInt
+
+ // Make example matrix
+ val data = Array.tabulate(m, n) { (a, b) =>
+ (a + 2).toDouble * (b + 1) / (1 + a + b) }
+
+ // recover top principal component
+ val coeffs = new PCA().setK(1).compute(sc.makeRDD(data))
+
+ println("top principal component = " + coeffs.mkString(", "))
+ }
+}
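
For reference, a minimal standalone sketch of the builder-style API this new example exercises. It assumes the PCA class added in this patch, with setK choosing the number of components and compute taking an RDD of dense Array[Double] rows, as used above; the tiny input matrix and local master are illustrative only:

import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.PCA

object PCASketch {
  def main(args: Array[String]) {
    // Local master for a quick, self-contained run (illustrative choice).
    val sc = new SparkContext("local", "PCASketch")

    // Rows that vary along a single direction, so the top principal
    // component should align with that direction.
    val rows = sc.makeRDD(Array(
      Array(1.0, 2.0),
      Array(2.0, 4.0),
      Array(3.0, 6.0)))

    // Same call pattern as SparkPCA above: keep only the top component.
    val coeffs = new PCA().setK(1).compute(rows)

    println("top principal component = " + coeffs.mkString(", "))
    sc.stop()
  }
}
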
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
index ce2b133368..2933cec497 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparkSVD.scala
@@ -38,7 +38,7 @@ object SparkSVD {
       System.exit(1)
     }
     val sc = new SparkContext(args(0), "SVD",
-      System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
+      System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))
 
     // Load and parse the data file
     val data = sc.textFile(args(1)).map { line =>
@@ -49,7 +49,7 @@ object SparkSVD {
     val n = args(3).toInt
 
     // recover largest singular vector
-    val decomposed = SVD.sparseSVD(SparseMatrix(data, m, n), 1)
+    val decomposed = new SVD().setK(1).compute(SparseMatrix(data, m, n))
     val u = decomposed.U.data
     val s = decomposed.S.data
     val v = decomposed.V.data
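
Likewise, a minimal sketch of the new builder-style SVD call that replaces the old SVD.sparseSVD static method. It assumes SparseMatrix wraps an RDD[MatrixEntry] together with explicit m x n dimensions and that MatrixEntry is a (row, column, value) case class, consistent with the call sites above; the 2 x 2 diagonal input is illustrative only:

import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.{MatrixEntry, SparseMatrix, SVD}

object SVDSketch {
  def main(args: Array[String]) {
    // Local master for a quick, self-contained run (illustrative choice).
    val sc = new SparkContext("local", "SVDSketch")

    // A 2 x 2 diagonal matrix in (row, column, value) coordinate form;
    // its singular values are simply 3.0 and 1.0.
    val entries = sc.makeRDD(Seq(
      MatrixEntry(0, 0, 3.0),
      MatrixEntry(1, 1, 1.0)))

    // Builder-style call, mirroring the updated line in SparkSVD above.
    val decomposed = new SVD().setK(1).compute(SparseMatrix(entries, 2, 2))

    // U, S, V come back wrapped the same way as the input matrix.
    val s = decomposed.S.data
    println("S = " + s)
    sc.stop()
  }
}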