Diffstat (limited to 'docs/mllib-collaborative-filtering.md')
 docs/mllib-collaborative-filtering.md | 80 ++++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 1 deletion(-)
diff --git a/docs/mllib-collaborative-filtering.md b/docs/mllib-collaborative-filtering.md
index 5cd7173872..0d28b5f7c8 100644
--- a/docs/mllib-collaborative-filtering.md
+++ b/docs/mllib-collaborative-filtering.md
@@ -99,7 +99,85 @@ val model = ALS.trainImplicit(ratings, rank, numIterations, alpha)
All of MLlib's methods use Java-friendly types, so you can import and call them there the same
way you do in Scala. The only caveat is that the methods take Scala RDD objects, while the
Spark Java API uses a separate `JavaRDD` class. You can convert a Java RDD to a Scala one by
-calling `.rdd()` on your `JavaRDD` object.
+calling `.rdd()` on your `JavaRDD` object. A standalone application example,
+equivalent to the Scala example above, is given below:
+
+{% highlight java %}
+import scala.Tuple2;
+
+import org.apache.spark.api.java.*;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.mllib.recommendation.ALS;
+import org.apache.spark.mllib.recommendation.MatrixFactorizationModel;
+import org.apache.spark.mllib.recommendation.Rating;
+import org.apache.spark.SparkConf;
+
+public class CollaborativeFiltering {
+  public static void main(String[] args) {
+    SparkConf conf = new SparkConf().setAppName("Collaborative Filtering Example");
+    JavaSparkContext sc = new JavaSparkContext(conf);
+
+    // Load and parse the data
+    String path = "data/mllib/als/test.data";
+    JavaRDD<String> data = sc.textFile(path);
+    JavaRDD<Rating> ratings = data.map(
+      new Function<String, Rating>() {
+        public Rating call(String s) {
+          String[] sarray = s.split(",");
+          return new Rating(Integer.parseInt(sarray[0]), Integer.parseInt(sarray[1]),
+                            Double.parseDouble(sarray[2]));
+        }
+      }
+    );
+
+    // Build the recommendation model using ALS
+    int rank = 10;
+    int numIterations = 20;
+    MatrixFactorizationModel model = ALS.train(JavaRDD.toRDD(ratings), rank, numIterations, 0.01);
+
+    // Evaluate the model on rating data
+    JavaRDD<Tuple2<Object, Object>> userProducts = ratings.map(
+      new Function<Rating, Tuple2<Object, Object>>() {
+        public Tuple2<Object, Object> call(Rating r) {
+          return new Tuple2<Object, Object>(r.user(), r.product());
+        }
+      }
+    );
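+    // Predict a rating for every (user, product) pair, keyed by (user, product)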
+    JavaPairRDD<Tuple2<Integer, Integer>, Double> predictions = JavaPairRDD.fromJavaRDD(
+      model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map(
+        new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
+          public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r) {
+            return new Tuple2<Tuple2<Integer, Integer>, Double>(
+              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+          }
+        }
+      ));
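+    // Key the original ratings by (user, product) and join them with the predictions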
+    JavaRDD<Tuple2<Double, Double>> ratesAndPreds =
+      JavaPairRDD.fromJavaRDD(ratings.map(
+        new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
+          public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r) {
+            return new Tuple2<Tuple2<Integer, Integer>, Double>(
+              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+          }
+        }
+      )).join(predictions).values();
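+    // Compute the mean squared error between the actual and predicted ratings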
+    double MSE = JavaDoubleRDD.fromRDD(ratesAndPreds.map(
+      new Function<Tuple2<Double, Double>, Object>() {
+        public Object call(Tuple2<Double, Double> pair) {
+          Double err = pair._1() - pair._2();
+          return err * err;
+        }
+      }
+    ).rdd()).mean();
+    System.out.println("Mean Squared Error = " + MSE);
+  }
+}
+{% endhighlight %}
+
+In order to run the above standalone application, follow the instructions in the
+[Standalone Applications](quick-start.html) section of the quick-start guide. In
+addition, include *spark-mllib* as a dependency in your build file.
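+
+As a rough sketch, assuming an sbt build (adjust the version to the Spark release
+you are actually running), the dependency might be declared as follows:
+
+{% highlight scala %}
+// Hypothetical build.sbt snippet; replace the version with your Spark release
+libraryDependencies += "org.apache.spark" %% "spark-mllib" % "1.0.0"
+{% endhighlight %}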
</div>
<div data-lang="python" markdown="1">