about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorXusen Yin <yinxusen@gmail.com>2015-11-17 23:44:06 -0800
committerXiangrui Meng <meng@databricks.com>2015-11-17 23:44:06 -0800
commit9154f89befb7a33d4853cea95efd7dc6b25d033b (patch)
tree8eb6da0ff09ba6c3b2fe34859077e5a55c5ed3df /examples
parent2f191c66b668fc97f82f44fd8336b6a4488c2f5d (diff)
downloadspark-9154f89befb7a33d4853cea95efd7dc6b25d033b.tar.gz
spark-9154f89befb7a33d4853cea95efd7dc6b25d033b.tar.bz2
spark-9154f89befb7a33d4853cea95efd7dc6b25d033b.zip
[SPARK-11728] Replace example code in ml-ensembles.md using include_example
JIRA issue https://issues.apache.org/jira/browse/SPARK-11728. The ml-ensembles.md file contains `OneVsRestExample`. Instead of writing new code files of two `OneVsRestExample`s, I use two existing files in the examples directory, they are `OneVsRestExample.scala` and `JavaOneVsRestExample.scala`. Author: Xusen Yin <yinxusen@gmail.com> Closes #9716 from yinxusen/SPARK-11728.
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java102
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java90
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaOneVsRestExample.java4
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java101
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java90
-rw-r--r--examples/src/main/python/ml/gradient_boosted_tree_classifier_example.py77
-rw-r--r--examples/src/main/python/ml/gradient_boosted_tree_regressor_example.py74
-rw-r--r--examples/src/main/python/ml/random_forest_classifier_example.py77
-rw-r--r--examples/src/main/python/ml/random_forest_regressor_example.py74
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala97
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala85
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala4
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala97
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala84
14 files changed, 1056 insertions, 0 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
new file mode 100644
index 0000000000..848fe6566c
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.ml;
+
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
+import org.apache.spark.ml.Pipeline;
+import org.apache.spark.ml.PipelineModel;
+import org.apache.spark.ml.PipelineStage;
+import org.apache.spark.ml.classification.GBTClassificationModel;
+import org.apache.spark.ml.classification.GBTClassifier;
+import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator;
+import org.apache.spark.ml.feature.*;
+import org.apache.spark.sql.DataFrame;
+import org.apache.spark.sql.SQLContext;
+// $example off$
+
+public class JavaGradientBoostedTreeClassifierExample {
+ public static void main(String[] args) {
+ SparkConf conf = new SparkConf().setAppName("JavaGradientBoostedTreeClassifierExample");
+ JavaSparkContext jsc = new JavaSparkContext(conf);
+ SQLContext sqlContext = new SQLContext(jsc);
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+
+ // Index labels, adding metadata to the label column.
+ // Fit on whole dataset to include all labels in index.
+ StringIndexerModel labelIndexer = new StringIndexer()
+ .setInputCol("label")
+ .setOutputCol("indexedLabel")
+ .fit(data);
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ VectorIndexerModel featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data);
+
+ // Split the data into training and test sets (30% held out for testing)
+ DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
+ DataFrame trainingData = splits[0];
+ DataFrame testData = splits[1];
+
+ // Train a GBT model.
+ GBTClassifier gbt = new GBTClassifier()
+ .setLabelCol("indexedLabel")
+ .setFeaturesCol("indexedFeatures")
+ .setMaxIter(10);
+
+ // Convert indexed labels back to original labels.
+ IndexToString labelConverter = new IndexToString()
+ .setInputCol("prediction")
+ .setOutputCol("predictedLabel")
+ .setLabels(labelIndexer.labels());
+
+ // Chain indexers and GBT in a Pipeline
+ Pipeline pipeline = new Pipeline()
+ .setStages(new PipelineStage[] {labelIndexer, featureIndexer, gbt, labelConverter});
+
+ // Train model. This also runs the indexers.
+ PipelineModel model = pipeline.fit(trainingData);
+
+ // Make predictions.
+ DataFrame predictions = model.transform(testData);
+
+ // Select example rows to display.
+ predictions.select("predictedLabel", "label", "features").show(5);
+
+ // Select (prediction, true label) and compute test error
+ MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
+ .setLabelCol("indexedLabel")
+ .setPredictionCol("prediction")
+ .setMetricName("precision");
+ double accuracy = evaluator.evaluate(predictions);
+ System.out.println("Test Error = " + (1.0 - accuracy));
+
+ GBTClassificationModel gbtModel = (GBTClassificationModel)(model.stages()[2]);
+ System.out.println("Learned classification GBT model:\n" + gbtModel.toDebugString());
+ // $example off$
+
+ jsc.stop();
+ }
+}
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java
new file mode 100644
index 0000000000..1f67b0842d
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.ml;
+
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
+import org.apache.spark.ml.Pipeline;
+import org.apache.spark.ml.PipelineModel;
+import org.apache.spark.ml.PipelineStage;
+import org.apache.spark.ml.evaluation.RegressionEvaluator;
+import org.apache.spark.ml.feature.VectorIndexer;
+import org.apache.spark.ml.feature.VectorIndexerModel;
+import org.apache.spark.ml.regression.GBTRegressionModel;
+import org.apache.spark.ml.regression.GBTRegressor;
+import org.apache.spark.sql.DataFrame;
+import org.apache.spark.sql.SQLContext;
+// $example off$
+
+public class JavaGradientBoostedTreeRegressorExample {
+ public static void main(String[] args) {
+ SparkConf conf = new SparkConf().setAppName("JavaGradientBoostedTreeRegressorExample");
+ JavaSparkContext jsc = new JavaSparkContext(conf);
+ SQLContext sqlContext = new SQLContext(jsc);
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ VectorIndexerModel featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data);
+
+ // Split the data into training and test sets (30% held out for testing)
+ DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
+ DataFrame trainingData = splits[0];
+ DataFrame testData = splits[1];
+
+ // Train a GBT model.
+ GBTRegressor gbt = new GBTRegressor()
+ .setLabelCol("label")
+ .setFeaturesCol("indexedFeatures")
+ .setMaxIter(10);
+
+ // Chain indexer and GBT in a Pipeline
+ Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] {featureIndexer, gbt});
+
+ // Train model. This also runs the indexer.
+ PipelineModel model = pipeline.fit(trainingData);
+
+ // Make predictions.
+ DataFrame predictions = model.transform(testData);
+
+ // Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5);
+
+ // Select (prediction, true label) and compute test error
+ RegressionEvaluator evaluator = new RegressionEvaluator()
+ .setLabelCol("label")
+ .setPredictionCol("prediction")
+ .setMetricName("rmse");
+ double rmse = evaluator.evaluate(predictions);
+ System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);
+
+ GBTRegressionModel gbtModel = (GBTRegressionModel)(model.stages()[1]);
+ System.out.println("Learned regression GBT model:\n" + gbtModel.toDebugString());
+ // $example off$
+
+ jsc.stop();
+ }
+}
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaOneVsRestExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaOneVsRestExample.java
index f0d92a56be..42374e77ac 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaOneVsRestExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaOneVsRestExample.java
@@ -21,6 +21,7 @@ import org.apache.commons.cli.*;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.classification.OneVsRest;
import org.apache.spark.ml.classification.OneVsRestModel;
@@ -31,6 +32,7 @@ import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.StructField;
+// $example off$
/**
* An example runner for Multiclass to Binary Reduction with One Vs Rest.
@@ -61,6 +63,7 @@ public class JavaOneVsRestExample {
JavaSparkContext jsc = new JavaSparkContext(conf);
SQLContext jsql = new SQLContext(jsc);
+ // $example on$
// configure the base classifier
LogisticRegression classifier = new LogisticRegression()
.setMaxIter(params.maxIter)
@@ -125,6 +128,7 @@ public class JavaOneVsRestExample {
System.out.println(confusionMatrix);
System.out.println();
System.out.println(results);
+ // $example off$
jsc.stop();
}
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java
new file mode 100644
index 0000000000..5a62496660
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.ml;
+
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
+import org.apache.spark.ml.Pipeline;
+import org.apache.spark.ml.PipelineModel;
+import org.apache.spark.ml.PipelineStage;
+import org.apache.spark.ml.classification.RandomForestClassificationModel;
+import org.apache.spark.ml.classification.RandomForestClassifier;
+import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator;
+import org.apache.spark.ml.feature.*;
+import org.apache.spark.sql.DataFrame;
+import org.apache.spark.sql.SQLContext;
+// $example off$
+
+public class JavaRandomForestClassifierExample {
+ public static void main(String[] args) {
+ SparkConf conf = new SparkConf().setAppName("JavaRandomForestClassifierExample");
+ JavaSparkContext jsc = new JavaSparkContext(conf);
+ SQLContext sqlContext = new SQLContext(jsc);
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+
+ // Index labels, adding metadata to the label column.
+ // Fit on whole dataset to include all labels in index.
+ StringIndexerModel labelIndexer = new StringIndexer()
+ .setInputCol("label")
+ .setOutputCol("indexedLabel")
+ .fit(data);
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ VectorIndexerModel featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data);
+
+ // Split the data into training and test sets (30% held out for testing)
+ DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
+ DataFrame trainingData = splits[0];
+ DataFrame testData = splits[1];
+
+ // Train a RandomForest model.
+ RandomForestClassifier rf = new RandomForestClassifier()
+ .setLabelCol("indexedLabel")
+ .setFeaturesCol("indexedFeatures");
+
+ // Convert indexed labels back to original labels.
+ IndexToString labelConverter = new IndexToString()
+ .setInputCol("prediction")
+ .setOutputCol("predictedLabel")
+ .setLabels(labelIndexer.labels());
+
+ // Chain indexers and forest in a Pipeline
+ Pipeline pipeline = new Pipeline()
+ .setStages(new PipelineStage[] {labelIndexer, featureIndexer, rf, labelConverter});
+
+ // Train model. This also runs the indexers.
+ PipelineModel model = pipeline.fit(trainingData);
+
+ // Make predictions.
+ DataFrame predictions = model.transform(testData);
+
+ // Select example rows to display.
+ predictions.select("predictedLabel", "label", "features").show(5);
+
+ // Select (prediction, true label) and compute test error
+ MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
+ .setLabelCol("indexedLabel")
+ .setPredictionCol("prediction")
+ .setMetricName("precision");
+ double accuracy = evaluator.evaluate(predictions);
+ System.out.println("Test Error = " + (1.0 - accuracy));
+
+ RandomForestClassificationModel rfModel = (RandomForestClassificationModel)(model.stages()[2]);
+ System.out.println("Learned classification forest model:\n" + rfModel.toDebugString());
+ // $example off$
+
+ jsc.stop();
+ }
+}
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java
new file mode 100644
index 0000000000..05782a0724
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.ml;
+
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
+import org.apache.spark.ml.Pipeline;
+import org.apache.spark.ml.PipelineModel;
+import org.apache.spark.ml.PipelineStage;
+import org.apache.spark.ml.evaluation.RegressionEvaluator;
+import org.apache.spark.ml.feature.VectorIndexer;
+import org.apache.spark.ml.feature.VectorIndexerModel;
+import org.apache.spark.ml.regression.RandomForestRegressionModel;
+import org.apache.spark.ml.regression.RandomForestRegressor;
+import org.apache.spark.sql.DataFrame;
+import org.apache.spark.sql.SQLContext;
+// $example off$
+
+public class JavaRandomForestRegressorExample {
+ public static void main(String[] args) {
+ SparkConf conf = new SparkConf().setAppName("JavaRandomForestRegressorExample");
+ JavaSparkContext jsc = new JavaSparkContext(conf);
+ SQLContext sqlContext = new SQLContext(jsc);
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ VectorIndexerModel featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data);
+
+ // Split the data into training and test sets (30% held out for testing)
+ DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
+ DataFrame trainingData = splits[0];
+ DataFrame testData = splits[1];
+
+ // Train a RandomForest model.
+ RandomForestRegressor rf = new RandomForestRegressor()
+ .setLabelCol("label")
+ .setFeaturesCol("indexedFeatures");
+
+ // Chain indexer and forest in a Pipeline
+ Pipeline pipeline = new Pipeline()
+ .setStages(new PipelineStage[] {featureIndexer, rf});
+
+ // Train model. This also runs the indexer.
+ PipelineModel model = pipeline.fit(trainingData);
+
+ // Make predictions.
+ DataFrame predictions = model.transform(testData);
+
+ // Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5);
+
+ // Select (prediction, true label) and compute test error
+ RegressionEvaluator evaluator = new RegressionEvaluator()
+ .setLabelCol("label")
+ .setPredictionCol("prediction")
+ .setMetricName("rmse");
+ double rmse = evaluator.evaluate(predictions);
+ System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);
+
+ RandomForestRegressionModel rfModel = (RandomForestRegressionModel)(model.stages()[1]);
+ System.out.println("Learned regression forest model:\n" + rfModel.toDebugString());
+ // $example off$
+
+ jsc.stop();
+ }
+}
diff --git a/examples/src/main/python/ml/gradient_boosted_tree_classifier_example.py b/examples/src/main/python/ml/gradient_boosted_tree_classifier_example.py
new file mode 100644
index 0000000000..028497651f
--- /dev/null
+++ b/examples/src/main/python/ml/gradient_boosted_tree_classifier_example.py
@@ -0,0 +1,77 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Gradient Boosted Tree Classifier Example.
+"""
+from __future__ import print_function
+
+import sys
+
+from pyspark import SparkContext, SQLContext
+# $example on$
+from pyspark.ml import Pipeline
+from pyspark.ml.classification import GBTClassifier
+from pyspark.ml.feature import StringIndexer, VectorIndexer
+from pyspark.ml.evaluation import MulticlassClassificationEvaluator
+# $example off$
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="gradient_boosted_tree_classifier_example")
+ sqlContext = SQLContext(sc)
+
+ # $example on$
+ # Load and parse the data file, converting it to a DataFrame.
+ data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ # Index labels, adding metadata to the label column.
+ # Fit on whole dataset to include all labels in index.
+ labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
+ # Automatically identify categorical features, and index them.
+ # Set maxCategories so features with > 4 distinct values are treated as continuous.
+ featureIndexer =\
+ VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
+
+ # Split the data into training and test sets (30% held out for testing)
+ (trainingData, testData) = data.randomSplit([0.7, 0.3])
+
+ # Train a GBT model.
+ gbt = GBTClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", maxIter=10)
+
+ # Chain indexers and GBT in a Pipeline
+ pipeline = Pipeline(stages=[labelIndexer, featureIndexer, gbt])
+
+ # Train model. This also runs the indexers.
+ model = pipeline.fit(trainingData)
+
+ # Make predictions.
+ predictions = model.transform(testData)
+
+ # Select example rows to display.
+ predictions.select("prediction", "indexedLabel", "features").show(5)
+
+ # Select (prediction, true label) and compute test error
+ evaluator = MulticlassClassificationEvaluator(
+ labelCol="indexedLabel", predictionCol="prediction", metricName="precision")
+ accuracy = evaluator.evaluate(predictions)
+ print("Test Error = %g" % (1.0 - accuracy))
+
+ gbtModel = model.stages[2]
+ print(gbtModel) # summary only
+ # $example off$
+
+ sc.stop()
diff --git a/examples/src/main/python/ml/gradient_boosted_tree_regressor_example.py b/examples/src/main/python/ml/gradient_boosted_tree_regressor_example.py
new file mode 100644
index 0000000000..4246e133a9
--- /dev/null
+++ b/examples/src/main/python/ml/gradient_boosted_tree_regressor_example.py
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Gradient Boosted Tree Regressor Example.
+"""
+from __future__ import print_function
+
+import sys
+
+from pyspark import SparkContext, SQLContext
+# $example on$
+from pyspark.ml import Pipeline
+from pyspark.ml.regression import GBTRegressor
+from pyspark.ml.feature import VectorIndexer
+from pyspark.ml.evaluation import RegressionEvaluator
+# $example off$
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="gradient_boosted_tree_regressor_example")
+ sqlContext = SQLContext(sc)
+
+ # $example on$
+ # Load and parse the data file, converting it to a DataFrame.
+ data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ # Automatically identify categorical features, and index them.
+ # Set maxCategories so features with > 4 distinct values are treated as continuous.
+ featureIndexer =\
+ VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
+
+ # Split the data into training and test sets (30% held out for testing)
+ (trainingData, testData) = data.randomSplit([0.7, 0.3])
+
+ # Train a GBT model.
+ gbt = GBTRegressor(featuresCol="indexedFeatures", maxIter=10)
+
+ # Chain indexer and GBT in a Pipeline
+ pipeline = Pipeline(stages=[featureIndexer, gbt])
+
+ # Train model. This also runs the indexer.
+ model = pipeline.fit(trainingData)
+
+ # Make predictions.
+ predictions = model.transform(testData)
+
+ # Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5)
+
+ # Select (prediction, true label) and compute test error
+ evaluator = RegressionEvaluator(
+ labelCol="label", predictionCol="prediction", metricName="rmse")
+ rmse = evaluator.evaluate(predictions)
+ print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
+
+ gbtModel = model.stages[1]
+ print(gbtModel) # summary only
+ # $example off$
+
+ sc.stop()
diff --git a/examples/src/main/python/ml/random_forest_classifier_example.py b/examples/src/main/python/ml/random_forest_classifier_example.py
new file mode 100644
index 0000000000..b3530d4f41
--- /dev/null
+++ b/examples/src/main/python/ml/random_forest_classifier_example.py
@@ -0,0 +1,77 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Random Forest Classifier Example.
+"""
+from __future__ import print_function
+
+import sys
+
+from pyspark import SparkContext, SQLContext
+# $example on$
+from pyspark.ml import Pipeline
+from pyspark.ml.classification import RandomForestClassifier
+from pyspark.ml.feature import StringIndexer, VectorIndexer
+from pyspark.ml.evaluation import MulticlassClassificationEvaluator
+# $example off$
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="random_forest_classifier_example")
+ sqlContext = SQLContext(sc)
+
+ # $example on$
+ # Load and parse the data file, converting it to a DataFrame.
+ data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ # Index labels, adding metadata to the label column.
+ # Fit on whole dataset to include all labels in index.
+ labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
+ # Automatically identify categorical features, and index them.
+ # Set maxCategories so features with > 4 distinct values are treated as continuous.
+ featureIndexer =\
+ VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
+
+ # Split the data into training and test sets (30% held out for testing)
+ (trainingData, testData) = data.randomSplit([0.7, 0.3])
+
+ # Train a RandomForest model.
+ rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
+
+ # Chain indexers and forest in a Pipeline
+ pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf])
+
+ # Train model. This also runs the indexers.
+ model = pipeline.fit(trainingData)
+
+ # Make predictions.
+ predictions = model.transform(testData)
+
+ # Select example rows to display.
+ predictions.select("prediction", "indexedLabel", "features").show(5)
+
+ # Select (prediction, true label) and compute test error
+ evaluator = MulticlassClassificationEvaluator(
+ labelCol="indexedLabel", predictionCol="prediction", metricName="precision")
+ accuracy = evaluator.evaluate(predictions)
+ print("Test Error = %g" % (1.0 - accuracy))
+
+ rfModel = model.stages[2]
+ print(rfModel) # summary only
+ # $example off$
+
+ sc.stop()
diff --git a/examples/src/main/python/ml/random_forest_regressor_example.py b/examples/src/main/python/ml/random_forest_regressor_example.py
new file mode 100644
index 0000000000..b59c7c9414
--- /dev/null
+++ b/examples/src/main/python/ml/random_forest_regressor_example.py
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Random Forest Regressor Example.
+"""
+from __future__ import print_function
+
+import sys
+
+from pyspark import SparkContext, SQLContext
+# $example on$
+from pyspark.ml import Pipeline
+from pyspark.ml.regression import RandomForestRegressor
+from pyspark.ml.feature import VectorIndexer
+from pyspark.ml.evaluation import RegressionEvaluator
+# $example off$
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="random_forest_regressor_example")
+ sqlContext = SQLContext(sc)
+
+ # $example on$
+ # Load and parse the data file, converting it to a DataFrame.
+ data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ # Automatically identify categorical features, and index them.
+ # Set maxCategories so features with > 4 distinct values are treated as continuous.
+ featureIndexer =\
+ VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
+
+ # Split the data into training and test sets (30% held out for testing)
+ (trainingData, testData) = data.randomSplit([0.7, 0.3])
+
+ # Train a RandomForest model.
+ rf = RandomForestRegressor(featuresCol="indexedFeatures")
+
+ # Chain indexer and forest in a Pipeline
+ pipeline = Pipeline(stages=[featureIndexer, rf])
+
+ # Train model. This also runs the indexer.
+ model = pipeline.fit(trainingData)
+
+ # Make predictions.
+ predictions = model.transform(testData)
+
+ # Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5)
+
+ # Select (prediction, true label) and compute test error
+ evaluator = RegressionEvaluator(
+ labelCol="label", predictionCol="prediction", metricName="rmse")
+ rmse = evaluator.evaluate(predictions)
+ print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
+
+ rfModel = model.stages[1]
+ print(rfModel) # summary only
+ # $example off$
+
+ sc.stop()
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
new file mode 100644
index 0000000000..474af7db4b
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.ml
+
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.{SparkConf, SparkContext}
+// $example on$
+import org.apache.spark.ml.Pipeline
+import org.apache.spark.ml.classification.{GBTClassificationModel, GBTClassifier}
+import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
+import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorIndexer}
+// $example off$
+
+/**
+ * Example: train a gradient-boosted-tree classifier inside an ML Pipeline
+ * (label/feature indexing, GBT fit, label decoding) and print the test error.
+ */
+object GradientBoostedTreeClassifierExample {
+ def main(args: Array[String]): Unit = {
+ val conf = new SparkConf().setAppName("GradientBoostedTreeClassifierExample")
+ val sc = new SparkContext(conf)
+ val sqlContext = new SQLContext(sc)
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ // Index labels, adding metadata to the label column.
+ // Fit on whole dataset to include all labels in index.
+ val labelIndexer = new StringIndexer()
+ .setInputCol("label")
+ .setOutputCol("indexedLabel")
+ .fit(data)
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ val featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data)
+
+ // Split the data into training and test sets (30% held out for testing)
+ val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
+
+ // Train a GBT model.
+ val gbt = new GBTClassifier()
+ .setLabelCol("indexedLabel")
+ .setFeaturesCol("indexedFeatures")
+ .setMaxIter(10)
+
+ // Convert indexed labels back to original labels.
+ val labelConverter = new IndexToString()
+ .setInputCol("prediction")
+ .setOutputCol("predictedLabel")
+ .setLabels(labelIndexer.labels)
+
+ // Chain indexers and GBT in a Pipeline
+ val pipeline = new Pipeline()
+ .setStages(Array(labelIndexer, featureIndexer, gbt, labelConverter))
+
+ // Train model. This also runs the indexers.
+ val model = pipeline.fit(trainingData)
+
+ // Make predictions.
+ val predictions = model.transform(testData)
+
+ // Select example rows to display.
+ predictions.select("predictedLabel", "label", "features").show(5)
+
+ // Select (prediction, true label) and compute test error
+ // NOTE(review): "precision" here is presumably micro-averaged precision, which this
+ // code treats as accuracy — confirm against this Spark version's evaluator docs.
+ val evaluator = new MulticlassClassificationEvaluator()
+ .setLabelCol("indexedLabel")
+ .setPredictionCol("prediction")
+ .setMetricName("precision")
+ val accuracy = evaluator.evaluate(predictions)
+ println("Test Error = " + (1.0 - accuracy))
+
+ // Stage 2 of the fitted pipeline is the GBTClassificationModel
+ // (stages 0-1 are the indexers, stage 3 the label converter).
+ val gbtModel = model.stages(2).asInstanceOf[GBTClassificationModel]
+ println("Learned classification GBT model:\n" + gbtModel.toDebugString)
+ // $example off$
+
+ sc.stop()
+ }
+}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
new file mode 100644
index 0000000000..da1cd9c2ce
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.ml
+
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.{SparkConf, SparkContext}
+// $example on$
+import org.apache.spark.ml.Pipeline
+import org.apache.spark.ml.evaluation.RegressionEvaluator
+import org.apache.spark.ml.feature.VectorIndexer
+import org.apache.spark.ml.regression.{GBTRegressionModel, GBTRegressor}
+// $example off$
+
+/**
+ * Example: train a gradient-boosted-tree regressor inside an ML Pipeline
+ * (feature indexing + GBT fit) and print the test RMSE.
+ */
+object GradientBoostedTreeRegressorExample {
+ def main(args: Array[String]): Unit = {
+ val conf = new SparkConf().setAppName("GradientBoostedTreeRegressorExample")
+ val sc = new SparkContext(conf)
+ val sqlContext = new SQLContext(sc)
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ val featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data)
+
+ // Split the data into training and test sets (30% held out for testing)
+ // NOTE(review): unseeded random split, so the reported RMSE varies run to run.
+ val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
+
+ // Train a GBT model.
+ val gbt = new GBTRegressor()
+ .setLabelCol("label")
+ .setFeaturesCol("indexedFeatures")
+ .setMaxIter(10)
+
+ // Chain indexer and GBT in a Pipeline
+ val pipeline = new Pipeline()
+ .setStages(Array(featureIndexer, gbt))
+
+ // Train model. This also runs the indexer.
+ val model = pipeline.fit(trainingData)
+
+ // Make predictions.
+ val predictions = model.transform(testData)
+
+ // Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5)
+
+ // Select (prediction, true label) and compute test error
+ val evaluator = new RegressionEvaluator()
+ .setLabelCol("label")
+ .setPredictionCol("prediction")
+ .setMetricName("rmse")
+ val rmse = evaluator.evaluate(predictions)
+ println("Root Mean Squared Error (RMSE) on test data = " + rmse)
+
+ // Stage 1 of the fitted pipeline is the GBTRegressionModel (stage 0 is the indexer).
+ val gbtModel = model.stages(1).asInstanceOf[GBTRegressionModel]
+ println("Learned regression GBT model:\n" + gbtModel.toDebugString)
+ // $example off$
+
+ sc.stop()
+ }
+}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
index 8e4f1b09a2..b46faea571 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
@@ -23,12 +23,14 @@ import java.util.concurrent.TimeUnit.{NANOSECONDS => NANO}
import scopt.OptionParser
import org.apache.spark.{SparkContext, SparkConf}
+// $example on$
import org.apache.spark.examples.mllib.AbstractParams
import org.apache.spark.ml.classification.{OneVsRest, LogisticRegression}
import org.apache.spark.ml.util.MetadataUtils
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.sql.DataFrame
+// $example off$
import org.apache.spark.sql.SQLContext
/**
@@ -112,6 +114,7 @@ object OneVsRestExample {
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
+ // $example on$
val inputData = sqlContext.read.format("libsvm").load(params.input)
// compute the train/test split: if testInput is not provided use part of input.
val data = params.testInput match {
@@ -172,6 +175,7 @@ object OneVsRestExample {
println("label\tfpr")
println(fprs.map {case (label, fpr) => label + "\t" + fpr}.mkString("\n"))
+ // $example off$
sc.stop()
}
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
new file mode 100644
index 0000000000..e79176ca6c
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.ml
+
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.{SparkConf, SparkContext}
+// $example on$
+import org.apache.spark.ml.Pipeline
+import org.apache.spark.ml.classification.{RandomForestClassificationModel, RandomForestClassifier}
+import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
+import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorIndexer}
+// $example off$
+
+/**
+ * Example: train a random-forest classifier inside an ML Pipeline
+ * (label/feature indexing, forest fit, label decoding) and print the test error.
+ */
+object RandomForestClassifierExample {
+ def main(args: Array[String]): Unit = {
+ val conf = new SparkConf().setAppName("RandomForestClassifierExample")
+ val sc = new SparkContext(conf)
+ val sqlContext = new SQLContext(sc)
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ // Index labels, adding metadata to the label column.
+ // Fit on whole dataset to include all labels in index.
+ val labelIndexer = new StringIndexer()
+ .setInputCol("label")
+ .setOutputCol("indexedLabel")
+ .fit(data)
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ val featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data)
+
+ // Split the data into training and test sets (30% held out for testing)
+ val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
+
+ // Train a RandomForest model.
+ val rf = new RandomForestClassifier()
+ .setLabelCol("indexedLabel")
+ .setFeaturesCol("indexedFeatures")
+ .setNumTrees(10)
+
+ // Convert indexed labels back to original labels.
+ val labelConverter = new IndexToString()
+ .setInputCol("prediction")
+ .setOutputCol("predictedLabel")
+ .setLabels(labelIndexer.labels)
+
+ // Chain indexers and forest in a Pipeline
+ val pipeline = new Pipeline()
+ .setStages(Array(labelIndexer, featureIndexer, rf, labelConverter))
+
+ // Train model. This also runs the indexers.
+ val model = pipeline.fit(trainingData)
+
+ // Make predictions.
+ val predictions = model.transform(testData)
+
+ // Select example rows to display.
+ predictions.select("predictedLabel", "label", "features").show(5)
+
+ // Select (prediction, true label) and compute test error
+ // NOTE(review): "precision" here is presumably micro-averaged precision, which this
+ // code treats as accuracy — confirm against this Spark version's evaluator docs.
+ val evaluator = new MulticlassClassificationEvaluator()
+ .setLabelCol("indexedLabel")
+ .setPredictionCol("prediction")
+ .setMetricName("precision")
+ val accuracy = evaluator.evaluate(predictions)
+ println("Test Error = " + (1.0 - accuracy))
+
+ // Stage 2 of the fitted pipeline is the RandomForestClassificationModel
+ // (stages 0-1 are the indexers, stage 3 the label converter).
+ val rfModel = model.stages(2).asInstanceOf[RandomForestClassificationModel]
+ println("Learned classification forest model:\n" + rfModel.toDebugString)
+ // $example off$
+
+ sc.stop()
+ }
+}
+// scalastyle:on println
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
new file mode 100644
index 0000000000..acec1437a1
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.ml
+
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.{SparkConf, SparkContext}
+// $example on$
+import org.apache.spark.ml.Pipeline
+import org.apache.spark.ml.evaluation.RegressionEvaluator
+import org.apache.spark.ml.feature.VectorIndexer
+import org.apache.spark.ml.regression.{RandomForestRegressionModel, RandomForestRegressor}
+// $example off$
+
+/**
+ * Example: train a random-forest regressor inside an ML Pipeline
+ * (feature indexing + forest fit) and print the test RMSE.
+ */
+object RandomForestRegressorExample {
+ def main(args: Array[String]): Unit = {
+ val conf = new SparkConf().setAppName("RandomForestRegressorExample")
+ val sc = new SparkContext(conf)
+ val sqlContext = new SQLContext(sc)
+
+ // $example on$
+ // Load and parse the data file, converting it to a DataFrame.
+ val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
+
+ // Automatically identify categorical features, and index them.
+ // Set maxCategories so features with > 4 distinct values are treated as continuous.
+ val featureIndexer = new VectorIndexer()
+ .setInputCol("features")
+ .setOutputCol("indexedFeatures")
+ .setMaxCategories(4)
+ .fit(data)
+
+ // Split the data into training and test sets (30% held out for testing)
+ // NOTE(review): unseeded random split, so the reported RMSE varies run to run.
+ val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
+
+ // Train a RandomForest model.
+ // Uses default numTrees/maxDepth; only label and feature columns are set.
+ val rf = new RandomForestRegressor()
+ .setLabelCol("label")
+ .setFeaturesCol("indexedFeatures")
+
+ // Chain indexer and forest in a Pipeline
+ val pipeline = new Pipeline()
+ .setStages(Array(featureIndexer, rf))
+
+ // Train model. This also runs the indexer.
+ val model = pipeline.fit(trainingData)
+
+ // Make predictions.
+ val predictions = model.transform(testData)
+
+ // Select example rows to display.
+ predictions.select("prediction", "label", "features").show(5)
+
+ // Select (prediction, true label) and compute test error
+ val evaluator = new RegressionEvaluator()
+ .setLabelCol("label")
+ .setPredictionCol("prediction")
+ .setMetricName("rmse")
+ val rmse = evaluator.evaluate(predictions)
+ println("Root Mean Squared Error (RMSE) on test data = " + rmse)
+
+ // Stage 1 of the fitted pipeline is the RandomForestRegressionModel (stage 0 is the indexer).
+ val rfModel = model.stages(1).asInstanceOf[RandomForestRegressionModel]
+ println("Learned regression forest model:\n" + rfModel.toDebugString)
+ // $example off$
+
+ sc.stop()
+ }
+}
+// scalastyle:on println