author	sachin aggarwal <different.sachin@gmail.com>	2015-11-09 14:25:42 -0800
committer	Xiangrui Meng <meng@databricks.com>	2015-11-09 14:25:42 -0800
commit	51d41e4b1a3a25a3fde3a4345afcfe4766023d23 (patch)
tree	1b70dbcd2e113257ded3851c9adb4bc30449915c /docs
parent	5039a49b636325f321daa089971107003fae9d4b (diff)
[SPARK-11552][DOCS] Replaced example code in ml-decision-tree.md using include_example
I have tested it locally and it is working fine; please review.

Author: sachin aggarwal <different.sachin@gmail.com>

Closes #9539 from agsachin/SPARK-11552-real.
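For context: `include_example` is a Jekyll tag in Spark's docs build that inlines source from the files under `examples/src/main/`, so the documentation shows tested, runnable code instead of hand-maintained copies. A minimal sketch of the convention, assuming the marker names used by `docs/_plugins/include_example.rb`: the doc page carries a single tag such as `{% include_example scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala %}`, and the plugin renders only the lines between the comment markers in that file.

```scala
// Sketch of the marker convention inside an example source file.
// Only the code between the markers appears in the rendered docs;
// setup and teardown boilerplate stays hidden.
object MarkerSketch { // illustrative name, not part of this patch
  def main(args: Array[String]): Unit = {
    // $example on$
    println("this line is rendered into the documentation")
    // $example off$
  }
}
```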
Diffstat (limited to 'docs')
-rw-r--r--	docs/ml-decision-tree.md | 338
1 file changed, 8 insertions, 330 deletions
diff --git a/docs/ml-decision-tree.md b/docs/ml-decision-tree.md
index 542819e93e..2bfac6f6c8 100644
--- a/docs/ml-decision-tree.md
+++ b/docs/ml-decision-tree.md
@@ -118,196 +118,24 @@ We use two feature transformers to prepare the data; these help index categories
More details on parameters can be found in the [Scala API documentation](api/scala/index.html#org.apache.spark.ml.classification.DecisionTreeClassifier).
-{% highlight scala %}
-import org.apache.spark.ml.Pipeline
-import org.apache.spark.ml.classification.DecisionTreeClassifier
-import org.apache.spark.ml.classification.DecisionTreeClassificationModel
-import org.apache.spark.ml.feature.{StringIndexer, IndexToString, VectorIndexer}
-import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
-import org.apache.spark.mllib.util.MLUtils
-
-// Load and parse the data file, converting it to a DataFrame.
-val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()
-
-// Index labels, adding metadata to the label column.
-// Fit on whole dataset to include all labels in index.
-val labelIndexer = new StringIndexer()
- .setInputCol("label")
- .setOutputCol("indexedLabel")
- .fit(data)
-// Automatically identify categorical features, and index them.
-val featureIndexer = new VectorIndexer()
- .setInputCol("features")
- .setOutputCol("indexedFeatures")
- .setMaxCategories(4) // features with > 4 distinct values are treated as continuous
- .fit(data)
-
-// Split the data into training and test sets (30% held out for testing)
-val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
-
-// Train a DecisionTree model.
-val dt = new DecisionTreeClassifier()
- .setLabelCol("indexedLabel")
- .setFeaturesCol("indexedFeatures")
-
-// Convert indexed labels back to original labels.
-val labelConverter = new IndexToString()
- .setInputCol("prediction")
- .setOutputCol("predictedLabel")
- .setLabels(labelIndexer.labels)
-
-// Chain indexers and tree in a Pipeline
-val pipeline = new Pipeline()
- .setStages(Array(labelIndexer, featureIndexer, dt, labelConverter))
-
-// Train model. This also runs the indexers.
-val model = pipeline.fit(trainingData)
-
-// Make predictions.
-val predictions = model.transform(testData)
-
-// Select example rows to display.
-predictions.select("predictedLabel", "label", "features").show(5)
-
-// Select (prediction, true label) and compute test error
-val evaluator = new MulticlassClassificationEvaluator()
- .setLabelCol("indexedLabel")
- .setPredictionCol("prediction")
- .setMetricName("precision")
-val accuracy = evaluator.evaluate(predictions)
-println("Test Error = " + (1.0 - accuracy))
-
-val treeModel = model.stages(2).asInstanceOf[DecisionTreeClassificationModel]
-println("Learned classification tree model:\n" + treeModel.toDebugString)
-{% endhighlight %}
+{% include_example scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala %}
+
</div>
<div data-lang="java" markdown="1">
More details on parameters can be found in the [Java API documentation](api/java/org/apache/spark/ml/classification/DecisionTreeClassifier.html).
-{% highlight java %}
-import org.apache.spark.ml.Pipeline;
-import org.apache.spark.ml.PipelineModel;
-import org.apache.spark.ml.PipelineStage;
-import org.apache.spark.ml.classification.DecisionTreeClassifier;
-import org.apache.spark.ml.classification.DecisionTreeClassificationModel;
-import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator;
-import org.apache.spark.ml.feature.*;
-import org.apache.spark.mllib.regression.LabeledPoint;
-import org.apache.spark.mllib.util.MLUtils;
-import org.apache.spark.rdd.RDD;
-import org.apache.spark.sql.DataFrame;
-
-// Load and parse the data file, converting it to a DataFrame.
-RDD<LabeledPoint> rdd = MLUtils.loadLibSVMFile(sc.sc(), "data/mllib/sample_libsvm_data.txt");
-DataFrame data = jsql.createDataFrame(rdd, LabeledPoint.class);
-
-// Index labels, adding metadata to the label column.
-// Fit on whole dataset to include all labels in index.
-StringIndexerModel labelIndexer = new StringIndexer()
- .setInputCol("label")
- .setOutputCol("indexedLabel")
- .fit(data);
-// Automatically identify categorical features, and index them.
-VectorIndexerModel featureIndexer = new VectorIndexer()
- .setInputCol("features")
- .setOutputCol("indexedFeatures")
- .setMaxCategories(4) // features with > 4 distinct values are treated as continuous
- .fit(data);
-
-// Split the data into training and test sets (30% held out for testing)
-DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
-DataFrame trainingData = splits[0];
-DataFrame testData = splits[1];
-
-// Train a DecisionTree model.
-DecisionTreeClassifier dt = new DecisionTreeClassifier()
- .setLabelCol("indexedLabel")
- .setFeaturesCol("indexedFeatures");
-
-// Convert indexed labels back to original labels.
-IndexToString labelConverter = new IndexToString()
- .setInputCol("prediction")
- .setOutputCol("predictedLabel")
- .setLabels(labelIndexer.labels());
-
-// Chain indexers and tree in a Pipeline
-Pipeline pipeline = new Pipeline()
- .setStages(new PipelineStage[] {labelIndexer, featureIndexer, dt, labelConverter});
-
-// Train model. This also runs the indexers.
-PipelineModel model = pipeline.fit(trainingData);
-
-// Make predictions.
-DataFrame predictions = model.transform(testData);
-
-// Select example rows to display.
-predictions.select("predictedLabel", "label", "features").show(5);
-
-// Select (prediction, true label) and compute test error
-MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
- .setLabelCol("indexedLabel")
- .setPredictionCol("prediction")
- .setMetricName("precision");
-double accuracy = evaluator.evaluate(predictions);
-System.out.println("Test Error = " + (1.0 - accuracy));
-
-DecisionTreeClassificationModel treeModel =
- (DecisionTreeClassificationModel)(model.stages()[2]);
-System.out.println("Learned classification tree model:\n" + treeModel.toDebugString());
-{% endhighlight %}
+{% include_example java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java %}
+
</div>
<div data-lang="python" markdown="1">
More details on parameters can be found in the [Python API documentation](api/python/pyspark.ml.html#pyspark.ml.classification.DecisionTreeClassifier).
-{% highlight python %}
-from pyspark.ml import Pipeline
-from pyspark.ml.classification import DecisionTreeClassifier
-from pyspark.ml.feature import StringIndexer, VectorIndexer
-from pyspark.ml.evaluation import MulticlassClassificationEvaluator
-from pyspark.mllib.util import MLUtils
-
-# Load and parse the data file, converting it to a DataFrame.
-data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()
-
-# Index labels, adding metadata to the label column.
-# Fit on whole dataset to include all labels in index.
-labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
-# Automatically identify categorical features, and index them.
-# We specify maxCategories so features with > 4 distinct values are treated as continuous.
-featureIndexer =\
- VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
-
-# Split the data into training and test sets (30% held out for testing)
-(trainingData, testData) = data.randomSplit([0.7, 0.3])
-
-# Train a DecisionTree model.
-dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
-
-# Chain indexers and tree in a Pipeline
-pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
-
-# Train model. This also runs the indexers.
-model = pipeline.fit(trainingData)
-
-# Make predictions.
-predictions = model.transform(testData)
-
-# Select example rows to display.
-predictions.select("prediction", "indexedLabel", "features").show(5)
-
-# Select (prediction, true label) and compute test error
-evaluator = MulticlassClassificationEvaluator(
- labelCol="indexedLabel", predictionCol="prediction", metricName="precision")
-accuracy = evaluator.evaluate(predictions)
-print "Test Error = %g" % (1.0 - accuracy)
-treeModel = model.stages[2]
-print treeModel # summary only
-{% endhighlight %}
+{% include_example python/ml/decision_tree_classification_example.py %}
</div>
</div>
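The listings removed above were moved, essentially verbatim, into the standalone example files that the new tags reference. As a sketch of what the Scala classification example would look like, reconstructed from the removed block (the actual committed file may differ in details; the evaluator section is omitted here for brevity):

```scala
// Reconstruction sketch of DecisionTreeClassificationExample.scala,
// based on the removed doc listing above. Only the code between the
// markers is rendered by the include_example tag.
package org.apache.spark.examples.ml

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.DecisionTreeClassifier
import org.apache.spark.ml.feature.{IndexToString, StringIndexer, VectorIndexer}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.SQLContext

object DecisionTreeClassificationExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("DecisionTreeClassificationExample"))
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // $example on$
    // Load and parse the data file, converting it to a DataFrame.
    val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()

    // Index labels, adding metadata to the label column.
    // Fit on whole dataset to include all labels in index.
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("indexedLabel")
      .fit(data)
    // Automatically identify categorical features, and index them.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .setMaxCategories(4) // features with > 4 distinct values are treated as continuous
      .fit(data)

    // Split the data into training and test sets (30% held out for testing).
    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))

    // Train a DecisionTree model.
    val dt = new DecisionTreeClassifier()
      .setLabelCol("indexedLabel")
      .setFeaturesCol("indexedFeatures")

    // Convert indexed labels back to original labels.
    val labelConverter = new IndexToString()
      .setInputCol("prediction")
      .setOutputCol("predictedLabel")
      .setLabels(labelIndexer.labels)

    // Chain indexers and tree in a Pipeline, then train.
    val pipeline = new Pipeline()
      .setStages(Array(labelIndexer, featureIndexer, dt, labelConverter))
    val model = pipeline.fit(trainingData)

    // Make predictions and show a few rows.
    val predictions = model.transform(testData)
    predictions.select("predictedLabel", "label", "features").show(5)
    // $example off$

    sc.stop()
  }
}
```

A practical benefit of the move: the example compiles with the rest of `examples/`, so API drift breaks the build instead of silently leaving stale snippets in the docs, and a broken path in an `include_example` tag should surface at docs build time.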
@@ -323,171 +151,21 @@ We use a feature transformer to index categorical features, adding metadata to t
More details on parameters can be found in the [Scala API documentation](api/scala/index.html#org.apache.spark.ml.regression.DecisionTreeRegressor).
-{% highlight scala %}
-import org.apache.spark.ml.Pipeline
-import org.apache.spark.ml.regression.DecisionTreeRegressor
-import org.apache.spark.ml.regression.DecisionTreeRegressionModel
-import org.apache.spark.ml.feature.VectorIndexer
-import org.apache.spark.ml.evaluation.RegressionEvaluator
-import org.apache.spark.mllib.util.MLUtils
-
-// Load and parse the data file, converting it to a DataFrame.
-val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()
-
-// Automatically identify categorical features, and index them.
-// Here, we treat features with > 4 distinct values as continuous.
-val featureIndexer = new VectorIndexer()
- .setInputCol("features")
- .setOutputCol("indexedFeatures")
- .setMaxCategories(4)
- .fit(data)
-
-// Split the data into training and test sets (30% held out for testing)
-val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
-
-// Train a DecisionTree model.
-val dt = new DecisionTreeRegressor()
- .setLabelCol("label")
- .setFeaturesCol("indexedFeatures")
-
-// Chain indexer and tree in a Pipeline
-val pipeline = new Pipeline()
- .setStages(Array(featureIndexer, dt))
-
-// Train model. This also runs the indexer.
-val model = pipeline.fit(trainingData)
-
-// Make predictions.
-val predictions = model.transform(testData)
-
-// Select example rows to display.
-predictions.select("prediction", "label", "features").show(5)
-
-// Select (prediction, true label) and compute test error
-val evaluator = new RegressionEvaluator()
- .setLabelCol("label")
- .setPredictionCol("prediction")
- .setMetricName("rmse")
-val rmse = evaluator.evaluate(predictions)
-println("Root Mean Squared Error (RMSE) on test data = " + rmse)
-
-val treeModel = model.stages(1).asInstanceOf[DecisionTreeRegressionModel]
-println("Learned regression tree model:\n" + treeModel.toDebugString)
-{% endhighlight %}
+{% include_example scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala %}
</div>
<div data-lang="java" markdown="1">
More details on parameters can be found in the [Java API documentation](api/java/org/apache/spark/ml/regression/DecisionTreeRegressor.html).
-{% highlight java %}
-import org.apache.spark.ml.Pipeline;
-import org.apache.spark.ml.PipelineModel;
-import org.apache.spark.ml.PipelineStage;
-import org.apache.spark.ml.evaluation.RegressionEvaluator;
-import org.apache.spark.ml.feature.VectorIndexer;
-import org.apache.spark.ml.feature.VectorIndexerModel;
-import org.apache.spark.ml.regression.DecisionTreeRegressionModel;
-import org.apache.spark.ml.regression.DecisionTreeRegressor;
-import org.apache.spark.mllib.regression.LabeledPoint;
-import org.apache.spark.mllib.util.MLUtils;
-import org.apache.spark.rdd.RDD;
-import org.apache.spark.sql.DataFrame;
-
-// Load and parse the data file, converting it to a DataFrame.
-RDD<LabeledPoint> rdd = MLUtils.loadLibSVMFile(sc.sc(), "data/mllib/sample_libsvm_data.txt");
-DataFrame data = jsql.createDataFrame(rdd, LabeledPoint.class);
-
-// Automatically identify categorical features, and index them.
-// Set maxCategories so features with > 4 distinct values are treated as continuous.
-VectorIndexerModel featureIndexer = new VectorIndexer()
- .setInputCol("features")
- .setOutputCol("indexedFeatures")
- .setMaxCategories(4)
- .fit(data);
-
-// Split the data into training and test sets (30% held out for testing)
-DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3});
-DataFrame trainingData = splits[0];
-DataFrame testData = splits[1];
-
-// Train a DecisionTree model.
-DecisionTreeRegressor dt = new DecisionTreeRegressor()
- .setFeaturesCol("indexedFeatures");
-
-// Chain indexer and tree in a Pipeline
-Pipeline pipeline = new Pipeline()
- .setStages(new PipelineStage[] {featureIndexer, dt});
-
-// Train model. This also runs the indexer.
-PipelineModel model = pipeline.fit(trainingData);
-
-// Make predictions.
-DataFrame predictions = model.transform(testData);
-
-// Select example rows to display.
-predictions.select("label", "features").show(5);
-
-// Select (prediction, true label) and compute test error
-RegressionEvaluator evaluator = new RegressionEvaluator()
- .setLabelCol("label")
- .setPredictionCol("prediction")
- .setMetricName("rmse");
-double rmse = evaluator.evaluate(predictions);
-System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);
-
-DecisionTreeRegressionModel treeModel =
- (DecisionTreeRegressionModel)(model.stages()[1]);
-System.out.println("Learned regression tree model:\n" + treeModel.toDebugString());
-{% endhighlight %}
+{% include_example java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java %}
</div>
<div data-lang="python" markdown="1">
More details on parameters can be found in the [Python API documentation](api/python/pyspark.ml.html#pyspark.ml.regression.DecisionTreeRegressor).
-{% highlight python %}
-from pyspark.ml import Pipeline
-from pyspark.ml.regression import DecisionTreeRegressor
-from pyspark.ml.feature import VectorIndexer
-from pyspark.ml.evaluation import RegressionEvaluator
-from pyspark.mllib.util import MLUtils
-
-# Load and parse the data file, converting it to a DataFrame.
-data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt").toDF()
-
-# Automatically identify categorical features, and index them.
-# We specify maxCategories so features with > 4 distinct values are treated as continuous.
-featureIndexer =\
- VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
-
-# Split the data into training and test sets (30% held out for testing)
-(trainingData, testData) = data.randomSplit([0.7, 0.3])
-
-# Train a DecisionTree model.
-dt = DecisionTreeRegressor(featuresCol="indexedFeatures")
-
-# Chain indexer and tree in a Pipeline
-pipeline = Pipeline(stages=[featureIndexer, dt])
-
-# Train model. This also runs the indexer.
-model = pipeline.fit(trainingData)
-
-# Make predictions.
-predictions = model.transform(testData)
-
-# Select example rows to display.
-predictions.select("prediction", "label", "features").show(5)
-
-# Select (prediction, true label) and compute test error
-evaluator = RegressionEvaluator(
- labelCol="label", predictionCol="prediction", metricName="rmse")
-rmse = evaluator.evaluate(predictions)
-print "Root Mean Squared Error (RMSE) on test data = %g" % rmse
-
-treeModel = model.stages[1]
-print treeModel # summary only
-{% endhighlight %}
+{% include_example python/ml/decision_tree_regression_example.py %}
</div>
</div>