author     Vikas Nelamangala <vikasnelamangala@Vikass-MacBook-Pro.local>    2015-11-20 15:18:41 -0800
committer  Xiangrui Meng <meng@databricks.com>    2015-11-20 15:18:41 -0800
commit     ed47b1e660b830e2d4fac8d6df93f634b260393c (patch)
tree       0c3805370f6a088791d7d8767d7ce3e90238b501 /examples/src/main/python
parent     4b84c72dfbb9ddb415fee35f69305b5d7b280891 (diff)
[SPARK-11549][DOCS] Replace example code in mllib-evaluation-metrics.md using include_example
Author: Vikas Nelamangala <vikasnelamangala@Vikass-MacBook-Pro.local>

Closes #9689 from vikasnp/master.
Diffstat (limited to 'examples/src/main/python')
-rw-r--r--    examples/src/main/python/mllib/binary_classification_metrics_example.py    53
-rw-r--r--    examples/src/main/python/mllib/multi_class_metrics_example.py              69
-rw-r--r--    examples/src/main/python/mllib/multi_label_metrics_example.py              61
-rw-r--r--    examples/src/main/python/mllib/ranking_metrics_example.py                  55
-rw-r--r--    examples/src/main/python/mllib/regression_metrics_example.py               59
5 files changed, 297 insertions, 0 deletions
diff --git a/examples/src/main/python/mllib/binary_classification_metrics_example.py b/examples/src/main/python/mllib/binary_classification_metrics_example.py
new file mode 100644
index 0000000000..437acb998a
--- /dev/null
+++ b/examples/src/main/python/mllib/binary_classification_metrics_example.py
@@ -0,0 +1,53 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Binary Classification Metrics Example.
+"""
+from __future__ import print_function
+from pyspark import SparkContext
+# $example on$
+from pyspark.mllib.classification import LogisticRegressionWithLBFGS
+from pyspark.mllib.evaluation import BinaryClassificationMetrics
+from pyspark.mllib.util import MLUtils
+# $example off$
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="BinaryClassificationMetricsExample")
+ # $example on$
+ # Several of the methods available in Scala are currently missing from PySpark
+ # Load training data in LIBSVM format
+ data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_binary_classification_data.txt")
+
+ # Split data into training (60%) and test (40%)
+ training, test = data.randomSplit([0.6, 0.4], seed=11)
+ training.cache()
+
+ # Run training algorithm to build the model
+ model = LogisticRegressionWithLBFGS.train(training)
+
+ # Compute raw scores on the test set
+ predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
+
+ # Instantiate metrics object
+ metrics = BinaryClassificationMetrics(predictionAndLabels)
+
+ # Area under precision-recall curve
+ print("Area under PR = %s" % metrics.areaUnderPR)
+
+ # Area under ROC curve
+ print("Area under ROC = %s" % metrics.areaUnderROC)
+ # $example off$
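Note: each of these examples assumes it is launched from the root of a Spark checkout, so that the relative data/mllib/... paths resolve; a typical invocation would be:

    bin/spark-submit examples/src/main/python/mllib/binary_classification_metrics_example.py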
diff --git a/examples/src/main/python/mllib/multi_class_metrics_example.py b/examples/src/main/python/mllib/multi_class_metrics_example.py
new file mode 100644
index 0000000000..cd56b3c97c
--- /dev/null
+++ b/examples/src/main/python/mllib/multi_class_metrics_example.py
@@ -0,0 +1,69 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# $example on$
+from pyspark.mllib.classification import LogisticRegressionWithLBFGS
+from pyspark.mllib.util import MLUtils
+from pyspark.mllib.evaluation import MulticlassMetrics
+# $example off$
+
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="MultiClassMetricsExample")
+
+ # Several of the methods available in Scala are currently missing from PySpark
+ # $example on$
+ # Load training data in LIBSVM format
+ data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_multiclass_classification_data.txt")
+
+ # Split data into training (60%) and test (40%)
+ training, test = data.randomSplit([0.6, 0.4], seed=11)
+ training.cache()
+
+ # Run training algorithm to build the model
+ model = LogisticRegressionWithLBFGS.train(training, numClasses=3)
+
+ # Compute raw scores on the test set
+ predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
+
+ # Instantiate metrics object
+ metrics = MulticlassMetrics(predictionAndLabels)
+
+ # Overall statistics
+ precision = metrics.precision()
+ recall = metrics.recall()
+ f1Score = metrics.fMeasure()
+ print("Summary Stats")
+ print("Precision = %s" % precision)
+ print("Recall = %s" % recall)
+ print("F1 Score = %s" % f1Score)
+
+ # Statistics by class
+ labels = data.map(lambda lp: lp.label).distinct().collect()
+ for label in sorted(labels):
+ print("Class %s precision = %s" % (label, metrics.precision(label)))
+ print("Class %s recall = %s" % (label, metrics.recall(label)))
+ print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))
+
+ # Weighted stats
+ print("Weighted recall = %s" % metrics.weightedRecall)
+ print("Weighted precision = %s" % metrics.weightedPrecision)
+ print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
+ print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
+ print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
+ # $example off$
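MulticlassMetrics also exposes the full confusion matrix, which the example above does not print. A minimal follow-on sketch, reusing the same metrics object (rows are actual labels, columns are predicted labels, both in ascending label order):

    # Print the confusion matrix as a numpy array
    print("Confusion matrix:\n%s" % metrics.confusionMatrix().toArray())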
diff --git a/examples/src/main/python/mllib/multi_label_metrics_example.py b/examples/src/main/python/mllib/multi_label_metrics_example.py
new file mode 100644
index 0000000000..960ade6597
--- /dev/null
+++ b/examples/src/main/python/mllib/multi_label_metrics_example.py
@@ -0,0 +1,61 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# $example on$
+from pyspark.mllib.evaluation import MultilabelMetrics
+# $example off$
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="MultiLabelMetricsExample")
+ # $example on$
+ scoreAndLabels = sc.parallelize([
+ ([0.0, 1.0], [0.0, 2.0]),
+ ([0.0, 2.0], [0.0, 1.0]),
+ ([], [0.0]),
+ ([2.0], [2.0]),
+ ([2.0, 0.0], [2.0, 0.0]),
+ ([0.0, 1.0, 2.0], [0.0, 1.0]),
+ ([1.0], [1.0, 2.0])])
+
+ # Instantiate metrics object
+ metrics = MultilabelMetrics(scoreAndLabels)
+
+ # Summary stats
+ print("Recall = %s" % metrics.recall())
+ print("Precision = %s" % metrics.precision())
+ print("F1 measure = %s" % metrics.f1Measure())
+ print("Accuracy = %s" % metrics.accuracy)
+
+ # Individual label stats
+ labels = scoreAndLabels.flatMap(lambda x: x[1]).distinct().collect()
+ for label in labels:
+ print("Class %s precision = %s" % (label, metrics.precision(label)))
+ print("Class %s recall = %s" % (label, metrics.recall(label)))
+ print("Class %s F1 Measure = %s" % (label, metrics.f1Measure(label)))
+
+ # Micro stats
+ print("Micro precision = %s" % metrics.microPrecision)
+ print("Micro recall = %s" % metrics.microRecall)
+ print("Micro F1 measure = %s" % metrics.microF1Measure)
+
+ # Hamming loss
+ print("Hamming loss = %s" % metrics.hammingLoss)
+
+ # Subset accuracy
+ print("Subset accuracy = %s" % metrics.subsetAccuracy)
+ # $example off$
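For intuition, the Hamming loss printed above can be cross-checked in pure Python, assuming the standard definition (total size of the prediction/label symmetric differences, divided by the number of examples times the number of distinct labels):

    pairs = [([0.0, 1.0], [0.0, 2.0]), ([0.0, 2.0], [0.0, 1.0]), ([], [0.0]),
             ([2.0], [2.0]), ([2.0, 0.0], [2.0, 0.0]),
             ([0.0, 1.0, 2.0], [0.0, 1.0]), ([1.0], [1.0, 2.0])]
    numLabels = len({l for _, labels in pairs for l in labels})  # 3 distinct labels
    errors = sum(len(set(p) ^ set(l)) for p, l in pairs)         # 7 mismatched entries
    print(errors / float(numLabels * len(pairs)))                # 7 / 21 = 0.333...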
diff --git a/examples/src/main/python/mllib/ranking_metrics_example.py b/examples/src/main/python/mllib/ranking_metrics_example.py
new file mode 100644
index 0000000000..327791966c
--- /dev/null
+++ b/examples/src/main/python/mllib/ranking_metrics_example.py
@@ -0,0 +1,55 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# $example on$
+from pyspark.mllib.recommendation import ALS, Rating
+from pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics
+# $example off$
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="Ranking Metrics Example")
+
+ # Several of the methods available in Scala are currently missing from PySpark
+ # $example on$
+ # Read in the ratings data
+ lines = sc.textFile("data/mllib/sample_movielens_data.txt")
+
+ def parseLine(line):
+ fields = line.split("::")
+ return Rating(int(fields[0]), int(fields[1]), float(fields[2]) - 2.5)
+ ratings = lines.map(parseLine)
+
+ # Train a model to predict user-product ratings
+ model = ALS.train(ratings, 10, 10, 0.01)
+
+ # Get predicted ratings on all existing user-product pairs
+ testData = ratings.map(lambda p: (p.user, p.product))
+ predictions = model.predictAll(testData).map(lambda r: ((r.user, r.product), r.rating))
+
+ ratingsTuple = ratings.map(lambda r: ((r.user, r.product), r.rating))
+ scoreAndLabels = predictions.join(ratingsTuple).map(lambda tup: tup[1])
+
+ # Instantiate regression metrics to compare predicted and actual ratings
+ metrics = RegressionMetrics(scoreAndLabels)
+
+ # Root mean squared error
+ print("RMSE = %s" % metrics.rootMeanSquaredError)
+
+ # R-squared
+ print("R-squared = %s" % metrics.r2)
+ # $example off$
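This file imports RankingMetrics, but the excerpt above only exercises RegressionMetrics on the recovered ratings. As a hedged illustration of the RankingMetrics API (the data here is hypothetical: each element pairs a predicted ranking of items with the list of truly relevant items):

    predictionAndLabels = sc.parallelize([
        ([1, 6, 2, 7, 8], [1, 2, 3, 4, 5]),
        ([4, 1, 5, 6, 2], [1, 2, 3])])
    rankingMetrics = RankingMetrics(predictionAndLabels)
    print("Precision at 3 = %s" % rankingMetrics.precisionAt(3))
    print("Mean average precision = %s" % rankingMetrics.meanAveragePrecision)
    print("NDCG at 5 = %s" % rankingMetrics.ndcgAt(5))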
diff --git a/examples/src/main/python/mllib/regression_metrics_example.py b/examples/src/main/python/mllib/regression_metrics_example.py
new file mode 100644
index 0000000000..a3a83aafd7
--- /dev/null
+++ b/examples/src/main/python/mllib/regression_metrics_example.py
@@ -0,0 +1,59 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# $example on$
+from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
+from pyspark.mllib.evaluation import RegressionMetrics
+from pyspark.mllib.linalg import DenseVector
+# $example off$
+
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+ sc = SparkContext(appName="Regression Metrics Example")
+
+ # $example on$
+ # Load and parse the data
+ def parsePoint(line):
+ values = line.split()
+ return LabeledPoint(float(values[0]),
+ DenseVector([float(x.split(':')[1]) for x in values[1:]]))
+
+ data = sc.textFile("data/mllib/sample_linear_regression_data.txt")
+ parsedData = data.map(parsePoint)
+
+ # Build the model
+ model = LinearRegressionWithSGD.train(parsedData)
+
+ # Get predictions
+ valuesAndPreds = parsedData.map(lambda p: (float(model.predict(p.features)), p.label))
+
+ # Instantiate metrics object
+ metrics = RegressionMetrics(valuesAndPreds)
+
+ # Squared Error
+ print("MSE = %s" % metrics.meanSquaredError)
+ print("RMSE = %s" % metrics.rootMeanSquaredError)
+
+ # R-squared
+ print("R-squared = %s" % metrics.r2)
+
+ # Mean absolute error
+ print("MAE = %s" % metrics.meanAbsoluteError)
+
+ # Explained variance
+ print("Explained variance = %s" % metrics.explainedVariance)
+ # $example off$
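As a quick sanity check, the mean squared error above can be recomputed directly from the same valuesAndPreds RDD; a minimal sketch:

    # Average of squared (prediction - label) differences; should match metrics.meanSquaredError
    mse = valuesAndPreds.map(lambda vp: (vp[0] - vp[1]) ** 2).mean()
    print("MSE recomputed by hand = %s" % mse)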