author     Sean Owen <sowen@cloudera.com>        2015-07-31 13:45:28 -0700
committer  Xiangrui Meng <meng@databricks.com>   2015-07-31 13:45:28 -0700
commit     873ab0f9692d8ea6220abdb8d9200041068372a8 (patch)
tree       e1116f9c5a53c796943ad189be61aceca5f31653 /docs/mllib-evaluation-metrics.md
parent     815c8245f47e61226a04e2e02f508457b5e9e536 (diff)
[SPARK-9490] [DOCS] [MLLIB] MLlib evaluation metrics guide example python code uses deprecated print statement
Use print(x) not print x for Python 3 in eval examples

CC sethah mengxr -- just wanted to close this out before 1.5

Author: Sean Owen <sowen@cloudera.com>

Closes #7822 from srowen/SPARK-9490 and squashes the following commits:

01abeba [Sean Owen] Change "print x" to "print(x)" in the rest of the docs too
bd7f7fb [Sean Owen] Use print(x) not print x for Python 3 in eval examples
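Background (not part of the original patch): `print` is a statement in Python 2 but a built-in function in Python 3, so the statement form is a syntax error under Python 3, while the call form works in both. A minimal sketch:

{% highlight python %}
# Python 2 only -- under Python 3 this line is a SyntaxError:
# print "Area under PR = %s" % 0.5

# Function-call form, valid in both Python 2 and Python 3:
print("Area under PR = %s" % 0.5)
{% endhighlight %}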
Diffstat (limited to 'docs/mllib-evaluation-metrics.md')
-rw-r--r--  docs/mllib-evaluation-metrics.md  66
1 files changed, 33 insertions, 33 deletions
diff --git a/docs/mllib-evaluation-metrics.md b/docs/mllib-evaluation-metrics.md
index 4ca0bb06b2..7066d5c974 100644
--- a/docs/mllib-evaluation-metrics.md
+++ b/docs/mllib-evaluation-metrics.md
@@ -302,10 +302,10 @@ predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp
metrics = BinaryClassificationMetrics(predictionAndLabels)
# Area under precision-recall curve
-print "Area under PR = %s" % metrics.areaUnderPR
+print("Area under PR = %s" % metrics.areaUnderPR)
# Area under ROC curve
-print "Area under ROC = %s" % metrics.areaUnderROC
+print("Area under ROC = %s" % metrics.areaUnderROC)
{% endhighlight %}
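Not part of the original patch: for readers trying the updated snippet on its own, here is a minimal sketch that feeds BinaryClassificationMetrics a hand-built RDD of (score, label) pairs instead of model output. It assumes a SparkContext `sc` is available (as in the guide's other snippets); the sample values are illustrative only.

{% highlight python %}
from pyspark.mllib.evaluation import BinaryClassificationMetrics

# Illustrative (score, label) pairs; in the guide these come from model.predict
scoreAndLabels = sc.parallelize([
    (0.1, 0.0), (0.4, 0.0), (0.8, 1.0), (0.9, 1.0)])

metrics = BinaryClassificationMetrics(scoreAndLabels)
print("Area under PR = %s" % metrics.areaUnderPR)
print("Area under ROC = %s" % metrics.areaUnderROC)
{% endhighlight %}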
@@ -606,24 +606,24 @@ metrics = MulticlassMetrics(predictionAndLabels)
precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
-print "Summary Stats"
-print "Precision = %s" % precision
-print "Recall = %s" % recall
-print "F1 Score = %s" % f1Score
+print("Summary Stats")
+print("Precision = %s" % precision)
+print("Recall = %s" % recall)
+print("F1 Score = %s" % f1Score)
# Statistics by class
labels = data.map(lambda lp: lp.label).distinct().collect()
for label in sorted(labels):
- print "Class %s precision = %s" % (label, metrics.precision(label))
- print "Class %s recall = %s" % (label, metrics.recall(label))
- print "Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0))
+ print("Class %s precision = %s" % (label, metrics.precision(label)))
+ print("Class %s recall = %s" % (label, metrics.recall(label)))
+ print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))
# Weighted stats
-print "Weighted recall = %s" % metrics.weightedRecall
-print "Weighted precision = %s" % metrics.weightedPrecision
-print "Weighted F(1) Score = %s" % metrics.weightedFMeasure()
-print "Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5)
-print "Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate
+print("Weighted recall = %s" % metrics.weightedRecall)
+print("Weighted precision = %s" % metrics.weightedPrecision)
+print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
+print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
+print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
{% endhighlight %}
</div>
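Likewise not part of the original patch: a minimal sketch of the multiclass case with a hand-built (prediction, label) RDD, assuming an existing SparkContext `sc`; the values are illustrative only.

{% highlight python %}
from pyspark.mllib.evaluation import MulticlassMetrics

# Illustrative (prediction, label) pairs for three classes
predictionAndLabels = sc.parallelize([
    (0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)])

metrics = MulticlassMetrics(predictionAndLabels)
print("Precision = %s" % metrics.precision())
print("Recall = %s" % metrics.recall())
print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
{% endhighlight %}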
@@ -881,28 +881,28 @@ scoreAndLabels = sc.parallelize([
metrics = MultilabelMetrics(scoreAndLabels)
# Summary stats
-print "Recall = %s" % metrics.recall()
-print "Precision = %s" % metrics.precision()
-print "F1 measure = %s" % metrics.f1Measure()
-print "Accuracy = %s" % metrics.accuracy
+print("Recall = %s" % metrics.recall())
+print("Precision = %s" % metrics.precision())
+print("F1 measure = %s" % metrics.f1Measure())
+print("Accuracy = %s" % metrics.accuracy)
# Individual label stats
labels = scoreAndLabels.flatMap(lambda x: x[1]).distinct().collect()
for label in labels:
- print "Class %s precision = %s" % (label, metrics.precision(label))
- print "Class %s recall = %s" % (label, metrics.recall(label))
- print "Class %s F1 Measure = %s" % (label, metrics.f1Measure(label))
+ print("Class %s precision = %s" % (label, metrics.precision(label)))
+ print("Class %s recall = %s" % (label, metrics.recall(label)))
+ print("Class %s F1 Measure = %s" % (label, metrics.f1Measure(label)))
# Micro stats
-print "Micro precision = %s" % metrics.microPrecision
-print "Micro recall = %s" % metrics.microRecall
-print "Micro F1 measure = %s" % metrics.microF1Measure
+print("Micro precision = %s" % metrics.microPrecision)
+print("Micro recall = %s" % metrics.microRecall)
+print("Micro F1 measure = %s" % metrics.microF1Measure)
# Hamming loss
-print "Hamming loss = %s" % metrics.hammingLoss
+print("Hamming loss = %s" % metrics.hammingLoss)
# Subset accuracy
-print "Subset accuracy = %s" % metrics.subsetAccuracy
+print("Subset accuracy = %s" % metrics.subsetAccuracy)
{% endhighlight %}
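Again not part of the original patch: a minimal multilabel sketch, assuming an existing SparkContext `sc`. Each pair is (predicted labels, true labels), both lists of doubles; the values are illustrative only.

{% highlight python %}
from pyspark.mllib.evaluation import MultilabelMetrics

scoreAndLabels = sc.parallelize([
    ([0.0, 1.0], [0.0, 2.0]),
    ([0.0, 2.0], [0.0, 1.0]),
    ([], [0.0]),
    ([2.0], [2.0])])

metrics = MultilabelMetrics(scoreAndLabels)
print("Recall = %s" % metrics.recall())
print("Precision = %s" % metrics.precision())
print("Hamming loss = %s" % metrics.hammingLoss)
{% endhighlight %}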
@@ -1283,10 +1283,10 @@ scoreAndLabels = predictions.join(ratingsTuple).map(lambda tup: tup[1])
metrics = RegressionMetrics(scoreAndLabels)
# Root mean squared error
-print "RMSE = %s" % metrics.rootMeanSquaredError
+print("RMSE = %s" % metrics.rootMeanSquaredError)
# R-squared
-print "R-squared = %s" % metrics.r2
+print("R-squared = %s" % metrics.r2)
{% endhighlight %}
@@ -1479,17 +1479,17 @@ valuesAndPreds = parsedData.map(lambda p: (float(model.predict(p.features)), p.l
metrics = RegressionMetrics(valuesAndPreds)
# Squared Error
-print "MSE = %s" % metrics.meanSquaredError
-print "RMSE = %s" % metrics.rootMeanSquaredError
+print("MSE = %s" % metrics.meanSquaredError)
+print("RMSE = %s" % metrics.rootMeanSquaredError)
# R-squared
-print "R-squared = %s" % metrics.r2
+print("R-squared = %s" % metrics.r2)
# Mean absolute error
-print "MAE = %s" % metrics.meanAbsoluteError
+print("MAE = %s" % metrics.meanAbsoluteError)
# Explained variance
-print "Explained variance = %s" % metrics.explainedVariance
+print("Explained variance = %s" % metrics.explainedVariance)
{% endhighlight %}
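Finally, not part of the original patch: a minimal regression sketch covering both RegressionMetrics hunks above, assuming an existing SparkContext `sc`; the (prediction, observation) pairs are illustrative only.

{% highlight python %}
from pyspark.mllib.evaluation import RegressionMetrics

# Illustrative (prediction, observation) pairs
valuesAndPreds = sc.parallelize([
    (2.5, 3.0), (0.0, -0.5), (2.1, 2.0), (7.8, 7.0)])

metrics = RegressionMetrics(valuesAndPreds)
print("MSE = %s" % metrics.meanSquaredError)
print("RMSE = %s" % metrics.rootMeanSquaredError)
print("R-squared = %s" % metrics.r2)
print("MAE = %s" % metrics.meanAbsoluteError)
{% endhighlight %}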