author     Yanbo Liang <ybliang8@gmail.com>      2015-11-30 15:01:08 -0800
committer  Xiangrui Meng <meng@databricks.com>   2015-11-30 15:01:08 -0800
commit     de64b65f7cf2ac58c1abc310ba547637fdbb8557
tree       2621373b1be8c74814166743055680212deef675 /examples/src/main/python
parent     e232720a65dfb9ae6135cbb7674e35eddd88d625
[SPARK-11975][ML] Remove duplicate mllib example (DT/RF/GBT in Java/Python)
Remove the duplicate mllib examples (DT/RF/GBT in Java/Python). Since we already have tutorial code for DT/RF/GBT classification/regression in Scala/Java/Python, as well as example applications for DT/RF/GBT in Scala, these standalone examples are duplicates and can be removed. cc mengxr

Author: Yanbo Liang <ybliang8@gmail.com>

Closes #9954 from yanboliang/SPARK-11975.
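For context, the tutorial code the commit message points to exercises the same MLlib API as the deleted files. A minimal sketch of the surviving decision-tree classification flow in Python, assembled from calls that appear verbatim in the removed examples below (the data path and parameter values are illustrative, not taken from this commit):

from pyspark import SparkContext
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.util import MLUtils

sc = SparkContext(appName="DecisionTreeClassificationExample")

# Load a LibSVM-format dataset as an RDD of LabeledPoint,
# then hold out 30% for testing.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
(trainingData, testData) = data.randomSplit([0.7, 0.3])

# Train a decision tree classifier; an empty categoricalFeaturesInfo
# means all features are treated as continuous.
model = DecisionTree.trainClassifier(trainingData, numClasses=2,
                                     categoricalFeaturesInfo={},
                                     impurity="gini", maxDepth=4, maxBins=32)

# Evaluate on the held-out test instances and compute the test error.
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(lambda vp: vp[0] != vp[1]).count() \
    / float(testData.count())
print("Test Error = " + str(testErr))

sc.stop()

The GBT and random-forest examples deleted below follow the same load/split/train/evaluate pattern, differing only in the trainer class and its parameters.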
Diffstat (limited to 'examples/src/main/python')
-rwxr-xr-x  examples/src/main/python/mllib/decision_tree_runner.py    144
-rw-r--r--  examples/src/main/python/mllib/gradient_boosted_trees.py   77
-rwxr-xr-x  examples/src/main/python/mllib/random_forest_example.py    90
3 files changed, 0 insertions, 311 deletions
diff --git a/examples/src/main/python/mllib/decision_tree_runner.py b/examples/src/main/python/mllib/decision_tree_runner.py
deleted file mode 100755
index 513ed8fd51..0000000000
--- a/examples/src/main/python/mllib/decision_tree_runner.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Decision tree classification and regression using MLlib.
-
-This example requires NumPy (http://www.numpy.org/).
-"""
-from __future__ import print_function
-
-import numpy
-import os
-import sys
-
-from operator import add
-
-from pyspark import SparkContext
-from pyspark.mllib.regression import LabeledPoint
-from pyspark.mllib.tree import DecisionTree
-from pyspark.mllib.util import MLUtils
-
-
-def getAccuracy(dtModel, data):
- """
- Return accuracy of DecisionTreeModel on the given RDD[LabeledPoint].
- """
- seqOp = (lambda acc, x: acc + (x[0] == x[1]))
- predictions = dtModel.predict(data.map(lambda x: x.features))
- truth = data.map(lambda p: p.label)
- trainCorrect = predictions.zip(truth).aggregate(0, seqOp, add)
- if data.count() == 0:
- return 0
- return trainCorrect / (0.0 + data.count())
-
-
-def getMSE(dtModel, data):
- """
- Return mean squared error (MSE) of DecisionTreeModel on the given
- RDD[LabeledPoint].
- """
- seqOp = (lambda acc, x: acc + numpy.square(x[0] - x[1]))
- predictions = dtModel.predict(data.map(lambda x: x.features))
- truth = data.map(lambda p: p.label)
- trainMSE = predictions.zip(truth).aggregate(0, seqOp, add)
- if data.count() == 0:
- return 0
- return trainMSE / (0.0 + data.count())
-
-
-def reindexClassLabels(data):
- """
- Re-index class labels in a dataset to the range {0,...,numClasses-1}.
- If all labels in that range already appear at least once,
- then the returned RDD is the same one (without a mapping).
- Note: If a label simply does not appear in the data,
- the index will not include it.
- Be aware of this when reindexing subsampled data.
- :param data: RDD of LabeledPoint where labels are integer values
- denoting labels for a classification problem.
- :return: Pair (reindexedData, origToNewLabels) where
- reindexedData is an RDD of LabeledPoint with labels in
- the range {0,...,numClasses-1}, and
- origToNewLabels is a dictionary mapping original labels
- to new labels.
- """
- # classCounts: class --> # examples in class
- classCounts = data.map(lambda x: x.label).countByValue()
- numExamples = sum(classCounts.values())
- sortedClasses = sorted(classCounts.keys())
- numClasses = len(classCounts)
- # origToNewLabels: class --> index in 0,...,numClasses-1
- if (numClasses < 2):
- print("Dataset for classification should have at least 2 classes."
- " The given dataset had only %d classes." % numClasses, file=sys.stderr)
- exit(1)
- origToNewLabels = dict([(sortedClasses[i], i) for i in range(0, numClasses)])
-
- print("numClasses = %d" % numClasses)
- print("Per-class example fractions, counts:")
- print("Class\tFrac\tCount")
- for c in sortedClasses:
- frac = classCounts[c] / (numExamples + 0.0)
- print("%g\t%g\t%d" % (c, frac, classCounts[c]))
-
- if (sortedClasses[0] == 0 and sortedClasses[-1] == numClasses - 1):
- return (data, origToNewLabels)
- else:
- reindexedData = \
- data.map(lambda x: LabeledPoint(origToNewLabels[x.label], x.features))
- return (reindexedData, origToNewLabels)
-
-
-def usage():
- print("Usage: decision_tree_runner [libsvm format data filepath]", file=sys.stderr)
- exit(1)
-
-
-if __name__ == "__main__":
- if len(sys.argv) > 2:
- usage()
- sc = SparkContext(appName="PythonDT")
-
- # Load data.
- dataPath = 'data/mllib/sample_libsvm_data.txt'
- if len(sys.argv) == 2:
- dataPath = sys.argv[1]
- if not os.path.isfile(dataPath):
- sc.stop()
- usage()
- points = MLUtils.loadLibSVMFile(sc, dataPath)
-
- # Re-index class labels if needed.
- (reindexedData, origToNewLabels) = reindexClassLabels(points)
- numClasses = len(origToNewLabels)
-
- # Train a classifier.
- categoricalFeaturesInfo = {} # no categorical features
- model = DecisionTree.trainClassifier(reindexedData, numClasses=numClasses,
- categoricalFeaturesInfo=categoricalFeaturesInfo)
- # Print learned tree and stats.
- print("Trained DecisionTree for classification:")
- print(" Model numNodes: %d" % model.numNodes())
- print(" Model depth: %d" % model.depth())
- print(" Training accuracy: %g" % getAccuracy(model, reindexedData))
- if model.numNodes() < 20:
- print(model.toDebugString())
- else:
- print(model)
-
- sc.stop()
diff --git a/examples/src/main/python/mllib/gradient_boosted_trees.py b/examples/src/main/python/mllib/gradient_boosted_trees.py
deleted file mode 100644
index 781bd61c9d..0000000000
--- a/examples/src/main/python/mllib/gradient_boosted_trees.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Gradient boosted Trees classification and regression using MLlib.
-"""
-from __future__ import print_function
-
-import sys
-
-from pyspark.context import SparkContext
-from pyspark.mllib.tree import GradientBoostedTrees
-from pyspark.mllib.util import MLUtils
-
-
-def testClassification(trainingData, testData):
- # Train a GradientBoostedTrees model.
- # Empty categoricalFeaturesInfo indicates all features are continuous.
- model = GradientBoostedTrees.trainClassifier(trainingData, categoricalFeaturesInfo={},
- numIterations=30, maxDepth=4)
- # Evaluate model on test instances and compute test error
- predictions = model.predict(testData.map(lambda x: x.features))
- labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
- testErr = labelsAndPredictions.filter(lambda v_p: v_p[0] != v_p[1]).count() \
- / float(testData.count())
- print('Test Error = ' + str(testErr))
- print('Learned classification ensemble model:')
- print(model.toDebugString())
-
-
-def testRegression(trainingData, testData):
- # Train a GradientBoostedTrees model.
- # Empty categoricalFeaturesInfo indicates all features are continuous.
- model = GradientBoostedTrees.trainRegressor(trainingData, categoricalFeaturesInfo={},
- numIterations=30, maxDepth=4)
- # Evaluate model on test instances and compute test error
- predictions = model.predict(testData.map(lambda x: x.features))
- labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
- testMSE = labelsAndPredictions.map(lambda vp: (vp[0] - vp[1]) * (vp[0] - vp[1])).sum() \
- / float(testData.count())
- print('Test Mean Squared Error = ' + str(testMSE))
- print('Learned regression ensemble model:')
- print(model.toDebugString())
-
-
-if __name__ == "__main__":
- if len(sys.argv) > 1:
- print("Usage: gradient_boosted_trees", file=sys.stderr)
- exit(1)
- sc = SparkContext(appName="PythonGradientBoostedTrees")
-
- # Load and parse the data file into an RDD of LabeledPoint.
- data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
- # Split the data into training and test sets (30% held out for testing)
- (trainingData, testData) = data.randomSplit([0.7, 0.3])
-
- print('\nRunning example of classification using GradientBoostedTrees\n')
- testClassification(trainingData, testData)
-
- print('\nRunning example of regression using GradientBoostedTrees\n')
- testRegression(trainingData, testData)
-
- sc.stop()
diff --git a/examples/src/main/python/mllib/random_forest_example.py b/examples/src/main/python/mllib/random_forest_example.py
deleted file mode 100755
index 4cfdad868c..0000000000
--- a/examples/src/main/python/mllib/random_forest_example.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Random Forest classification and regression using MLlib.
-
-Note: This example illustrates binary classification.
- For information on multiclass classification, please refer to the decision_tree_runner.py
- example.
-"""
-from __future__ import print_function
-
-import sys
-
-from pyspark.context import SparkContext
-from pyspark.mllib.tree import RandomForest
-from pyspark.mllib.util import MLUtils
-
-
-def testClassification(trainingData, testData):
- # Train a RandomForest model.
- # Empty categoricalFeaturesInfo indicates all features are continuous.
- # Note: Use larger numTrees in practice.
- # Setting featureSubsetStrategy="auto" lets the algorithm choose.
- model = RandomForest.trainClassifier(trainingData, numClasses=2,
- categoricalFeaturesInfo={},
- numTrees=3, featureSubsetStrategy="auto",
- impurity='gini', maxDepth=4, maxBins=32)
-
- # Evaluate model on test instances and compute test error
- predictions = model.predict(testData.map(lambda x: x.features))
- labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
- testErr = labelsAndPredictions.filter(lambda v_p: v_p[0] != v_p[1]).count()\
- / float(testData.count())
- print('Test Error = ' + str(testErr))
- print('Learned classification forest model:')
- print(model.toDebugString())
-
-
-def testRegression(trainingData, testData):
- # Train a RandomForest model.
- # Empty categoricalFeaturesInfo indicates all features are continuous.
- # Note: Use larger numTrees in practice.
- # Setting featureSubsetStrategy="auto" lets the algorithm choose.
- model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo={},
- numTrees=3, featureSubsetStrategy="auto",
- impurity='variance', maxDepth=4, maxBins=32)
-
- # Evaluate model on test instances and compute test error
- predictions = model.predict(testData.map(lambda x: x.features))
- labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
- testMSE = labelsAndPredictions.map(lambda v_p1: (v_p1[0] - v_p1[1]) * (v_p1[0] - v_p1[1]))\
- .sum() / float(testData.count())
- print('Test Mean Squared Error = ' + str(testMSE))
- print('Learned regression forest model:')
- print(model.toDebugString())
-
-
-if __name__ == "__main__":
- if len(sys.argv) > 1:
- print("Usage: random_forest_example", file=sys.stderr)
- exit(1)
- sc = SparkContext(appName="PythonRandomForestExample")
-
- # Load and parse the data file into an RDD of LabeledPoint.
- data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
- # Split the data into training and test sets (30% held out for testing)
- (trainingData, testData) = data.randomSplit([0.7, 0.3])
-
- print('\nRunning example of classification using RandomForest\n')
- testClassification(trainingData, testData)
-
- print('\nRunning example of regression using RandomForest\n')
- testRegression(trainingData, testData)
-
- sc.stop()