about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
author: Yanbo Liang <ybliang8@gmail.com> 2015-11-24 09:52:53 -0800
committer: Xiangrui Meng <meng@databricks.com> 2015-11-24 09:52:53 -0800
commit: 56a0aba0a60326ba026056c9a23f3f6ec7258c19 (patch)
tree: dd1777659d53539305f9af848693c8bf1e37dd7f /examples
parent: e5aaae6e1145b8c25c4872b2992ab425da9c6f9b (diff)
download: spark-56a0aba0a60326ba026056c9a23f3f6ec7258c19.tar.gz
spark-56a0aba0a60326ba026056c9a23f3f6ec7258c19.tar.bz2
spark-56a0aba0a60326ba026056c9a23f3f6ec7258c19.zip
[SPARK-11952][ML] Remove duplicate ml examples
Remove duplicate ml examples (only for ml). mengxr Author: Yanbo Liang <ybliang8@gmail.com> Closes #9933 from yanboliang/SPARK-11685.
Diffstat (limited to 'examples')
-rw-r--r-- examples/src/main/python/ml/gradient_boosted_trees.py | 82
-rw-r--r-- examples/src/main/python/ml/logistic_regression.py | 66
-rw-r--r-- examples/src/main/python/ml/random_forest_example.py | 87
3 files changed, 0 insertions, 235 deletions
diff --git a/examples/src/main/python/ml/gradient_boosted_trees.py b/examples/src/main/python/ml/gradient_boosted_trees.py
deleted file mode 100644
index c3bf8aa2eb..0000000000
--- a/examples/src/main/python/ml/gradient_boosted_trees.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import GBTClassifier
-from pyspark.ml.feature import StringIndexer
-from pyspark.ml.regression import GBTRegressor
-from pyspark.mllib.evaluation import BinaryClassificationMetrics, RegressionMetrics
-from pyspark.sql import Row, SQLContext
-
-"""
-A simple example demonstrating a Gradient Boosted Trees Classification/Regression Pipeline.
-Note: GBTClassifier only supports binary classification currently
-Run with:
- bin/spark-submit examples/src/main/python/ml/gradient_boosted_trees.py
-"""
-
-
-def testClassification(train, test):
- # Train a GradientBoostedTrees model.
-
- rf = GBTClassifier(maxIter=30, maxDepth=4, labelCol="indexedLabel")
-
- model = rf.fit(train)
- predictionAndLabels = model.transform(test).select("prediction", "indexedLabel") \
- .map(lambda x: (x.prediction, x.indexedLabel))
-
- metrics = BinaryClassificationMetrics(predictionAndLabels)
- print("AUC %.3f" % metrics.areaUnderROC)
-
-
-def testRegression(train, test):
- # Train a GradientBoostedTrees model.
-
- rf = GBTRegressor(maxIter=30, maxDepth=4, labelCol="indexedLabel")
-
- model = rf.fit(train)
- predictionAndLabels = model.transform(test).select("prediction", "indexedLabel") \
- .map(lambda x: (x.prediction, x.indexedLabel))
-
- metrics = RegressionMetrics(predictionAndLabels)
- print("rmse %.3f" % metrics.rootMeanSquaredError)
- print("r2 %.3f" % metrics.r2)
- print("mae %.3f" % metrics.meanAbsoluteError)
-
-
-if __name__ == "__main__":
- if len(sys.argv) > 1:
- print("Usage: gradient_boosted_trees", file=sys.stderr)
- exit(1)
- sc = SparkContext(appName="PythonGBTExample")
- sqlContext = SQLContext(sc)
-
- # Load the data stored in LIBSVM format as a DataFrame.
- df = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
- # Map labels into an indexed column of labels in [0, numLabels)
- stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
- si_model = stringIndexer.fit(df)
- td = si_model.transform(df)
- [train, test] = td.randomSplit([0.7, 0.3])
- testClassification(train, test)
- testRegression(train, test)
- sc.stop()
diff --git a/examples/src/main/python/ml/logistic_regression.py b/examples/src/main/python/ml/logistic_regression.py
deleted file mode 100644
index 4cd027fdfb..0000000000
--- a/examples/src/main/python/ml/logistic_regression.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import LogisticRegression
-from pyspark.mllib.evaluation import MulticlassMetrics
-from pyspark.ml.feature import StringIndexer
-from pyspark.sql import SQLContext
-
-"""
-A simple example demonstrating a logistic regression with elastic net regularization Pipeline.
-Run with:
- bin/spark-submit examples/src/main/python/ml/logistic_regression.py
-"""
-
-if __name__ == "__main__":
-
- if len(sys.argv) > 1:
- print("Usage: logistic_regression", file=sys.stderr)
- exit(-1)
-
- sc = SparkContext(appName="PythonLogisticRegressionExample")
- sqlContext = SQLContext(sc)
-
- # Load the data stored in LIBSVM format as a DataFrame.
- df = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
- # Map labels into an indexed column of labels in [0, numLabels)
- stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
- si_model = stringIndexer.fit(df)
- td = si_model.transform(df)
- [training, test] = td.randomSplit([0.7, 0.3])
-
- lr = LogisticRegression(maxIter=100, regParam=0.3).setLabelCol("indexedLabel")
- lr.setElasticNetParam(0.8)
-
- # Fit the model
- lrModel = lr.fit(training)
-
- predictionAndLabels = lrModel.transform(test).select("prediction", "indexedLabel") \
- .map(lambda x: (x.prediction, x.indexedLabel))
-
- metrics = MulticlassMetrics(predictionAndLabels)
- print("weighted f-measure %.3f" % metrics.weightedFMeasure())
- print("precision %s" % metrics.precision())
- print("recall %s" % metrics.recall())
-
- sc.stop()
diff --git a/examples/src/main/python/ml/random_forest_example.py b/examples/src/main/python/ml/random_forest_example.py
deleted file mode 100644
index dc6a778670..0000000000
--- a/examples/src/main/python/ml/random_forest_example.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-import sys
-
-from pyspark import SparkContext
-from pyspark.ml.classification import RandomForestClassifier
-from pyspark.ml.feature import StringIndexer
-from pyspark.ml.regression import RandomForestRegressor
-from pyspark.mllib.evaluation import MulticlassMetrics, RegressionMetrics
-from pyspark.mllib.util import MLUtils
-from pyspark.sql import Row, SQLContext
-
-"""
-A simple example demonstrating a RandomForest Classification/Regression Pipeline.
-Run with:
- bin/spark-submit examples/src/main/python/ml/random_forest_example.py
-"""
-
-
-def testClassification(train, test):
- # Train a RandomForest model.
- # Setting featureSubsetStrategy="auto" lets the algorithm choose.
- # Note: Use larger numTrees in practice.
-
- rf = RandomForestClassifier(labelCol="indexedLabel", numTrees=3, maxDepth=4)
-
- model = rf.fit(train)
- predictionAndLabels = model.transform(test).select("prediction", "indexedLabel") \
- .map(lambda x: (x.prediction, x.indexedLabel))
-
- metrics = MulticlassMetrics(predictionAndLabels)
- print("weighted f-measure %.3f" % metrics.weightedFMeasure())
- print("precision %s" % metrics.precision())
- print("recall %s" % metrics.recall())
-
-
-def testRegression(train, test):
- # Train a RandomForest model.
- # Note: Use larger numTrees in practice.
-
- rf = RandomForestRegressor(labelCol="indexedLabel", numTrees=3, maxDepth=4)
-
- model = rf.fit(train)
- predictionAndLabels = model.transform(test).select("prediction", "indexedLabel") \
- .map(lambda x: (x.prediction, x.indexedLabel))
-
- metrics = RegressionMetrics(predictionAndLabels)
- print("rmse %.3f" % metrics.rootMeanSquaredError)
- print("r2 %.3f" % metrics.r2)
- print("mae %.3f" % metrics.meanAbsoluteError)
-
-
-if __name__ == "__main__":
- if len(sys.argv) > 1:
- print("Usage: random_forest_example", file=sys.stderr)
- exit(1)
- sc = SparkContext(appName="PythonRandomForestExample")
- sqlContext = SQLContext(sc)
-
- # Load the data stored in LIBSVM format as a DataFrame.
- df = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
-
- # Map labels into an indexed column of labels in [0, numLabels)
- stringIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel")
- si_model = stringIndexer.fit(df)
- td = si_model.transform(df)
- [train, test] = td.randomSplit([0.7, 0.3])
- testClassification(train, test)
- testRegression(train, test)
- sc.stop()