author    Dongjoon Hyun <dongjoon@apache.org>    2016-02-26 08:31:55 -0800
committer Xiangrui Meng <meng@databricks.com>    2016-02-26 08:31:55 -0800
commit    7af0de076f74e975c9235c88b0f11b22fcbae060 (patch)
tree      0dfcb6c7eb3213c7d37ed203e7f513112bfbba02 /examples/src/main/python
parent    b33261f91387904c5aaccae40f86922c92a4e09a (diff)
[SPARK-11381][DOCS] Replace example code in mllib-linear-methods.md using include_example
## What changes were proposed in this pull request?

This PR replaces the example code in `mllib-linear-methods.md` using `include_example` by doing the following:

* Extracts the example code (Scala, Java, Python) into files in the `examples` module.
* Merges some dialog-style examples into a single file.
* Hides redundant code in HTML for consistency with the other docs.

## How was this patch tested?

Manual test. This PR can be tested by generating the documentation with `SKIP_API=1 jekyll build`.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11320 from dongjoon-hyun/SPARK-11381.
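As a rough sketch of how these extracted files are consumed on the docs side (the corresponding change to `mllib-linear-methods.md` itself falls outside the `examples/src/main/python` filter shown below): the Jekyll `include_example` tag pulls in only the region between the `# $example on$` and `# $example off$` markers of a file under `examples/src/main/`, so the markdown page references something along the lines of

    <div data-lang="python" markdown="1">
    {% include_example python/mllib/linear_regression_with_sgd_example.py %}
    </div>

The surrounding `<div>` and the exact tag path here are illustrative of the mechanism, not a verbatim excerpt of the doc change.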
Diffstat (limited to 'examples/src/main/python')
-rw-r--r--  examples/src/main/python/mllib/linear_regression_with_sgd_example.py     | 54
-rw-r--r--  examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py | 54
-rw-r--r--  examples/src/main/python/mllib/streaming_linear_regression_example.py    | 62
-rw-r--r--  examples/src/main/python/mllib/svm_with_sgd_example.py                   | 47
4 files changed, 217 insertions, 0 deletions
diff --git a/examples/src/main/python/mllib/linear_regression_with_sgd_example.py b/examples/src/main/python/mllib/linear_regression_with_sgd_example.py
new file mode 100644
index 0000000000..6fbaeff0cd
--- /dev/null
+++ b/examples/src/main/python/mllib/linear_regression_with_sgd_example.py
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Linear Regression With SGD Example.
+"""
+from __future__ import print_function
+
+from pyspark import SparkContext
+# $example on$
+from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD, LinearRegressionModel
+# $example off$
+
+if __name__ == "__main__":
+
+    sc = SparkContext(appName="PythonLinearRegressionWithSGDExample")
+
+    # $example on$
+    # Load and parse the data
+    def parsePoint(line):
+        values = [float(x) for x in line.replace(',', ' ').split(' ')]
+        return LabeledPoint(values[0], values[1:])
+
+    data = sc.textFile("data/mllib/ridge-data/lpsa.data")
+    parsedData = data.map(parsePoint)
+
+    # Build the model
+    model = LinearRegressionWithSGD.train(parsedData, iterations=100, step=0.00000001)
+
+    # Evaluate the model on training data
+    valuesAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
+    MSE = valuesAndPreds \
+        .map(lambda vp: (vp[0] - vp[1])**2) \
+        .reduce(lambda x, y: x + y) / valuesAndPreds.count()
+    print("Mean Squared Error = " + str(MSE))
+
+    # Save and load model
+    model.save(sc, "target/tmp/pythonLinearRegressionWithSGDModel")
+    sameModel = LinearRegressionModel.load(sc, "target/tmp/pythonLinearRegressionWithSGDModel")
+    # $example off$
diff --git a/examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py b/examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py
new file mode 100644
index 0000000000..e030b74ba6
--- /dev/null
+++ b/examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Logistic Regression With LBFGS Example.
+"""
+from __future__ import print_function
+
+from pyspark import SparkContext
+# $example on$
+from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
+from pyspark.mllib.regression import LabeledPoint
+# $example off$
+
+if __name__ == "__main__":
+
+    sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
+
+    # $example on$
+    # Load and parse the data
+    def parsePoint(line):
+        values = [float(x) for x in line.split(' ')]
+        return LabeledPoint(values[0], values[1:])
+
+    data = sc.textFile("data/mllib/sample_svm_data.txt")
+    parsedData = data.map(parsePoint)
+
+    # Build the model
+    model = LogisticRegressionWithLBFGS.train(parsedData)
+
+    # Evaluating the model on training data
+    labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
+    trainErr = labelsAndPreds.filter(lambda lp: lp[0] != lp[1]).count() / float(parsedData.count())
+    print("Training Error = " + str(trainErr))
+
+    # Save and load model
+    model.save(sc, "target/tmp/pythonLogisticRegressionWithLBFGSModel")
+    sameModel = LogisticRegressionModel.load(sc,
+                                             "target/tmp/pythonLogisticRegressionWithLBFGSModel")
+    # $example off$
diff --git a/examples/src/main/python/mllib/streaming_linear_regression_example.py b/examples/src/main/python/mllib/streaming_linear_regression_example.py
new file mode 100644
index 0000000000..f600496867
--- /dev/null
+++ b/examples/src/main/python/mllib/streaming_linear_regression_example.py
@@ -0,0 +1,62 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Streaming Linear Regression Example.
+"""
+from __future__ import print_function
+
+# $example on$
+import sys
+# $example off$
+
+from pyspark import SparkContext
+from pyspark.streaming import StreamingContext
+# $example on$
+from pyspark.mllib.linalg import Vectors
+from pyspark.mllib.regression import LabeledPoint
+from pyspark.mllib.regression import StreamingLinearRegressionWithSGD
+# $example off$
+
+if __name__ == "__main__":
+    if len(sys.argv) != 3:
+        print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
+              file=sys.stderr)
+        exit(-1)
+
+    sc = SparkContext(appName="PythonStreamingLinearRegressionExample")
+    ssc = StreamingContext(sc, 1)
+
+    # $example on$
+    def parse(lp):
+        label = float(lp[lp.find('(') + 1: lp.find(',')])
+        vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
+        return LabeledPoint(label, vec)
+
+    trainingData = ssc.textFileStream(sys.argv[1]).map(parse).cache()
+    testData = ssc.textFileStream(sys.argv[2]).map(parse)
+
+    numFeatures = 3
+    model = StreamingLinearRegressionWithSGD()
+    model.setInitialWeights([0.0, 0.0, 0.0])
+
+    model.trainOn(trainingData)
+    print(model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))))
+
+    ssc.start()
+    ssc.awaitTermination()
+    # $example off$
diff --git a/examples/src/main/python/mllib/svm_with_sgd_example.py b/examples/src/main/python/mllib/svm_with_sgd_example.py
new file mode 100644
index 0000000000..309ab09cc3
--- /dev/null
+++ b/examples/src/main/python/mllib/svm_with_sgd_example.py
@@ -0,0 +1,47 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyspark import SparkContext
+# $example on$
+from pyspark.mllib.classification import SVMWithSGD, SVMModel
+from pyspark.mllib.regression import LabeledPoint
+# $example off$
+
+if __name__ == "__main__":
+    sc = SparkContext(appName="PythonSVMWithSGDExample")
+
+    # $example on$
+    # Load and parse the data
+    def parsePoint(line):
+        values = [float(x) for x in line.split(' ')]
+        return LabeledPoint(values[0], values[1:])
+
+    data = sc.textFile("data/mllib/sample_svm_data.txt")
+    parsedData = data.map(parsePoint)
+
+    # Build the model
+    model = SVMWithSGD.train(parsedData, iterations=100)
+
+    # Evaluating the model on training data
+    labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
+    trainErr = labelsAndPreds.filter(lambda lp: lp[0] != lp[1]).count() / float(parsedData.count())
+    print("Training Error = " + str(trainErr))
+
+    # Save and load model
+    model.save(sc, "target/tmp/pythonSVMWithSGDModel")
+    sameModel = SVMModel.load(sc, "target/tmp/pythonSVMWithSGDModel")
+    # $example off$