path: root/python/examples/logistic_regression.py
author    Sandeep <sandeep@techaddict.me>    2014-05-06 17:27:52 -0700
committer Matei Zaharia <matei@databricks.com>    2014-05-06 17:27:52 -0700
commit    a000b5c3b0438c17e9973df4832c320210c29c27 (patch)
tree      446bbe902ecd6de05072357f7ef25aaeb0687b73 /python/examples/logistic_regression.py
parent    39b8b1489ff92697e4aeec997cdc436c7079d6f8 (diff)
download  spark-a000b5c3b0438c17e9973df4832c320210c29c27.tar.gz
          spark-a000b5c3b0438c17e9973df4832c320210c29c27.tar.bz2
          spark-a000b5c3b0438c17e9973df4832c320210c29c27.zip
SPARK-1637: Clean up examples for 1.0
- [x] Move all of them into subpackages of org.apache.spark.examples (right now some are in org.apache.spark.streaming.examples, for instance, and others are in org.apache.spark.examples.mllib)
- [x] Move Python examples into examples/src/main/python
- [x] Update docs to reflect these changes

Author: Sandeep <sandeep@techaddict.me>

This patch had conflicts when merged, resolved by
Committer: Matei Zaharia <matei@databricks.com>

Closes #571 from techaddict/SPARK-1637 and squashes the following commits:

47ef86c [Sandeep] Changes based on Discussions on PR, removing use of RawTextHelper from examples
8ed2d3f [Sandeep] Docs Updated for changes, Change for java examples
5f96121 [Sandeep] Move Python examples into examples/src/main/python
0a8dd77 [Sandeep] Move all Scala Examples to org.apache.spark.examples (some are in org.apache.spark.streaming.examples, for instance, and others are in org.apache.spark.examples.mllib)
Diffstat (limited to 'python/examples/logistic_regression.py')
-rwxr-xr-x  python/examples/logistic_regression.py  76
1 file changed, 0 insertions(+), 76 deletions(-)
diff --git a/python/examples/logistic_regression.py b/python/examples/logistic_regression.py
deleted file mode 100755
index 28d52e6a40..0000000000
--- a/python/examples/logistic_regression.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-A logistic regression implementation that uses NumPy (http://www.numpy.org)
-to act on batches of input data using efficient matrix operations.
-
-In practice, one may prefer to use the LogisticRegression algorithm in
-MLlib, as shown in python/examples/mllib/logistic_regression.py.
-"""
-
-from collections import namedtuple
-from math import exp
-from os.path import realpath
-import sys
-
-import numpy as np
-from pyspark import SparkContext
-
-
-D = 10 # Number of dimensions
-
-
-# Read a batch of points from the input file into a NumPy matrix object. We operate on batches to
-# make further computations faster.
-# The data file contains lines of the form <label> <x1> <x2> ... <xD>. We load each block of these
-# into a NumPy array of size numLines * (D + 1) and pull out column 0 vs the others in gradient().
-def readPointBatch(iterator):
- strs = list(iterator)
- matrix = np.zeros((len(strs), D + 1))
- for i in xrange(len(strs)):
- matrix[i] = np.fromstring(strs[i].replace(',', ' '), dtype=np.float32, sep=' ')
- return [matrix]
-
-if __name__ == "__main__":
- if len(sys.argv) != 4:
- print >> sys.stderr, "Usage: logistic_regression <master> <file> <iters>"
- exit(-1)
- sc = SparkContext(sys.argv[1], "PythonLR", pyFiles=[realpath(__file__)])
- points = sc.textFile(sys.argv[2]).mapPartitions(readPointBatch).cache()
- iterations = int(sys.argv[3])
-
- # Initialize w to a random value
- w = 2 * np.random.ranf(size=D) - 1
- print "Initial w: " + str(w)
-
- # Compute logistic regression gradient for a matrix of data points
- def gradient(matrix, w):
- Y = matrix[:,0] # point labels (first column of input file)
- X = matrix[:,1:] # point coordinates
- # For each point (x, y), compute gradient function, then sum these up
- return ((1.0 / (1.0 + np.exp(-Y * X.dot(w))) - 1.0) * Y * X.T).sum(1)
-
- def add(x, y):
- x += y
- return x
-
- for i in range(iterations):
- print "On iteration %i" % (i + 1)
- w -= points.map(lambda m: gradient(m, w)).reduce(add)
-
- print "Final w: " + str(w)