author     Jagadeesan <as2@us.ibm.com>        2016-10-28 02:26:55 -0700
committer  Yanbo Liang <ybliang8@gmail.com>   2016-10-28 02:26:55 -0700
commit     e9746f87d0b553b8115948acb79f7e32c23dfd86 (patch)
tree       86ec06dbb61a11e07477c0a9f24ba8c9db6b037c /examples/src/main/python/ml
parent     569788a55e4c6b218fb697e1e54c6138ffe657a6 (diff)
[SPARK-18133][EXAMPLES][ML] Python ML Pipeline Example has syntax errors

## What changes were proposed in this pull request?

In Python 3 there is only one integer type (int), which largely behaves like the long type of Python 2. Python 3 does not accept the "L" suffix on integer literals, so the suffix has been removed from all examples.

## How was this patch tested?

Unit tests.

Author: Jagadeesan <as2@us.ibm.com>

Closes #15660 from jagadeesanas2/SPARK-18133.
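For context, a minimal sketch of the literal difference this patch addresses; the variable name below is illustrative only and not part of the patch:

    # Python 2: "4L" is a long literal, and int and long are distinct types.
    # Python 3: there is a single arbitrary-precision int type, and the "L"
    # suffix is rejected at parse time:
    #   >>> 4L
    #   SyntaxError (exact message varies across 3.x releases)
    # Plain integer literals are accepted by both Python 2 and Python 3,
    # which is why the examples below simply drop the suffix.
    row = (4, "spark i j k")  # works under Python 2 and Python 3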
Diffstat (limited to 'examples/src/main/python/ml')
-rw-r--r--  examples/src/main/python/ml/cross_validator.py          |  8
-rw-r--r--  examples/src/main/python/ml/gaussian_mixture_example.py |  2
-rw-r--r--  examples/src/main/python/ml/pipeline_example.py         | 16
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/examples/src/main/python/ml/cross_validator.py b/examples/src/main/python/ml/cross_validator.py
index 907eec67a0..db7054307c 100644
--- a/examples/src/main/python/ml/cross_validator.py
+++ b/examples/src/main/python/ml/cross_validator.py
@@ -84,10 +84,10 @@ if __name__ == "__main__":
# Prepare test documents, which are unlabeled.
test = spark.createDataFrame([
- (4L, "spark i j k"),
- (5L, "l m n"),
- (6L, "mapreduce spark"),
- (7L, "apache hadoop")
+ (4, "spark i j k"),
+ (5, "l m n"),
+ (6, "mapreduce spark"),
+ (7, "apache hadoop")
], ["id", "text"])
# Make predictions on test documents. cvModel uses the best model found (lrModel).
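The lines that follow this context in cross_validator.py are roughly as sketched below; this is a hedged reconstruction, not part of the diff, and assumes cvModel is the CrossValidatorModel fit earlier in the example:

    prediction = cvModel.transform(test)  # score the unlabeled documents with the best model
    selected = prediction.select("id", "text", "probability", "prediction")
    for row in selected.collect():
        print(row)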
diff --git a/examples/src/main/python/ml/gaussian_mixture_example.py b/examples/src/main/python/ml/gaussian_mixture_example.py
index 8ad450b669..e4a0d314e9 100644
--- a/examples/src/main/python/ml/gaussian_mixture_example.py
+++ b/examples/src/main/python/ml/gaussian_mixture_example.py
@@ -38,7 +38,7 @@ if __name__ == "__main__":
# loads data
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
- gmm = GaussianMixture().setK(2).setSeed(538009335L)
+ gmm = GaussianMixture().setK(2).setSeed(538009335)
model = gmm.fit(dataset)
print("Gaussians shown as a DataFrame: ")
diff --git a/examples/src/main/python/ml/pipeline_example.py b/examples/src/main/python/ml/pipeline_example.py
index f63e4db434..e1fab7cbe6 100644
--- a/examples/src/main/python/ml/pipeline_example.py
+++ b/examples/src/main/python/ml/pipeline_example.py
@@ -35,10 +35,10 @@ if __name__ == "__main__":
# $example on$
# Prepare training documents from a list of (id, text, label) tuples.
training = spark.createDataFrame([
- (0L, "a b c d e spark", 1.0),
- (1L, "b d", 0.0),
- (2L, "spark f g h", 1.0),
- (3L, "hadoop mapreduce", 0.0)
+ (0, "a b c d e spark", 1.0),
+ (1, "b d", 0.0),
+ (2, "spark f g h", 1.0),
+ (3, "hadoop mapreduce", 0.0)
], ["id", "text", "label"])
# Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
@@ -52,10 +52,10 @@ if __name__ == "__main__":
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame([
- (4L, "spark i j k"),
- (5L, "l m n"),
- (6L, "spark hadoop spark"),
- (7L, "apache hadoop")
+ (4, "spark i j k"),
+ (5, "l m n"),
+ (6, "spark hadoop spark"),
+ (7, "apache hadoop")
], ["id", "text"])
# Make predictions on test documents and print columns of interest.
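A hedged sketch of the prediction step this trailing comment refers to, assuming model is the PipelineModel fit on the training DataFrame above (a reconstruction, not part of this diff):

    prediction = model.transform(test)
    selected = prediction.select("id", "text", "probability", "prediction")
    for row in selected.collect():
        rid, text, prob, pred = row
        print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), pred))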