author    Xiangrui Meng <meng@databricks.com>  2015-02-15 20:29:26 -0800
committer Xiangrui Meng <meng@databricks.com>  2015-02-15 20:29:26 -0800
commit    cd4a15366244657c4b7936abe5054754534366f2 (patch)
tree      fbee98a5031440c879705f2c7f9717b5d815c66e /examples/src/main/python/ml
parent    836577b382695558f5c97d94ee725d0156ebfad2 (diff)
[SPARK-5769] Set params in constructors and in setParams in Python ML pipelines
This PR allows Python users to set params in constructors and in setParams, where we use the decorator `keyword_only` to force keyword arguments. The trade-off is discussed in the design doc of SPARK-4586.

Generated doc: ![screen shot 2015-02-12 at 3 06 58 am](https://cloud.githubusercontent.com/assets/829644/6166491/9cfcd06a-b265-11e4-99ea-473d866634fc.png)

CC: davies rxin

Author: Xiangrui Meng <meng@databricks.com>

Closes #4564 from mengxr/py-pipeline-kw and squashes the following commits:

fedf720 [Xiangrui Meng] use toDF
d565f2c [Xiangrui Meng] Merge remote-tracking branch 'apache/master' into py-pipeline-kw
cbc15d3 [Xiangrui Meng] fix style
5032097 [Xiangrui Meng] update pipeline signature
950774e [Xiangrui Meng] simplify keyword_only and update constructor/setParams signatures
fdde5fc [Xiangrui Meng] fix style
c9384b8 [Xiangrui Meng] fix sphinx doc
8e59180 [Xiangrui Meng] add setParams and make constructors take params, where we force keyword args
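For context, a minimal sketch of what such a `keyword_only` decorator might look like. This is an illustration, not Spark's exact implementation; PySpark's version similarly rejects positional arguments and records the passed kwargs so constructors can forward them to setParams, but details may differ:

    from functools import wraps

    def keyword_only(func):
        """Reject positional arguments and stash the keyword arguments.

        Sketch only; Spark's real decorator lives in the pyspark package.
        """
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if args:
                raise TypeError("Method %s forces keyword arguments." % func.__name__)
            # Record the kwargs so __init__ can hand them to setParams.
            self._input_kwargs = kwargs
            return func(self, **kwargs)
        return wrapper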
Diffstat (limited to 'examples/src/main/python/ml')
-rw-r--r--  examples/src/main/python/ml/simple_text_classification_pipeline.py | 44
1 file changed, 17 insertions, 27 deletions
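Besides the keyword-arg constructors, the diff below also drops `sqlCtx.inferSchema(...)` in favor of the `toDF()` helper on RDDs (squashed commit fedf720, "use toDF"). A minimal sketch of the equivalence, assuming Spark 1.3-era PySpark where instantiating a SQLContext makes `toDF` available on RDDs:

    from pyspark import SparkContext
    from pyspark.sql import SQLContext, Row

    sc = SparkContext(appName="toDFSketch")
    sqlCtx = SQLContext(sc)  # creating a SQLContext enables rdd.toDF()

    rdd = sc.parallelize([(0, "a b c")]).map(lambda x: Row(id=x[0], text=x[1]))
    df_old = sqlCtx.inferSchema(rdd)  # older style, used before this commit
    df_new = rdd.toDF()               # equivalent DataFrame via the new helper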
diff --git a/examples/src/main/python/ml/simple_text_classification_pipeline.py b/examples/src/main/python/ml/simple_text_classification_pipeline.py
index c7df3d7b74..b4d9355b68 100644
--- a/examples/src/main/python/ml/simple_text_classification_pipeline.py
+++ b/examples/src/main/python/ml/simple_text_classification_pipeline.py
@@ -36,43 +36,33 @@ if __name__ == "__main__":
    sqlCtx = SQLContext(sc)
    # Prepare training documents, which are labeled.
- LabeledDocument = Row('id', 'text', 'label')
- training = sqlCtx.inferSchema(
- sc.parallelize([(0L, "a b c d e spark", 1.0),
- (1L, "b d", 0.0),
- (2L, "spark f g h", 1.0),
- (3L, "hadoop mapreduce", 0.0)])
- .map(lambda x: LabeledDocument(*x)))
+ LabeledDocument = Row("id", "text", "label")
+ training = sc.parallelize([(0L, "a b c d e spark", 1.0),
+ (1L, "b d", 0.0),
+ (2L, "spark f g h", 1.0),
+ (3L, "hadoop mapreduce", 0.0)]) \
+ .map(lambda x: LabeledDocument(*x)).toDF()
    # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
- tokenizer = Tokenizer() \
- .setInputCol("text") \
- .setOutputCol("words")
- hashingTF = HashingTF() \
- .setInputCol(tokenizer.getOutputCol()) \
- .setOutputCol("features")
- lr = LogisticRegression() \
- .setMaxIter(10) \
- .setRegParam(0.01)
- pipeline = Pipeline() \
- .setStages([tokenizer, hashingTF, lr])
+ tokenizer = Tokenizer(inputCol="text", outputCol="words")
+ hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
+ lr = LogisticRegression(maxIter=10, regParam=0.01)
+ pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
    # Fit the pipeline to training documents.
    model = pipeline.fit(training)
    # Prepare test documents, which are unlabeled.
- Document = Row('id', 'text')
- test = sqlCtx.inferSchema(
- sc.parallelize([(4L, "spark i j k"),
- (5L, "l m n"),
- (6L, "mapreduce spark"),
- (7L, "apache hadoop")])
- .map(lambda x: Document(*x)))
+ Document = Row("id", "text")
+ test = sc.parallelize([(4L, "spark i j k"),
+ (5L, "l m n"),
+ (6L, "mapreduce spark"),
+ (7L, "apache hadoop")]) \
+ .map(lambda x: Document(*x)).toDF()
    # Make predictions on test documents and print columns of interest.
    prediction = model.transform(test)
- prediction.registerTempTable("prediction")
- selected = sqlCtx.sql("SELECT id, text, prediction from prediction")
+ selected = prediction.select("id", "text", "prediction")
    for row in selected.collect():
        print row
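After this change, the example's stages are configured entirely through keyword arguments, and `setParams` rejects positional use. A hedged sketch of the new style in Python 2 (assumes a running SparkContext; the exact TypeError message may differ from what is shown):

    from pyspark.ml.feature import Tokenizer

    # Params can be set in the constructor (keyword-only)...
    tokenizer = Tokenizer(inputCol="text", outputCol="words")

    # ...or re-set later through setParams, which is keyword-only as well.
    tokenizer.setParams(outputCol="tokens")

    # Positional arguments are rejected by the keyword_only decorator.
    try:
        tokenizer.setParams("text")
    except TypeError as e:
        print e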