path: root/examples/src/main/python/ml/tokenizer_example.py
author    Cheng Lian <lian@databricks.com>  2015-12-08 19:18:59 +0800
committer Cheng Lian <lian@databricks.com>  2015-12-08 19:18:59 +0800
commit    da2012a0e152aa078bdd19a5c7f91786a2dd7016 (patch)
tree      1f00975b821733925effbaf0090a40795c50d669 /examples/src/main/python/ml/tokenizer_example.py
parent    037b7e76a7f8b59e031873a768d81417dd180472 (diff)
download  spark-da2012a0e152aa078bdd19a5c7f91786a2dd7016.tar.gz
          spark-da2012a0e152aa078bdd19a5c7f91786a2dd7016.tar.bz2
          spark-da2012a0e152aa078bdd19a5c7f91786a2dd7016.zip
[SPARK-11551][DOC][EXAMPLE] Revert PR #10002
This reverts PR #10002, commit 78209b0ccaf3f22b5e2345dfb2b98edfdb746819. The original PR wasn't tested on Jenkins before being merged.

Author: Cheng Lian <lian@databricks.com>

Closes #10200 from liancheng/revert-pr-10002.
Diffstat (limited to 'examples/src/main/python/ml/tokenizer_example.py')
-rw-r--r-- examples/src/main/python/ml/tokenizer_example.py | 44
1 file changed, 0 insertions(+), 44 deletions(-)
diff --git a/examples/src/main/python/ml/tokenizer_example.py b/examples/src/main/python/ml/tokenizer_example.py
deleted file mode 100644
index ce9b225be5..0000000000
--- a/examples/src/main/python/ml/tokenizer_example.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from __future__ import print_function
-
-from pyspark import SparkContext
-from pyspark.sql import SQLContext
-# $example on$
-from pyspark.ml.feature import Tokenizer, RegexTokenizer
-# $example off$
-
-if __name__ == "__main__":
- sc = SparkContext(appName="TokenizerExample")
- sqlContext = SQLContext(sc)
-
- # $example on$
- sentenceDataFrame = sqlContext.createDataFrame([
- (0, "Hi I heard about Spark"),
- (1, "I wish Java could use case classes"),
- (2, "Logistic,regression,models,are,neat")
- ], ["label", "sentence"])
- tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
- wordsDataFrame = tokenizer.transform(sentenceDataFrame)
- for words_label in wordsDataFrame.select("words", "label").take(3):
- print(words_label)
- regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
- # alternatively, pattern="\\w+", gaps(False)
- # $example off$
-
- sc.stop()
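For reference, the file removed by this revert can still be exercised standalone against a Spark release of that era. Below is a minimal sketch (not part of this commit) assuming a Spark 1.5/1.6-style PySpark where SQLContext is the DataFrame entry point; the app name "TokenizerSketch" and variable names are illustrative. It runs both tokenizers, including the pattern="\\w+", gaps=False alternative that the deleted comment mentions.

    # Hypothetical standalone sketch of the deleted example, not the
    # original file; assumes a Spark 1.5/1.6-era PySpark installation.
    from __future__ import print_function

    from pyspark import SparkContext
    from pyspark.sql import SQLContext
    from pyspark.ml.feature import Tokenizer, RegexTokenizer

    if __name__ == "__main__":
        sc = SparkContext(appName="TokenizerSketch")
        sqlContext = SQLContext(sc)

        df = sqlContext.createDataFrame([
            (0, "Hi I heard about Spark"),
            (1, "Logistic,regression,models,are,neat"),
        ], ["label", "sentence"])

        # Tokenizer lowercases and splits on whitespace only, so the
        # comma-separated sentence stays a single token.
        tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
        for row in tokenizer.transform(df).select("words", "label").collect():
            print(row)

        # RegexTokenizer with the default gaps=True treats the pattern as
        # a delimiter; with gaps=False the pattern matches the tokens
        # themselves. "\\W" (split on non-word characters) and "\\w+"
        # (match runs of word characters) are therefore equivalent here.
        regex_split = RegexTokenizer(inputCol="sentence", outputCol="words",
                                     pattern="\\W")
        regex_match = RegexTokenizer(inputCol="sentence", outputCol="words",
                                     pattern="\\w+", gaps=False)
        for row in regex_split.transform(df).select("words").collect():
            print(row)
        for row in regex_match.transform(df).select("words").collect():
            print(row)

        sc.stop()

The two RegexTokenizer configurations produce the same tokens for these sentences: splitting on non-word characters and matching word runs are complementary views of the same segmentation.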