/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature

import scala.beans.BeanInfo

import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
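
// Test fixture pairing a raw input string with the tokens the tokenizer is expected to
// produce. @BeanInfo generates JavaBean-style accessors for the fields.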
@BeanInfo
case class TokenizerTestData(rawText: String, wantedTokens: Array[String])
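
// Tests for Tokenizer (simple whitespace tokenization): Param conventions check and
// ML persistence round trip.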
class TokenizerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  test("params") {
    ParamsSuite.checkParams(new Tokenizer)
  }

  test("read/write") {
    val t = new Tokenizer()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
    testDefaultReadWrite(t)
  }
}
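
// Tests for RegexTokenizer: pattern matching vs. gap splitting, minimum token length,
// case folding, and ML persistence.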
class RegexTokenizerSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
  import org.apache.spark.ml.feature.RegexTokenizerSuite._

  test("params") {
    ParamsSuite.checkParams(new RegexTokenizer)
  }
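
  // Token-matching mode: with gaps disabled, the pattern matches the tokens themselves
  // (words or punctuation), lowercased by default. The same tokenizer is then reused with
  // minTokenLength = 3 to drop short tokens, and tokenizer2 exercises the default
  // whitespace-splitting behavior.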
test("RegexTokenizer") {
val tokenizer0 = new RegexTokenizer()
.setGaps(false)
.setPattern("\\w+|\\p{Punct}")
.setInputCol("rawText")
.setOutputCol("tokens")
val dataset0 = sqlContext.createDataFrame(Seq(
TokenizerTestData("Test for tokenization.", Array("test", "for", "tokenization", ".")),
TokenizerTestData("Te,st. punct", Array("te", ",", "st", ".", "punct"))
))
testRegexTokenizer(tokenizer0, dataset0)
val dataset1 = sqlContext.createDataFrame(Seq(
TokenizerTestData("Test for tokenization.", Array("test", "for", "tokenization")),
TokenizerTestData("Te,st. punct", Array("punct"))
))
tokenizer0.setMinTokenLength(3)
testRegexTokenizer(tokenizer0, dataset1)
val tokenizer2 = new RegexTokenizer()
.setInputCol("rawText")
.setOutputCol("tokens")
val dataset2 = sqlContext.createDataFrame(Seq(
TokenizerTestData("Test for tokenization.", Array("test", "for", "tokenization.")),
TokenizerTestData("Te,st. punct", Array("te,st.", "punct"))
))
testRegexTokenizer(tokenizer2, dataset2)
}
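
  // With toLowercase disabled, the original casing of the input is preserved in the tokens.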
test("RegexTokenizer with toLowercase false") {
val tokenizer = new RegexTokenizer()
.setInputCol("rawText")
.setOutputCol("tokens")
.setToLowercase(false)
val dataset = sqlContext.createDataFrame(Seq(
TokenizerTestData("JAVA SCALA", Array("JAVA", "SCALA")),
TokenizerTestData("java scala", Array("java", "scala"))
))
testRegexTokenizer(tokenizer, dataset)
}
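
  // Persistence round trip with all Params explicitly set.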
test("read/write") {
val t = new RegexTokenizer()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setMinTokenLength(2)
.setGaps(false)
.setPattern("hi")
.setToLowercase(false)
testDefaultReadWrite(t)
}
}
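
// Shared helper: transform the dataset and check the produced tokens against the expected
// tokens row by row.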
object RegexTokenizerSuite extends SparkFunSuite {

  def testRegexTokenizer(t: RegexTokenizer, dataset: DataFrame): Unit = {
    t.transform(dataset)
      .select("tokens", "wantedTokens")
      .collect()
      .foreach { case Row(tokens, wantedTokens) =>
        assert(tokens === wantedTokens)
      }
  }
}