/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.ml.feature

import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf
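
/**
 * Tests for [[QuantileDiscretizer]]. Fitting produces a [[Bucketizer]] model
 * whose splits are approximate quantiles of the input column.
 */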
class QuantileDiscretizerSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
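
  // A uniform ramp of 1..datasetSize values should be split into exactly
  // numBuckets buckets whose sizes match datasetSize / numBuckets to within
  // the discretizer's relative-error tolerance.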
test("Test observed number of buckets and their sizes match expected values") {
val spark = this.spark
import spark.implicits._
val datasetSize = 100000
val numBuckets = 5
val df = sc.parallelize(1.0 to datasetSize by 1.0).map(Tuple1.apply).toDF("input")
val discretizer = new QuantileDiscretizer()
.setInputCol("input")
.setOutputCol("result")
.setNumBuckets(numBuckets)
val result = discretizer.fit(df).transform(df)
val observedNumBuckets = result.select("result").distinct.count
assert(observedNumBuckets === numBuckets,
"Observed number of buckets does not equal expected number of buckets.")
val relativeError = discretizer.getRelativeError
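    // A bucket is "good" if its size deviates from the ideal size,
    // datasetSize / numBuckets, by no more than relativeError * datasetSize.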
    val isGoodBucket = udf {
      (size: Int) => math.abs(size - (datasetSize / numBuckets)) <= (relativeError * datasetSize)
    }
    val numGoodBuckets = result.groupBy("result").count.filter(isGoodBucket($"count")).count
    assert(numGoodBuckets === numBuckets,
      "Bucket sizes are not within expected relative error tolerance.")
  }
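
  // With only three distinct input values, five quantile splits cannot all be
  // distinct; duplicate splits are collapsed, so anywhere from 2 to numBuckets
  // buckets may be observed.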
test("Test Bucketizer on duplicated splits") {
val spark = this.spark
import spark.implicits._
val datasetSize = 12
val numBuckets = 5
val df = sc.parallelize(Array(1.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0))
.map(Tuple1.apply).toDF("input")
val discretizer = new QuantileDiscretizer()
.setInputCol("input")
.setOutputCol("result")
.setNumBuckets(numBuckets)
val result = discretizer.fit(df).transform(df)
val observedNumBuckets = result.select("result").distinct.count
assert(2 <= observedNumBuckets && observedNumBuckets <= numBuckets,
"Observed number of buckets are not within expected range.")
}
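
  // The fitted splits are open-ended (-Infinity / +Infinity at the boundaries),
  // so unseen values outside the training range [1, 100] land in the edge
  // buckets: -10..19 (30 values) map to bucket 0 and 80..110 (31 values) to
  // bucket 4, matching the assertions below.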
test("Test transform method on unseen data") {
val spark = this.spark
import spark.implicits._
val trainDF = sc.parallelize(1.0 to 100.0 by 1.0).map(Tuple1.apply).toDF("input")
val testDF = sc.parallelize(-10.0 to 110.0 by 1.0).map(Tuple1.apply).toDF("input")
val discretizer = new QuantileDiscretizer()
.setInputCol("input")
.setOutputCol("result")
.setNumBuckets(5)
val result = discretizer.fit(trainDF).transform(testDF)
val firstBucketSize = result.filter(result("result") === 0.0).count
val lastBucketSize = result.filter(result("result") === 4.0).count
assert(firstBucketSize === 30L,
s"Size of first bucket ${firstBucketSize} did not equal expected value of 30.")
assert(lastBucketSize === 31L,
s"Size of last bucket ${lastBucketSize} did not equal expected value of 31.")
}
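
  // Params should survive a save/load round trip through ML persistence.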
test("read/write") {
val t = new QuantileDiscretizer()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setNumBuckets(6)
testDefaultReadWrite(t)
}
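
  // fit() should set the originating estimator as the parent of the model.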
test("Verify resulting model has parent") {
val spark = this.spark
import spark.implicits._
val df = sc.parallelize(1 to 100).map(Tuple1.apply).toDF("input")
val discretizer = new QuantileDiscretizer()
.setInputCol("input")
.setOutputCol("result")
.setNumBuckets(5)
val model = discretizer.fit(df)
assert(model.hasParent)
}
}