/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types.{StructField, StructType}
/**
 * Unit tests for [[VectorSlicer]]:
 *  - default-param validation (no features selected by default),
 *  - the static index/name validity helpers,
 *  - end-to-end slicing of sparse and dense vectors with metadata propagation,
 *  - ML persistence (read/write round-trip).
 */
class VectorSlicerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  test("params") {
    val slicer = new VectorSlicer().setInputCol("feature")
    ParamsSuite.checkParams(slicer)
    // A fresh slicer selects nothing until indices or names are set.
    assert(slicer.getIndices.length === 0)
    assert(slicer.getNames.length === 0)
    withClue("VectorSlicer should not have any features selected by default") {
      intercept[IllegalArgumentException] {
        slicer.transformSchema(StructType(Seq(StructField("feature", new VectorUDT, true))))
      }
    }
  }

  test("feature validity checks") {
    import VectorSlicer._
    // Indices are valid iff they are non-negative and contain no duplicates.
    assert(validIndices(Array(0, 1, 8, 2)))
    assert(validIndices(Array.empty[Int]))
    assert(!validIndices(Array(-1)))
    assert(!validIndices(Array(1, 2, 1)))
    // Names are valid iff they are non-empty and contain no duplicates.
    assert(validNames(Array("a", "b")))
    assert(validNames(Array.empty[String]))
    assert(!validNames(Array("", "b")))
    assert(!validNames(Array("a", "b", "a")))
  }

  test("Test vector slicer") {
    // Input vectors: a mix of sparse and dense encodings over 5 features.
    val data = Array(
      Vectors.sparse(5, Seq((0, -2.0), (1, 2.3))),
      Vectors.dense(-2.0, 2.3, 0.0, 0.0, 1.0),
      Vectors.dense(0.0, 0.0, 0.0, 0.0, 0.0),
      Vectors.dense(0.6, -1.1, -3.0, 4.5, 3.3),
      Vectors.sparse(5, Seq())
    )

    // Expected after selecting indices 1, 4
    val expected = Array(
      Vectors.sparse(2, Seq((0, 2.3))),
      Vectors.dense(2.3, 1.0),
      Vectors.dense(0.0, 0.0),
      Vectors.dense(-1.1, 3.3),
      Vectors.sparse(2, Seq())
    )

    val defaultAttr = NumericAttribute.defaultAttr
    // Upcast each NumericAttribute to Attribute via type ascription inside the
    // map so the result is Array[Attribute] directly. This avoids the previous
    // unchecked `asInstanceOf[Array[Attribute]]` cast, which only worked
    // because of erasure (Scala arrays are invariant).
    val attrs = Array("f0", "f1", "f2", "f3", "f4").map(name => defaultAttr.withName(name): Attribute)
    val attrGroup = new AttributeGroup("features", attrs)

    val resultAttrs = Array("f1", "f4").map(name => defaultAttr.withName(name): Attribute)
    val resultAttrGroup = new AttributeGroup("expected", resultAttrs)

    // Pair each input with its expected slice so one pass validates both values
    // and metadata.
    val rdd = sc.parallelize(data.zip(expected)).map { case (a, b) => Row(a, b) }
    val df = spark.createDataFrame(rdd,
      StructType(Array(attrGroup.toStructField(), resultAttrGroup.toStructField())))

    val vectorSlicer = new VectorSlicer().setInputCol("features").setOutputCol("result")

    // Checks that "result" matches "expected" both in vector values and in the
    // attached ML attribute metadata.
    def validateResults(df: DataFrame): Unit = {
      df.select("result", "expected").collect().foreach { case Row(vec1: Vector, vec2: Vector) =>
        assert(vec1 === vec2)
      }
      val resultMetadata = AttributeGroup.fromStructField(df.schema("result"))
      val expectedMetadata = AttributeGroup.fromStructField(df.schema("expected"))
      assert(resultMetadata.numAttributes === expectedMetadata.numAttributes)
      resultMetadata.attributes.get.zip(expectedMetadata.attributes.get).foreach { case (a, b) =>
        assert(a === b)
      }
    }

    // Selection by indices only, by a mix of indices and names, and by names
    // only must all produce the same slice.
    vectorSlicer.setIndices(Array(1, 4)).setNames(Array.empty)
    validateResults(vectorSlicer.transform(df))

    vectorSlicer.setIndices(Array(1)).setNames(Array("f4"))
    validateResults(vectorSlicer.transform(df))

    vectorSlicer.setIndices(Array.empty).setNames(Array("f1", "f4"))
    validateResults(vectorSlicer.transform(df))
  }

  test("read/write") {
    val t = new VectorSlicer()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setIndices(Array(1, 3))
      .setNames(Array("a", "d"))
    testDefaultReadWrite(t)
  }
}
|