author     zero323 <zero323@users.noreply.github.com>  2016-08-14 11:59:24 +0100
committer  Sean Owen <sowen@cloudera.com>  2016-08-14 11:59:24 +0100
commit     0ebf7c1bff736cf54ec47957d71394d5b75b47a7 (patch)
tree       4bc4aa60b1254db1af2aaa8981e4eac486fe0973 /mllib/src/test
parent     cdaa562c9a09e2e83e6df4e84d911ce1428a7a7c (diff)
[SPARK-17027][ML] Avoid integer overflow in PolynomialExpansion.getPolySize
## What changes were proposed in this pull request?

Replaces custom choose function with o.a.commons.math3.CombinatoricsUtils.binomialCoefficient

## How was this patch tested?

Spark unit tests

Author: zero323 <zero323@users.noreply.github.com>

Closes #14614 from zero323/SPARK-17027.
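For illustration, a minimal sketch of the kind of change this describes, assuming a simplified stand-in for the private helper (the object name, `chooseInt`, and this `getPolySize` body are illustrative, not the verbatim Spark source): an Int-based choose silently wraps around once an intermediate product exceeds Int.MaxValue, while org.apache.commons.math3.util.CombinatoricsUtils.binomialCoefficient computes the value as a Long and throws MathArithmeticException rather than overflowing.

```scala
import org.apache.commons.math3.util.CombinatoricsUtils

object PolySizeSketch {
  // An Int-based "choose" of the kind the PR replaces (illustrative, not
  // Spark's exact original): mathematically exact while the result fits,
  // but the intermediate product n * choose(n - 1, k - 1) silently wraps
  // around once it exceeds Int.MaxValue.
  def chooseInt(n: Int, k: Int): Int =
    if (k == 0 || k == n) 1 else (n * chooseInt(n - 1, k - 1)) / k

  // Sketch of the fixed size computation: the expansion of numFeatures
  // inputs at the given degree has C(numFeatures + degree, degree) terms.
  // Commons Math works in Long and throws MathArithmeticException instead
  // of overflowing, and the require makes the Int conversion explicit.
  def getPolySize(numFeatures: Int, degree: Int): Int = {
    val n = CombinatoricsUtils.binomialCoefficient(numFeatures + degree, degree)
    require(n <= Int.MaxValue, s"Expansion size $n does not fit in an Int")
    n.toInt
  }
}
```

Failing fast on an oversized expansion turns a silently wrong vector size into an explicit error, which is what the new unit test below exercises.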
Diffstat (limited to 'mllib/src/test')
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/feature/PolynomialExpansionSuite.scala | 24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/PolynomialExpansionSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/PolynomialExpansionSuite.scala
index 8e1f9ddb36..9ecd321b12 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/PolynomialExpansionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/PolynomialExpansionSuite.scala
@@ -116,5 +116,29 @@ class PolynomialExpansionSuite
       .setDegree(3)
     testDefaultReadWrite(t)
   }
+
+  test("SPARK-17027. Integer overflow in PolynomialExpansion.getPolySize") {
+    val data: Array[(Vector, Int, Int)] = Array(
+      (Vectors.dense(1.0, 2.0, 3.0, 4.0, 5.0), 3002, 4367),
+      (Vectors.sparse(5, Seq((0, 1.0), (4, 5.0))), 3002, 4367),
+      (Vectors.dense(1.0, 2.0, 3.0, 4.0, 5.0, 6.0), 8007, 12375)
+    )
+
+    val df = spark.createDataFrame(data)
+      .toDF("features", "expectedPoly10size", "expectedPoly11size")
+
+    val t = new PolynomialExpansion()
+      .setInputCol("features")
+      .setOutputCol("polyFeatures")
+
+    for (i <- Seq(10, 11)) {
+      val transformed = t.setDegree(i)
+        .transform(df)
+        .select(s"expectedPoly${i}size", "polyFeatures")
+        .rdd.map { case Row(expected: Int, v: Vector) => expected == v.size }
+
+      assert(transformed.collect.forall(identity))
+    }
+  }
 }
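As a cross-check of the constants hard-coded in the new test, a standalone sketch (not part of the Spark suite; `ExpectedSizeCheck` and `expectedExpansionSize` are illustrative names): the test sizes are consistent with the expansion of n features at degree d having C(n + d, d) - 1 terms, i.e. every monomial up to degree d except the constant term.

```scala
import org.apache.commons.math3.util.CombinatoricsUtils

// Standalone illustration (not part of the Spark test suite): number of
// terms in a polynomial expansion of numFeatures inputs up to the given
// degree, excluding the constant term.
object ExpectedSizeCheck extends App {
  def expectedExpansionSize(numFeatures: Int, degree: Int): Long =
    CombinatoricsUtils.binomialCoefficient(numFeatures + degree, degree) - 1

  // Reproduces the constants used in the test above.
  assert(expectedExpansionSize(5, 10) == 3002)  // 5 features, degree 10
  assert(expectedExpansionSize(5, 11) == 4367)  // 5 features, degree 11
  assert(expectedExpansionSize(6, 10) == 8007)  // 6 features, degree 10
  assert(expectedExpansionSize(6, 11) == 12375) // 6 features, degree 11
}
```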