about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Xiangrui Meng <meng@databricks.com> 2015-05-07 15:45:37 -0700
committer: Xiangrui Meng <meng@databricks.com> 2015-05-07 15:45:37 -0700
commit: e43803b8f477b2c8d28836ac163cb54328d13f1a (patch)
tree: 3da358b42d71c182cf1ff262dc0ec509124d3b39
parent: 658a478d3f86456df09d0fbb1ba438fb36d8725c (diff)
download: spark-e43803b8f477b2c8d28836ac163cb54328d13f1a.tar.gz
spark-e43803b8f477b2c8d28836ac163cb54328d13f1a.tar.bz2
spark-e43803b8f477b2c8d28836ac163cb54328d13f1a.zip
[SPARK-6948] [MLLIB] compress vectors in VectorAssembler
The compression is based on storage. brkyvz Author: Xiangrui Meng <meng@databricks.com> Closes #5985 from mengxr/SPARK-6948 and squashes the following commits: df56a00 [Xiangrui Meng] update python tests 6d90d45 [Xiangrui Meng] compress vectors in VectorAssembler
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala2
-rw-r--r--mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala10
-rw-r--r--python/pyspark/ml/feature.py6
3 files changed, 13 insertions, 5 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
index b5a69cee6d..796758a70e 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
@@ -102,6 +102,6 @@ object VectorAssembler {
case o =>
throw new SparkException(s"$o of type ${o.getClass.getName} is not supported.")
}
- Vectors.sparse(cur, indices.result(), values.result())
+ Vectors.sparse(cur, indices.result(), values.result()).compressed
}
}
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala
index 57d0278e03..0db27607bc 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/VectorAssemblerSuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.ml.feature
import org.scalatest.FunSuite
import org.apache.spark.SparkException
-import org.apache.spark.mllib.linalg.{Vector, Vectors}
+import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{Row, SQLContext}
@@ -48,6 +48,14 @@ class VectorAssemblerSuite extends FunSuite with MLlibTestSparkContext {
}
}
+ test("assemble should compress vectors") {
+ import org.apache.spark.ml.feature.VectorAssembler.assemble
+ val v1 = assemble(0.0, 0.0, 0.0, Vectors.dense(4.0))
+ assert(v1.isInstanceOf[SparseVector])
+ val v2 = assemble(1.0, 2.0, 3.0, Vectors.sparse(1, Array(0), Array(4.0)))
+ assert(v2.isInstanceOf[DenseVector])
+ }
+
test("VectorAssembler") {
val df = sqlContext.createDataFrame(Seq(
(0, 0.0, Vectors.dense(1.0, 2.0), "a", Vectors.sparse(2, Array(1), Array(3.0)), 10L)
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 8a0fdddd2d..705a368192 100644
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -121,12 +121,12 @@ class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol):
>>> df = sc.parallelize([Row(a=1, b=0, c=3)]).toDF()
>>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
>>> vecAssembler.transform(df).head().features
- SparseVector(3, {0: 1.0, 2: 3.0})
+ DenseVector([1.0, 0.0, 3.0])
>>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
- SparseVector(3, {0: 1.0, 2: 3.0})
+ DenseVector([1.0, 0.0, 3.0])
>>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
>>> vecAssembler.transform(df, params).head().vector
- SparseVector(2, {1: 1.0})
+ DenseVector([0.0, 1.0])
"""
_java_class = "org.apache.spark.ml.feature.VectorAssembler"