about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorXiangrui Meng <meng@databricks.com>2015-08-25 23:45:41 -0700
committerDB Tsai <dbt@netflix.com>2015-08-25 23:45:53 -0700
commit21a10a86d20ec1a6fea42286b4d2aae9ce7e848d (patch)
tree5151143669ca9f76ed514f0b618322a6ba7396d1
parent08d390f457f80ffdc2dfce61ea579d9026047f12 (diff)
downloadspark-21a10a86d20ec1a6fea42286b4d2aae9ce7e848d.tar.gz
spark-21a10a86d20ec1a6fea42286b4d2aae9ce7e848d.tar.bz2
spark-21a10a86d20ec1a6fea42286b4d2aae9ce7e848d.zip
[SPARK-10236] [MLLIB] update since versions in mllib.feature
Same as #8421 but for `mllib.feature`. cc dbtsai

Author: Xiangrui Meng <meng@databricks.com>

Closes #8449 from mengxr/SPARK-10236.feature and squashes the following commits:

0e8d658 [Xiangrui Meng] remove unnecessary comment
ad70b03 [Xiangrui Meng] update since versions in mllib.feature

(cherry picked from commit 321d7759691bed9867b1f0470f12eab2faa50aff)
Signed-off-by: DB Tsai <dbt@netflix.com>
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala4
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/ElementwiseProduct.scala3
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala6
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala7
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala12
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala1
8 files changed, 21 insertions, 16 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
index da234bdbb2..6c76e26fd1 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
@@ -71,8 +71,6 @@ object PowerIterationClusteringModel extends Loader[PowerIterationClusteringMode
private[clustering]
val thisClassName = "org.apache.spark.mllib.clustering.PowerIterationClusteringModel"
- /**
- */
@Since("1.4.0")
def save(sc: SparkContext, model: PowerIterationClusteringModel, path: String): Unit = {
val sqlContext = new SQLContext(sc)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
index fdd974d7a3..4743cfd1a2 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ChiSqSelector.scala
@@ -33,7 +33,7 @@ import org.apache.spark.rdd.RDD
*/
@Since("1.3.0")
@Experimental
-class ChiSqSelectorModel (
+class ChiSqSelectorModel @Since("1.3.0") (
@Since("1.3.0") val selectedFeatures: Array[Int]) extends VectorTransformer {
require(isSorted(selectedFeatures), "Array has to be sorted asc")
@@ -112,7 +112,7 @@ class ChiSqSelectorModel (
*/
@Since("1.3.0")
@Experimental
-class ChiSqSelector (
+class ChiSqSelector @Since("1.3.0") (
@Since("1.3.0") val numTopFeatures: Int) extends Serializable {
/**
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/ElementwiseProduct.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/ElementwiseProduct.scala
index 33e2d17bb4..d0a6cf6168 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/ElementwiseProduct.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/ElementwiseProduct.scala
@@ -29,7 +29,8 @@ import org.apache.spark.mllib.linalg._
*/
@Since("1.4.0")
@Experimental
-class ElementwiseProduct(val scalingVec: Vector) extends VectorTransformer {
+class ElementwiseProduct @Since("1.4.0") (
+ @Since("1.4.0") val scalingVec: Vector) extends VectorTransformer {
/**
* Does the hadamard product transformation.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
index d5353ddd97..68078ccfa3 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
@@ -39,8 +39,9 @@ import org.apache.spark.rdd.RDD
*/
@Since("1.1.0")
@Experimental
-class IDF(val minDocFreq: Int) {
+class IDF @Since("1.2.0") (@Since("1.2.0") val minDocFreq: Int) {
+ @Since("1.1.0")
def this() = this(0)
// TODO: Allow different IDF formulations.
@@ -162,7 +163,8 @@ private object IDF {
* Represents an IDF model that can transform term frequency vectors.
*/
@Experimental
-class IDFModel private[spark] (val idf: Vector) extends Serializable {
+@Since("1.1.0")
+class IDFModel private[spark] (@Since("1.1.0") val idf: Vector) extends Serializable {
/**
* Transforms term frequency (TF) vectors to TF-IDF vectors.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
index 0e070257d9..8d5a22520d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
@@ -33,7 +33,7 @@ import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors
*/
@Since("1.1.0")
@Experimental
-class Normalizer(p: Double) extends VectorTransformer {
+class Normalizer @Since("1.1.0") (p: Double) extends VectorTransformer {
@Since("1.1.0")
def this() = this(2)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
index a48b7bba66..ecb3c1e6c1 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
@@ -29,7 +29,7 @@ import org.apache.spark.rdd.RDD
* @param k number of principal components
*/
@Since("1.4.0")
-class PCA(val k: Int) {
+class PCA @Since("1.4.0") (@Since("1.4.0") val k: Int) {
require(k >= 1, s"PCA requires a number of principal components k >= 1 but was given $k")
/**
@@ -74,7 +74,10 @@ class PCA(val k: Int) {
* @param k number of principal components.
* @param pc a principal components Matrix. Each column is one principal component.
*/
-class PCAModel private[spark] (val k: Int, val pc: DenseMatrix) extends VectorTransformer {
+@Since("1.4.0")
+class PCAModel private[spark] (
+ @Since("1.4.0") val k: Int,
+ @Since("1.4.0") val pc: DenseMatrix) extends VectorTransformer {
/**
* Transform a vector by computed Principal Components.
*
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
index b95d5a8990..f018b453ba 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala
@@ -34,7 +34,7 @@ import org.apache.spark.rdd.RDD
*/
@Since("1.1.0")
@Experimental
-class StandardScaler(withMean: Boolean, withStd: Boolean) extends Logging {
+class StandardScaler @Since("1.1.0") (withMean: Boolean, withStd: Boolean) extends Logging {
@Since("1.1.0")
def this() = this(false, true)
@@ -74,11 +74,11 @@ class StandardScaler(withMean: Boolean, withStd: Boolean) extends Logging {
*/
@Since("1.1.0")
@Experimental
-class StandardScalerModel (
- val std: Vector,
- val mean: Vector,
- var withStd: Boolean,
- var withMean: Boolean) extends VectorTransformer {
+class StandardScalerModel @Since("1.3.0") (
+ @Since("1.3.0") val std: Vector,
+ @Since("1.1.0") val mean: Vector,
+ @Since("1.3.0") var withStd: Boolean,
+ @Since("1.3.0") var withMean: Boolean) extends VectorTransformer {
/**
*/
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
index e6f45ae4b0..36b124c5d2 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala
@@ -436,6 +436,7 @@ class Word2Vec extends Serializable with Logging {
* (i * vectorSize, i * vectorSize + vectorSize)
*/
@Experimental
+@Since("1.1.0")
class Word2VecModel private[mllib] (
private val wordIndex: Map[String, Int],
private val wordVectors: Array[Float]) extends Serializable with Saveable {