author    Nick Pentreath <nickp@za.ibm.com>  2016-06-22 10:05:25 -0700
committer Xiangrui Meng <meng@databricks.com>  2016-06-22 10:05:25 -0700
commit    18faa588ca11190890d2eb569d7497fbb25eee5c (patch)
tree      309c146287fdb3112772d9d41172b73427be2323 /mllib/src/main/scala
parent    ea3a12b0147821960f8dabdc58d726f07f1f0e52 (diff)
[SPARK-16127][ML][PYSPARK] Audit @Since annotations related to ml.linalg
[SPARK-14615](https://issues.apache.org/jira/browse/SPARK-14615) and #12627 changed `spark.ml` pipelines to use the new `ml.linalg` classes for `Vector`/`Matrix`. Some `Since` annotations on public methods/vals were not updated to `2.0.0` accordingly. This PR updates them.

## How was this patch tested?

Existing unit tests.

Author: Nick Pentreath <nickp@za.ibm.com>

Closes #13840 from MLnick/SPARK-16127-ml-linalg-since.
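For context, a minimal sketch of the versioning rule the diffs below apply. `ExampleModel`, the local `Since` annotation, and the `Vector` trait are illustrative stand-ins (so the snippet compiles without Spark on the classpath), not Spark's actual definitions:

```scala
import scala.annotation.StaticAnnotation

// Local stand-in for Spark's @Since annotation, which records the release
// that introduced a public API element.
class Since(version: String) extends StaticAnnotation

// Stand-in for org.apache.spark.ml.linalg.Vector, the linear algebra type
// that spark.ml pipelines adopted in 2.0.0.
trait Vector

// The rule: a member keeps its original @Since version only while its
// signature is unchanged. A member like `coefficients` may predate 2.0.0,
// but its type changed from mllib.linalg.Vector to ml.linalg.Vector, so
// the Vector-typed signature is new in 2.0.0.
class ExampleModel(
    @Since("1.4.0") val uid: String,          // signature unchanged: keeps 1.4.0
    @Since("2.0.0") val coefficients: Vector) // ml.linalg type: new in 2.0.0
```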
Diffstat (limited to 'mllib/src/main/scala')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala             |  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala |  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala                     |  4
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala                             |  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala                                |  4
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala                    |  6
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala                            | 12
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala                   | 12
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala                              |  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala              |  6
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala                 |  4
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala                   |  6
12 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index 2fa8fbcc76..be69d46eeb 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -482,7 +482,7 @@ object LogisticRegression extends DefaultParamsReadable[LogisticRegression] {
@Experimental
class LogisticRegressionModel private[spark] (
@Since("1.4.0") override val uid: String,
- @Since("1.6.0") val coefficients: Vector,
+ @Since("2.0.0") val coefficients: Vector,
@Since("1.3.0") val intercept: Double)
extends ProbabilisticClassificationModel[Vector, LogisticRegressionModel]
with LogisticRegressionParams with MLWritable {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
index 700542117e..76ef32aa3d 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifier.scala
@@ -296,7 +296,7 @@ object MultilayerPerceptronClassifier
class MultilayerPerceptronClassificationModel private[ml] (
@Since("1.5.0") override val uid: String,
@Since("1.5.0") val layers: Array[Int],
- @Since("1.5.0") val weights: Vector)
+ @Since("2.0.0") val weights: Vector)
extends PredictionModel[Vector, MultilayerPerceptronClassificationModel]
with Serializable with MLWritable {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
index a9d493032b..7c340312df 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
@@ -130,8 +130,8 @@ object NaiveBayes extends DefaultParamsReadable[NaiveBayes] {
@Experimental
class NaiveBayesModel private[ml] (
@Since("1.5.0") override val uid: String,
- @Since("1.5.0") val pi: Vector,
- @Since("1.5.0") val theta: Matrix)
+ @Since("2.0.0") val pi: Vector,
+ @Since("2.0.0") val theta: Matrix)
extends ProbabilisticClassificationModel[Vector, NaiveBayesModel]
with NaiveBayesParams with MLWritable {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
index 6f63d04818..9fb7d6a9a2 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
@@ -131,7 +131,7 @@ class KMeansModel private[ml] (
private[clustering] def predict(features: Vector): Int = parentModel.predict(features)
- @Since("1.5.0")
+ @Since("2.0.0")
def clusterCenters: Array[Vector] = parentModel.clusterCenters.map(_.asML)
/**
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
index 609e50eb49..b333d59258 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/LDA.scala
@@ -432,7 +432,7 @@ sealed abstract class LDAModel private[ml] (
* If Online LDA was used and [[optimizeDocConcentration]] was set to false,
* then this returns the fixed (given) value for the [[docConcentration]] parameter.
*/
- @Since("1.6.0")
+ @Since("2.0.0")
def estimatedDocConcentration: Vector = getModel.docConcentration
/**
@@ -444,7 +444,7 @@ sealed abstract class LDAModel private[ml] (
* the Expectation-Maximization ("em") [[optimizer]], then this method could involve
* collecting a large amount of data to the driver (on the order of vocabSize x k).
*/
- @Since("1.6.0")
+ @Since("2.0.0")
def topicsMatrix: Matrix = oldLocalModel.topicsMatrix.asML
/** Indicates whether this instance is of type [[DistributedLDAModel]] */
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala
index 92fefb1e6c..d07833e580 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/ElementwiseProduct.scala
@@ -33,11 +33,11 @@ import org.apache.spark.sql.types.DataType
* multiplier.
*/
@Experimental
-@Since("2.0.0")
-class ElementwiseProduct @Since("2.0.0") (@Since("2.0.0") override val uid: String)
+@Since("1.4.0")
+class ElementwiseProduct @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends UnaryTransformer[Vector, Vector, ElementwiseProduct] with DefaultParamsWritable {
- @Since("2.0.0")
+ @Since("1.4.0")
def this() = this(Identifiable.randomUID("elemProd"))
/**
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala
index 9a4e682890..f9cbad90c9 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Normalizer.scala
@@ -31,11 +31,11 @@ import org.apache.spark.sql.types.DataType
* Normalize a vector to have unit norm using the given p-norm.
*/
@Experimental
-@Since("2.0.0")
-class Normalizer @Since("2.0.0") (@Since("2.0.0") override val uid: String)
+@Since("1.4.0")
+class Normalizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends UnaryTransformer[Vector, Vector, Normalizer] with DefaultParamsWritable {
- @Since("2.0.0")
+ @Since("1.4.0")
def this() = this(Identifiable.randomUID("normalizer"))
/**
@@ -43,17 +43,17 @@ class Normalizer @Since("2.0.0") (@Since("2.0.0") override val uid: String)
* (default: p = 2)
* @group param
*/
- @Since("2.0.0")
+ @Since("1.4.0")
val p = new DoubleParam(this, "p", "the p norm value", ParamValidators.gtEq(1))
setDefault(p -> 2.0)
/** @group getParam */
- @Since("2.0.0")
+ @Since("1.4.0")
def getP: Double = $(p)
/** @group setParam */
- @Since("2.0.0")
+ @Since("1.4.0")
def setP(value: Double): this.type = set(p, value)
override protected def createTransformFunc: Vector => Vector = {
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
index 026014c7d6..7b35fdeaf4 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/PolynomialExpansion.scala
@@ -35,11 +35,11 @@ import org.apache.spark.sql.types.DataType
* `(x, y)`, if we want to expand it with degree 2, then we get `(x, x * x, y, x * y, y * y)`.
*/
@Experimental
-@Since("2.0.0")
-class PolynomialExpansion @Since("2.0.0") (@Since("2.0.0") override val uid: String)
+@Since("1.4.0")
+class PolynomialExpansion @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends UnaryTransformer[Vector, Vector, PolynomialExpansion] with DefaultParamsWritable {
- @Since("2.0.0")
+ @Since("1.4.0")
def this() = this(Identifiable.randomUID("poly"))
/**
@@ -47,18 +47,18 @@ class PolynomialExpansion @Since("2.0.0") (@Since("2.0.0") override val uid: Str
* Default: 2
* @group param
*/
- @Since("2.0.0")
+ @Since("1.4.0")
val degree = new IntParam(this, "degree", "the polynomial degree to expand (>= 1)",
ParamValidators.gtEq(1))
setDefault(degree -> 2)
/** @group getParam */
- @Since("2.0.0")
+ @Since("1.4.0")
def getDegree: Int = $(degree)
/** @group setParam */
- @Since("2.0.0")
+ @Since("1.4.0")
def setDegree(value: Int): this.type = set(degree, value)
override protected def createTransformFunc: Vector => Vector = { v =>
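The three feature transformers above (ElementwiseProduct, Normalizer, PolynomialExpansion) apply the correction in the opposite direction: these classes, their constructors, and their scalar params have existed since 1.4.0, so the audit reverts their annotations from 2.0.0 back to 1.4.0. A minimal sketch of that pattern, with the same illustrative stand-ins (`ExampleTransformer` is hypothetical, not Spark's Normalizer):

```scala
import scala.annotation.StaticAnnotation

class Since(version: String) extends StaticAnnotation // local stand-in, as above

// The class, its constructor, and its Double-valued param all predate the
// ml.linalg migration, so every annotation here stays at 1.4.0; only
// Vector-typed signatures would be annotated 2.0.0.
@Since("1.4.0")
class ExampleTransformer @Since("1.4.0") (@Since("1.4.0") val uid: String) {
  private var p: Double = 2.0 // default p-norm, mirroring Normalizer

  @Since("1.4.0")
  def setP(value: Double): this.type = { p = value; this }

  @Since("1.4.0")
  def getP: Double = p
}
```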
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
index a74d31ff9d..0cac3fa2d7 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/Word2Vec.scala
@@ -240,7 +240,7 @@ class Word2VecModel private[ml] (
* of the word. Returns a dataframe with the words and the cosine similarities between the
* synonyms and the given word vector.
*/
- @Since("1.5.0")
+ @Since("2.0.0")
def findSynonyms(word: Vector, num: Int): DataFrame = {
val spark = SparkSession.builder().getOrCreate()
spark.createDataFrame(wordVectors.findSynonyms(word, num)).toDF("word", "similarity")
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
index fe65e3e810..2dbac49ccf 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/AFTSurvivalRegression.scala
@@ -286,7 +286,7 @@ object AFTSurvivalRegression extends DefaultParamsReadable[AFTSurvivalRegression
@Since("1.6.0")
class AFTSurvivalRegressionModel private[ml] (
@Since("1.6.0") override val uid: String,
- @Since("1.6.0") val coefficients: Vector,
+ @Since("2.0.0") val coefficients: Vector,
@Since("1.6.0") val intercept: Double,
@Since("1.6.0") val scale: Double)
extends Model[AFTSurvivalRegressionModel] with AFTSurvivalRegressionParams with MLWritable {
@@ -307,7 +307,7 @@ class AFTSurvivalRegressionModel private[ml] (
@Since("1.6.0")
def setQuantilesCol(value: String): this.type = set(quantilesCol, value)
- @Since("1.6.0")
+ @Since("2.0.0")
def predictQuantiles(features: Vector): Vector = {
// scale parameter for the Weibull distribution of lifetime
val lambda = math.exp(BLAS.dot(coefficients, features) + intercept)
@@ -319,7 +319,7 @@ class AFTSurvivalRegressionModel private[ml] (
Vectors.dense(quantiles)
}
- @Since("1.6.0")
+ @Since("2.0.0")
def predict(features: Vector): Double = {
math.exp(BLAS.dot(coefficients, features) + intercept)
}
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
index f05b47eda7..9b9429a328 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/IsotonicRegression.scala
@@ -221,14 +221,14 @@ class IsotonicRegressionModel private[ml] (
def setFeatureIndex(value: Int): this.type = set(featureIndex, value)
/** Boundaries in increasing order for which predictions are known. */
- @Since("1.5.0")
+ @Since("2.0.0")
def boundaries: Vector = Vectors.dense(oldModel.boundaries)
/**
* Predictions associated with the boundaries at the same index, monotone because of isotonic
* regression.
*/
- @Since("1.5.0")
+ @Since("2.0.0")
def predictions: Vector = Vectors.dense(oldModel.predictions)
@Since("1.5.0")
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
index 5e8ef1b375..2723f74724 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/LinearRegression.scala
@@ -387,9 +387,9 @@ object LinearRegression extends DefaultParamsReadable[LinearRegression] {
@Since("1.3.0")
@Experimental
class LinearRegressionModel private[ml] (
- override val uid: String,
- val coefficients: Vector,
- val intercept: Double)
+ @Since("1.4.0") override val uid: String,
+ @Since("2.0.0") val coefficients: Vector,
+ @Since("1.3.0") val intercept: Double)
extends RegressionModel[Vector, LinearRegressionModel]
with LinearRegressionParams with MLWritable {