author    Xiangrui Meng <meng@databricks.com>  2015-05-12 14:39:03 -0700
committer Xiangrui Meng <meng@databricks.com>  2015-05-12 14:39:03 -0700
commit a4874b0d1820efd24071108434a4d89429473fe3 (patch)
tree   2c7a40af55ae544374e86208bb0d71d272d4ccc8 /mllib
parent 455551d1c6cc206ffe1ff5ac52ca0ed89c61653d (diff)
[SPARK-7571] [MLLIB] rename Math to math
`scala.Math` has been deprecated since Scala 2.8. This PR only touches `Math` usages in MLlib. dbtsai

Author: Xiangrui Meng <meng@databricks.com>

Closes #6092 from mengxr/SPARK-7571 and squashes the following commits:

fe8f8d3 [Xiangrui Meng] Math -> math
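For context: the change replaces the deprecated `scala.Math` object (and capitalized `java.lang.Math` members such as `Math.PI`) with the `scala.math` package object, which is available without any import. A minimal before/after sketch:

  // Deprecated since Scala 2.8:
  //   val x = Math.log(2.0)
  //   val p = Math.PI
  // Preferred: the scala.math package object, no import needed.
  val x = math.log(2.0)         // natural logarithm
  val m = math.max(400, 20 * 5) // 500
  val p = math.Pi               // note: math.Pi, not Math.PI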
Diffstat (limited to 'mllib')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala       | 4
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala           | 2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/optimization/NNLS.scala                    | 2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/stat/KernelDensity.scala                   | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala  | 2
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala              | 8
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala    | 4
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/optimization/LBFGSSuite.scala              | 2
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala   | 2
9 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index 647226a0d1..93ba91167b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -175,7 +175,7 @@ class LogisticRegression
* }}}
*/
initialWeightsWithIntercept.toArray(numFeatures)
- = Math.log(histogram(1).toDouble / histogram(0).toDouble)
+ = math.log(histogram(1).toDouble / histogram(0).toDouble)
}
val states = optimizer.iterations(new CachedDiffFunction(costFun),
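As the tail of the doc comment above indicates (the full formula appears in the test suite below: b = log(P(1)/P(0)) = log(count_1/count_0)), the intercept is initialized to the prior log-odds of the labels. A worked sketch with hypothetical counts:

  // Hypothetical label histogram: 30 zeros, 70 ones.
  val histogram = Array(30.0, 70.0)
  // Start the optimizer at the prior log-odds of the labels.
  val b = math.log(histogram(1) / histogram(0)) // log(70/30) ≈ 0.847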
@@ -285,7 +285,7 @@ class LogisticRegressionModel private[ml] (
} else if (t == 1.0) {
Double.PositiveInfinity
} else {
- Math.log(t / (1.0 - t))
+ math.log(t / (1.0 - t))
}
if (rawPrediction(1) > rawThreshold) 1 else 0
}
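This branch converts a probability threshold t into raw-score space via the logit, log(t / (1 - t)). A self-contained sketch of the mapping, assuming (as the elided first branch presumably does) that t = 0 maps to negative infinity:

  def rawThreshold(t: Double): Double =
    if (t == 0.0) Double.NegativeInfinity
    else if (t == 1.0) Double.PositiveInfinity
    else math.log(t / (1.0 - t))

  rawThreshold(0.5) // 0.0: predict class 1 whenever rawPrediction(1) > 0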
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala
index 568b653056..c88410ac0f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/GaussianMixture.scala
@@ -160,7 +160,7 @@ class GaussianMixture private (
var llhp = 0.0 // previous log-likelihood
var iter = 0
- while(iter < maxIterations && Math.abs(llh-llhp) > convergenceTol) {
+ while (iter < maxIterations && math.abs(llh-llhp) > convergenceTol) {
// create and broadcast curried cluster contribution function
val compute = sc.broadcast(ExpectationSum.add(weights, gaussians)_)
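The loop condition is EM's usual stopping rule: iterate until the log-likelihood improvement drops below the tolerance or the iteration cap is reached. A stripped-down sketch of that control flow; maxIterations, convergenceTol, and emStep are placeholders here, not the real MLlib members:

  val maxIterations = 100    // hypothetical cap
  val convergenceTol = 1e-3  // hypothetical tolerance
  def emStep(): Double = ??? // stand-in for one E-step/M-step returning the new llh
  var llh = Double.MinValue  // current log-likelihood
  var llhp = 0.0             // previous log-likelihood
  var iter = 0
  while (iter < maxIterations && math.abs(llh - llhp) > convergenceTol) {
    llhp = llh
    llh = emStep()
    iter += 1
  }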
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/NNLS.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/NNLS.scala
index 4766f77082..64d52bae00 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/NNLS.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/NNLS.scala
@@ -91,7 +91,7 @@ private[spark] object NNLS {
val dir = ws.dir
val lastDir = ws.lastDir
val res = ws.res
- val iterMax = Math.max(400, 20 * n)
+ val iterMax = math.max(400, 20 * n)
var lastNorm = 0.0
var iterno = 0
var lastWall = 0 // Last iteration when we hit a bound constraint.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/KernelDensity.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/KernelDensity.scala
index 0deef11b45..79747cc5d7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/KernelDensity.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/KernelDensity.scala
@@ -32,7 +32,7 @@ private[stat] object KernelDensity {
// This gets used in each Gaussian PDF computation, so compute it up front
val logStandardDeviationPlusHalfLog2Pi =
- Math.log(standardDeviation) + 0.5 * Math.log(2 * Math.PI)
+ math.log(standardDeviation) + 0.5 * math.log(2 * math.Pi)
val (points, count) = samples.aggregate((new Array[Double](evaluationPoints.length), 0))(
(x, y) => {
@@ -66,6 +66,6 @@ private[stat] object KernelDensity {
val x0 = x - mean
val x1 = x0 / standardDeviation
val logDensity = -0.5 * x1 * x1 - logStandardDeviationPlusHalfLog2Pi
- Math.exp(logDensity)
+ math.exp(logDensity)
}
}
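Together these hunks evaluate a Gaussian PDF in log space, hoisting the constant log(sigma) + 0.5 * log(2 * pi) out because it is reused for every (evaluation point, sample) pair. A self-contained sketch of the same computation (the real method precomputes the constant once, outside the function):

  // Gaussian PDF via its log, which avoids underflow for far-away points.
  def normPdf(mean: Double, standardDeviation: Double, x: Double): Double = {
    val logStandardDeviationPlusHalfLog2Pi =
      math.log(standardDeviation) + 0.5 * math.log(2 * math.Pi)
    val x1 = (x - mean) / standardDeviation
    math.exp(-0.5 * x1 * x1 - logStandardDeviationPlusHalfLog2Pi)
  }

  normPdf(0.0, 1.0, 0.0) // ≈ 0.3989, the standard normal peak 1 / sqrt(2 * pi)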
diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
index 78cdd47185..4df8016009 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
@@ -489,7 +489,7 @@ class LogisticRegressionSuite extends FunSuite with MLlibTestSparkContext {
* b = \log{P(1) / P(0)} = \log{count_1 / count_0}
* }}}
*/
- val interceptTheory = Math.log(histogram(1).toDouble / histogram(0).toDouble)
+ val interceptTheory = math.log(histogram(1).toDouble / histogram(0).toDouble)
val weightsTheory = Array(0.0, 0.0, 0.0, 0.0)
assert(model.intercept ~== interceptTheory relTol 1E-5)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
index 85fdd271b5..5c4af2b99e 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
@@ -106,10 +106,10 @@ class NormalizerSuite extends FunSuite with MLlibTestSparkContext {
assert((dataInf, dataInfRDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5))
- assert(dataInf(0).toArray.map(Math.abs).max ~== 1.0 absTol 1E-5)
- assert(dataInf(2).toArray.map(Math.abs).max ~== 1.0 absTol 1E-5)
- assert(dataInf(3).toArray.map(Math.abs).max ~== 1.0 absTol 1E-5)
- assert(dataInf(4).toArray.map(Math.abs).max ~== 1.0 absTol 1E-5)
+ assert(dataInf(0).toArray.map(math.abs).max ~== 1.0 absTol 1E-5)
+ assert(dataInf(2).toArray.map(math.abs).max ~== 1.0 absTol 1E-5)
+ assert(dataInf(3).toArray.map(math.abs).max ~== 1.0 absTol 1E-5)
+ assert(dataInf(4).toArray.map(math.abs).max ~== 1.0 absTol 1E-5)
assert(dataInf(0) ~== Vectors.sparse(3, Seq((0, -0.86956522), (1, 1.0))) absTol 1E-5)
assert(dataInf(1) ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5)
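These assertions check the defining property of L-infinity normalization: every non-zero vector is rescaled so that its largest absolute component equals 1 (the all-zero row, dataInf(1), stays all zeros). A minimal sketch of that transform on a plain array:

  // Scale a vector by its L-infinity (max-absolute-value) norm.
  def normalizeInf(v: Array[Double]): Array[Double] = {
    val norm = v.map(math.abs).max
    if (norm == 0.0) v else v.map(_ / norm)
  }

  normalizeInf(Array(-4.0, 2.0, 1.0)).map(math.abs).max // 1.0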
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
index 3309713e91..27bb19f472 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
@@ -96,7 +96,7 @@ class RowMatrixSuite extends FunSuite with MLlibTestSparkContext {
}
test("similar columns") {
- val colMags = Vectors.dense(Math.sqrt(126), Math.sqrt(66), Math.sqrt(94))
+ val colMags = Vectors.dense(math.sqrt(126), math.sqrt(66), math.sqrt(94))
val expected = BDM(
(0.0, 54.0, 72.0),
(0.0, 0.0, 78.0),
@@ -232,7 +232,7 @@ class RowMatrixSuite extends FunSuite with MLlibTestSparkContext {
assert(summary.numNonzeros === Vectors.dense(3.0, 3.0, 4.0), "nnz mismatch")
assert(summary.max === Vectors.dense(9.0, 7.0, 8.0), "max mismatch")
assert(summary.min === Vectors.dense(0.0, 0.0, 1.0), "column mismatch.")
- assert(summary.normL2 === Vectors.dense(Math.sqrt(126), Math.sqrt(66), Math.sqrt(94)),
+ assert(summary.normL2 === Vectors.dense(math.sqrt(126), math.sqrt(66), math.sqrt(94)),
"magnitude mismatch.")
assert(summary.normL1 === Vectors.dense(18.0, 12.0, 16.0), "L1 norm mismatch")
}
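The expected magnitudes are per-column L2 norms: the square root of the sum of squares down each column. For example, a hypothetical column (0, 3, 6, 9) has norm sqrt(0 + 9 + 36 + 81) = sqrt(126). A one-line sketch:

  // L2 norm of a single matrix column.
  def colNormL2(col: Seq[Double]): Double = math.sqrt(col.map(x => x * x).sum)

  colNormL2(Seq(0.0, 3.0, 6.0, 9.0)) // sqrt(126) ≈ 11.225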
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/optimization/LBFGSSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/optimization/LBFGSSuite.scala
index 70c64775e4..c8f2adcf15 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/optimization/LBFGSSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/optimization/LBFGSSuite.scala
@@ -89,7 +89,7 @@ class LBFGSSuite extends FunSuite with MLlibTestSparkContext with Matchers {
// it requires 90 iterations in GD. No matter how hard we increase
// the number of iterations in GD here, the lossGD will be always
// larger than lossLBFGS. This is based on observation, no theoretically guaranteed
- assert(Math.abs((lossGD.last - loss.last) / loss.last) < 0.02,
+ assert(math.abs((lossGD.last - loss.last) / loss.last) < 0.02,
"LBFGS should match GD result within 2% difference.")
}
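The assertion uses a relative difference, so the 2% bound is independent of the absolute scale of the loss. A sketch of the check:

  // Relative difference between the final GD loss and the final L-BFGS loss.
  def relDiff(a: Double, b: Double): Double = math.abs((a - b) / b)

  relDiff(1.02, 1.00)  // 0.02, exactly at the 2% bound
  relDiff(1.015, 1.00) // 0.015, would pass the assertion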
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
index 8e12340bbd..3b38bdf5ef 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/regression/IsotonicRegressionSuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.util.Utils
class IsotonicRegressionSuite extends FunSuite with MLlibTestSparkContext with Matchers {
private def round(d: Double) = {
- Math.round(d * 100).toDouble / 100
+ math.round(d * 100).toDouble / 100
}
private def generateIsotonicInput(labels: Seq[Double]): Seq[(Double, Double, Double)] = {
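The round helper rounds to two decimal places by scaling up, rounding to the nearest long, and scaling back down, so predictions can be compared loosely. A quick check of its behavior:

  def round(d: Double): Double = math.round(d * 100).toDouble / 100

  round(3.14159) // 3.14
  round(2.718)   // 2.72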