Diffstat (limited to 'mllib/src/test')
15 files changed, 49 insertions, 49 deletions
diff --git a/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
index ff52115ec0..04c010bd13 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/classification/NaiveBayesSuite.scala
@@ -69,7 +69,7 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext with Defa
   }
 
   def expectedMultinomialProbabilities(model: NaiveBayesModel, feature: Vector): Vector = {
-    val logClassProbs: BV[Double] = model.pi.toBreeze + model.theta.multiply(feature).toBreeze
+    val logClassProbs: BV[Double] = model.pi.asBreeze + model.theta.multiply(feature).asBreeze
     val classProbs = logClassProbs.toArray.map(math.exp)
     val classProbsSum = classProbs.sum
     Vectors.dense(classProbs.map(_ / classProbsSum))
@@ -78,8 +78,8 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext with Defa
   def expectedBernoulliProbabilities(model: NaiveBayesModel, feature: Vector): Vector = {
     val negThetaMatrix = model.theta.map(v => math.log(1.0 - math.exp(v)))
     val negFeature = Vectors.dense(feature.toArray.map(v => 1.0 - v))
-    val piTheta: BV[Double] = model.pi.toBreeze + model.theta.multiply(feature).toBreeze
-    val logClassProbs: BV[Double] = piTheta + negThetaMatrix.multiply(negFeature).toBreeze
+    val piTheta: BV[Double] = model.pi.asBreeze + model.theta.multiply(feature).asBreeze
+    val logClassProbs: BV[Double] = piTheta + negThetaMatrix.multiply(negFeature).asBreeze
     val classProbs = logClassProbs.toArray.map(math.exp)
     val classProbsSum = classProbs.sum
     Vectors.dense(classProbs.map(_ / classProbsSum))
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
index 28fada7053..5cf4377768 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/LogisticRegressionSuite.scala
@@ -411,10 +411,10 @@ class LogisticRegressionSuite extends SparkFunSuite with MLlibTestSparkContext w
     val testRDD1 = sc.parallelize(testData, 2)
 
     val testRDD2 = sc.parallelize(
-      testData.map(x => LabeledPoint(x.label, Vectors.fromBreeze(x.features.toBreeze * 1.0E3))), 2)
+      testData.map(x => LabeledPoint(x.label, Vectors.fromBreeze(x.features.asBreeze * 1.0E3))), 2)
 
     val testRDD3 = sc.parallelize(
-      testData.map(x => LabeledPoint(x.label, Vectors.fromBreeze(x.features.toBreeze * 1.0E6))), 2)
+      testData.map(x => LabeledPoint(x.label, Vectors.fromBreeze(x.features.asBreeze * 1.0E6))), 2)
 
     testRDD1.cache()
     testRDD2.cache()
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
index ab54cb06d5..0c0aefc52b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
@@ -182,7 +182,7 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
     val piVector = new BDV(model.pi)
     // model.theta is row-major; treat it as col-major representation of transpose, and transpose:
     val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
-    val logClassProbs: BV[Double] = piVector + (thetaMatrix * testData.toBreeze)
+    val logClassProbs: BV[Double] = piVector + (thetaMatrix * testData.asBreeze)
     val classProbs = logClassProbs.toArray.map(math.exp)
     val classProbsSum = classProbs.sum
     classProbs.map(_ / classProbsSum)
@@ -234,7 +234,7 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
     val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
     val negThetaMatrix = new BDM(model.theta(0).length, model.theta.length,
       model.theta.flatten.map(v => math.log(1.0 - math.exp(v)))).t
-    val testBreeze = testData.toBreeze
+    val testBreeze = testData.asBreeze
     val negTestBreeze = new BDV(Array.fill(testBreeze.size)(1.0)) - testBreeze
     val piTheta: BV[Double] = piVector + (thetaMatrix * testBreeze)
     val logClassProbs: BV[Double] = piTheta + (negThetaMatrix * negTestBreeze)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
index ea23196d2c..eb050158d4 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
@@ -116,7 +116,7 @@ class LDASuite extends SparkFunSuite with MLlibTestSparkContext {
       case (docId, (topicDistribution, (indices, weights))) =>
         assert(indices.length == 2)
         assert(weights.length == 2)
-        val bdvTopicDist = topicDistribution.toBreeze
+        val bdvTopicDist = topicDistribution.asBreeze
         val top2Indices = argtopk(bdvTopicDist, 2)
         assert(top2Indices.toArray === indices)
         assert(bdvTopicDist(top2Indices).toArray === weights)
@@ -369,7 +369,7 @@ class LDASuite extends SparkFunSuite with MLlibTestSparkContext {
     val actualPredictions = ldaModel.topicDistributions(docs).cache()
     val topTopics = actualPredictions.map { case (id, topics) =>
       // convert results to expectedPredictions format, which only has highest probability topic
-      val topicsBz = topics.toBreeze.toDenseVector
+      val topicsBz = topics.asBreeze.toDenseVector
       (id, (argmax(topicsBz), max(topicsBz)))
     }.sortByKey()
       .values
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/StreamingKMeansSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/StreamingKMeansSuite.scala
index 65e37c64d4..fdaa098345 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/StreamingKMeansSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/StreamingKMeansSuite.scala
@@ -67,7 +67,7 @@ class StreamingKMeansSuite extends SparkFunSuite with TestSuiteBase {
 
     // estimated center from streaming should exactly match the arithmetic mean of all data points
     // because the decay factor is set to 1.0
     val grandMean =
-      input.flatten.map(x => x.toBreeze).reduce(_ + _) / (numBatches * numPoints).toDouble
+      input.flatten.map(x => x.asBreeze).reduce(_ + _) / (numBatches * numPoints).toDouble
     assert(model.latestModel().clusterCenters(0) ~== Vectors.dense(grandMean.toArray) absTol 1E-5)
   }
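The two NaiveBayesSuite hunks above hand-roll the model's posterior as a reference value: log-prior plus theta applied to the feature vector, exponentiated and normalized. A minimal Breeze sketch of that computation, with made-up pi/theta/x values rather than anything from the suites:

    import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}

    // Multinomial NB: log P(c | x) = log pi(c) + theta(c, :) . x, then normalize.
    val pi = BDV(math.log(0.4), math.log(0.6))    // hypothetical log class priors
    val theta = new BDM(2, 3,                     // 2 classes x 3 features, column-major
      Array(-1.0, -0.5, -2.0, -2.5, -1.5, -1.0))  // hypothetical log feature likelihoods
    val x = BDV(1.0, 0.0, 2.0)                    // feature counts
    val logClassProbs: BDV[Double] = pi + (theta * x)
    val classProbs = logClassProbs.toArray.map(math.exp)
    val posterior = classProbs.map(_ / classProbs.sum)   // entries sum to 1

The Bernoulli variant in the same suites adds the complement term, negThetaMatrix * negFeature, to piTheta before normalizing.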
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
index 34122d6ed2..10f7bafd6c 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/feature/NormalizerSuite.scala
@@ -51,10 +51,10 @@ class NormalizerSuite extends SparkFunSuite with MLlibTestSparkContext {
 
     assert((data1, data1RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5))
 
-    assert(brzNorm(data1(0).toBreeze, 1) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data1(2).toBreeze, 1) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data1(3).toBreeze, 1) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data1(4).toBreeze, 1) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data1(0).asBreeze, 1) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data1(2).asBreeze, 1) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data1(3).asBreeze, 1) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data1(4).asBreeze, 1) ~== 1.0 absTol 1E-5)
 
     assert(data1(0) ~== Vectors.sparse(3, Seq((0, -0.465116279), (1, 0.53488372))) absTol 1E-5)
     assert(data1(1) ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5)
@@ -78,10 +78,10 @@ class NormalizerSuite extends SparkFunSuite with MLlibTestSparkContext {
 
     assert((data2, data2RDD.collect()).zipped.forall((v1, v2) => v1 ~== v2 absTol 1E-5))
 
-    assert(brzNorm(data2(0).toBreeze, 2) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data2(2).toBreeze, 2) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data2(3).toBreeze, 2) ~== 1.0 absTol 1E-5)
-    assert(brzNorm(data2(4).toBreeze, 2) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data2(0).asBreeze, 2) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data2(2).asBreeze, 2) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data2(3).asBreeze, 2) ~== 1.0 absTol 1E-5)
+    assert(brzNorm(data2(4).asBreeze, 2) ~== 1.0 absTol 1E-5)
 
     assert(data2(0) ~== Vectors.sparse(3, Seq((0, -0.65617871), (1, 0.75460552))) absTol 1E-5)
     assert(data2(1) ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1E-5)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeMatrixConversionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeMatrixConversionSuite.scala
index de2c3c13bd..9e4735afdd 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeMatrixConversionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeMatrixConversionSuite.scala
@@ -24,7 +24,7 @@ import org.apache.spark.SparkFunSuite
 class BreezeMatrixConversionSuite extends SparkFunSuite {
   test("dense matrix to breeze") {
     val mat = Matrices.dense(3, 2, Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0))
-    val breeze = mat.toBreeze.asInstanceOf[BDM[Double]]
+    val breeze = mat.asBreeze.asInstanceOf[BDM[Double]]
     assert(breeze.rows === mat.numRows)
     assert(breeze.cols === mat.numCols)
     assert(breeze.data.eq(mat.asInstanceOf[DenseMatrix].values), "should not copy data")
@@ -48,7 +48,7 @@ class BreezeMatrixConversionSuite extends SparkFunSuite {
     val colPtrs = Array(0, 2, 4)
     val rowIndices = Array(1, 2, 1, 2)
     val mat = Matrices.sparse(3, 2, colPtrs, rowIndices, values)
-    val breeze = mat.toBreeze.asInstanceOf[BSM[Double]]
+    val breeze = mat.asBreeze.asInstanceOf[BSM[Double]]
     assert(breeze.rows === mat.numRows)
     assert(breeze.cols === mat.numCols)
     assert(breeze.data.eq(mat.asInstanceOf[SparseMatrix].values), "should not copy data")
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeVectorConversionSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeVectorConversionSuite.scala
index 3772c9235a..996f621f18 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeVectorConversionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/BreezeVectorConversionSuite.scala
@@ -33,12 +33,12 @@ class BreezeVectorConversionSuite extends SparkFunSuite {
 
   test("dense to breeze") {
     val vec = Vectors.dense(arr)
-    assert(vec.toBreeze === new BDV[Double](arr))
+    assert(vec.asBreeze === new BDV[Double](arr))
   }
 
   test("sparse to breeze") {
     val vec = Vectors.sparse(n, indices, values)
-    assert(vec.toBreeze === new BSV[Double](indices, values, n))
+    assert(vec.asBreeze === new BSV[Double](indices, values, n))
   }
 
   test("dense breeze to vector") {
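The "should not copy data" assertions in the conversion suites are the point of the rename: the conversion wraps the existing values array in a Breeze view rather than copying it, so asBreeze describes the behaviour better than toBreeze did. A sketch of the aliasing this implies; note that asBreeze is package-private, so code like this only compiles inside Spark's own source tree, as in the suites above:

    import breeze.linalg.{DenseMatrix => BDM}
    import org.apache.spark.mllib.linalg.Matrices

    val mat = Matrices.dense(2, 2, Array(1.0, 2.0, 3.0, 4.0))
    val bm = mat.asBreeze.asInstanceOf[BDM[Double]]  // a view over mat's backing array
    bm(0, 0) = 99.0                                  // mutate through the Breeze side
    assert(mat(0, 0) == 99.0)                        // the change shows through the Spark matrix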
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/MatricesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/MatricesSuite.scala
index 8c5b4bda25..d0c4dd28e1 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/MatricesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/MatricesSuite.scala
@@ -63,7 +63,7 @@ class MatricesSuite extends SparkFunSuite {
       (1, 2, 2.0), (2, 2, 2.0), (1, 2, 2.0), (0, 0, 0.0))
     val mat2 = SparseMatrix.fromCOO(m, n, entries)
-    assert(mat.toBreeze === mat2.toBreeze)
+    assert(mat.asBreeze === mat2.asBreeze)
     assert(mat2.values.length == 4)
   }
 
@@ -176,8 +176,8 @@
     val spMat2 = deMat1.toSparse
     val deMat2 = spMat1.toDense
 
-    assert(spMat1.toBreeze === spMat2.toBreeze)
-    assert(deMat1.toBreeze === deMat2.toBreeze)
+    assert(spMat1.asBreeze === spMat2.asBreeze)
+    assert(deMat1.asBreeze === deMat2.asBreeze)
   }
 
   test("map, update") {
@@ -211,8 +211,8 @@
     val sATexpected = new SparseMatrix(3, 4, Array(0, 1, 2, 3, 4),
       Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0))
 
-    assert(dAT.toBreeze === dATexpected.toBreeze)
-    assert(sAT.toBreeze === sATexpected.toBreeze)
+    assert(dAT.asBreeze === dATexpected.asBreeze)
+    assert(sAT.asBreeze === sATexpected.asBreeze)
     assert(dA(1, 0) === dAT(0, 1))
     assert(dA(2, 1) === dAT(1, 2))
     assert(sA(1, 0) === sAT(0, 1))
@@ -221,8 +221,8 @@
     assert(!dA.toArray.eq(dAT.toArray), "has to have a new array")
     assert(dA.values.eq(dAT.transpose.asInstanceOf[DenseMatrix].values), "should not copy array")
 
-    assert(dAT.toSparse.toBreeze === sATexpected.toBreeze)
-    assert(sAT.toDense.toBreeze === dATexpected.toBreeze)
+    assert(dAT.toSparse.asBreeze === sATexpected.asBreeze)
+    assert(sAT.toDense.asBreeze === dATexpected.asBreeze)
   }
 
   test("foreachActive") {
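MatricesSuite goes through Breeze to compare dense and sparse representations of the same matrix. A public-API round-trip makes a similar point without the package-private conversion (a sketch; the values are arbitrary):

    import org.apache.spark.mllib.linalg.{DenseMatrix, Matrices}

    val dm = Matrices.dense(2, 2, Array(1.0, 0.0, 0.0, 3.0)).asInstanceOf[DenseMatrix]
    val sm = dm.toSparse                 // keeps only the non-zero entries
    // Converting back yields the same column-major contents.
    assert(sm.toDense.toArray.sameElements(dm.toArray))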
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
index 2e9c40ab88..71a3ceac1b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
@@ -269,7 +269,7 @@ class VectorsSuite extends SparkFunSuite with Logging {
     val denseVector1 = Vectors.dense(sparseVector1.toArray)
     val denseVector2 = Vectors.dense(sparseVector2.toArray)
 
-    val squaredDist = breezeSquaredDistance(sparseVector1.toBreeze, sparseVector2.toBreeze)
+    val squaredDist = breezeSquaredDistance(sparseVector1.asBreeze, sparseVector2.asBreeze)
 
     // SparseVector vs. SparseVector
     assert(Vectors.sqdist(sparseVector1, sparseVector2) ~== squaredDist relTol 1E-8)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrixSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrixSuite.scala
index f37eaf225a..e5a2cbbb58 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrixSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/BlockMatrixSuite.scala
@@ -152,7 +152,7 @@ class BlockMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
 
     val C = B.toIndexedRowMatrix.rows.collect
 
-    (C(0).vector.toBreeze, C(1).vector.toBreeze) match {
+    (C(0).vector.asBreeze, C(1).vector.asBreeze) match {
       case (denseVector: BDV[Double], sparseVector: BSV[Double]) =>
         assert(denseVector.length === sparseVector.length)
 
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrixSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrixSuite.scala
index 5b7ccb9015..99af5fa10d 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrixSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrixSuite.scala
@@ -108,7 +108,7 @@ class IndexedRowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
     val C = A.multiply(B)
     val localA = A.toBreeze()
     val localC = C.toBreeze()
-    val expected = localA * B.toBreeze.asInstanceOf[BDM[Double]]
+    val expected = localA * B.asBreeze.asInstanceOf[BDM[Double]]
     assert(localC === expected)
   }
 
@@ -119,7 +119,7 @@
       (90.0, 12.0, 24.0),
       (12.0, 17.0, 22.0),
       (24.0, 22.0, 30.0))
-    assert(G.toBreeze === expected)
+    assert(G.asBreeze === expected)
   }
 
   test("svd") {
@@ -128,8 +128,8 @@
     assert(svd.U.isInstanceOf[IndexedRowMatrix])
     val localA = A.toBreeze()
     val U = svd.U.toBreeze()
-    val s = svd.s.toBreeze.asInstanceOf[BDV[Double]]
-    val V = svd.V.toBreeze.asInstanceOf[BDM[Double]]
+    val s = svd.s.asBreeze.asInstanceOf[BDV[Double]]
+    val V = svd.V.asBreeze.asInstanceOf[BDM[Double]]
     assert(closeToZero(U.t * U - BDM.eye[Double](n)))
     assert(closeToZero(V.t * V - BDM.eye[Double](n)))
     assert(closeToZero(U * brzDiag(s) * V.t - localA))
@@ -155,7 +155,7 @@ class IndexedRowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
 
   test("similar columns") {
     val A = new IndexedRowMatrix(indexedRows)
-    val gram = A.computeGramianMatrix().toBreeze.toDenseMatrix
+    val gram = A.computeGramianMatrix().asBreeze.toDenseMatrix
 
     val G = A.columnSimilarities().toBreeze()
 
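The svd test above asserts the three defining properties of the decomposition: orthonormal U, orthonormal V, and exact reconstruction. The same identities in plain Breeze, as a self-contained sketch (the suite itself exercises the IndexedRowMatrix API, which needs a SparkContext):

    import breeze.linalg.{diag, max, svd, DenseMatrix => BDM}
    import breeze.numerics.abs

    val A = new BDM(2, 2, Array(3.0, 1.0, 1.0, 3.0))  // column-major; symmetric for simplicity
    val svd.SVD(u, s, vt) = svd(A)
    assert(max(abs(u.t * u - BDM.eye[Double](2))) < 1e-9)    // U is orthonormal
    assert(max(abs(vt * vt.t - BDM.eye[Double](2))) < 1e-9)  // V is orthonormal
    assert(max(abs(u * diag(s) * vt - A)) < 1e-9)            // A = U * diag(s) * Vt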
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
index 2dff52c601..7c4c6d8409 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/RowMatrixSuite.scala
@@ -96,7 +96,7 @@ class RowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
       Matrices.dense(n, n, Array(126.0, 54.0, 72.0, 54.0, 66.0, 78.0, 72.0, 78.0, 94.0))
     for (mat <- Seq(denseMat, sparseMat)) {
       val G = mat.computeGramianMatrix()
-      assert(G.toBreeze === expected.toBreeze)
+      assert(G.asBreeze === expected.asBreeze)
     }
   }
 
@@ -153,8 +153,8 @@ class RowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
           assert(V.numRows === n)
           assert(V.numCols === k)
           assertColumnEqualUpToSign(U.toBreeze(), localU, k)
-          assertColumnEqualUpToSign(V.toBreeze.asInstanceOf[BDM[Double]], localV, k)
-          assert(closeToZero(s.toBreeze.asInstanceOf[BDV[Double]] - localSigma(0 until k)))
+          assertColumnEqualUpToSign(V.asBreeze.asInstanceOf[BDM[Double]], localV, k)
+          assert(closeToZero(s.asBreeze.asInstanceOf[BDV[Double]] - localSigma(0 until k)))
         }
       }
       val svdWithoutU = mat.computeSVD(1, computeU = false, 1e-9, 300, 1e-10, mode)
@@ -207,7 +207,7 @@ class RowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
     val (pc, expVariance) = mat.computePrincipalComponentsAndExplainedVariance(k)
     assert(pc.numRows === n)
     assert(pc.numCols === k)
-    assertColumnEqualUpToSign(pc.toBreeze.asInstanceOf[BDM[Double]], principalComponents, k)
+    assertColumnEqualUpToSign(pc.asBreeze.asInstanceOf[BDM[Double]], principalComponents, k)
     assert(
       closeToZero(BDV(expVariance.toArray) -
         BDV(Arrays.copyOfRange(explainedVariance.data, 0, k))))
@@ -256,12 +256,12 @@ class RowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
       val calcQ = result.Q
       val calcR = result.R
       assert(closeToZero(abs(expected.q) - abs(calcQ.toBreeze())))
-      assert(closeToZero(abs(expected.r) - abs(calcR.toBreeze.asInstanceOf[BDM[Double]])))
+      assert(closeToZero(abs(expected.r) - abs(calcR.asBreeze.asInstanceOf[BDM[Double]])))
       assert(closeToZero(calcQ.multiply(calcR).toBreeze - mat.toBreeze()))
 
       // Decomposition without computing Q
       val rOnly = mat.tallSkinnyQR(computeQ = false)
       assert(rOnly.Q == null)
-      assert(closeToZero(abs(expected.r) - abs(rOnly.R.toBreeze.asInstanceOf[BDM[Double]])))
+      assert(closeToZero(abs(expected.r) - abs(rOnly.R.asBreeze.asInstanceOf[BDM[Double]])))
     }
   }
 
@@ -269,7 +269,7 @@
     for (mat <- Seq(denseMat, sparseMat)) {
       val result = mat.computeCovariance()
       val expected = breeze.linalg.cov(mat.toBreeze())
-      assert(closeToZero(abs(expected) - abs(result.toBreeze.asInstanceOf[BDM[Double]])))
+      assert(closeToZero(abs(expected) - abs(result.asBreeze.asInstanceOf[BDM[Double]])))
     }
   }
 
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
index 700f803490..e32767edb1 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/stat/CorrelationSuite.scala
@@ -104,8 +104,8 @@ class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext with Log
       (Double.NaN, Double.NaN, 1.00000000, Double.NaN),
       (0.40047142, 0.91359586, Double.NaN, 1.0000000))
     // scalastyle:on
-    assert(matrixApproxEqual(defaultMat.toBreeze, expected))
-    assert(matrixApproxEqual(pearsonMat.toBreeze, expected))
+    assert(matrixApproxEqual(defaultMat.asBreeze, expected))
+    assert(matrixApproxEqual(pearsonMat.asBreeze, expected))
   }
 
   test("corr(X) spearman") {
@@ -118,7 +118,7 @@ class CorrelationSuite extends SparkFunSuite with MLlibTestSparkContext with Log
       (Double.NaN, Double.NaN, 1.00000000, Double.NaN),
       (0.4000000, 0.9486833, Double.NaN, 1.0000000))
     // scalastyle:on
-    assert(matrixApproxEqual(spearmanMat.toBreeze, expected))
+    assert(matrixApproxEqual(spearmanMat.asBreeze, expected))
   }
 
   test("method identification") {
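The correlation matrices being checked come from the public Statistics.corr entry points. For reference, the call shapes, assuming an active SparkContext sc such as MLlibTestSparkContext provides:

    import org.apache.spark.mllib.linalg.Vectors
    import org.apache.spark.mllib.stat.Statistics

    val X = sc.parallelize(Seq(
      Vectors.dense(1.0, 0.0, 5.0),
      Vectors.dense(2.0, 3.0, 6.0),
      Vectors.dense(4.0, 9.0, 7.0)))
    val pearsonMat = Statistics.corr(X)               // "pearson" is the default method
    val spearmanMat = Statistics.corr(X, "spearman")  // rank-based, hence the different expected values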
a/mllib/src/test/scala/org/apache/spark/mllib/util/MLUtilsSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/util/MLUtilsSuite.scala
@@ -53,13 +53,13 @@ class MLUtilsSuite extends SparkFunSuite with MLlibTestSparkContext {
       val norm2 = Vectors.norm(v2, 2.0)
       val v3 = Vectors.sparse(n, indices, indices.map(i => a(i) + 0.5))
       val norm3 = Vectors.norm(v3, 2.0)
-      val squaredDist = breezeSquaredDistance(v1.toBreeze, v2.toBreeze)
+      val squaredDist = breezeSquaredDistance(v1.asBreeze, v2.asBreeze)
       val fastSquaredDist1 = fastSquaredDistance(v1, norm1, v2, norm2, precision)
       assert((fastSquaredDist1 - squaredDist) <= precision * squaredDist, s"failed with m = $m")
       val fastSquaredDist2 =
         fastSquaredDistance(v1, norm1, Vectors.dense(v2.toArray), norm2, precision)
       assert((fastSquaredDist2 - squaredDist) <= precision * squaredDist, s"failed with m = $m")
-      val squaredDist2 = breezeSquaredDistance(v2.toBreeze, v3.toBreeze)
+      val squaredDist2 = breezeSquaredDistance(v2.asBreeze, v3.asBreeze)
       val fastSquaredDist3 = fastSquaredDistance(v2, norm2, v3, norm3, precision)
       assert((fastSquaredDist3 - squaredDist2) <= precision * squaredDist2, s"failed with m = $m")
@@ -67,7 +67,7 @@
       val v4 = Vectors.sparse(n, indices.slice(0, m - 10),
         indices.map(i => a(i) + 0.5).slice(0, m - 10))
       val norm4 = Vectors.norm(v4, 2.0)
-      val squaredDist = breezeSquaredDistance(v2.toBreeze, v4.toBreeze)
+      val squaredDist = breezeSquaredDistance(v2.asBreeze, v4.asBreeze)
       val fastSquaredDist = fastSquaredDistance(v2, norm2, v4, norm4, precision)
       assert((fastSquaredDist - squaredDist) <= precision * squaredDist, s"failed with m = $m")
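fastSquaredDistance is a private[mllib] helper, which is why MLUtilsSuite compares it against a Breeze reference rather than another public method. The identity it exploits is ||a - b||^2 = ||a||^2 + ||b||^2 - 2 (a . b), evaluated from precomputed norms, with a fallback to an exact pass when the requested precision cannot be guaranteed. The naive form of that identity, sketched with the public API:

    import org.apache.spark.mllib.linalg.Vectors

    val a = Vectors.dense(1.0, 2.0, 3.0)
    val b = Vectors.dense(0.0, 2.0, 5.0)
    val dot = a.toArray.zip(b.toArray).map { case (x, y) => x * y }.sum
    val viaNorms =
      math.pow(Vectors.norm(a, 2.0), 2) + math.pow(Vectors.norm(b, 2.0), 2) - 2.0 * dot
    assert(math.abs(viaNorms - Vectors.sqdist(a, b)) < 1e-9)  // both are 5.0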