author    Vijay Ramesh <vramesh@demandbase.com>    2017-04-09 19:39:09 +0100
committer Sean Owen <sowen@cloudera.com>           2017-04-09 19:39:09 +0100
commit    261eaf5149a8fe479ab4f9c34db892bcedbf5739 (patch)
tree      e086e6fe7cd0f180e0751220506916a89b09c0c7 /mllib
parent    1f0de3c1c85a41eadc7c4131bdc948405f340099 (diff)
[SPARK-20260][MLLIB] String interpolation required for error message
## What changes were proposed in this pull request?

This error message doesn't get properly formatted because of a missing `s`. Currently the error looks like:

```
Caused by: java.lang.IllegalArgumentException: requirement failed: indices should be one-based and in ascending order; found current=$current, previous=$previous; line="$line"
```

(note the literal `$current` instead of the interpolated value)

Please review http://spark.apache.org/contributing.html before opening a pull request.

Author: Vijay Ramesh <vramesh@demandbase.com>

Closes #17572 from vijaykramesh/master.
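For context, here is a minimal standalone Scala sketch (not part of this patch) showing why the `s` prefix matters: without it the string is a plain literal and the `$` placeholders are printed verbatim; with it they are substituted.

```scala
object MissingInterpolatorDemo {
  def main(args: Array[String]): Unit = {
    val current = 3
    val previous = 5

    // No `s` prefix: plain string literal, placeholders emitted verbatim.
    println("found current=$current, previous=$previous")
    // prints: found current=$current, previous=$previous

    // With the `s` prefix the values are interpolated.
    println(s"found current=$current, previous=$previous")
    // prints: found current=3, previous=5
  }
}
```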
Diffstat (limited to 'mllib')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala  4
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala         2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala                         2
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala                    2
4 files changed, 5 insertions, 5 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
index 4d3e265455..b2437b845f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.scala
@@ -259,7 +259,7 @@ object PowerIterationClustering extends Logging {
val j = ctx.dstId
val s = ctx.attr
if (s < 0.0) {
- throw new SparkException("Similarity must be nonnegative but found s($i, $j) = $s.")
+ throw new SparkException(s"Similarity must be nonnegative but found s($i, $j) = $s.")
}
if (s > 0.0) {
ctx.sendToSrc(s)
@@ -283,7 +283,7 @@ object PowerIterationClustering extends Logging {
: Graph[Double, Double] = {
val edges = similarities.flatMap { case (i, j, s) =>
if (s < 0.0) {
- throw new SparkException("Similarity must be nonnegative but found s($i, $j) = $s.")
+ throw new SparkException(s"Similarity must be nonnegative but found s($i, $j) = $s.")
}
if (i != j) {
Seq(Edge(i, j, s), Edge(j, i, s))
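As a standalone illustration of the PowerIterationClustering change (a hypothetical `SimilarityCheck` object, not Spark code), the interpolator substitutes `$i`, `$j`, and `$s` while leaving surrounding literal text such as `s(` and `)` untouched:

```scala
object SimilarityCheck {
  // Mirrors the patched check: the `s` prefix makes $i, $j and $s interpolate.
  def check(i: Long, j: Long, s: Double): Unit = {
    if (s < 0.0) {
      throw new IllegalArgumentException(
        s"Similarity must be nonnegative but found s($i, $j) = $s.")
    }
  }

  def main(args: Array[String]): Unit = {
    try check(1L, 2L, -0.5) catch {
      case e: IllegalArgumentException =>
        println(e.getMessage)
        // prints: Similarity must be nonnegative but found s(1, 2) = -0.5.
    }
  }
}
```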
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
index a1562384b0..27618e122a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
@@ -248,7 +248,7 @@ object DecisionTreeModel extends Loader[DecisionTreeModel] with Logging {
// Build node data into a tree.
val trees = constructTrees(nodes)
assert(trees.length == 1,
- "Decision tree should contain exactly one tree but got ${trees.size} trees.")
+ s"Decision tree should contain exactly one tree but got ${trees.size} trees.")
val model = new DecisionTreeModel(trees(0), Algo.fromString(algo))
assert(model.numNodes == numNodes, s"Unable to load DecisionTreeModel data from: $dataPath." +
s" Expected $numNodes nodes but found ${model.numNodes}")
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index 95f904dac5..4fdad05973 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -119,7 +119,7 @@ object MLUtils extends Logging {
while (i < indicesLength) {
val current = indices(i)
require(current > previous, s"indices should be one-based and in ascending order;"
- + " found current=$current, previous=$previous; line=\"$line\"")
+ + s""" found current=$current, previous=$previous; line="$line"""")
previous = current
i += 1
}
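The `MLUtils` fix uses a triple-quoted interpolator, `s"""..."""`, so the offending input line can be quoted with literal `"` characters without escaping. A standalone sketch with made-up sample values:

```scala
object TripleQuotedInterpolation {
  def main(args: Array[String]): Unit = {
    val current = 3
    val previous = 5
    val line = "1.0 5:2.0 3:1.5"  // hypothetical libSVM-style input line

    // The triple-quoted part may contain unescaped double quotes.
    val msg = s"indices should be one-based and in ascending order;" +
      s""" found current=$current, previous=$previous; line="$line""""
    println(msg)
    // prints: indices should be one-based and in ascending order; found current=3, previous=5; line="1.0 5:2.0 3:1.5"
  }
}
```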
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala
index 39a6bc37d9..d39865a19a 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtils.scala
@@ -207,7 +207,7 @@ object TestingUtils {
if (r.fun(x, r.y, r.eps)) {
throw new TestFailedException(
s"Did not expect \n$x\n and \n${r.y}\n to be within " +
- "${r.eps}${r.method} for all elements.", 0)
+ s"${r.eps}${r.method} for all elements.", 0)
}
true
}