-rw-r--r--  core/src/main/scala/org/apache/spark/storage/BlockManager.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/Utils.scala | 2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala | 2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala | 2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala | 2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala | 2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala | 2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala | 2
13 files changed, 15 insertions, 15 deletions
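
Every hunk in this commit removes a stray '}' typed after a closing interpolation brace. A minimal, self-contained sketch of the pitfall (the object and variable names here are illustrative, not from the patch): in a Scala s-interpolator, ${expr} is closed by a single '}', so any extra '}' is copied literally into the resulting string.

object StrayBraceDemo {
  def main(args: Array[String]): Unit = {
    val rank = 10
    // Before the fix: the second '}' is not part of the interpolation,
    // so it lands in the output verbatim.
    println(s"rank, default: ${rank}}") // prints: rank, default: 10}
    // After the fix: exactly one closing brace per interpolated expression.
    println(s"rank, default: ${rank}")  // prints: rank, default: 10
  }
}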
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
index 86493673d9..eedb27942e 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala
@@ -222,7 +222,7 @@ private[spark] class BlockManager(
return
} catch {
case e: Exception if i < MAX_ATTEMPTS =>
- logError(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}}"
+ logError(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}"
+ s" more times after waiting $SLEEP_TIME_SECS seconds...", e)
Thread.sleep(SLEEP_TIME_SECS * 1000)
}
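
The message in the hunk above is built by concatenating two interpolated strings with '+' (the indented continuation line is unchanged context, not an added line), so the corrected line still reads as one sentence at runtime. A hedged sketch of that retry-and-log pattern follows; the loop and the connect() stub are invented for illustration, and only MAX_ATTEMPTS, SLEEP_TIME_SECS, and the message text mirror the diff.

object RetryLogSketch {
  val MAX_ATTEMPTS = 3
  val SLEEP_TIME_SECS = 5

  // Illustrative stub; the real code attempts to register with the
  // external shuffle service.
  def connect(): Unit = throw new RuntimeException("connection refused")

  def main(args: Array[String]): Unit = {
    for (i <- 1 to MAX_ATTEMPTS) {
      try {
        connect()
        return
      } catch {
        case e: Exception if i < MAX_ATTEMPTS =>
          // Two s-interpolators joined by '+' form a single message, e.g.
          // "... will retry 2 more times after waiting 5 seconds..."
          println(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}"
            + s" more times after waiting $SLEEP_TIME_SECS seconds... ($e)")
          Thread.sleep(SLEEP_TIME_SECS * 1000)
      }
    }
  }
}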
diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
index f70f701494..2a11f371b9 100644
--- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
+++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala
@@ -103,7 +103,7 @@ class BlockManagerMaster(
val future = driverEndpoint.askWithRetry[Future[Seq[Int]]](RemoveRdd(rddId))
future.onFailure {
case e: Exception =>
- logWarning(s"Failed to remove RDD $rddId - ${e.getMessage}}", e)
+ logWarning(s"Failed to remove RDD $rddId - ${e.getMessage}", e)
}(ThreadUtils.sameThread)
if (blocking) {
timeout.awaitResult(future)
@@ -115,7 +115,7 @@ class BlockManagerMaster(
val future = driverEndpoint.askWithRetry[Future[Seq[Boolean]]](RemoveShuffle(shuffleId))
future.onFailure {
case e: Exception =>
- logWarning(s"Failed to remove shuffle $shuffleId - ${e.getMessage}}", e)
+ logWarning(s"Failed to remove shuffle $shuffleId - ${e.getMessage}", e)
}(ThreadUtils.sameThread)
if (blocking) {
timeout.awaitResult(future)
@@ -129,7 +129,7 @@ class BlockManagerMaster(
future.onFailure {
case e: Exception =>
logWarning(s"Failed to remove broadcast $broadcastId" +
- s" with removeFromMaster = $removeFromMaster - ${e.getMessage}}", e)
+ s" with removeFromMaster = $removeFromMaster - ${e.getMessage}", e)
}(ThreadUtils.sameThread)
if (blocking) {
timeout.awaitResult(future)
diff --git a/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala b/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala
index ebead830c6..150d82b393 100644
--- a/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala
+++ b/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala
@@ -181,7 +181,7 @@ private[spark] object ClosureCleaner extends Logging {
return
}
- logDebug(s"+++ Cleaning closure $func (${func.getClass.getName}}) +++")
+ logDebug(s"+++ Cleaning closure $func (${func.getClass.getName}) +++")
// A list of classes that represents closures enclosed in the given one
val innerClasses = getInnerClosureClasses(func)
diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala
index f2abf227dc..fddc24dbfc 100644
--- a/core/src/main/scala/org/apache/spark/util/Utils.scala
+++ b/core/src/main/scala/org/apache/spark/util/Utils.scala
@@ -1366,7 +1366,7 @@ private[spark] object Utils extends Logging {
file.getAbsolutePath, effectiveStartIndex, effectiveEndIndex))
}
sum += fileToLength(file)
- logDebug(s"After processing file $file, string built is ${stringBuffer.toString}}")
+ logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
}
stringBuffer.toString
}
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
index cd411397a4..3ae53e57db 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
@@ -76,7 +76,7 @@ object MovieLensALS {
.text("path to a MovieLens dataset of movies")
.action((x, c) => c.copy(movies = x))
opt[Int]("rank")
- .text(s"rank, default: ${defaultParams.rank}}")
+ .text(s"rank, default: ${defaultParams.rank}")
.action((x, c) => c.copy(rank = x))
opt[Int]("maxIter")
.text(s"max number of iterations, default: ${defaultParams.maxIter}")
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index 57ffe3dd25..cc6bce3cb7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -100,7 +100,7 @@ object DecisionTreeRunner {
.action((x, c) => c.copy(numTrees = x))
opt[String]("featureSubsetStrategy")
.text(s"feature subset sampling strategy" +
- s" (${RandomForest.supportedFeatureSubsetStrategies.mkString(", ")}}), " +
+ s" (${RandomForest.supportedFeatureSubsetStrategies.mkString(", ")}), " +
s"default: ${defaultParams.featureSubsetStrategy}")
.action((x, c) => c.copy(featureSubsetStrategy = x))
opt[Double]("fracTest")
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
index e43a6f2864..69691ae297 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
@@ -55,7 +55,7 @@ object MovieLensALS {
val parser = new OptionParser[Params]("MovieLensALS") {
head("MovieLensALS: an example app for ALS on MovieLens data.")
opt[Int]("rank")
- .text(s"rank, default: ${defaultParams.rank}}")
+ .text(s"rank, default: ${defaultParams.rank}")
.action((x, c) => c.copy(rank = x))
opt[Int]("numIterations")
.text(s"number of iterations, default: ${defaultParams.numIterations}")
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
index 9029093e0f..bbbcc8436b 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
@@ -469,7 +469,7 @@ private[spark] object BLAS extends Serializable with Logging {
require(A.numCols == x.size,
s"The columns of A don't match the number of elements of x. A: ${A.numCols}, x: ${x.size}")
require(A.numRows == y.size,
- s"The rows of A don't match the number of elements of y. A: ${A.numRows}, y:${y.size}}")
+ s"The rows of A don't match the number of elements of y. A: ${A.numRows}, y:${y.size}")
if (alpha == 0.0) {
logDebug("gemv: alpha is equal to 0. Returning y.")
} else {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
index 970f3c8282..8581d6b496 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
@@ -164,7 +164,7 @@ object HiveTypeCoercion {
// Leave the same if the dataTypes match.
case Some(newType) if a.dataType == newType.dataType => a
case Some(newType) =>
- logDebug(s"Promoting $a to $newType in ${q.simpleString}}")
+ logDebug(s"Promoting $a to $newType in ${q.simpleString}")
newType
}
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
index 039c13bf16..8ee3b8bda8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
@@ -170,7 +170,7 @@ object JdbcUtils extends Logging {
case BinaryType => "BLOB"
case TimestampType => "TIMESTAMP"
case DateType => "DATE"
- case t: DecimalType => s"DECIMAL(${t.precision}},${t.scale}})"
+ case t: DecimalType => s"DECIMAL(${t.precision},${t.scale})"
case _ => throw new IllegalArgumentException(s"Don't know how to save $field to JDBC")
})
val nullable = if (field.nullable) "" else "NOT NULL"
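
Unlike the log-message fixes elsewhere in this commit, the JdbcUtils hunk above affects generated SQL: with the doubled braces, a decimal column's DDL type came out as DECIMAL(10},2}) instead of DECIMAL(10,2). A small sketch of the difference, using a local stand-in case class rather than Spark's own DecimalType:

// Local stand-in for illustration only; not Spark's DecimalType.
case class DecimalType(precision: Int, scale: Int)

object DecimalDdlDemo {
  def main(args: Array[String]): Unit = {
    val t = DecimalType(10, 2)
    println(s"DECIMAL(${t.precision}},${t.scale}})") // DECIMAL(10},2})  -- invalid SQL
    println(s"DECIMAL(${t.precision},${t.scale})")   // DECIMAL(10,2)
  }
}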
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
index 0b9bad987c..5779c71f64 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
@@ -162,7 +162,7 @@ private[sql] class SQLListener(sqlContext: SQLContext) extends SparkListener wit
// A task of an old stage attempt. Because a new stage is submitted, we can ignore it.
} else if (stageAttemptID > stageMetrics.stageAttemptId) {
logWarning(s"A task should not have a higher stageAttemptID ($stageAttemptID) then " +
- s"what we have seen (${stageMetrics.stageAttemptId}})")
+ s"what we have seen (${stageMetrics.stageAttemptId})")
} else {
// TODO We don't know the attemptId. Currently, what we can do is overriding the
// accumulator updates. However, if there are two same task are running, such as
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
index 31ce8e1ec1..620b8a36a2 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
@@ -84,7 +84,7 @@ class WriteAheadLogBackedBlockRDD[T: ClassTag](
require(
blockIds.length == walRecordHandles.length,
s"Number of block Ids (${blockIds.length}) must be " +
- s" same as number of WAL record handles (${walRecordHandles.length}})")
+ s" same as number of WAL record handles (${walRecordHandles.length})")
require(
isBlockIdValid.isEmpty || isBlockIdValid.length == blockIds.length,
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
index 363c03d431..deb15d0759 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
@@ -66,7 +66,7 @@ private[streaming] class InputInfoTracker(ssc: StreamingContext) extends Logging
new mutable.HashMap[Int, StreamInputInfo]())
if (inputInfos.contains(inputInfo.inputStreamId)) {
- throw new IllegalStateException(s"Input stream ${inputInfo.inputStreamId}} for batch" +
+ throw new IllegalStateException(s"Input stream ${inputInfo.inputStreamId} for batch" +
s"$batchTime is already added into InputInfoTracker, this is a illegal state")
}
inputInfos += ((inputInfo.inputStreamId, inputInfo))