author     Jacek Laskowski <jacek@japila.pl>        2016-05-05 16:34:27 -0700
committer  Andrew Or <andrew@databricks.com>        2016-05-05 16:34:27 -0700
commit     bbb77734374010e36731bf6db1fac0273de8206d (patch)
tree       0ee24dee864521415ce1ae5e3a0b9857e147b4c9 /core
parent     02c07e8999dca545849cb3aa758a624dc51cd1e9 (diff)
[SPARK-15152][DOC][MINOR] Scaladoc and Code style Improvements
## What changes were proposed in this pull request?

Minor doc and code style fixes

## How was this patch tested?

local build

Author: Jacek Laskowski <jacek@japila.pl>

Closes #12928 from jaceklaskowski/SPARK-15152.
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/Accumulator.scala                   |  9
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/Pool.scala                |  9
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/SchedulingAlgorithm.scala | 13
-rw-r--r--  core/src/main/scala/org/apache/spark/util/ShutdownHookManager.scala      |  4
4 files changed, 14 insertions, 21 deletions
diff --git a/core/src/main/scala/org/apache/spark/Accumulator.scala b/core/src/main/scala/org/apache/spark/Accumulator.scala
index 23245043e2..9d1f1d59db 100644
--- a/core/src/main/scala/org/apache/spark/Accumulator.scala
+++ b/core/src/main/scala/org/apache/spark/Accumulator.scala
@@ -24,16 +24,17 @@ package org.apache.spark
* They can be used to implement counters (as in MapReduce) or sums. Spark natively supports
* accumulators of numeric value types, and programmers can add support for new types.
*
- * An accumulator is created from an initial value `v` by calling [[SparkContext#accumulator]].
- * Tasks running on the cluster can then add to it using the [[Accumulable#+=]] operator.
+ * An accumulator is created from an initial value `v` by calling
+ * [[SparkContext#accumulator SparkContext.accumulator]].
+ * Tasks running on the cluster can then add to it using the [[Accumulable#+= +=]] operator.
* However, they cannot read its value. Only the driver program can read the accumulator's value,
- * using its value method.
+ * using its [[#value]] method.
*
* The interpreter session below shows an accumulator being used to add up the elements of an array:
*
* {{{
* scala> val accum = sc.accumulator(0)
- * accum: spark.Accumulator[Int] = 0
+ * accum: org.apache.spark.Accumulator[Int] = 0
*
* scala> sc.parallelize(Array(1, 2, 3, 4)).foreach(x => accum += x)
* ...
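The interpreter session in the scaladoc translates directly into a standalone driver program. A minimal self-contained sketch of the same pattern (the object name, app name, and `local[*]` master below are illustrative choices, not part of the patch; the accumulator calls are the ones the scaladoc documents):

{{{
import org.apache.spark.{SparkConf, SparkContext}

object AccumulatorExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("accumulator-example").setMaster("local[*]")
    val sc = new SparkContext(conf)
    // Created on the driver from the initial value 0.
    val accum = sc.accumulator(0)
    // Tasks running on the cluster may only add to it with +=; they cannot read it.
    sc.parallelize(Array(1, 2, 3, 4)).foreach(x => accum += x)
    // Only the driver program can read the result, via value.
    println(accum.value) // 10
    sc.stop()
  }
}
}}}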
diff --git a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
index a79e71ec7c..5987cfea2e 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
@@ -26,16 +26,14 @@ import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
/**
- * An Schedulable entity that represent collection of Pools or TaskSetManagers
+ * A Schedulable entity that represents a collection of Pools or TaskSetManagers
*/
-
private[spark] class Pool(
val poolName: String,
val schedulingMode: SchedulingMode,
initMinShare: Int,
initWeight: Int)
- extends Schedulable
- with Logging {
+ extends Schedulable with Logging {
val schedulableQueue = new ConcurrentLinkedQueue[Schedulable]
val schedulableNameToSchedulable = new ConcurrentHashMap[String, Schedulable]
@@ -56,7 +54,8 @@ private[spark] class Pool(
case SchedulingMode.FIFO =>
new FIFOSchedulingAlgorithm()
case _ =>
- throw new IllegalArgumentException(s"Unsupported spark.scheduler.mode: $schedulingMode")
+ val msg = s"Unsupported scheduling mode: $schedulingMode. Use FAIR or FIFO instead."
+ throw new IllegalArgumentException(msg)
}
}
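Note that the new message depends on Scala string interpolation, which only substitutes `$schedulingMode` when the literal carries the `s` prefix; without it the placeholder is emitted verbatim. A quick self-contained illustration:

{{{
object InterpolationDemo extends App {
  val schedulingMode = "NONE"
  // Without the s prefix, the placeholder is NOT substituted:
  println("Unsupported scheduling mode: $schedulingMode")
  // prints: Unsupported scheduling mode: $schedulingMode
  println(s"Unsupported scheduling mode: $schedulingMode")
  // prints: Unsupported scheduling mode: NONE
}
}}}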
diff --git a/core/src/main/scala/org/apache/spark/scheduler/SchedulingAlgorithm.scala b/core/src/main/scala/org/apache/spark/scheduler/SchedulingAlgorithm.scala
index 864941d468..18ebbbe78a 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/SchedulingAlgorithm.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/SchedulingAlgorithm.scala
@@ -36,11 +36,7 @@ private[spark] class FIFOSchedulingAlgorithm extends SchedulingAlgorithm {
val stageId2 = s2.stageId
res = math.signum(stageId1 - stageId2)
}
- if (res < 0) {
- true
- } else {
- false
- }
+ res < 0
}
}
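Stripped of the Schedulable machinery, the FIFO comparator is a pure numeric ordering: the lower priority value runs first, with ties broken by the lower stage id, which is exactly what `res < 0` expresses. A compact stand-alone sketch, suitable for pasting into a Scala REPL (`Job` and `fifoLessThan` are illustrative stand-ins, not Spark names):

{{{
// Hypothetical stand-in for Schedulable, for illustration only.
case class Job(priority: Int, stageId: Int)

// True if s1 should run before s2: lower priority value first,
// ties broken by the lower stage id.
def fifoLessThan(s1: Job, s2: Job): Boolean = {
  var res = math.signum(s1.priority - s2.priority)
  if (res == 0) {
    res = math.signum(s1.stageId - s2.stageId)
  }
  res < 0
}
}}}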
@@ -52,12 +48,12 @@ private[spark] class FairSchedulingAlgorithm extends SchedulingAlgorithm {
val runningTasks2 = s2.runningTasks
val s1Needy = runningTasks1 < minShare1
val s2Needy = runningTasks2 < minShare2
- val minShareRatio1 = runningTasks1.toDouble / math.max(minShare1, 1.0).toDouble
- val minShareRatio2 = runningTasks2.toDouble / math.max(minShare2, 1.0).toDouble
+ val minShareRatio1 = runningTasks1.toDouble / math.max(minShare1, 1.0)
+ val minShareRatio2 = runningTasks2.toDouble / math.max(minShare2, 1.0)
val taskToWeightRatio1 = runningTasks1.toDouble / s1.weight.toDouble
val taskToWeightRatio2 = runningTasks2.toDouble / s2.weight.toDouble
- var compare: Int = 0
+ var compare = 0
if (s1Needy && !s2Needy) {
return true
} else if (!s1Needy && s2Needy) {
@@ -67,7 +63,6 @@ private[spark] class FairSchedulingAlgorithm extends SchedulingAlgorithm {
} else {
compare = taskToWeightRatio1.compareTo(taskToWeightRatio2)
}
-
if (compare < 0) {
true
} else if (compare > 0) {
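The fair comparator orders schedulables in three tiers: a pool running below its minimum share beats one that is not; two needy pools compare by their minShare ratio; two satisfied pools compare by running tasks per unit of weight. A self-contained sketch of that ordering (`Sched` and `fairLessThan` are illustrative stand-ins; the real comparator also breaks exact ties by schedulable name, omitted here):

{{{
// Hypothetical stand-in for Schedulable, for illustration only.
case class Sched(runningTasks: Int, minShare: Int, weight: Int)

// True if s1 should be scheduled before s2.
def fairLessThan(s1: Sched, s2: Sched): Boolean = {
  val s1Needy = s1.runningTasks < s1.minShare
  val s2Needy = s2.runningTasks < s2.minShare
  val minShareRatio1 = s1.runningTasks.toDouble / math.max(s1.minShare, 1.0)
  val minShareRatio2 = s2.runningTasks.toDouble / math.max(s2.minShare, 1.0)
  val taskToWeightRatio1 = s1.runningTasks.toDouble / s1.weight.toDouble
  val taskToWeightRatio2 = s2.runningTasks.toDouble / s2.weight.toDouble
  if (s1Needy && !s2Needy) {
    true                                     // a pool under its minimum share always wins
  } else if (!s1Needy && s2Needy) {
    false
  } else if (s1Needy && s2Needy) {
    minShareRatio1 < minShareRatio2          // least-satisfied minimum share first
  } else {
    taskToWeightRatio1 < taskToWeightRatio2  // lightest load per unit weight first
  }
}
}}}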
diff --git a/core/src/main/scala/org/apache/spark/util/ShutdownHookManager.scala b/core/src/main/scala/org/apache/spark/util/ShutdownHookManager.scala
index bd26bfd848..93ac67e5db 100644
--- a/core/src/main/scala/org/apache/spark/util/ShutdownHookManager.scala
+++ b/core/src/main/scala/org/apache/spark/util/ShutdownHookManager.scala
@@ -170,9 +170,7 @@ private [util] class SparkShutdownHookManager {
@volatile private var shuttingDown = false
/**
- * Install a hook to run at shutdown and run all registered hooks in order. Hadoop 1.x does not
- * have `ShutdownHookManager`, so in that case we just use the JVM's `Runtime` object and hope for
- * the best.
+ * Install a hook to run at shutdown and run all registered hooks in order.
*/
def install(): Unit = {
val hookTask = new Runnable() {
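The manager keeps prioritized hooks and installs a single JVM-level hook that drains them in order. A minimal self-contained sketch of that pattern (illustrative only: `SimpleShutdownHookManager` is not Spark's class, and per the scaladoc removed above Spark routes its hook through Hadoop's `ShutdownHookManager` rather than registering directly on `Runtime`):

{{{
import scala.collection.mutable

// Keep prioritized hooks; install one JVM shutdown hook that runs
// them all, highest priority first.
class SimpleShutdownHookManager {
  private case class Hook(priority: Int, body: () => Unit)
  private val hooks =
    mutable.PriorityQueue.empty[Hook](Ordering.by((h: Hook) => h.priority))
  @volatile private var shuttingDown = false

  def add(priority: Int)(body: => Unit): Unit = synchronized {
    require(!shuttingDown, "Cannot add a hook while shutting down")
    hooks.enqueue(Hook(priority, () => body))
  }

  // Install a hook to run at shutdown and run all registered hooks in order.
  def install(): Unit = {
    val hookTask = new Runnable() {
      override def run(): Unit = runAll()
    }
    Runtime.getRuntime.addShutdownHook(new Thread(hookTask, "shutdown-hooks"))
  }

  def runAll(): Unit = synchronized {
    shuttingDown = true
    while (hooks.nonEmpty) {
      hooks.dequeue().body()
    }
  }
}
}}}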