author     Matei Zaharia <matei@eecs.berkeley.edu>  2013-06-07 22:51:28 -0700
committer  Matei Zaharia <matei@eecs.berkeley.edu>  2013-06-07 22:51:28 -0700
commit     b58a29295b2e610cadf1cac44438337ce9b51537 (patch)
tree       3376a9a0e0a6ae104e470c6f96c97a85d7f33a5d
parent     c8fc423bc2d6f9ed571bd0ad8c4c446650c72548 (diff)
Small formatting and style fixes
-rw-r--r--  core/src/main/scala/spark/scheduler/cluster/SchedulingAlgorithm.scala  8
-rw-r--r--  core/src/main/scala/spark/storage/StorageUtils.scala  14
2 files changed, 11 insertions, 11 deletions
diff --git a/core/src/main/scala/spark/scheduler/cluster/SchedulingAlgorithm.scala b/core/src/main/scala/spark/scheduler/cluster/SchedulingAlgorithm.scala
index e071917c00..f33310a34a 100644
--- a/core/src/main/scala/spark/scheduler/cluster/SchedulingAlgorithm.scala
+++ b/core/src/main/scala/spark/scheduler/cluster/SchedulingAlgorithm.scala
@@ -13,11 +13,11 @@ private[spark] class FIFOSchedulingAlgorithm extends SchedulingAlgorithm {
override def comparator(s1: Schedulable, s2: Schedulable): Boolean = {
val priority1 = s1.priority
val priority2 = s2.priority
- var res = Math.signum(priority1 - priority2)
+ var res = math.signum(priority1 - priority2)
if (res == 0) {
val stageId1 = s1.stageId
val stageId2 = s2.stageId
- res = Math.signum(stageId1 - stageId2)
+ res = math.signum(stageId1 - stageId2)
}
if (res < 0) {
return true
@@ -35,8 +35,8 @@ private[spark] class FairSchedulingAlgorithm extends SchedulingAlgorithm {
val runningTasks2 = s2.runningTasks
val s1Needy = runningTasks1 < minShare1
val s2Needy = runningTasks2 < minShare2
- val minShareRatio1 = runningTasks1.toDouble / Math.max(minShare1, 1.0).toDouble
- val minShareRatio2 = runningTasks2.toDouble / Math.max(minShare2, 1.0).toDouble
+ val minShareRatio1 = runningTasks1.toDouble / math.max(minShare1, 1.0).toDouble
+ val minShareRatio2 = runningTasks2.toDouble / math.max(minShare2, 1.0).toDouble
val taskToWeightRatio1 = runningTasks1.toDouble / s1.weight.toDouble
val taskToWeightRatio2 = runningTasks2.toDouble / s2.weight.toDouble
var res:Boolean = true
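
Both hunks in this file replace calls on java.lang.Math with the scala.math package object, the idiomatic Scala spelling; for Int arguments, math.signum also returns an Int instead of widening to Float the way java.lang.Math.signum does. Below is a minimal standalone sketch of the FIFO comparison logic, using a hypothetical SimpleSchedulable case class in place of the real Schedulable trait:

// Hypothetical stand-in for spark.scheduler.cluster.Schedulable, carrying
// only the two fields the FIFO comparator reads.
case class SimpleSchedulable(priority: Int, stageId: Int)

// Returns true when s1 should run before s2: lower priority value first,
// with lower stage id breaking ties.
def fifoLessThan(s1: SimpleSchedulable, s2: SimpleSchedulable): Boolean = {
  // scala.math.signum on Int stays an Int; java.lang.Math.signum has only
  // Float/Double overloads, so the old spelling silently widened.
  var res = math.signum(s1.priority - s2.priority)
  if (res == 0) {
    res = math.signum(s1.stageId - s2.stageId)
  }
  res < 0
}

fifoLessThan(SimpleSchedulable(priority = 1, stageId = 7),
             SimpleSchedulable(priority = 2, stageId = 3))  // true
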
diff --git a/core/src/main/scala/spark/storage/StorageUtils.scala b/core/src/main/scala/spark/storage/StorageUtils.scala
index 81e607868d..950c0cdf35 100644
--- a/core/src/main/scala/spark/storage/StorageUtils.scala
+++ b/core/src/main/scala/spark/storage/StorageUtils.scala
@@ -55,21 +55,21 @@ object StorageUtils {
}.mapValues(_.values.toArray)
// For each RDD, generate an RDDInfo object
- val rddInfos = groupedRddBlocks.map { case(rddKey, rddBlocks) =>
-
+ val rddInfos = groupedRddBlocks.map { case (rddKey, rddBlocks) =>
// Add up memory and disk sizes
val memSize = rddBlocks.map(_.memSize).reduce(_ + _)
val diskSize = rddBlocks.map(_.diskSize).reduce(_ + _)
// Find the id of the RDD, e.g. rdd_1 => 1
val rddId = rddKey.split("_").last.toInt
- // Get the friendly name for the rdd, if available.
+
+ // Get the friendly name and storage level for the RDD, if available
sc.persistentRdds.get(rddId).map { r =>
- val rddName = Option(r.name).getOrElse(rddKey)
- val rddStorageLevel = r.getStorageLevel
- RDDInfo(rddId, rddName, rddStorageLevel, rddBlocks.length, r.partitions.size, memSize, diskSize)
+ val rddName = Option(r.name).getOrElse(rddKey)
+ val rddStorageLevel = r.getStorageLevel
+ RDDInfo(rddId, rddName, rddStorageLevel, rddBlocks.length, r.partitions.size, memSize, diskSize)
}
- }.flatMap(x => x).toArray
+ }.flatten.toArray
scala.util.Sorting.quickSort(rddInfos)
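
The final change swaps .flatMap(x => x) for the equivalent but clearer .flatten: each rddKey is mapped to an Option, and flattening drops the keys with no matching entry in sc.persistentRdds. A toy sketch of the same map-then-flatten pattern, with a hypothetical plain Map standing in for sc.persistentRdds:

// Hypothetical data: RDD id -> friendly name of each cached RDD.
val persistentRdds = Map(1 -> "ratings", 3 -> "features")
val rddKeys = Seq("rdd_1", "rdd_2", "rdd_3")

val names: Array[String] = rddKeys.map { key =>
  val rddId = key.split("_").last.toInt  // e.g. "rdd_1" => 1
  persistentRdds.get(rddId)              // Option[String]; None when not cached
}.flatten.toArray
// names == Array("ratings", "features") -- "rdd_2" was dropped by flatten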