path: root/core/src/main/scala
author    witgo <witgo@qq.com>    2014-06-26 21:59:21 -0700
committer Patrick Wendell <pwendell@gmail.com>    2014-06-26 21:59:21 -0700
commit    18f29b96c7e0948f5f504e522e5aa8a8d1ab163e (patch)
tree      7227a2ca8d06c4c0d43d071bc65a451a78562cc3 /core/src/main/scala
parent    c23f5db32b3bd4d965d56e5df684a3b814a91cd6 (diff)
SPARK-2181: The keys for sorting the columns of the Executor page in the SparkUI are incorrect
Author: witgo <witgo@qq.com>

Closes #1135 from witgo/SPARK-2181 and squashes the following commits:

39dad90 [witgo] The keys for sorting the columns of Executor page in SparkUI are incorrect
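For context: sorttable.js orders a column by comparing each cell's rendered text unless the cell carries a sorttable_customkey attribute, in which case that attribute's value is used as the sort key instead. Formatted durations and sizes sort lexically, so "512.0 KB" would land after "1.0 MB"; the patch keeps the human-readable text but supplies the raw number as the key. A minimal sketch of the pattern, in the style of the changed code (the sizeCell helper below is hypothetical, not part of the patch):

    import org.apache.spark.util.Utils
    import scala.xml.Elem

    // Hypothetical helper showing the fix: the cell still displays the
    // formatted size, but sorttable.js sorts on the raw byte count carried
    // in sorttable_customkey, restoring numeric ordering.
    def sizeCell(bytes: Long): Elem =
      <td sorttable_customkey={bytes.toString}>{Utils.bytesToString(bytes)}</td>

The scalastyle:off/on comments added around the XML literals presumably suppress the line-length check, which the longer attribute-bearing lines would otherwise trip.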
Diffstat (limited to 'core/src/main/scala')
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala  |  8 +++++---
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala  | 12 +++++++-----
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala |  8 +++++---
3 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
index 6cb43c02b8..2d8c3b949c 100644
--- a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
@@ -79,6 +79,7 @@ private[ui] class ExecutorsPage(parent: ExecutorsTab) extends WebUIPage("") {
val maximumMemory = values("Maximum Memory")
val memoryUsed = values("Memory Used")
val diskUsed = values("Disk Used")
+ // scalastyle:off
<tr>
<td>{values("Executor ID")}</td>
<td>{values("Address")}</td>
@@ -94,10 +95,11 @@ private[ui] class ExecutorsPage(parent: ExecutorsTab) extends WebUIPage("") {
<td>{values("Failed Tasks")}</td>
<td>{values("Complete Tasks")}</td>
<td>{values("Total Tasks")}</td>
- <td>{Utils.msDurationToString(values("Task Time").toLong)}</td>
- <td>{Utils.bytesToString(values("Shuffle Read").toLong)}</td>
- <td>{Utils.bytesToString(values("Shuffle Write").toLong)}</td>
+ <td sorttable_customkey={values("Task Time")}>{Utils.msDurationToString(values("Task Time").toLong)}</td>
+ <td sorttable_customkey={values("Shuffle Read")}>{Utils.bytesToString(values("Shuffle Read").toLong)}</td>
+ <td sorttable_customkey={values("Shuffle Write")} >{Utils.bytesToString(values("Shuffle Write").toLong)}</td>
</tr>
+ // scalastyle:on
}
/** Represent an executor's info as a map given a storage status index */
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
index c83e196c9c..add0e9878a 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
@@ -67,18 +67,20 @@ private[ui] class ExecutorTable(stageId: Int, parent: JobProgressTab) {
executorIdToSummary match {
case Some(x) =>
x.toSeq.sortBy(_._1).map { case (k, v) => {
+ // scalastyle:off
<tr>
<td>{k}</td>
<td>{executorIdToAddress.getOrElse(k, "CANNOT FIND ADDRESS")}</td>
- <td>{UIUtils.formatDuration(v.taskTime)}</td>
+ <td sorttable_customkey={v.taskTime.toString}>{UIUtils.formatDuration(v.taskTime)}</td>
<td>{v.failedTasks + v.succeededTasks}</td>
<td>{v.failedTasks}</td>
<td>{v.succeededTasks}</td>
- <td>{Utils.bytesToString(v.shuffleRead)}</td>
- <td>{Utils.bytesToString(v.shuffleWrite)}</td>
- <td>{Utils.bytesToString(v.memoryBytesSpilled)}</td>
- <td>{Utils.bytesToString(v.diskBytesSpilled)}</td>
+ <td sorttable_customkey={v.shuffleRead.toString}>{Utils.bytesToString(v.shuffleRead)}</td>
+ <td sorttable_customkey={v.shuffleWrite.toString}>{Utils.bytesToString(v.shuffleWrite)}</td>
+ <td sorttable_customkey={v.memoryBytesSpilled.toString}>{Utils.bytesToString(v.memoryBytesSpilled)}</td>
+ <td sorttable_customkey={v.diskBytesSpilled.toString}>{Utils.bytesToString(v.diskBytesSpilled)}</td>
</tr>
+ // scalastyle:on
}
}
case _ => Seq[Node]()
diff --git a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
index b66edd91f5..9813d9330a 100644
--- a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
@@ -49,6 +49,7 @@ private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
/** Render an HTML row representing an RDD */
private def rddRow(rdd: RDDInfo): Seq[Node] = {
+ // scalastyle:off
<tr>
<td>
<a href={"%s/storage/rdd?id=%s".format(UIUtils.prependBaseUri(basePath), rdd.id)}>
@@ -59,9 +60,10 @@ private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
</td>
<td>{rdd.numCachedPartitions}</td>
<td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td>
- <td>{Utils.bytesToString(rdd.memSize)}</td>
- <td>{Utils.bytesToString(rdd.tachyonSize)}</td>
- <td>{Utils.bytesToString(rdd.diskSize)}</td>
+ <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
+ <td sorttable_customkey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td>
+ <td sorttable_customkey={rdd.diskSize.toString}>{Utils.bytesToString(rdd.diskSize)}</td>
</tr>
+ // scalastyle:on
}
}