author     GuoQiang Li <witgo@qq.com>          2014-10-12 22:48:54 -0700
committer  Josh Rosen <joshrosen@apache.org>   2014-10-12 22:48:54 -0700
commit     b4a7fa7a663c462bf537ca9d63af0dba6b4a8033 (patch)
tree       73ef6da51a003b64476ad4fb92258222b2b08632
parent     fc616d51a510f82627b5be949a5941419834cf70 (diff)
[SPARK-3905][Web UI] The keys for sorting the columns of the Executor page, Stage page and Storage page are incorrect
Author: GuoQiang Li <witgo@qq.com>

Closes #2763 from witgo/SPARK-3905 and squashes the following commits:

17d7990 [GuoQiang Li] The keys for sorting the columns of the Executor page, Stage page and Storage page are incorrect
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala  | 12
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala     |  6
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala |  6
3 files changed, 12 insertions(+), 12 deletions(-)
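The change itself is a one-character rename. The sortable tables in Spark's web UI are driven by sorttable.js, which reads a sorttable_customkey attribute on a <td> to get the value to sort by; because the attribute was misspelled as sorttable_customekey, the key was ignored and the affected columns sorted on their formatted display text (for example, lexicographically on strings like "1.5 MB") rather than on the underlying numbers. A minimal, self-contained Scala sketch of the pattern follows; it is not Spark's actual code (the helper names and the simplified byte formatter are illustrative only):

// Sketch: the cell displays a human-readable size, while sorttable.js sorts on
// the raw byte count carried in the sorttable_customkey attribute. With the
// misspelled sorttable_customekey, the key was ignored and rows were ordered by
// the formatted text instead.
import scala.xml.Elem

def bytesToString(bytes: Long): String =
  if (bytes >= 1L << 20) f"${bytes / 1048576.0}%.1f MB"
  else if (bytes >= 1L << 10) f"${bytes / 1024.0}%.1f KB"
  else s"$bytes B"

def sizeCell(bytes: Long): Elem =
  <td sorttable_customkey={bytes.toString}>{bytesToString(bytes)}</td>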
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
index 2987dc0449..f0e43fbf70 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
@@ -71,19 +71,19 @@ private[ui] class ExecutorTable(stageId: Int, stageAttemptId: Int, parent: JobPr
<tr>
<td>{k}</td>
<td>{executorIdToAddress.getOrElse(k, "CANNOT FIND ADDRESS")}</td>
- <td sorttable_customekey={v.taskTime.toString}>{UIUtils.formatDuration(v.taskTime)}</td>
+ <td sorttable_customkey={v.taskTime.toString}>{UIUtils.formatDuration(v.taskTime)}</td>
<td>{v.failedTasks + v.succeededTasks}</td>
<td>{v.failedTasks}</td>
<td>{v.succeededTasks}</td>
- <td sorttable_customekey={v.inputBytes.toString}>
+ <td sorttable_customkey={v.inputBytes.toString}>
{Utils.bytesToString(v.inputBytes)}</td>
- <td sorttable_customekey={v.shuffleRead.toString}>
+ <td sorttable_customkey={v.shuffleRead.toString}>
{Utils.bytesToString(v.shuffleRead)}</td>
- <td sorttable_customekey={v.shuffleWrite.toString}>
+ <td sorttable_customkey={v.shuffleWrite.toString}>
{Utils.bytesToString(v.shuffleWrite)}</td>
- <td sorttable_customekey={v.memoryBytesSpilled.toString}>
+ <td sorttable_customkey={v.memoryBytesSpilled.toString}>
{Utils.bytesToString(v.memoryBytesSpilled)}</td>
- <td sorttable_customekey={v.diskBytesSpilled.toString}>
+ <td sorttable_customkey={v.diskBytesSpilled.toString}>
{Utils.bytesToString(v.diskBytesSpilled)}</td>
</tr>
}
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
index 2e67310594..4ee7f08ab4 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
@@ -176,9 +176,9 @@ private[ui] class StageTableBase(
{makeProgressBar(stageData.numActiveTasks, stageData.completedIndices.size,
stageData.numFailedTasks, s.numTasks)}
</td>
- <td sorttable_customekey={inputRead.toString}>{inputReadWithUnit}</td>
- <td sorttable_customekey={shuffleRead.toString}>{shuffleReadWithUnit}</td>
- <td sorttable_customekey={shuffleWrite.toString}>{shuffleWriteWithUnit}</td>
+ <td sorttable_customkey={inputRead.toString}>{inputReadWithUnit}</td>
+ <td sorttable_customkey={shuffleRead.toString}>{shuffleReadWithUnit}</td>
+ <td sorttable_customkey={shuffleWrite.toString}>{shuffleWriteWithUnit}</td>
}
/** Render an HTML row that represents a stage */
diff --git a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
index 716591c9ed..83489ca067 100644
--- a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
@@ -58,9 +58,9 @@ private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
</td>
<td>{rdd.numCachedPartitions}</td>
<td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td>
- <td sorttable_customekey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
- <td sorttable_customekey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td>
- <td sorttable_customekey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td>
+ <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
+ <td sorttable_customkey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td>
+ <td sorttable_customkey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td>
</tr>
// scalastyle:on
}
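As a quick usage check (hypothetical, building on the sizeCell sketch above and not part of the patch), the rendered cell now carries the attribute name that sorttable.js actually looks for:

val cell = sizeCell(3 * 1024 * 1024)
// The sort key is the raw byte count; the visible text stays human-readable.
assert((cell \ "@sorttable_customkey").text == "3145728")
println(cell)  // <td sorttable_customkey="3145728">3.0 MB</td>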