about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
author: Andrew Or <andrew@databricks.com> 2015-08-24 14:10:50 -0700
committer: Yin Huai <yhuai@databricks.com> 2015-08-24 14:10:50 -0700
commit 662bb9667669cb07cf6d2ccee0d8e76bb561cd89 (patch)
tree 419cf568d3431e196d0f7158fd143cef0185c364 /core
parent 9ce0c7ad333f4a3c01207e5e9ed42bcafb99d894 (diff)
download: spark-662bb9667669cb07cf6d2ccee0d8e76bb561cd89.tar.gz
spark-662bb9667669cb07cf6d2ccee0d8e76bb561cd89.tar.bz2
spark-662bb9667669cb07cf6d2ccee0d8e76bb561cd89.zip
[SPARK-10144] [UI] Actually show peak execution memory by default
The peak execution memory metric was introduced in SPARK-8735. That was before Tungsten was enabled by default, so it assumed that `spark.sql.unsafe.enabled` must be explicitly set to true. The result is that the memory is not displayed by default.

Author: Andrew Or <andrew@databricks.com>

Closes #8345 from andrewor14/show-memory-default.
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala | 6
-rw-r--r--  core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala | 8
2 files changed, 8 insertions, 6 deletions
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
index fb4556b836..4adc6596ba 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
@@ -68,8 +68,7 @@ private[ui] class StagePage(parent: StagesTab) extends WebUIPage("stage") {
// if we find that it's okay.
private val MAX_TIMELINE_TASKS = parent.conf.getInt("spark.ui.timeline.tasks.maximum", 1000)
- private val displayPeakExecutionMemory =
- parent.conf.getOption("spark.sql.unsafe.enabled").exists(_.toBoolean)
+ private val displayPeakExecutionMemory = parent.conf.getBoolean("spark.sql.unsafe.enabled", true)
def render(request: HttpServletRequest): Seq[Node] = {
progressListener.synchronized {
@@ -1193,8 +1192,7 @@ private[ui] class TaskPagedTable(
desc: Boolean) extends PagedTable[TaskTableRowData] {
// We only track peak memory used for unsafe operators
- private val displayPeakExecutionMemory =
- conf.getOption("spark.sql.unsafe.enabled").exists(_.toBoolean)
+ private val displayPeakExecutionMemory = conf.getBoolean("spark.sql.unsafe.enabled", true)
override def tableId: String = "task-table"
diff --git a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
index 98f9314f31..3388c6dca8 100644
--- a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala
@@ -33,14 +33,18 @@ class StagePageSuite extends SparkFunSuite with LocalSparkContext {
test("peak execution memory only displayed if unsafe is enabled") {
val unsafeConf = "spark.sql.unsafe.enabled"
- val conf = new SparkConf().set(unsafeConf, "true")
+ val conf = new SparkConf(false).set(unsafeConf, "true")
val html = renderStagePage(conf).toString().toLowerCase
val targetString = "peak execution memory"
assert(html.contains(targetString))
// Disable unsafe and make sure it's not there
- val conf2 = new SparkConf().set(unsafeConf, "false")
+ val conf2 = new SparkConf(false).set(unsafeConf, "false")
val html2 = renderStagePage(conf2).toString().toLowerCase
assert(!html2.contains(targetString))
+ // Avoid setting anything; it should be displayed by default
+ val conf3 = new SparkConf(false)
+ val html3 = renderStagePage(conf3).toString().toLowerCase
+ assert(html3.contains(targetString))
}
/**