aboutsummaryrefslogtreecommitdiff
path: root/core/src/test/scala/org/apache
diff options
context:
space:
mode:
authorAlex Bozarth <ajbozart@us.ibm.com>2016-08-24 14:39:41 -0500
committerTom Graves <tgraves@yahoo-inc.com>2016-08-24 14:39:41 -0500
commit891ac2b914fb6f90a62c6fbc0a3960a89d1c1d92 (patch)
tree19d1eea2303c7b29c310047d40e04a1b5f694cc7 /core/src/test/scala/org/apache
parent40b30fcf453169534cb53d01cd22236210b13005 (diff)
downloadspark-891ac2b914fb6f90a62c6fbc0a3960a89d1c1d92.tar.gz
spark-891ac2b914fb6f90a62c6fbc0a3960a89d1c1d92.tar.bz2
spark-891ac2b914fb6f90a62c6fbc0a3960a89d1c1d92.zip
[SPARK-15083][WEB UI] History Server can OOM due to unlimited TaskUIData
## What changes were proposed in this pull request? Based on #12990 by tankkyo Since the History Server currently loads all application's data it can OOM if too many applications have a significant task count. `spark.ui.trimTasks` (default: false) can be set to true to trim tasks by `spark.ui.retainedTasks` (default: 10000) (This is a "quick fix" to help those running into the problem until an update of how the history server loads app data can be done) ## How was this patch tested? Manual testing and dev/run-tests ![spark-15083](https://cloud.githubusercontent.com/assets/13952758/17713694/fe82d246-63b0-11e6-9697-b87ea75ff4ef.png) Author: Alex Bozarth <ajbozart@us.ibm.com> Closes #14673 from ajbozarth/spark15083.
Diffstat (limited to 'core/src/test/scala/org/apache')
-rw-r--r--core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala b/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala
index f684e16c25..1bfb0c1547 100644
--- a/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala
+++ b/core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.status.api.v1
import java.util.Date
-import scala.collection.mutable.HashMap
+import scala.collection.mutable.LinkedHashMap
import org.apache.spark.SparkFunSuite
import org.apache.spark.scheduler.{StageInfo, TaskInfo, TaskLocality}
@@ -28,7 +28,7 @@ import org.apache.spark.ui.jobs.UIData.{StageUIData, TaskUIData}
class AllStagesResourceSuite extends SparkFunSuite {
def getFirstTaskLaunchTime(taskLaunchTimes: Seq[Long]): Option[Date] = {
- val tasks = new HashMap[Long, TaskUIData]
+ val tasks = new LinkedHashMap[Long, TaskUIData]
taskLaunchTimes.zipWithIndex.foreach { case (time, idx) =>
tasks(idx.toLong) = TaskUIData(
new TaskInfo(idx, idx, 1, time, "", "", TaskLocality.ANY, false), None)