 core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala | 2 +-
 docs/running-on-mesos.md                                                       | 2 +-
 docs/running-on-yarn.md                                                        | 2 +-
 yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala     | 4 ++--
 4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
index 5101ec8352..705116cb13 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
@@ -21,7 +21,7 @@ import org.apache.spark.SparkContext
 
 private[spark] object MemoryUtils {
   // These defaults copied from YARN
-  val OVERHEAD_FRACTION = 1.07
+  val OVERHEAD_FRACTION = 1.10
   val OVERHEAD_MINIMUM = 384
 
   def calculateTotalMemory(sc: SparkContext) = {
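
The body of calculateTotalMemory falls outside this hunk, so the helper below is a sketch only (the name is made up; the real method reads the executor memory and spark.mesos.executor.memoryOverhead from the SparkContext). It shows how the two constants above interact: the multiplier form is equivalent to adding max(10% of executor memory, 384 MiB) of overhead.

    // Sketch only, not the committed method body: total Mesos task memory
    // under the defaults above. max(m * 1.10, m + 384) == m + max(0.10 * m, 384).
    def totalTaskMemoryMiB(executorMemoryMiB: Int): Int =
      math.max(
        (executorMemoryMiB * MemoryUtils.OVERHEAD_FRACTION).toInt, // 10% overhead case
        executorMemoryMiB + MemoryUtils.OVERHEAD_MINIMUM)          // 384 MiB floor

For a 2 GiB executor this yields 2048 + 384 = 2432 MiB (the floor wins); for 8 GiB it yields (8192 * 1.10).toInt = 9011 MiB.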
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index db1173a06b..e509e4bf37 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -225,7 +225,7 @@ See the [configuration page](configuration.html) for information on Spark config
 </tr>
 <tr>
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
-  <td>executor memory * 0.07, with minimum of 384</td>
+  <td>executor memory * 0.10, with minimum of 384</td>
   <td>
     This value is an additive for <code>spark.executor.memory</code>, specified in MiB,
     which is used to calculate the total Mesos task memory. A value of <code>384</code>
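
To make the new default concrete, a small illustrative snippet (the helper is hypothetical; it simply restates the table entry for the case where spark.mesos.executor.memoryOverhead is unset):

    // Illustrative only: default overhead when the property is not set.
    def defaultOverheadMiB(executorMemoryMiB: Int): Int =
      math.max((executorMemoryMiB * 0.10).toInt, 384)

    defaultOverheadMiB(2048) // 384 MiB: 10% of 2 GiB is only 204, so the floor applies
    defaultOverheadMiB(8192) // 819 MiB: 10% of 8 GiB exceeds the 384 MiB minimum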
diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index 2b93eef6c2..68b1aeb8eb 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -113,7 +113,7 @@ Most of the configs are the same for Spark on YARN as for other deployment modes
 </tr>
 <tr>
   <td><code>spark.yarn.executor.memoryOverhead</code></td>
-  <td>executorMemory * 0.07, with minimum of 384 </td>
+  <td>executorMemory * 0.10, with minimum of 384 </td>
   <td>
     The amount of off heap memory (in megabytes) to be allocated per executor. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the executor size (typically 6-10%).
   </td>
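
The same 10% heuristic applies on YARN; to opt out of it you can pin the overhead explicitly. A hedged example (the values are illustrative; SparkConf.set is the standard API):

    import org.apache.spark.SparkConf

    // Illustrative: fix the overhead instead of relying on executorMemory * 0.10.
    val conf = new SparkConf()
      .set("spark.executor.memory", "4g")
      .set("spark.yarn.executor.memoryOverhead", "512") // MiB, replaces the computed default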
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
index 146b2c0f1a..5881dc5ffa 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
@@ -86,10 +86,10 @@ class YarnSparkHadoopUtil extends SparkHadoopUtil {
 
 object YarnSparkHadoopUtil {
   // Additional memory overhead
-  // 7% was arrived at experimentally. In the interest of minimizing memory waste while covering
+  // 10% was arrived at experimentally. In the interest of minimizing memory waste while covering
   // the common cases. Memory overhead tends to grow with container size.
-  val MEMORY_OVERHEAD_FACTOR = 0.07
+  val MEMORY_OVERHEAD_FACTOR = 0.10
   val MEMORY_OVERHEAD_MIN = 384
 
   val ANY_HOST = "*"
 
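
How these two constants combine is not shown in this hunk; an assumed sketch, mirroring the Mesos side above:

    // Sketch only (assumed usage): default per-executor overhead on YARN.
    val executorMemoryMiB = 8192
    val overheadMiB = math.max(
      (YarnSparkHadoopUtil.MEMORY_OVERHEAD_FACTOR * executorMemoryMiB).toInt, // 10% of the heap
      YarnSparkHadoopUtil.MEMORY_OVERHEAD_MIN)                                // 384 MiB floor
    // The container request would then be executorMemoryMiB + overheadMiB.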