 core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala      | 10 +++-------
 core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala | 47 +++++++++++++++++++
 docs/running-on-mesos.md                                                            |  8 +++---
 3 files changed, 53 insertions(+), 12 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
index 705116cb13..aa3ec0f8cf 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtils.scala
@@ -21,15 +21,11 @@ import org.apache.spark.SparkContext
 
 private[spark] object MemoryUtils {
   // These defaults copied from YARN
-  val OVERHEAD_FRACTION = 1.10
+  val OVERHEAD_FRACTION = 0.10
   val OVERHEAD_MINIMUM = 384
 
   def calculateTotalMemory(sc: SparkContext) = {
-    math.max(
-      sc.conf.getOption("spark.mesos.executor.memoryOverhead")
-        .getOrElse(OVERHEAD_MINIMUM.toString)
-        .toInt + sc.executorMemory,
-      OVERHEAD_FRACTION * sc.executorMemory
-    )
+    sc.conf.getInt("spark.mesos.executor.memoryOverhead",
+      math.max(OVERHEAD_FRACTION * sc.executorMemory, OVERHEAD_MINIMUM).toInt) + sc.executorMemory
   }
 }
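
For context, a minimal standalone sketch contrasting the old and new expressions (not the Spark API; `mem` and `confOverhead` are illustrative stand-ins for `sc.executorMemory` and a configured `spark.mesos.executor.memoryOverhead`). With OVERHEAD_FRACTION mistakenly at 1.10, the old `math.max` compared a total (overhead + memory) against a fraction-scaled total, so a small explicitly configured overhead was silently ignored:

```scala
// Illustrative sketch only: `mem` stands in for sc.executorMemory (MB) and
// `confOverhead` for an explicitly configured spark.mesos.executor.memoryOverhead.
val mem = 4096
val confOverhead: Option[Int] = Some(100)

// Old expression: max(100 + 4096, 1.10 * 4096) = max(4196, 4505.6) = 4505.6,
// so the configured 100 MB is ignored (and the result is a Double, not an Int).
val oldTotal = math.max(confOverhead.getOrElse(384) + mem, 1.10 * mem)

// New expression: the configured value, when present, is used as-is:
// 100 + 4096 = 4196. Otherwise the default is max(0.10 * mem, 384).toInt.
val newTotal = confOverhead.getOrElse(math.max(0.10 * mem, 384).toInt) + mem
```
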
diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
new file mode 100644
index 0000000000..3fa0115e68
--- /dev/null
+++ b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MemoryUtilsSuite.scala
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.scheduler.cluster.mesos
+
+import org.mockito.Mockito._
+import org.scalatest.FunSuite
+import org.scalatest.mock.MockitoSugar
+
+import org.apache.spark.{SparkConf, SparkContext}
+
+class MemoryUtilsSuite extends FunSuite with MockitoSugar {
+  test("MesosMemoryUtils should always override memoryOverhead when it's set") {
+    val sparkConf = new SparkConf
+
+    val sc = mock[SparkContext]
+    when(sc.conf).thenReturn(sparkConf)
+
+    // 384 > sc.executorMemory * 0.1 => 512 + 384 = 896
+    when(sc.executorMemory).thenReturn(512)
+    assert(MemoryUtils.calculateTotalMemory(sc) === 896)
+
+    // 384 < sc.executorMemory * 0.1 => 4096 + (4096 * 0.1).toInt = 4505
+    when(sc.executorMemory).thenReturn(4096)
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4505)
+
+    // an explicitly set memoryOverhead overrides the default entirely
+    sparkConf.set("spark.mesos.executor.memoryOverhead", "100")
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4196)
+    sparkConf.set("spark.mesos.executor.memoryOverhead", "400")
+    assert(MemoryUtils.calculateTotalMemory(sc) === 4496)
+  }
+}
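
The asserted values can also be reproduced without mocks; a quick plain-Scala sanity check of the same arithmetic (no Spark on the classpath; names are illustrative):

```scala
// Plain-Scala sanity check mirroring the suite's expected values (illustrative).
val fraction = 0.10
val minimum  = 384

def defaultOverhead(executorMemory: Int): Int =
  math.max(fraction * executorMemory, minimum).toInt

assert(512 + defaultOverhead(512) == 896)    // minimum wins: max(51.2, 384) = 384
assert(4096 + defaultOverhead(4096) == 4505) // fraction wins: (409.6).toInt = 409
assert(4096 + 100 == 4196)                   // explicit overhead replaces the default
assert(4096 + 400 == 4496)
```
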
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index 6a9d304501..c984639bd3 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -224,11 +224,9 @@ See the [configuration page](configuration.html) for information on Spark config
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
   <td>executor memory * 0.10, with minimum of 384</td>
   <td>
-    This value is an additive for <code>spark.executor.memory</code>, specified in MB,
-    which is used to calculate the total Mesos task memory. A value of <code>384</code>
-    implies a 384MB overhead. Additionally, there is a hard-coded 10% minimum
-    overhead. The final overhead will be the larger of either
-    `spark.mesos.executor.memoryOverhead` or 10% of `spark.executor.memory`.
+    The amount of additional memory, specified in MB, to be allocated per executor. By default,
+    the overhead will be the larger of either 384 or 10% of <code>spark.executor.memory</code>.
+    If set explicitly, this value becomes the final overhead.
   </td>
 </tr>
 </table>
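
On the usage side, a sketch of how an application might set this property explicitly (the master URL, app name, and sizes below are made up for illustration):

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Illustrative configuration only: reserve a fixed 512 MB of overhead per
// executor instead of the max(384, 10% of executor memory) default.
val conf = new SparkConf()
  .setMaster("mesos://host:5050")                    // hypothetical Mesos master
  .setAppName("OverheadExample")                     // hypothetical app name
  .set("spark.executor.memory", "4g")
  .set("spark.mesos.executor.memoryOverhead", "512") // Mesos task gets 4096 + 512 MB
val sc = new SparkContext(conf)
```
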