diff options
author | Michael Gummelt <mgummelt@mesosphere.io> | 2016-07-29 05:50:47 -0700 |
---|---|---|
committer | Sean Owen <sowen@cloudera.com> | 2016-07-29 05:50:47 -0700 |
commit | 266b92faffb66af24d8ed2725beb80770a2d91f8 (patch) | |
tree | 408d3ccb12dcb41ae577600eb1fadaebe79f5482 /core/src/test/scala | |
parent | 04a2c072d94874f3f7ae9dd94c026e8826a75ccd (diff) | |
download | spark-266b92faffb66af24d8ed2725beb80770a2d91f8.tar.gz spark-266b92faffb66af24d8ed2725beb80770a2d91f8.tar.bz2 spark-266b92faffb66af24d8ed2725beb80770a2d91f8.zip |
[SPARK-16637] Unified containerizer
## What changes were proposed in this pull request?
New config var: spark.mesos.containerizer={"mesos","docker" (default)}
This adds support for running docker containers via the Mesos unified containerizer: http://mesos.apache.org/documentation/latest/container-image/
The benefit is removing the dependency on `dockerd`, and all the costs it incurs.
I've also updated the supported Mesos version to 0.28.2 for support of the required protobufs.
This is blocked on: https://github.com/apache/spark/pull/14167
## How was this patch tested?
- manually testing jobs submitted with both "mesos" and "docker" settings for the new config var.
- spark/mesos integration test suite
Author: Michael Gummelt <mgummelt@mesosphere.io>
Closes #14275 from mgummelt/unified-containerizer.
Diffstat (limited to 'core/src/test/scala')
-rw-r--r-- | core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala | 32 |
1 file changed, 29 insertions, 3 deletions
diff --git a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala index 51d262e75e..a74fdf79a1 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackendSuite.scala @@ -109,7 +109,7 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) - val cpus = backend.getResource(taskInfos(0).getResourcesList, "cpus") + val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == executorCores) } @@ -123,7 +123,7 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) - val cpus = backend.getResource(taskInfos(0).getResourcesList, "cpus") + val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == offerCores) } @@ -137,7 +137,7 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite val taskInfos = verifyTaskLaunched(driver, "o1") assert(taskInfos.length == 1) - val cpus = backend.getResource(taskInfos(0).getResourcesList, "cpus") + val cpus = backend.getResource(taskInfos.head.getResourcesList, "cpus") assert(cpus == maxCores) } @@ -252,6 +252,32 @@ class MesosCoarseGrainedSchedulerBackendSuite extends SparkFunSuite backend.start() } + test("honors unset spark.mesos.containerizer") { + setBackend(Map("spark.mesos.executor.docker.image" -> "test")) + + val (mem, cpu) = (backend.executorMemory(sc), 4) + + val offer1 = createOffer("o1", "s1", mem, cpu) + backend.resourceOffers(driver, List(offer1).asJava) + + val taskInfos = verifyTaskLaunched(driver, "o1") + assert(taskInfos.head.getContainer.getType == 
ContainerInfo.Type.DOCKER) + } + + test("honors spark.mesos.containerizer=\"mesos\"") { + setBackend(Map( + "spark.mesos.executor.docker.image" -> "test", + "spark.mesos.containerizer" -> "mesos")) + + val (mem, cpu) = (backend.executorMemory(sc), 4) + + val offer1 = createOffer("o1", "s1", mem, cpu) + backend.resourceOffers(driver, List(offer1).asJava) + + val taskInfos = verifyTaskLaunched(driver, "o1") + assert(taskInfos.head.getContainer.getType == ContainerInfo.Type.MESOS) + } + test("docker settings are reflected in created tasks") { setBackend(Map( "spark.mesos.executor.docker.image" -> "some_image", |