aboutsummaryrefslogtreecommitdiff
path: root/resource-managers/yarn/src/test
diff options
context:
space:
mode:
authorMarcelo Vanzin <vanzin@cloudera.com>2017-04-04 11:38:05 -0700
committerMarcelo Vanzin <vanzin@cloudera.com>2017-04-04 11:38:05 -0700
commit0736980f395f114faccbd58e78280ca63ed289c7 (patch)
tree0b5e3a8322ac7bbf6fcd1e0badc7cbba8a144997 /resource-managers/yarn/src/test
parent11238d4c62961c03376d9b2899221ec74313363a (diff)
downloadspark-0736980f395f114faccbd58e78280ca63ed289c7.tar.gz
spark-0736980f395f114faccbd58e78280ca63ed289c7.tar.bz2
spark-0736980f395f114faccbd58e78280ca63ed289c7.zip
[SPARK-20191][YARN] Create wrapper for RackResolver so tests can override it.
Current test code tries to override the RackResolver used by setting configuration params, but because YARN libs statically initialize the resolver the first time it's used, that means that those configs don't really take effect during Spark tests. This change adds a wrapper class that easily allows tests to override the behavior of the resolver for the Spark code that uses it. Author: Marcelo Vanzin <vanzin@cloudera.com> Closes #17508 from vanzin/SPARK-20191.
Diffstat (limited to 'resource-managers/yarn/src/test')
-rw-r--r--resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/LocalityPlacementStrategySuite.scala8
-rw-r--r--resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala22
2 files changed, 8 insertions, 22 deletions
diff --git a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/LocalityPlacementStrategySuite.scala b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/LocalityPlacementStrategySuite.scala
index fb80ff9f31..b7f25656e4 100644
--- a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/LocalityPlacementStrategySuite.scala
+++ b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/LocalityPlacementStrategySuite.scala
@@ -17,10 +17,9 @@
package org.apache.spark.deploy.yarn
+import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, HashSet, Set}
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic
-import org.apache.hadoop.net.DNSToSwitchMapping
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.mockito.Mockito._
@@ -51,9 +50,6 @@ class LocalityPlacementStrategySuite extends SparkFunSuite {
private def runTest(): Unit = {
val yarnConf = new YarnConfiguration()
- yarnConf.setClass(
- CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- classOf[MockResolver], classOf[DNSToSwitchMapping])
// The numbers below have been chosen to balance being large enough to replicate the
// original issue while not taking too long to run when the issue is fixed. The main
@@ -62,7 +58,7 @@ class LocalityPlacementStrategySuite extends SparkFunSuite {
val resource = Resource.newInstance(8 * 1024, 4)
val strategy = new LocalityPreferredContainerPlacementStrategy(new SparkConf(),
- yarnConf, resource)
+ yarnConf, resource, new MockResolver())
val totalTasks = 32 * 1024
val totalContainers = totalTasks / 16
diff --git a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
index fcc0594cf6..97b0e8aca3 100644
--- a/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
+++ b/resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala
@@ -17,12 +17,9 @@
package org.apache.spark.deploy.yarn
-import java.util.{Arrays, List => JList}
-
import scala.collection.JavaConverters._
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic
-import org.apache.hadoop.net.DNSToSwitchMapping
+import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.AMRMClient
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
@@ -38,24 +35,16 @@ import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.SplitInfo
import org.apache.spark.util.ManualClock
-class MockResolver extends DNSToSwitchMapping {
+class MockResolver extends SparkRackResolver {
- override def resolve(names: JList[String]): JList[String] = {
- if (names.size > 0 && names.get(0) == "host3") Arrays.asList("/rack2")
- else Arrays.asList("/rack1")
+ override def resolve(conf: Configuration, hostName: String): String = {
+ if (hostName == "host3") "/rack2" else "/rack1"
}
- override def reloadCachedMappings() {}
-
- def reloadCachedMappings(names: JList[String]) {}
}
class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach {
val conf = new YarnConfiguration()
- conf.setClass(
- CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- classOf[MockResolver], classOf[DNSToSwitchMapping])
-
val sparkConf = new SparkConf()
sparkConf.set("spark.driver.host", "localhost")
sparkConf.set("spark.driver.port", "4040")
@@ -111,7 +100,8 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfter
rmClient,
appAttemptId,
new SecurityManager(sparkConf),
- Map())
+ Map(),
+ new MockResolver())
}
def createContainer(host: String): Container = {