From 7a033fd795b2008b1cdaa0d0aab73817db56d708 Mon Sep 17 00:00:00 2001
From: Charles Reiss
Date: Tue, 4 Dec 2012 13:58:12 -0800
Subject: Make LocalSparkCluster use distinct IPs

---
 core/src/main/scala/spark/deploy/LocalSparkCluster.scala | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/spark/deploy/LocalSparkCluster.scala b/core/src/main/scala/spark/deploy/LocalSparkCluster.scala
index 8b2a71add5..4211d80596 100644
--- a/core/src/main/scala/spark/deploy/LocalSparkCluster.scala
+++ b/core/src/main/scala/spark/deploy/LocalSparkCluster.scala
@@ -35,11 +35,15 @@ class LocalSparkCluster(numSlaves: Int, coresPerSlave: Int, memoryPerSlave: Int)
 
     /* Start the Slaves */
     for (slaveNum <- 1 to numSlaves) {
+      /* We can pretend to test distributed stuff by giving the slaves distinct hostnames.
+         All of 127/8 should be loopback addresses; we use 127.100.*.* in hopes that it is
+         sufficiently distinctive. */
+      val slaveIpAddress = "127.100.0." + (slaveNum % 256)
       val (actorSystem, boundPort) =
-        AkkaUtils.createActorSystem("sparkWorker" + slaveNum, localIpAddress, 0)
+        AkkaUtils.createActorSystem("sparkWorker" + slaveNum, slaveIpAddress, 0)
       slaveActorSystems += actorSystem
       val actor = actorSystem.actorOf(
-        Props(new Worker(localIpAddress, boundPort, 0, coresPerSlave, memoryPerSlave, masterUrl)),
+        Props(new Worker(slaveIpAddress, boundPort, 0, coresPerSlave, memoryPerSlave, masterUrl)),
         name = "Worker")
       slaveActors += actor
     }
-- 
cgit v1.2.3
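
[Editor's note] For readers skimming the change, here is a minimal, self-contained Scala sketch (not part of the patch; the object and method names are hypothetical) of the addressing trick the commit relies on: every address in the 127.0.0.0/8 block routes back to the local host, so each simulated worker can bind to its own 127.100.0.x address and appear to live on a distinct machine.

import java.net.InetAddress

object DistinctLoopbackSketch {
  // Mirror the patch's scheme: worker n is assigned 127.100.0.(n % 256).
  def workerAddress(workerNum: Int): String = "127.100.0." + (workerNum % 256)

  def main(args: Array[String]): Unit = {
    for (n <- 1 to 3) {
      val addr = InetAddress.getByName(workerAddress(n))
      // Every 127.x.y.z address should report itself as a loopback address.
      println(s"worker $n -> ${addr.getHostAddress} (loopback: ${addr.isLoopbackAddress})")
    }
  }
}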