diff options
author | Shixiong Zhu <shixiong@databricks.com> | 2016-06-17 15:48:17 -0700 |
---|---|---|
committer | Shixiong Zhu <shixiong@databricks.com> | 2016-06-17 15:48:17 -0700 |
commit | 62d8fe2089659e8212753a622708517e0f4a77bc (patch) | |
tree | 23685264247e79089bfe89be171590de96d06f71 /core/src/test | |
parent | 298c4ae81520b6b39230a6b0bf733c2b7caea627 (diff) | |
download | spark-62d8fe2089659e8212753a622708517e0f4a77bc.tar.gz spark-62d8fe2089659e8212753a622708517e0f4a77bc.tar.bz2 spark-62d8fe2089659e8212753a622708517e0f4a77bc.zip |
[SPARK-16017][CORE] Send hostname from CoarseGrainedExecutorBackend to driver
## What changes were proposed in this pull request?
[SPARK-15395](https://issues.apache.org/jira/browse/SPARK-15395) changed how the driver obtains the executor host: the driver now receives the executor's IP address instead of its host name. This PR sends the hostname from the executors to the driver so that the driver can pass it to the TaskScheduler.
## How was this patch tested?
Existing unit tests.
Author: Shixiong Zhu <shixiong@databricks.com>
Closes #13741 from zsxwing/SPARK-16017.
Diffstat (limited to 'core/src/test')
-rw-r--r-- | core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala | 4 | ||||
-rw-r--r-- | core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala | 2 |
2 files changed, 3 insertions, 3 deletions
diff --git a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala index 81b94b5721..5e2ba311ee 100644 --- a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala +++ b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala @@ -174,9 +174,9 @@ class HeartbeatReceiverSuite val dummyExecutorEndpointRef1 = rpcEnv.setupEndpoint("fake-executor-1", dummyExecutorEndpoint1) val dummyExecutorEndpointRef2 = rpcEnv.setupEndpoint("fake-executor-2", dummyExecutorEndpoint2) fakeSchedulerBackend.driverEndpoint.askWithRetry[Boolean]( - RegisterExecutor(executorId1, dummyExecutorEndpointRef1, 0, Map.empty)) + RegisterExecutor(executorId1, dummyExecutorEndpointRef1, "1.2.3.4", 0, Map.empty)) fakeSchedulerBackend.driverEndpoint.askWithRetry[Boolean]( - RegisterExecutor(executorId2, dummyExecutorEndpointRef2, 0, Map.empty)) + RegisterExecutor(executorId2, dummyExecutorEndpointRef2, "1.2.3.5", 0, Map.empty)) heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet) addExecutorAndVerify(executorId1) addExecutorAndVerify(executorId2) diff --git a/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala b/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala index 3d39bd4a74..814027076d 100644 --- a/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala @@ -559,7 +559,7 @@ class StandaloneDynamicAllocationSuite val endpointRef = mock(classOf[RpcEndpointRef]) val mockAddress = mock(classOf[RpcAddress]) when(endpointRef.address).thenReturn(mockAddress) - val message = RegisterExecutor(id, endpointRef, 10, Map.empty) + val message = RegisterExecutor(id, endpointRef, "localhost", 10, Map.empty) val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend] 
backend.driverEndpoint.askWithRetry[Boolean](message) } |