about · summary · refs · log · tree · commit · diff
path: root/core/src/main/scala
diff options
context:
space:
mode:
author: Shixiong Zhu <shixiong@databricks.com> 2016-04-21 11:51:04 -0700
committer: Shixiong Zhu <shixiong@databricks.com> 2016-04-21 11:51:04 -0700
commit: e4904d870a0e705a3a7d370320e6f8a5f23d5944 (patch)
tree: f93943d5bc88283a3c23ca075d79f9520be24552 /core/src/main/scala
parent: 3a21e8d5ed640e3f82946893e24c099aa723c169 (diff)
download: spark-e4904d870a0e705a3a7d370320e6f8a5f23d5944.tar.gz
spark-e4904d870a0e705a3a7d370320e6f8a5f23d5944.tar.bz2
spark-e4904d870a0e705a3a7d370320e6f8a5f23d5944.zip
[SPARK-14699][CORE] Stop endpoints before closing the connections and don't stop client in Outbox
## What changes were proposed in this pull request?

In general, `onDisconnected` is for dealing with unexpected network disconnections. When RpcEnv.shutdown is called, the disconnections are expected, so RpcEnv should not fire these events. This PR moves `dispatcher.stop()` above closing the connections so that when stopping RpcEnv, the endpoints won't receive `onDisconnected` events. In addition, Outbox should not close the client, since the client will be reused by others. This PR fixes that as well.

## How was this patch tested?

test("SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events")

Author: Shixiong Zhu <shixiong@databricks.com>

Closes #12481 from zsxwing/SPARK-14699.
Diffstat (limited to 'core/src/main/scala')
-rw-r--r--  core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala | 6
-rw-r--r--  core/src/main/scala/org/apache/spark/rpc/netty/Outbox.scala | 5
2 files changed, 4 insertions, 7 deletions
diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala b/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala
index 7f2192e1f5..7d7b4c82fa 100644
--- a/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala
@@ -287,15 +287,15 @@ private[netty] class NettyRpcEnv(
if (timeoutScheduler != null) {
timeoutScheduler.shutdownNow()
}
+ if (dispatcher != null) {
+ dispatcher.stop()
+ }
if (server != null) {
server.close()
}
if (clientFactory != null) {
clientFactory.close()
}
- if (dispatcher != null) {
- dispatcher.stop()
- }
if (clientConnectionExecutor != null) {
clientConnectionExecutor.shutdownNow()
}
diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/Outbox.scala b/core/src/main/scala/org/apache/spark/rpc/netty/Outbox.scala
index 56499c639f..6c090ada5a 100644
--- a/core/src/main/scala/org/apache/spark/rpc/netty/Outbox.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/netty/Outbox.scala
@@ -241,10 +241,7 @@ private[netty] class Outbox(nettyEnv: NettyRpcEnv, val address: RpcAddress) {
}
private def closeClient(): Unit = synchronized {
- // Not sure if `client.close` is idempotent. Just for safety.
- if (client != null) {
- client.close()
- }
+ // Just set client to null. Don't close it in order to reuse the connection.
client = null
}