aboutsummaryrefslogtreecommitdiff
path: root/common/network-common/src/main/java
diff options
context:
space:
mode:
Diffstat (limited to 'common/network-common/src/main/java')
-rw-r--r--common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java32
-rw-r--r--common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java6
-rw-r--r--common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java30
-rw-r--r--common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java2
-rw-r--r--common/network-common/src/main/java/org/apache/spark/network/util/TransportFrameDecoder.java2
5 files changed, 56 insertions, 16 deletions
diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
index f179bad1f4..a27aaf2b27 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
@@ -94,7 +94,7 @@ public class TransportClientFactory implements Closeable {
this.context = Preconditions.checkNotNull(context);
this.conf = context.getConf();
this.clientBootstraps = Lists.newArrayList(Preconditions.checkNotNull(clientBootstraps));
- this.connectionPool = new ConcurrentHashMap<SocketAddress, ClientPool>();
+ this.connectionPool = new ConcurrentHashMap<>();
this.numConnectionsPerPeer = conf.numConnectionsPerPeer();
this.rand = new Random();
@@ -123,13 +123,15 @@ public class TransportClientFactory implements Closeable {
public TransportClient createClient(String remoteHost, int remotePort) throws IOException {
// Get connection from the connection pool first.
// If it is not found or not active, create a new one.
- final InetSocketAddress address = new InetSocketAddress(remoteHost, remotePort);
+    // Use unresolved address here to avoid DNS resolution each time we create a client.
+ final InetSocketAddress unresolvedAddress =
+ InetSocketAddress.createUnresolved(remoteHost, remotePort);
// Create the ClientPool if we don't have it yet.
- ClientPool clientPool = connectionPool.get(address);
+ ClientPool clientPool = connectionPool.get(unresolvedAddress);
if (clientPool == null) {
- connectionPool.putIfAbsent(address, new ClientPool(numConnectionsPerPeer));
- clientPool = connectionPool.get(address);
+ connectionPool.putIfAbsent(unresolvedAddress, new ClientPool(numConnectionsPerPeer));
+ clientPool = connectionPool.get(unresolvedAddress);
}
int clientIndex = rand.nextInt(numConnectionsPerPeer);
@@ -146,25 +148,35 @@ public class TransportClientFactory implements Closeable {
}
if (cachedClient.isActive()) {
- logger.trace("Returning cached connection to {}: {}", address, cachedClient);
+ logger.trace("Returning cached connection to {}: {}",
+ cachedClient.getSocketAddress(), cachedClient);
return cachedClient;
}
}
// If we reach here, we don't have an existing connection open. Let's create a new one.
// Multiple threads might race here to create new connections. Keep only one of them active.
+ final long preResolveHost = System.nanoTime();
+ final InetSocketAddress resolvedAddress = new InetSocketAddress(remoteHost, remotePort);
+ final long hostResolveTimeMs = (System.nanoTime() - preResolveHost) / 1000000;
+ if (hostResolveTimeMs > 2000) {
+ logger.warn("DNS resolution for {} took {} ms", resolvedAddress, hostResolveTimeMs);
+ } else {
+ logger.trace("DNS resolution for {} took {} ms", resolvedAddress, hostResolveTimeMs);
+ }
+
synchronized (clientPool.locks[clientIndex]) {
cachedClient = clientPool.clients[clientIndex];
if (cachedClient != null) {
if (cachedClient.isActive()) {
- logger.trace("Returning cached connection to {}: {}", address, cachedClient);
+ logger.trace("Returning cached connection to {}: {}", resolvedAddress, cachedClient);
return cachedClient;
} else {
- logger.info("Found inactive connection to {}, creating a new one.", address);
+ logger.info("Found inactive connection to {}, creating a new one.", resolvedAddress);
}
}
- clientPool.clients[clientIndex] = createClient(address);
+ clientPool.clients[clientIndex] = createClient(resolvedAddress);
return clientPool.clients[clientIndex];
}
}
@@ -235,7 +247,7 @@ public class TransportClientFactory implements Closeable {
}
long postBootstrap = System.nanoTime();
- logger.debug("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",
+ logger.info("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",
address, (postBootstrap - preConnect) / 1000000, (postBootstrap - preBootstrap) / 1000000);
return client;
diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
index f0e2004d2d..8a69223c88 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportResponseHandler.java
@@ -64,9 +64,9 @@ public class TransportResponseHandler extends MessageHandler<ResponseMessage> {
public TransportResponseHandler(Channel channel) {
this.channel = channel;
- this.outstandingFetches = new ConcurrentHashMap<StreamChunkId, ChunkReceivedCallback>();
- this.outstandingRpcs = new ConcurrentHashMap<Long, RpcResponseCallback>();
- this.streamCallbacks = new ConcurrentLinkedQueue<StreamCallback>();
+ this.outstandingFetches = new ConcurrentHashMap<>();
+ this.outstandingRpcs = new ConcurrentHashMap<>();
+ this.streamCallbacks = new ConcurrentLinkedQueue<>();
this.timeOfLastRequestNs = new AtomicLong(0);
}
diff --git a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java
index 66227f96a1..4f8781b42a 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/protocol/MessageWithHeader.java
@@ -18,6 +18,7 @@
package org.apache.spark.network.protocol;
import java.io.IOException;
+import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import javax.annotation.Nullable;
@@ -44,6 +45,14 @@ class MessageWithHeader extends AbstractReferenceCounted implements FileRegion {
private long totalBytesTransferred;
/**
+ * When the write buffer size is larger than this limit, I/O will be done in chunks of this size.
+   * The size should not be too large, as that would needlessly enlarge the underlying memory
+   * copy. e.g. if the available network buffer is smaller than this limit, the data cannot be
+   * sent in one single write operation, yet a memory copy of this size would still be made.
+ */
+ private static final int NIO_BUFFER_LIMIT = 256 * 1024;
+
+ /**
* Construct a new MessageWithHeader.
*
* @param managedBuffer the {@link ManagedBuffer} that the message body came from. This needs to
@@ -128,8 +137,27 @@ class MessageWithHeader extends AbstractReferenceCounted implements FileRegion {
}
private int copyByteBuf(ByteBuf buf, WritableByteChannel target) throws IOException {
- int written = target.write(buf.nioBuffer());
+ ByteBuffer buffer = buf.nioBuffer();
+ int written = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
+ target.write(buffer) : writeNioBuffer(target, buffer);
buf.skipBytes(written);
return written;
}
+
+ private int writeNioBuffer(
+ WritableByteChannel writeCh,
+ ByteBuffer buf) throws IOException {
+ int originalLimit = buf.limit();
+ int ret = 0;
+
+ try {
+ int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
+ buf.limit(buf.position() + ioSize);
+ ret = writeCh.write(buf);
+ } finally {
+ buf.limit(originalLimit);
+ }
+
+ return ret;
+ }
}
diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
index e2222ae085..ae7e520b2f 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
@@ -63,7 +63,7 @@ public class OneForOneStreamManager extends StreamManager {
// For debugging purposes, start with a random stream id to help identifying different streams.
// This does not need to be globally unique, only unique to this class.
nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000);
- streams = new ConcurrentHashMap<Long, StreamState>();
+ streams = new ConcurrentHashMap<>();
}
@Override
diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/TransportFrameDecoder.java b/common/network-common/src/main/java/org/apache/spark/network/util/TransportFrameDecoder.java
index bd1830e6ab..fcec7dfd0c 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/util/TransportFrameDecoder.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/util/TransportFrameDecoder.java
@@ -140,7 +140,7 @@ public class TransportFrameDecoder extends ChannelInboundHandlerAdapter {
}
// Otherwise, create a composite buffer.
- CompositeByteBuf frame = buffers.getFirst().alloc().compositeBuffer();
+ CompositeByteBuf frame = buffers.getFirst().alloc().compositeBuffer(Integer.MAX_VALUE);
while (remaining > 0) {
ByteBuf next = nextBufferForFrame(remaining);
remaining -= next.readableBytes();