author    Josh Rosen <joshrosen@databricks.com>    2015-11-18 16:00:35 -0800
committer Reynold Xin <rxin@databricks.com>        2015-11-18 16:00:35 -0800
commit    4b117121900e5f242e7c8f46a69164385f0da7cc (patch)
tree      3134b152d511565a7bc4ba7b2cc8e61f859a9f2a /network
parent    c07a50b86254578625be777b1890ff95e832ac6e (diff)
[SPARK-11495] Fix potential socket / file handle leaks that were found via static analysis
The HP Fortify Open Source Review team (https://www.hpfod.com/open-source-review-project) reported a handful of potential resource leaks that were discovered using their static analysis tool. We should fix the issues identified by their scan.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #9455 from JoshRosen/fix-potential-resource-leaks.
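Both files below are fixed with the same pattern: wrap the writes in try/finally and close the stream with Guava's Closeables.close(closeable, swallowIOException), suppressing an IOException from close() only when the try body has already failed, so the original failure is not masked. A minimal, self-contained sketch of that pattern follows; the file name and buffer size here are illustrative only and are not taken from the patch.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Random;

import com.google.common.io.Closeables;

public class CloseablesPatternSketch {
  public static void main(String[] args) throws IOException {
    // Illustrative temp file; the patch uses a shuffle test file instead.
    File testFile = File.createTempFile("closeables-sketch", ".bin");
    testFile.deleteOnExit();
    RandomAccessFile fp = new RandomAccessFile(testFile, "rw");
    // Suppress the IOException from close() only if the body failed first,
    // so a close() failure never hides the original write failure.
    boolean shouldSuppressIOException = true;
    try {
      byte[] fileContent = new byte[1024];
      new Random().nextBytes(fileContent);
      fp.write(fileContent);
      shouldSuppressIOException = false;
    } finally {
      Closeables.close(fp, shouldSuppressIOException);
    }
    System.out.println("wrote " + testFile.length() + " bytes to " + testFile);
  }
}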
Diffstat (limited to 'network')
-rw-r--r--  network/common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java     | 15
-rw-r--r--  network/shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java | 32
2 files changed, 31 insertions(+), 16 deletions(-)
diff --git a/network/common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java b/network/common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
index dc5fa1cee6..50a324e293 100644
--- a/network/common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
+++ b/network/common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
@@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
+import com.google.common.io.Closeables;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -78,10 +79,15 @@ public class ChunkFetchIntegrationSuite {
testFile = File.createTempFile("shuffle-test-file", "txt");
testFile.deleteOnExit();
RandomAccessFile fp = new RandomAccessFile(testFile, "rw");
- byte[] fileContent = new byte[1024];
- new Random().nextBytes(fileContent);
- fp.write(fileContent);
- fp.close();
+ boolean shouldSuppressIOException = true;
+ try {
+ byte[] fileContent = new byte[1024];
+ new Random().nextBytes(fileContent);
+ fp.write(fileContent);
+ shouldSuppressIOException = false;
+ } finally {
+ Closeables.close(fp, shouldSuppressIOException);
+ }
final TransportConf conf = new TransportConf("shuffle", new SystemPropertyConfigProvider());
fileChunk = new FileSegmentManagedBuffer(conf, testFile, 10, testFile.length() - 25);
@@ -117,6 +123,7 @@ public class ChunkFetchIntegrationSuite {
@AfterClass
public static void tearDown() {
+ bufferChunk.release();
server.close();
clientFactory.close();
testFile.delete();
diff --git a/network/shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java b/network/shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java
index 3fdde054ab..7ac1ca128a 100644
--- a/network/shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java
+++ b/network/shuffle/src/test/java/org/apache/spark/network/shuffle/TestShuffleDataContext.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import com.google.common.io.Closeables;
import com.google.common.io.Files;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
@@ -60,21 +61,28 @@ public class TestShuffleDataContext {
public void insertSortShuffleData(int shuffleId, int mapId, byte[][] blocks) throws IOException {
String blockId = "shuffle_" + shuffleId + "_" + mapId + "_0";
- OutputStream dataStream = new FileOutputStream(
- ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".data"));
- DataOutputStream indexStream = new DataOutputStream(new FileOutputStream(
- ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".index")));
+ OutputStream dataStream = null;
+ DataOutputStream indexStream = null;
+ boolean suppressExceptionsDuringClose = true;
- long offset = 0;
- indexStream.writeLong(offset);
- for (byte[] block : blocks) {
- offset += block.length;
- dataStream.write(block);
+ try {
+ dataStream = new FileOutputStream(
+ ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".data"));
+ indexStream = new DataOutputStream(new FileOutputStream(
+ ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".index")));
+
+ long offset = 0;
indexStream.writeLong(offset);
+ for (byte[] block : blocks) {
+ offset += block.length;
+ dataStream.write(block);
+ indexStream.writeLong(offset);
+ }
+ suppressExceptionsDuringClose = false;
+ } finally {
+ Closeables.close(dataStream, suppressExceptionsDuringClose);
+ Closeables.close(indexStream, suppressExceptionsDuringClose);
}
-
- dataStream.close();
- indexStream.close();
}
/** Creates reducer blocks in a hash-based data format within our local dirs. */
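On Java 7 and later, try-with-resources offers similar leak-safety: if the body throws, exceptions from close() are attached as suppressed rather than masking the primary failure, and both streams are still closed. As a hedged sketch only (this is not what the patch does; the patch keeps the explicit Closeables.close calls shown above), insertSortShuffleData could also be written as below, assuming the imports and fields (localDirs, subDirsPerLocalDir) already present in TestShuffleDataContext.

// Sketch: same writes as the patched method, expressed with try-with-resources.
public void insertSortShuffleData(int shuffleId, int mapId, byte[][] blocks) throws IOException {
  String blockId = "shuffle_" + shuffleId + "_" + mapId + "_0";
  try (OutputStream dataStream = new FileOutputStream(
         ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".data"));
       DataOutputStream indexStream = new DataOutputStream(new FileOutputStream(
         ExternalShuffleBlockResolver.getFile(localDirs, subDirsPerLocalDir, blockId + ".index")))) {
    long offset = 0;
    indexStream.writeLong(offset);
    for (byte[] block : blocks) {
      offset += block.length;
      dataStream.write(block);
      indexStream.writeLong(offset);
    }
  } // Both streams are closed here even if a write throws.
}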