author     Sandy Ryza <sandy@cloudera.com>       2014-09-02 11:34:55 -0700
committer  Matei Zaharia <matei@databricks.com>  2014-09-02 11:34:55 -0700
commit     81b9d5b628229ed69aa9dae45ec4c94068dcd71e (patch)
tree       4a01b9824de2093f669c69e90134a6353fd8d71a /core
parent     066f31a6b213121441fc9618abd5bae4a706a215 (diff)
SPARK-3052. Misleading and spurious FileSystem closed errors whenever a job fails while reading from Hadoop

Author: Sandy Ryza <sandy@cloudera.com>

Closes #1956 from sryza/sandy-spark-3052 and squashes the following commits:

815813a [Sandy Ryza] SPARK-3052. Misleading and spurious FileSystem closed errors whenever a job fails while reading from Hadoop
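The pattern the patch introduces, restated as a self-contained sketch: the names RecordReaderCloseGuard and closeQuietly are hypothetical, and the shutdown-flag object merely stands in for Spark's internal Utils.inShutdown(), whose implementation is not part of this diff. The idea is that close() failures observed while the JVM is shutting down are expected noise (Hadoop's cached FileSystem objects are typically torn down by their own shutdown hook, so reader.close() then fails with "Filesystem closed") and should not be logged as warnings.

object RecordReaderCloseGuard {
  import java.util.concurrent.atomic.AtomicBoolean

  // Flag flipped by a JVM shutdown hook; a stand-in for Spark's
  // internal Utils.inShutdown(), not its actual implementation.
  private val shuttingDown = new AtomicBoolean(false)
  Runtime.getRuntime.addShutdownHook(new Thread(new Runnable {
    def run(): Unit = shuttingDown.set(true)
  }))

  def inShutdown: Boolean = shuttingDown.get()

  // Mirrors the patched close path: suppress the warning when the JVM
  // is shutting down, since close() is then expected to fail.
  def closeQuietly(reader: java.io.Closeable): Unit = {
    try {
      reader.close()
    } catch {
      case e: Exception =>
        if (!inShutdown) {
          Console.err.println(s"Exception in close(): $e") // logWarning in Spark
        }
    }
  }
}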
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala     9
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala  7
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index c8623314c9..036dcc4966 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -42,7 +42,8 @@ import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.executor.{DataReadMethod, InputMetrics}
import org.apache.spark.rdd.HadoopRDD.HadoopMapPartitionsWithSplitRDD
-import org.apache.spark.util.NextIterator
+import org.apache.spark.util.{NextIterator, Utils}
+
/**
* A Spark split class that wraps around a Hadoop InputSplit.
@@ -228,7 +229,11 @@ class HadoopRDD[K, V](
try {
reader.close()
} catch {
- case e: Exception => logWarning("Exception in RecordReader.close()", e)
+ case e: Exception => {
+ if (!Utils.inShutdown()) {
+ logWarning("Exception in RecordReader.close()", e)
+ }
+ }
}
}
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
index 58f707b9b4..4c84b3f623 100644
--- a/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -35,6 +35,7 @@ import org.apache.spark.SerializableWritable
import org.apache.spark.{SparkContext, TaskContext}
import org.apache.spark.executor.{DataReadMethod, InputMetrics}
import org.apache.spark.rdd.NewHadoopRDD.NewHadoopMapPartitionsWithSplitRDD
+import org.apache.spark.util.Utils
private[spark] class NewHadoopPartition(
rddId: Int,
@@ -153,7 +154,11 @@ class NewHadoopRDD[K, V](
try {
reader.close()
} catch {
- case e: Exception => logWarning("Exception in RecordReader.close()", e)
+ case e: Exception => {
+ if (!Utils.inShutdown()) {
+ logWarning("Exception in RecordReader.close()", e)
+ }
+ }
}
}
}
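This diff does not show how Utils.inShutdown() itself works. One standard, state-free way to detect JVM shutdown — plausibly what such a helper does, though that is an assumption here — is to probe Runtime.addShutdownHook, which throws IllegalStateException once shutdown has begun:

// Probe-based shutdown detection: addShutdownHook throws
// IllegalStateException if the JVM is already shutting down.
object ShutdownProbe {
  def inShutdown(): Boolean = {
    val hook = new Thread(new Runnable { def run(): Unit = () })
    try {
      Runtime.getRuntime.addShutdownHook(hook)
      Runtime.getRuntime.removeShutdownHook(hook)
      false
    } catch {
      case _: IllegalStateException => true
    }
  }
}

The probe needs no global state, at the cost of registering and removing a throwaway hook on every call.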