path: root/core
author      CodingCat <zhunansjtu@gmail.com>    2014-06-15 23:47:58 -0700
committer   Patrick Wendell <pwendell@gmail.com>    2014-06-15 23:47:58 -0700
commit      716c88aa147762f7f617adf34a17edd681d9a4ff (patch)
tree        19db4d7ef27adaf4b3d91defd0a6efaa6eac46c1 /core
parent      119b06a04f6df3949b3b074a18f791bbc732ac31 (diff)
download    spark-716c88aa147762f7f617adf34a17edd681d9a4ff.tar.gz
            spark-716c88aa147762f7f617adf34a17edd681d9a4ff.tar.bz2
            spark-716c88aa147762f7f617adf34a17edd681d9a4ff.zip
SPARK-2039: apply output dir existence checking for all output formats
https://issues.apache.org/jira/browse/SPARK-2039

apply output dir existence checking for all output formats

Author: CodingCat <zhunansjtu@gmail.com>

Closes #1088 from CodingCat/SPARK-2039 and squashes the following commits:

c52747a [CodingCat] apply output dir existence checking for all output formats
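The user-visible effect: with spark.hadoop.validateOutputSpecs left at its default of true, the output-spec check (which for file-based formats rejects an already existing output directory) now runs for every configured OutputFormat, not only subclasses of FileOutputFormat. A minimal sketch of the check in action; the app name, master URL, and output path below are illustrative, not part of the patch:

import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("spark-2039-demo").setMaster("local[*]"))
val pairs = sc.parallelize(Seq(("a", 1), ("b", 2)))

pairs.saveAsTextFile("/tmp/spark-2039-out")      // first save passes checkOutputSpecs
// pairs.saveAsTextFile("/tmp/spark-2039-out")   // a second save to the same directory fails the check

// Overwriting can still be allowed explicitly by disabling the validation:
// new SparkConf().set("spark.hadoop.validateOutputSpecs", "false")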
Diffstat (limited to 'core')
-rw-r--r--    core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala    6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index b6ad9b6c3e..fe36c80e0b 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -787,8 +787,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
val outfmt = job.getOutputFormatClass
val jobFormat = outfmt.newInstance
- if (self.conf.getBoolean("spark.hadoop.validateOutputSpecs", true) &&
- jobFormat.isInstanceOf[NewFileOutputFormat[_, _]]) {
+ if (self.conf.getBoolean("spark.hadoop.validateOutputSpecs", true)) {
// FileOutputFormat ignores the filesystem parameter
jobFormat.checkOutputSpecs(job)
}
@@ -854,8 +853,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
logDebug("Saving as hadoop file of type (" + keyClass.getSimpleName + ", " +
valueClass.getSimpleName + ")")
- if (self.conf.getBoolean("spark.hadoop.validateOutputSpecs", true) &&
- outputFormatInstance.isInstanceOf[FileOutputFormat[_, _]]) {
+ if (self.conf.getBoolean("spark.hadoop.validateOutputSpecs", true)) {
// FileOutputFormat ignores the filesystem parameter
val ignoredFs = FileSystem.get(conf)
conf.getOutputFormat.checkOutputSpecs(ignoredFs, conf)
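
With the isInstanceOf[FileOutputFormat] guard removed in this second hunk, the old-API save path calls checkOutputSpecs on whatever OutputFormat the JobConf names, so validation written into a custom, non-file format is now honored before any tasks launch. A hedged sketch of such a format; ValidatingOutputFormat and the my.output.target property are hypothetical, not from Spark or Hadoop:

import java.io.IOException
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.mapred.{JobConf, OutputFormat, RecordWriter}
import org.apache.hadoop.util.Progressable

class ValidatingOutputFormat[K, V] extends OutputFormat[K, V] {
  // Invoked by saveAsHadoopDataset now that the FileOutputFormat-only guard is gone.
  override def checkOutputSpecs(ignored: FileSystem, job: JobConf): Unit = {
    if (job.get("my.output.target") == null) {
      throw new IOException("my.output.target must be set before writing")
    }
  }

  // A real implementation would return a writer for the external system; omitted in this sketch.
  override def getRecordWriter(
      ignored: FileSystem,
      job: JobConf,
      name: String,
      progress: Progressable): RecordWriter[K, V] =
    throw new UnsupportedOperationException("sketch only")
}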