author     Tathagata Das <tathagata.das1565@gmail.com>  2016-07-29 19:59:35 -0700
committer  Yin Huai <yhuai@databricks.com>  2016-07-29 19:59:35 -0700
commit     bbc247548ac6faeca15afc05c266cee37ef13416 (patch)
tree       1a2e47d643f5e1a4fb6ed1ebd80774ab821ab358 /sql/catalyst/src
parent     2182e4322da6ba732f99ae75dce00f76f1cdc4d9 (diff)
[SPARK-16748][SQL] SparkExceptions during planning should not be wrapped in TreeNodeException
## What changes were proposed in this pull request?

We do not want SparkExceptions from job failures in the planning phase to create TreeNodeException. Hence, do not wrap SparkException in TreeNodeException.

## How was this patch tested?

New unit test.

Author: Tathagata Das <tathagata.das1565@gmail.com>

Closes #14395 from tdas/SPARK-16748.
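The patch was verified with a new unit test, but that test is not included in this view (the diff below is limited to 'sql/catalyst/src'). As a rough, hypothetical sketch of what such a test could look like — the suite name, the `Literal(1)` tree, and the assertions are illustrative, not the actual test from #14395:

```scala
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.errors.{attachTree, TreeNodeException}
import org.apache.spark.sql.catalyst.expressions.Literal
import org.scalatest.FunSuite

// Hypothetical sketch; not the test added in #14395.
class AttachTreeBehaviorSuite extends FunSuite {

  // Any TreeNode works as the attachment point; a literal expression is convenient.
  private val tree = Literal(1)

  test("SparkException from a planning-phase job failure escapes unwrapped") {
    val e = intercept[SparkException] {
      attachTree(tree, "test") { throw new SparkException("job failed") }
    }
    assert(e.getMessage === "job failed")
  }

  test("other non-fatal exceptions are still wrapped in TreeNodeException") {
    val e = intercept[TreeNodeException[_]] {
      attachTree(tree, "test") { throw new RuntimeException("boom") }
    }
    assert(e.getCause.isInstanceOf[RuntimeException])
  }
}
```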
Diffstat (limited to 'sql/catalyst/src')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
index 0420b4b538..0d45f371fa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/errors/package.scala
@@ -17,7 +17,10 @@
 package org.apache.spark.sql.catalyst
 
+import scala.util.control.NonFatal
+
 import org.apache.spark.sql.catalyst.trees.TreeNode
+import org.apache.spark.SparkException
 
 /**
  * Functions for attaching and retrieving trees that are associated with errors.
  */
@@ -47,7 +50,10 @@ package object errors {
    */
   def attachTree[TreeType <: TreeNode[_], A](tree: TreeType, msg: String = "")(f: => A): A = {
     try f catch {
-      case e: Exception => throw new TreeNodeException(tree, msg, e)
+      // SPARK-16748: We do not want SparkExceptions from job failures in the planning phase
+      // to create TreeNodeException. Hence, wrap exception only if it is not SparkException.
+      case NonFatal(e) if !e.isInstanceOf[SparkException] =>
+        throw new TreeNodeException(tree, msg, e)
     }
   }
 }
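For context, `scala.util.control.NonFatal` matches any Throwable except fatal ones (VirtualMachineError, InterruptedException, ControlThrowable, and similar), so the new guard narrows the old `case e: Exception` in two ways: SparkException now propagates unchanged, and InterruptedException is no longer swallowed into a TreeNodeException. A minimal, self-contained sketch of the resulting control flow, using stand-in exception classes rather than the real Spark types:

```scala
import scala.util.control.NonFatal

object AttachTreeDemo extends App {
  // Stand-ins for illustration; the real SparkException and TreeNodeException live in Spark.
  class DemoSparkException(msg: String) extends Exception(msg)
  class DemoTreeNodeException(msg: String, cause: Throwable) extends Exception(msg, cause)

  // Mirrors the patched attachTree: wrap only non-fatal exceptions that are not
  // the Spark one; fatal errors and DemoSparkException propagate unchanged.
  def attachContext[A](msg: String)(f: => A): A =
    try f catch {
      case NonFatal(e) if !e.isInstanceOf[DemoSparkException] =>
        throw new DemoTreeNodeException(msg, e)
    }

  // The Spark-style exception escapes unwrapped...
  try attachContext("planning") { throw new DemoSparkException("job failed") }
  catch { case e: DemoSparkException => println(s"unwrapped: ${e.getMessage}") }

  // ...while other exceptions arrive wrapped, carrying the planning context.
  try attachContext("planning") { throw new RuntimeException("boom") }
  catch { case e: DemoTreeNodeException => println(s"wrapped cause: ${e.getCause}") }
}
```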