author     Sean Owen <sowen@cloudera.com>  2015-04-16 10:45:32 +0100
committer  Sean Owen <sowen@cloudera.com>  2015-04-16 10:45:32 +0100
commit     6179a948371897cecb7322ebda366c2de8ecaedd
tree       40303d9c6eece5bf0f2ac9e41eeaf9e70c2e322c
parent     8370550593f3549e90ace446961281dad0cd7498
SPARK-4783 [CORE] System.exit() calls in SparkContext disrupt applications embedding Spark
Avoid `System.exit(1)` in `TaskSchedulerImpl` and convert to `SparkException`; ensure scheduler
calls `sc.stop()` even when this exception is thrown.

CC mateiz aarondav as those who may have last touched this code.

Author: Sean Owen <sowen@cloudera.com>

Closes #5492 from srowen/SPARK-4783 and squashes the following commits:

60dc682 [Sean Owen] Avoid System.exit(1) in TaskSchedulerImpl and convert to SparkException; ensure scheduler calls sc.stop() even when this exception is thrown
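For context on what the change means for a host process: before this patch, a cluster-scheduler error with no active task sets killed the entire JVM via `System.exit(1)`, taking down any application that merely embeds a `SparkContext`. With the error converted to a `SparkException`, the host can observe the failure and decide for itself how to shut down. A minimal sketch of that embedding scenario (the object name and the local master URL are illustrative assumptions, not part of this patch, and it assumes the failure surfaces through a running job):

```scala
import org.apache.spark.{SparkConf, SparkContext, SparkException}

// Hypothetical host application that embeds Spark rather than running via spark-submit.
object EmbeddingHostExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("embedding-host-example").setMaster("local[2]")
    val sc = new SparkContext(conf)
    try {
      // Ordinary work submitted by the host application.
      val sum = sc.parallelize(1 to 100).reduce(_ + _)
      println(s"sum = $sum")
    } catch {
      // After SPARK-4783, a scheduler error reaches the caller as a SparkException
      // instead of terminating the JVM, so the host process stays in control.
      case e: SparkException =>
        println(s"Spark job failed, host process continues: ${e.getMessage}")
    } finally {
      sc.stop()
    }
  }
}
```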
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala                    | 5
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala  | 9
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index 2362cc7240..ecc8bf1899 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -394,7 +394,7 @@ private[spark] class TaskSchedulerImpl(
   def error(message: String) {
     synchronized {
-      if (activeTaskSets.size > 0) {
+      if (activeTaskSets.nonEmpty) {
         // Have each task set throw a SparkException with the error
         for ((taskSetId, manager) <- activeTaskSets) {
           try {
@@ -407,8 +407,7 @@ private[spark] class TaskSchedulerImpl(
         // No task sets are active but we still got an error. Just exit since this
         // must mean the error is during registration.
         // It might be good to do something smarter here in the future.
-        logError("Exiting due to error from cluster scheduler: " + message)
-        System.exit(1)
+        throw new SparkException(s"Exiting due to error from cluster scheduler: $message")
       }
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
index ed5b7c1088..ccf1dc5af6 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
@@ -118,9 +118,12 @@ private[spark] class SparkDeploySchedulerBackend(
     notifyContext()
     if (!stopping) {
       logError("Application has been killed. Reason: " + reason)
-      scheduler.error(reason)
-      // Ensure the application terminates, as we can no longer run jobs.
-      sc.stop()
+      try {
+        scheduler.error(reason)
+      } finally {
+        // Ensure the application terminates, as we can no longer run jobs.
+        sc.stop()
+      }
     }
   }
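The second hunk relies on nothing more exotic than `try`/`finally`: the cleanup runs whether or not `scheduler.error(reason)` throws, which is what guarantees the context is stopped even though `error` can now raise a `SparkException`. A standalone sketch of that guarantee, using stand-in names rather than the real scheduler and context:

```scala
// Minimal illustration of the try/finally guarantee used in the patched dead() callback:
// the stop step runs even when the error callback throws.
object StopEvenOnErrorDemo {
  def main(args: Array[String]): Unit = {
    def error(reason: String): Unit =
      throw new RuntimeException(s"Exiting due to error from cluster scheduler: $reason")
    def stop(): Unit = println("sc.stop() stand-in: context shut down")

    try {
      try {
        error("Master removed our application: KILLED")
      } finally {
        stop() // runs before the exception continues to propagate
      }
    } catch {
      case e: RuntimeException => println(s"caught afterwards: ${e.getMessage}")
    }
  }
}
```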