author     Matei Zaharia <matei@eecs.berkeley.edu>    2011-06-01 11:46:31 -0700
committer  Matei Zaharia <matei@eecs.berkeley.edu>    2011-06-01 11:46:31 -0700
commit     3297706ab2faf8f646dc635abf554d1d34515f73 (patch)
tree       3e747d860e9138b92953468ad4db231e7ad64c98 /core
parent     0e5dbf2abdcd9c20b5d71807dc1298eec4fd43aa (diff)
parent     9bb448a151a63426b61d1fa3c7c44aef667d1f40 (diff)
Merge remote-tracking branch 'origin/master' into scala-2.9
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/spark/Executor.scala       |  4 ++--
-rw-r--r--  core/src/main/scala/spark/LocalScheduler.scala |  4 ++--
-rw-r--r--  core/src/main/scala/spark/SparkContext.scala   | 11 +++++++++--
3 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/spark/Executor.scala b/core/src/main/scala/spark/Executor.scala
index 7764acad1a..54e169a1a1 100644
--- a/core/src/main/scala/spark/Executor.scala
+++ b/core/src/main/scala/spark/Executor.scala
@@ -69,9 +69,9 @@ class Executor extends mesos.Executor with Logging {
           d.sendStatusUpdate(new TaskStatus(
               taskId, TaskState.TASK_FAILED, Utils.serialize(reason)))
         }
-        case e: Exception => {
+        case t: Throwable => {
           // TODO: Handle errors in tasks less dramatically
-          logError("Exception in task ID " + taskId, e)
+          logError("Exception in task ID " + taskId, t)
           System.exit(1)
         }
       }
diff --git a/core/src/main/scala/spark/LocalScheduler.scala b/core/src/main/scala/spark/LocalScheduler.scala
index 88da0c1af1..7d3ab661aa 100644
--- a/core/src/main/scala/spark/LocalScheduler.scala
+++ b/core/src/main/scala/spark/LocalScheduler.scala
@@ -36,9 +36,9 @@ private class LocalScheduler(threads: Int) extends DAGScheduler with Logging {
             logInfo("Finished task " + i)
             taskEnded(tasks(i), Success, result, accumUpdates)
           } catch {
-            case e: Exception => {
+            case t: Throwable => {
               // TODO: Do something nicer here
-              logError("Exception in task " + i, e)
+              logError("Exception in task " + i, t)
               System.exit(1)
               null
             }
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index 2f2ff0e801..5d27a3d46b 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -148,7 +148,11 @@ extends Logging {
     None
   }
 
-  private[spark] def runJob[T, U](rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int])
+  /**
+   * Run a function on a given set of partitions in an RDD and return the results.
+   * This is the main entry point to the scheduler, by which all actions get launched.
+   */
+  def runJob[T, U](rdd: RDD[T], func: Iterator[T] => U, partitions: Seq[Int])
       (implicit m: ClassManifest[U])
       : Array[U] = {
     logInfo("Starting job...")
@@ -158,7 +162,10 @@ extends Logging {
     result
   }
 
-  private[spark] def runJob[T, U](rdd: RDD[T], func: Iterator[T] => U)
+  /**
+   * Run a job on all partitions in an RDD and return the results in an array.
+   */
+  def runJob[T, U](rdd: RDD[T], func: Iterator[T] => U)
       (implicit m: ClassManifest[U])
       : Array[U] = {
     runJob(rdd, func, 0 until rdd.splits.size)
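
Note on the Executor and LocalScheduler hunks above: widening the catch from
Exception to Throwable also traps JVM Errors (such as OutOfMemoryError or
ExceptionInInitializerError), which sit beside Exception under Throwable and
would otherwise bypass the logError/System.exit path entirely. A minimal
standalone Scala sketch of the distinction; the object and method names are
illustrative only, not part of this commit:

    // CatchDemo.scala -- hypothetical demo, not Spark code.
    object CatchDemo {
      // Simulate a fatal JVM error. java.lang.Error is NOT a subclass of
      // Exception, so a `case e: Exception` pattern would not match it.
      def risky(): Unit = throw new OutOfMemoryError("simulated")

      def main(args: Array[String]) {
        try {
          risky()
        } catch {
          // Matching on Throwable covers both the Exception and Error
          // subtrees, mirroring the change made in Executor.scala above.
          case t: Throwable => println("caught: " + t)
        }
      }
    }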
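
Note on the SparkContext hunks above: runJob drops its private[spark]
modifier and gains Scaladoc, exposing the scheduler entry point as public
API. A hypothetical usage sketch against this 2011-era API; the constructor
arguments and value names are assumptions, not taken from the diff:

    // Assumes a local master string and an RDD built with parallelize,
    // both of which existed in Spark at this point but are not shown here.
    val sc = new SparkContext("local", "runJobDemo")
    val rdd = sc.parallelize(1 to 100, 4)

    // Run a function on partitions 0 and 2 only; one result per partition.
    val partialSums: Array[Int] =
      sc.runJob(rdd, (it: Iterator[Int]) => it.sum, Seq(0, 2))

    // The two-argument overload runs on all partitions
    // (0 until rdd.splits.size, per the diff).
    val allSums: Array[Int] = sc.runJob(rdd, (it: Iterator[Int]) => it.sum)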