-rw-r--r--  core/src/main/scala/spark/DAGScheduler.scala |  1 +
-rw-r--r--  core/src/main/scala/spark/Executor.scala     |  2 +-
-rw-r--r--  core/src/main/scala/spark/SimpleJob.scala    | 39 ++++++++++++++++++++++++++++++++++++--
3 files changed, 39 insertions(+), 3 deletions(-)
diff --git a/core/src/main/scala/spark/DAGScheduler.scala b/core/src/main/scala/spark/DAGScheduler.scala
index 93cab9fb62..c9411f4208 100644
--- a/core/src/main/scala/spark/DAGScheduler.scala
+++ b/core/src/main/scala/spark/DAGScheduler.scala
@@ -19,6 +19,7 @@ case class CompletionEvent(task: DAGTask[_], reason: TaskEndReason, result: Any,
 sealed trait TaskEndReason
 case object Success extends TaskEndReason
 case class FetchFailed(serverUri: String, shuffleId: Int, mapId: Int, reduceId: Int) extends TaskEndReason
+case class ExceptionFailure(exception: Throwable) extends TaskEndReason
 case class OtherFailure(message: String) extends TaskEndReason
 
 /**
diff --git a/core/src/main/scala/spark/Executor.scala b/core/src/main/scala/spark/Executor.scala
index 31ba122baf..15693fc95f 100644
--- a/core/src/main/scala/spark/Executor.scala
+++ b/core/src/main/scala/spark/Executor.scala
@@ -87,7 +87,7 @@ class Executor extends org.apache.mesos.Executor with Logging {
             .build())
         }
         case t: Throwable => {
-          val reason = OtherFailure(t.toString())
+          val reason = ExceptionFailure(t)
           d.sendStatusUpdate(TaskStatus.newBuilder()
             .setTaskId(desc.getTaskId)
             .setState(TaskState.TASK_FAILED)
diff --git a/core/src/main/scala/spark/SimpleJob.scala b/core/src/main/scala/spark/SimpleJob.scala
index 6a27f159c4..bf881fb2d4 100644
--- a/core/src/main/scala/spark/SimpleJob.scala
+++ b/core/src/main/scala/spark/SimpleJob.scala
@@ -60,6 +60,15 @@ extends Job(jobId) with Logging
   var failed = false
   var causeOfFailure = ""
 
+  // How frequently to reprint duplicate exceptions in full, in milliseconds
+  val EXCEPTION_PRINT_INTERVAL = System.getProperty("spark.logging.exceptionPrintInterval", "10000").toLong
+  // Map of recent exceptions (identified by string representation and
+  // top stack frame) to duplicate count (how many times the same
+  // exception has appeared) and time the full exception was
+  // printed. This should ideally be an LRU map that can drop old
+  // exceptions automatically.
+  val recentExceptions = HashMap[String, (Int, Long)]()
+
   // Add all our tasks to the pending lists. We do this in reverse order
   // of task index so that tasks with low indices get launched first.
   for (i <- (0 until numTasks).reverse) {
@@ -229,8 +238,34 @@
           if (tasksFinished == numTasks)
             sched.jobFinished(this)
           return
-        case otherFailure: OtherFailure =>
-          logInfo("Loss was due to %s".format(otherFailure.message))
+        case ef: ExceptionFailure =>
+          val key = ef.exception.toString
+          val now = System.currentTimeMillis
+          val (printFull, dupCount) =
+            if (recentExceptions.contains(key)) {
+              val (dupCount, printTime) = recentExceptions(key)
+              if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
+                recentExceptions(key) = (0, now)
+                (true, 0)
+              } else {
+                recentExceptions(key) = (dupCount + 1, printTime)
+                (false, dupCount + 1)
+              }
+            } else {
+              recentExceptions(key) = (0, now)
+              (true, 0)
+            }
+
+          if (printFull) {
+            val stackTrace =
+              for (elem <- ef.exception.getStackTrace)
+                yield "\tat %s".format(elem.toString)
+            logInfo("Loss was due to %s\n%s".format(
+              ef.exception.toString, stackTrace.mkString("\n")))
+          } else {
+            logInfo("Loss was due to %s [duplicate %d]".format(
+              ef.exception.toString, dupCount))
+          }
         case _ => {}
       }
     }
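
The core of this change is the duplicate-exception throttling added to SimpleJob.scala: a task failure's full stack trace is logged only if the same exception string has not been printed within the last spark.logging.exceptionPrintInterval milliseconds; within that window, repeats are collapsed into a one-line "[duplicate N]" message. Below is a minimal standalone sketch of that idea under the same assumptions; the names ExceptionLogger and logLoss are illustrative only and not part of this patch, and println stands in for the Logging trait's logInfo.

// Standalone sketch of the rate-limited exception logging used above.
// Names (ExceptionLogger, logLoss) are hypothetical, not Spark API.
import scala.collection.mutable.HashMap

object ExceptionLogger {
  // Reprint a full stack trace for the same exception at most every 10 seconds
  val EXCEPTION_PRINT_INTERVAL = 10000L

  // exception string -> (duplicates since last full print, time of last full print)
  val recentExceptions = HashMap[String, (Int, Long)]()

  def logLoss(exception: Throwable): Unit = {
    val key = exception.toString
    val now = System.currentTimeMillis
    val (printFull, dupCount) = recentExceptions.get(key) match {
      case Some((dups, printTime)) if now - printTime > EXCEPTION_PRINT_INTERVAL =>
        recentExceptions(key) = (0, now)              // interval elapsed: reset and print in full
        (true, 0)
      case Some((dups, printTime)) =>
        recentExceptions(key) = (dups + 1, printTime) // still within interval: just count it
        (false, dups + 1)
      case None =>
        recentExceptions(key) = (0, now)              // first occurrence: print in full
        (true, 0)
    }
    if (printFull) {
      val stackTrace = exception.getStackTrace.map(e => "\tat " + e).mkString("\n")
      println("Loss was due to %s\n%s".format(exception, stackTrace))
    } else {
      println("Loss was due to %s [duplicate %d]".format(exception, dupCount))
    }
  }

  def main(args: Array[String]): Unit = {
    val e = new RuntimeException("task failed")
    logLoss(e)   // prints the full stack trace
    logLoss(e)   // prints "Loss was due to ... [duplicate 1]"
    logLoss(e)   // prints "Loss was due to ... [duplicate 2]"
  }
}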