diff options
author | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-10-15 14:12:33 -0700 |
---|---|---|
committer | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-10-15 14:12:33 -0700 |
commit | 3249e0e90dd9a7b422f561c42407b6a2b3feab17 (patch) | |
tree | bd9a11879cd9306706fde05d1b4e87ab15f5c049 /core | |
parent | 678dec6680f06c2f2a5969fb608ecfdc69981b93 (diff) | |
parent | f41feb7b338b5fdd60260f5ce7cba94202102194 (diff) | |
download | spark-3249e0e90dd9a7b422f561c42407b6a2b3feab17.tar.gz spark-3249e0e90dd9a7b422f561c42407b6a2b3feab17.tar.bz2 spark-3249e0e90dd9a7b422f561c42407b6a2b3feab17.zip |
Merge pull request #59 from rxin/warning
Bump up logging level to warning for failed tasks.
Diffstat (limited to 'core')
-rw-r--r-- | core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala index e1366e0627..7bd3499300 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/ClusterTaskSetManager.scala @@ -456,13 +456,13 @@ private[spark] class ClusterTaskSetManager( val index = info.index info.markFailed() if (!successful(index)) { - logInfo("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index)) + logWarning("Lost TID %s (task %s:%d)".format(tid, taskSet.id, index)) copiesRunning(index) -= 1 // Check if the problem is a map output fetch failure. In that case, this // task will never succeed on any node, so tell the scheduler about it. reason.foreach { case fetchFailed: FetchFailed => - logInfo("Loss was due to fetch failure from " + fetchFailed.bmAddress) + logWarning("Loss was due to fetch failure from " + fetchFailed.bmAddress) sched.listener.taskEnded(tasks(index), fetchFailed, null, null, info, null) successful(index) = true tasksSuccessful += 1 @@ -471,7 +471,7 @@ private[spark] class ClusterTaskSetManager( return case TaskKilled => - logInfo("Task %d was killed.".format(tid)) + logWarning("Task %d was killed.".format(tid)) sched.listener.taskEnded(tasks(index), reason.get, null, null, info, null) return @@ -496,14 +496,14 @@ private[spark] class ClusterTaskSetManager( } if (printFull) { val locs = ef.stackTrace.map(loc => "\tat %s".format(loc.toString)) - logInfo("Loss was due to %s\n%s\n%s".format( + logWarning("Loss was due to %s\n%s\n%s".format( ef.className, ef.description, locs.mkString("\n"))) } else { logInfo("Loss was due to %s [duplicate %d]".format(ef.description, dupCount)) } case TaskResultLost => - logInfo("Lost result for TID %s on host %s".format(tid, info.host)) + logWarning("Lost result for TID %s on host %s".format(tid, info.host)) 
sched.listener.taskEnded(tasks(index), TaskResultLost, null, null, info, null) case _ => {} |