author    Tathagata Das <tathagata.das1565@gmail.com>  2015-09-21 16:47:52 -0700
committer Tathagata Das <tathagata.das1565@gmail.com>  2015-09-21 16:47:52 -0700
commit    72869883f12b6e0a4e5aad79c0ac2cfdb4d83f09 (patch)
tree      47371f29556a8342ff3ca052f2b6c812cbf90498 /core
parent    7c4f852bfc39537840f56cd8121457a0dc1ad7c1 (diff)
[SPARK-10649] [STREAMING] Prevent inheriting job group and irrelevant job description in streaming jobs
The job group and job description are passed through thread-local properties and are inherited by child threads. In the case of Spark Streaming, the streaming jobs inherit these properties from the thread that called streamingContext.start(), which may not make sense.

1. Job group: This is mainly used for cancelling a group of jobs together. It does not make sense to cancel streaming jobs this way, as the effect would be unpredictable, and it is not a valid use case anyway; to cancel a streaming context, call streamingContext.stop().

2. Job description: This is used to attach readable descriptions to jobs so they show up in the UI. The job description of the thread that calls streamingContext.start() is not useful for the streaming jobs: it does not make sense for all of the streaming jobs to share the same description, and that description may not be related to streaming at all.

The solution in this PR is meant for the Spark master branch, where local properties are inherited by cloning the properties. The job group and job description are explicitly removed in the thread that starts the streaming scheduler, so that none of the subsequent child threads inherits them. The start is also performed in a new child thread, so that setting the job group and description for streaming does not change those properties in the thread that called streamingContext.start().

Author: Tathagata Das <tathagata.das1565@gmail.com>

Closes #8781 from tdas/SPARK-10649.
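To illustrate the inheritance problem described above (a minimal sketch, not part of this patch; sc stands in for an existing SparkContext), properties set with setJobGroup live in an InheritableThreadLocal and therefore leak into any thread spawned afterwards:

  // Sketch only: shows how local properties propagate to child threads.
  sc.setJobGroup("my-group", "description set before start()")
  val child = new Thread("child") {
    override def run(): Unit = {
      // Without this fix, a job submitted here would report "my-group" and
      // the caller's description, because SparkContext's local properties
      // are cloned into child threads.
      sc.parallelize(1 to 10).count()
    }
  }
  child.start()
  child.join()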
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/util/ThreadUtils.scala       | 59
-rw-r--r--  core/src/test/scala/org/apache/spark/util/ThreadUtilsSuite.scala  | 24
2 files changed, 82 insertions, 1 deletion
diff --git a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
index ca5624a3d8..22e291a2b4 100644
--- a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
+++ b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala
@@ -21,6 +21,7 @@ package org.apache.spark.util
import java.util.concurrent._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
+import scala.util.control.NonFatal
import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
@@ -86,4 +87,62 @@ private[spark] object ThreadUtils {
val threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadName).build()
Executors.newSingleThreadScheduledExecutor(threadFactory)
}
+
+ /**
+ * Run a piece of code in a new thread and return the result. An exception in the new thread is
+ * rethrown in the caller thread with an adjusted stack trace that removes references to this
+ * method for clarity. The exception stack trace will look like the following:
+ *
+ * SomeException: exception-message
+ * at CallerClass.body-method (sourcefile.scala)
+ * at ... run in separate thread using org.apache.spark.util.ThreadUtils ... ()
+ * at CallerClass.caller-method (sourcefile.scala)
+ * ...
+ */
+ def runInNewThread[T](
+ threadName: String,
+ isDaemon: Boolean = true)(body: => T): T = {
+ @volatile var exception: Option[Throwable] = None
+ @volatile var result: T = null.asInstanceOf[T]
+
+ val thread = new Thread(threadName) {
+ override def run(): Unit = {
+ try {
+ result = body
+ } catch {
+ case NonFatal(e) =>
+ exception = Some(e)
+ }
+ }
+ }
+ thread.setDaemon(isDaemon)
+ thread.start()
+ thread.join()
+
+ exception match {
+ case Some(realException) =>
+ // Remove the part of the stack that shows method calls into this helper method.
+ // This means dropping everything from the top until the stack element
+ // ThreadUtils.runInNewThread(), and then dropping that element as well (hence the `drop(1)`).
+ val baseStackTrace = Thread.currentThread().getStackTrace().dropWhile(
+ ! _.getClassName.contains(this.getClass.getSimpleName)).drop(1)
+
+ // Remove the part of the new thread's stack that shows method calls from this helper method
+ val extraStackTrace = realException.getStackTrace.takeWhile(
+ ! _.getClassName.contains(this.getClass.getSimpleName))
+
+ // Combine the two stack traces, with a placeholder that simply notes that a
+ // helper method was used, without any further details of the helper
+ val placeHolderStackElem = new StackTraceElement(
+ s"... run in separate thread using ${ThreadUtils.getClass.getName.stripSuffix("$")} ..",
+ " ", "", -1)
+ val finalStackTrace = extraStackTrace ++ Seq(placeHolderStackElem) ++ baseStackTrace
+
+ // Update the stack trace and rethrow the exception in the caller thread
+ realException.setStackTrace(finalStackTrace)
+ throw realException
+ case None =>
+ result
+ }
+ }
}
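The streaming-side caller is outside this diffstat (limited to 'core'), but a sketch of how StreamingContext.start() might use the new helper could look like the following (sparkContext and scheduler are assumed member names, shown for illustration only):

  // Sketch only: start the scheduler in a fresh thread and clear the
  // properties that streaming jobs should not inherit.
  ThreadUtils.runInNewThread("streaming-start") {
    sparkContext.setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
    sparkContext.setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
    scheduler.start()
  }
  // Because the body runs in a child thread, clearing these properties does
  // not affect the thread that called streamingContext.start().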
diff --git a/core/src/test/scala/org/apache/spark/util/ThreadUtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/ThreadUtilsSuite.scala
index 8c51e6b14b..620e4debf4 100644
--- a/core/src/test/scala/org/apache/spark/util/ThreadUtilsSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/ThreadUtilsSuite.scala
@@ -20,8 +20,9 @@ package org.apache.spark.util
import java.util.concurrent.{CountDownLatch, TimeUnit}
-import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
+import scala.concurrent.{Await, Future}
+import scala.util.Random
import org.apache.spark.SparkFunSuite
@@ -66,4 +67,25 @@ class ThreadUtilsSuite extends SparkFunSuite {
val futureThreadName = Await.result(f, 10.seconds)
assert(futureThreadName === callerThreadName)
}
+
+ test("runInNewThread") {
+ import ThreadUtils._
+ assert(runInNewThread("thread-name") { Thread.currentThread().getName } === "thread-name")
+ assert(runInNewThread("thread-name") { Thread.currentThread().isDaemon } === true)
+ assert(
+ runInNewThread("thread-name", isDaemon = false) { Thread.currentThread().isDaemon } === false
+ )
+ val uniqueExceptionMessage = "test" + Random.nextInt()
+ val exception = intercept[IllegalArgumentException] {
+ runInNewThread("thread-name") { throw new IllegalArgumentException(uniqueExceptionMessage) }
+ }
+ assert(exception.asInstanceOf[IllegalArgumentException].getMessage === uniqueExceptionMessage)
+ assert(exception.getStackTrace.mkString("\n").contains(
+ "... run in separate thread using org.apache.spark.util.ThreadUtils ...") === true,
+ "stack trace does not contain expected place holder"
+ )
+ assert(exception.getStackTrace.mkString("\n").contains("ThreadUtils.scala") === false,
+ "stack trace contains unexpected references to ThreadUtils"
+ )
+ }
}
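For reference, a caller can observe the spliced stack trace directly (illustrative snippet; the output shape follows the scaladoc of runInNewThread above):

  try {
    ThreadUtils.runInNewThread("example") { sys.error("boom") }
  } catch {
    case e: RuntimeException =>
      // Prints the frames from the body, then the placeholder
      // "... run in separate thread using org.apache.spark.util.ThreadUtils ...",
      // then the frames of the calling thread.
      e.getStackTrace.foreach(println)
  }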