diff options
5 files changed, 6 insertions, 6 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 41f810dde6..ad3337d94c 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -622,7 +622,6 @@ class SparkContext( } else { val uri = new URI(path) key = uri.getScheme match { - // TODO: Have this load jars that are available on the driver // A JAR file which exists only on the driver node case null | "file" => if (SparkHadoopUtil.get.isYarnMode()) { diff --git a/core/src/main/scala/org/apache/spark/deploy/client/DriverClient.scala b/core/src/main/scala/org/apache/spark/deploy/client/DriverClient.scala index a5f57637fb..8a4cdf07bb 100644 --- a/core/src/main/scala/org/apache/spark/deploy/client/DriverClient.scala +++ b/core/src/main/scala/org/apache/spark/deploy/client/DriverClient.scala @@ -70,8 +70,9 @@ object DriverClient extends Logging { driverArgs.cmd match { case "launch" => - // TODO: Could modify env here to pass a flag indicating Spark is in deploy-driver mode - // then use that to load jars locally (e.g. truncate the filesystem path) + // TODO: We could add an env variable here and intercept it in `sc.addJar` that would + // truncate filesystem paths similar to what YARN does. For now, we just require + // people call `addJar` assuming the jar is in the same directory. 
val env = Map[String, String]() System.getenv().foreach{case (k, v) => env(k) = v} diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala index 41500bb28d..51baa35018 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala @@ -58,7 +58,7 @@ private[spark] class DriverRunner( val localJarFilename = downloadUserJar(driverDir) // Make sure user application jar is on the classpath - // TODO: This could eventually exploit ability for driver to add jars + // TODO: If we add ability to submit multiple jars they should also be added here val env = Map(driverDesc.command.environment.toSeq: _*) env("SPARK_CLASSPATH") = env.getOrElse("SPARK_CLASSPATH", "") + s":$localJarFilename" val newCommand = Command(driverDesc.command.mainClass, diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala index 2e61d394ea..fdc9a34886 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala @@ -50,7 +50,7 @@ private[spark] class ExecutorRunner( var workerThread: Thread = null var process: Process = null - // NOTE: This is now redundant with the automated shut-down enforced by the Executor. It mike + // NOTE: This is now redundant with the automated shut-down enforced by the Executor. It might // make sense to remove this in the future. 
var shutdownHook: Thread = null diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerWatcher.scala b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerWatcher.scala index e4352f1ce2..f4184bc5db 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/WorkerWatcher.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/WorkerWatcher.scala @@ -19,7 +19,7 @@ private[spark] class WorkerWatcher(workerUrl: String) extends Actor with Logging worker ! SendHeartbeat // need to send a message here to initiate connection } - // Lets us filter events only from the worker actor + // Lets us filter events only from the worker's actor system private val expectedHostPort = AddressFromURIString(workerUrl).hostPort private def isWorker(address: Address) = address.hostPort == expectedHostPort