author     Liwei Lin <lwlin7@gmail.com>        2016-06-15 11:52:36 -0700
committer  Reynold Xin <rxin@databricks.com>   2016-06-15 11:52:36 -0700
commit     9b234b55d1b5e4a7c80e482b3e297bfb8b583a56 (patch)
tree       c904fd24870a3c7bc1a5b403425f57b9cc683956 /core/src
parent     e1585cc74853c497271eecdc943c0eabe1aeb4c1 (diff)
[SPARK-15518][CORE][FOLLOW-UP] Rename LocalSchedulerBackendEndpoint -> LocalSchedulerBackend
## What changes were proposed in this pull request?

This patch is a follow-up to https://github.com/apache/spark/pull/13288, completing the renaming:

- LocalSchedulerBackendEndpoint -> LocalSchedulerBackend

## How was this patch tested?

Updated test cases to reflect the name change.

Author: Liwei Lin <lwlin7@gmail.com>

Closes #13683 from lw-lin/rename-backend.
Diffstat (limited to 'core/src')
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala                              8
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala               4
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackend.scala (renamed from core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackendEndpoint.scala)  12
-rw-r--r--  core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala       14
4 files changed, 19 insertions, 19 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 3c5498782c..d56946e932 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -58,7 +58,7 @@ import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, StandaloneSchedulerBackend}
import org.apache.spark.scheduler.cluster.mesos.{MesosCoarseGrainedSchedulerBackend, MesosFineGrainedSchedulerBackend}
-import org.apache.spark.scheduler.local.LocalSchedulerBackendEndpoint
+import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
@@ -2429,7 +2429,7 @@ object SparkContext extends Logging {
master match {
case "local" =>
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
- val backend = new LocalSchedulerBackendEndpoint(sc.getConf, scheduler, 1)
+ val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
scheduler.initialize(backend)
(backend, scheduler)
@@ -2441,7 +2441,7 @@ object SparkContext extends Logging {
throw new SparkException(s"Asked to run locally with $threadCount threads")
}
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
- val backend = new LocalSchedulerBackendEndpoint(sc.getConf, scheduler, threadCount)
+ val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
@@ -2451,7 +2451,7 @@ object SparkContext extends Logging {
// local[N, M] means exactly N threads with M failures
val threadCount = if (threads == "*") localCpuCount else threads.toInt
val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
- val backend = new LocalSchedulerBackendEndpoint(sc.getConf, scheduler, threadCount)
+ val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
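The hunks above are where the renamed class is actually instantiated: the `local`, `local[N]`, and `local[N, M]` master strings all resolve to a `LocalSchedulerBackend` constructed with the appropriate thread count. As a rough illustration, and not part of this patch, the hypothetical snippet below shows which of these branches a user-facing master string exercises; only `SparkConf`, `SparkContext`, and the RDD calls are real public API.

```scala
import org.apache.spark.{SparkConf, SparkContext}

object LocalMasterDemo {
  def main(args: Array[String]): Unit = {
    // "local"       -> 1 thread,            maxTaskFailures = 1
    // "local[4]"    -> 4 threads,           maxTaskFailures = 1
    // "local[*]"    -> one thread per core, maxTaskFailures = 1
    // "local[4, 2]" -> 4 threads,           maxTaskFailures = 2
    val conf = new SparkConf().setAppName("local-backend-demo").setMaster("local[4]")
    val sc = new SparkContext(conf)
    // The tasks of this job are launched by the LocalSchedulerBackend, at most 4 at a time.
    val sum = sc.parallelize(1 to 100, numSlices = 4).map(_ * 2).sum()
    println(s"sum = $sum")
    sc.stop()
  }
}
```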
diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
index c3adc28685..4282606589 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
@@ -33,13 +33,13 @@ import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.scheduler.TaskLocality.TaskLocality
-import org.apache.spark.scheduler.local.LocalSchedulerBackendEndpoint
+import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{AccumulatorV2, ThreadUtils, Utils}
/**
* Schedules tasks for multiple types of clusters by acting through a SchedulerBackend.
- * It can also work with a local setup by using a [[LocalSchedulerBackendEndpoint]] and setting
+ * It can also work with a local setup by using a [[LocalSchedulerBackend]] and setting
* isLocal to true. It handles common logic, like determining a scheduling order across jobs, waking
* up to launch speculative tasks, etc.
*
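The updated Scaladoc keeps the existing architecture: TaskSchedulerImpl owns the scheduling logic and acts through whatever SchedulerBackend it is initialized with, the local backend being just one implementation. A deliberately simplified, hypothetical sketch of that split follows; none of these "Mini" types are Spark's real internal interfaces.

```scala
// The scheduler delegates "run something on free cores" to a pluggable backend,
// mirroring scheduler.initialize(backend) in the SparkContext hunks above.
trait MiniBackend {
  def start(): Unit
  def reviveOffers(): Unit // ask the scheduler which queued tasks to launch
}

class MiniScheduler(isLocal: Boolean) {
  private var backend: MiniBackend = _
  def initialize(b: MiniBackend): Unit = backend = b
  def submit(job: String): Unit = {
    println(s"queued $job (isLocal = $isLocal)")
    backend.reviveOffers() // launching decisions flow through the backend
  }
}

class MiniLocalBackend(totalCores: Int) extends MiniBackend {
  def start(): Unit = println(s"local backend started with $totalCores cores")
  def reviveOffers(): Unit = println(s"offering $totalCores cores for queued tasks")
}

object MiniSchedulerDemo extends App {
  val scheduler = new MiniScheduler(isLocal = true)
  val backend = new MiniLocalBackend(totalCores = 2)
  scheduler.initialize(backend)
  backend.start()
  scheduler.submit("job-0")
}
```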
diff --git a/core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackendEndpoint.scala b/core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackend.scala
index ee06588379..e386052814 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackendEndpoint.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/local/LocalSchedulerBackend.scala
@@ -39,15 +39,15 @@ private case class KillTask(taskId: Long, interruptThread: Boolean)
private case class StopExecutor()
/**
- * Calls to [[LocalSchedulerBackendEndpoint]] are all serialized through LocalEndpoint. Using an
- * RpcEndpoint makes the calls on [[LocalSchedulerBackendEndpoint]] asynchronous, which is necessary
- * to prevent deadlock between [[LocalSchedulerBackendEndpoint]] and the [[TaskSchedulerImpl]].
+ * Calls to [[LocalSchedulerBackend]] are all serialized through LocalEndpoint. Using an
+ * RpcEndpoint makes the calls on [[LocalSchedulerBackend]] asynchronous, which is necessary
+ * to prevent deadlock between [[LocalSchedulerBackend]] and the [[TaskSchedulerImpl]].
*/
private[spark] class LocalEndpoint(
override val rpcEnv: RpcEnv,
userClassPath: Seq[URL],
scheduler: TaskSchedulerImpl,
- executorBackend: LocalSchedulerBackendEndpoint,
+ executorBackend: LocalSchedulerBackend,
private val totalCores: Int)
extends ThreadSafeRpcEndpoint with Logging {
@@ -93,9 +93,9 @@ private[spark] class LocalEndpoint(
/**
* Used when running a local version of Spark where the executor, backend, and master all run in
* the same JVM. It sits behind a [[TaskSchedulerImpl]] and handles launching tasks on a single
- * Executor (created by the [[LocalSchedulerBackendEndpoint]]) running locally.
+ * Executor (created by the [[LocalSchedulerBackend]]) running locally.
*/
-private[spark] class LocalSchedulerBackendEndpoint(
+private[spark] class LocalSchedulerBackend(
conf: SparkConf,
scheduler: TaskSchedulerImpl,
val totalCores: Int)
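The renamed class keeps the concurrency design the Scaladoc describes: requests to the backend never call into the scheduler on the caller's thread; each one becomes a message handled by LocalEndpoint on a single RPC thread, which is what breaks the potential deadlock between LocalSchedulerBackend and TaskSchedulerImpl. Below is a hypothetical, non-Spark sketch of that "serialize every call through one message loop" pattern, using a plain single-threaded executor where Spark uses RpcEnv and a ThreadSafeRpcEndpoint.

```scala
import java.util.concurrent.Executors

sealed trait Msg
case object ReviveOffers extends Msg
final case class StatusUpdate(taskId: Long, finished: Boolean) extends Msg

// All messages are handled one at a time on a dedicated thread, so the handler
// never runs on the caller's stack and cannot deadlock against the caller's locks.
class SerializedEndpoint(handle: Msg => Unit) {
  private val loop = Executors.newSingleThreadExecutor()
  def send(msg: Msg): Unit =
    loop.execute(new Runnable { def run(): Unit = handle(msg) }) // returns immediately
  def shutdown(): Unit = loop.shutdown()
}

object EndpointDemo extends App {
  val endpoint = new SerializedEndpoint({
    case ReviveOffers           => println("offering local cores to the scheduler")
    case StatusUpdate(id, done) => println(s"task $id finished = $done")
  })
  endpoint.send(ReviveOffers)                      // asynchronous fire-and-forget
  endpoint.send(StatusUpdate(0L, finished = true))
  endpoint.shutdown()                              // queued messages still drain before exit
}
```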
diff --git a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
index 6e5655437a..7d75a93ff6 100644
--- a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
@@ -23,7 +23,7 @@ import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.{SchedulerBackend, TaskScheduler, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.cluster.mesos.{MesosCoarseGrainedSchedulerBackend, MesosFineGrainedSchedulerBackend}
-import org.apache.spark.scheduler.local.LocalSchedulerBackendEndpoint
+import org.apache.spark.scheduler.local.LocalSchedulerBackend
class SparkContextSchedulerCreationSuite
@@ -58,7 +58,7 @@ class SparkContextSchedulerCreationSuite
test("local") {
val sched = createTaskScheduler("local")
sched.backend match {
- case s: LocalSchedulerBackendEndpoint => assert(s.totalCores === 1)
+ case s: LocalSchedulerBackend => assert(s.totalCores === 1)
case _ => fail()
}
}
@@ -66,7 +66,7 @@ class SparkContextSchedulerCreationSuite
test("local-*") {
val sched = createTaskScheduler("local[*]")
sched.backend match {
- case s: LocalSchedulerBackendEndpoint =>
+ case s: LocalSchedulerBackend =>
assert(s.totalCores === Runtime.getRuntime.availableProcessors())
case _ => fail()
}
@@ -76,7 +76,7 @@ class SparkContextSchedulerCreationSuite
val sched = createTaskScheduler("local[5]")
assert(sched.maxTaskFailures === 1)
sched.backend match {
- case s: LocalSchedulerBackendEndpoint => assert(s.totalCores === 5)
+ case s: LocalSchedulerBackend => assert(s.totalCores === 5)
case _ => fail()
}
}
@@ -85,7 +85,7 @@ class SparkContextSchedulerCreationSuite
val sched = createTaskScheduler("local[* ,2]")
assert(sched.maxTaskFailures === 2)
sched.backend match {
- case s: LocalSchedulerBackendEndpoint =>
+ case s: LocalSchedulerBackend =>
assert(s.totalCores === Runtime.getRuntime.availableProcessors())
case _ => fail()
}
@@ -95,7 +95,7 @@ class SparkContextSchedulerCreationSuite
val sched = createTaskScheduler("local[4, 2]")
assert(sched.maxTaskFailures === 2)
sched.backend match {
- case s: LocalSchedulerBackendEndpoint => assert(s.totalCores === 4)
+ case s: LocalSchedulerBackend => assert(s.totalCores === 4)
case _ => fail()
}
}
@@ -119,7 +119,7 @@ class SparkContextSchedulerCreationSuite
val sched = createTaskScheduler("local", "client", conf)
sched.backend match {
- case s: LocalSchedulerBackendEndpoint => assert(s.defaultParallelism() === 16)
+ case s: LocalSchedulerBackend => assert(s.defaultParallelism() === 16)
case _ => fail()
}
}
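The last test above checks that spark.default.parallelism flows through the renamed backend's defaultParallelism(). Its user-level counterpart, shown here as a hypothetical example rather than part of the patch, looks like this:

```scala
import org.apache.spark.{SparkConf, SparkContext}

object DefaultParallelismDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("default-parallelism-demo")
      .setMaster("local")                     // LocalSchedulerBackend with a single core
      .set("spark.default.parallelism", "16") // same setting the test exercises
    val sc = new SparkContext(conf)
    println(sc.defaultParallelism)            // prints 16, not 1
    sc.stop()
  }
}
```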