author     Reynold Xin <rxin@apache.org>  2013-10-10 13:27:38 -0700
committer  Reynold Xin <rxin@apache.org>  2013-10-10 13:27:38 -0700
commit     357733d292230968c6bda68dbe9407c560d97c91 (patch)
tree       4ff7800bf9d96fb6dfcf3165454b029d73426df3
parent     ddf64f019fa98010e0a59e6e1559f4e3f8b25b5f (diff)
Rename kill -> cancel in user-facing API / documentation.
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala           | 10
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala          | 38
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala |  8
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/JobWaiter.scala    |  2
4 files changed, 27 insertions(+), 31 deletions(-)
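
Before the per-file hunks, a minimal sketch of how the renamed user-facing entry point could be exercised after this change. The local master URL, the application name, and the deliberately slow job are illustrative assumptions; only cancelAllJobs() itself is taken from the SparkContext hunk below.

import org.apache.spark.SparkContext

object CancelAllJobsSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical local context; master URL and app name are placeholders.
    val sc = new SparkContext("local[2]", "cancel-all-jobs-sketch")

    // Run a deliberately slow job on a background thread so the main thread
    // stays free to issue the cancellation.
    val worker = new Thread(new Runnable {
      override def run(): Unit = {
        try {
          sc.parallelize(1 to 1000000, 8).map { i => Thread.sleep(1); i }.count()
        } catch {
          // A cancelled action is expected to surface a failure on the calling thread.
          case e: Exception => println("Job ended early: " + e.getMessage)
        }
      }
    })
    worker.start()

    Thread.sleep(2000)
    sc.cancelAllJobs()   // renamed from killAllJobs() in this commit
    worker.join()
    sc.stop()
  }
}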
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 515a3b83af..2a82c5c5d7 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -866,15 +866,11 @@ class SparkContext(
}
/**
- * Kill a running job.
+ * Cancel all jobs that have been scheduled or are running.
*/
- def killJob(jobId: Int) {
- dagScheduler.killJob(jobId)
- }
-
- def killAllJobs() {
+ def cancelAllJobs() {
dagScheduler.activeJobs.foreach { job =>
- killJob(job.jobId)
+ dagScheduler.cancelJob(job.jobId)
}
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 18c4c01fd0..8d1dbf71cf 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -40,13 +40,13 @@ import org.apache.hadoop.conf.{Configuration, Configurable}
* created from the Configuration.
*/
class HadoopFileRDD[K, V](
- sc: SparkContext,
- path: String,
- broadcastedConf: Broadcast[SerializableWritable[Configuration]],
- inputFormatClass: Class[_ <: InputFormat[K, V]],
- keyClass: Class[K],
- valueClass: Class[V],
- minSplits: Int)
+ sc: SparkContext,
+ path: String,
+ broadcastedConf: Broadcast[SerializableWritable[Configuration]],
+ inputFormatClass: Class[_ <: InputFormat[K, V]],
+ keyClass: Class[K],
+ valueClass: Class[V],
+ minSplits: Int)
extends HadoopRDD[K, V](sc, broadcastedConf, inputFormatClass, keyClass, valueClass, minSplits) {
override def getJobConf(): JobConf = {
@@ -85,21 +85,21 @@ private[spark] class HadoopPartition(rddId: Int, idx: Int, @transient s: InputSp
* A base class that provides core functionality for reading data partitions stored in Hadoop.
*/
class HadoopRDD[K, V](
- sc: SparkContext,
- broadcastedConf: Broadcast[SerializableWritable[Configuration]],
- inputFormatClass: Class[_ <: InputFormat[K, V]],
- keyClass: Class[K],
- valueClass: Class[V],
- minSplits: Int)
+ sc: SparkContext,
+ broadcastedConf: Broadcast[SerializableWritable[Configuration]],
+ inputFormatClass: Class[_ <: InputFormat[K, V]],
+ keyClass: Class[K],
+ valueClass: Class[V],
+ minSplits: Int)
extends RDD[(K, V)](sc, Nil) with Logging {
def this(
- sc: SparkContext,
- conf: JobConf,
- inputFormatClass: Class[_ <: InputFormat[K, V]],
- keyClass: Class[K],
- valueClass: Class[V],
- minSplits: Int) = {
+ sc: SparkContext,
+ conf: JobConf,
+ inputFormatClass: Class[_ <: InputFormat[K, V]],
+ keyClass: Class[K],
+ valueClass: Class[V],
+ minSplits: Int) = {
this(
sc,
sc.broadcast(new SerializableWritable(conf))
diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
index 93303a9d36..7278237a41 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
@@ -265,7 +265,7 @@ class DAGScheduler(
/**
* Submit a job to the job scheduler and get a JobWaiter object back. The JobWaiter object
- * can be used to block until the the job finishes executing or can be used to kill the job.
+ * can be used to block until the the job finishes executing or can be used to cancel the job.
*/
def submitJob[T, U](
rdd: RDD[T],
@@ -334,10 +334,10 @@ class DAGScheduler(
}
/**
- * Kill a job that is running or waiting in the queue.
+ * Cancel a job that is running or waiting in the queue.
*/
- def killJob(jobId: Int): Unit = this.synchronized {
- logInfo("Asked to kill job " + jobId)
+ def cancelJob(jobId: Int): Unit = this.synchronized {
+ logInfo("Asked to cancel job " + jobId)
eventQueue.put(JobCancelled(jobId))
}
diff --git a/core/src/main/scala/org/apache/spark/scheduler/JobWaiter.scala b/core/src/main/scala/org/apache/spark/scheduler/JobWaiter.scala
index 7e152a6f00..727454725d 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/JobWaiter.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/JobWaiter.scala
@@ -40,7 +40,7 @@ private[spark] class JobWaiter[T](
private var jobResult: JobResult = if (jobFinished) JobSucceeded else null
def kill() {
- dagScheduler.killJob(jobId)
+ dagScheduler.cancelJob(jobId)
}
override def taskSucceeded(index: Int, result: Any): Unit = synchronized {
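
Taken together, the JobWaiter and DAGScheduler hunks keep the internal kill() hook but route it through the renamed cancelJob, which only logs the request and enqueues a JobCancelled event; the actual teardown happens when the scheduler's event loop later processes that event, so callers such as JobWaiter.kill() return immediately. A simplified, self-contained restatement of that chain, using Sketch-suffixed stand-ins rather than the real scheduler classes:

import java.util.concurrent.LinkedBlockingQueue

// Illustrative stand-ins only; the real classes live in org.apache.spark.scheduler
// and carry many more members than shown here.
sealed trait DAGSchedulerEvent
case class JobCancelled(jobId: Int) extends DAGSchedulerEvent

class DAGSchedulerSketch {
  private val eventQueue = new LinkedBlockingQueue[DAGSchedulerEvent]()

  // Renamed from killJob in this commit: log the request and enqueue a cancellation event.
  def cancelJob(jobId: Int): Unit = this.synchronized {
    println("Asked to cancel job " + jobId)   // the real code uses logInfo
    eventQueue.put(JobCancelled(jobId))
  }
}

class JobWaiterSketch(dagScheduler: DAGSchedulerSketch, jobId: Int) {
  // Still named kill() internally; it now delegates to the renamed cancelJob.
  def kill(): Unit = {
    dagScheduler.cancelJob(jobId)
  }
}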