author    Xin Ren <iamshrek@126.com>    2016-05-30 08:40:03 -0500
committer Sean Owen <sowen@cloudera.com>    2016-05-30 08:40:03 -0500
commit    5728aa558e44f056f3e5a7f8726ab174d3830103 (patch)
tree      28895b5ade8f36ad38801c502386c94a6e950bc6 /streaming
parent    1360a6d636dd812a27955fc85df8e0255db60dfa (diff)
[SPARK-15645][STREAMING] Fix some typos of Streaming module
## What changes were proposed in this pull request?

No code change, just some typo fixing.

## How was this patch tested?

Manually ran the project build with tests; the build was successful.

Author: Xin Ren <iamshrek@126.com>

Closes #13385 from keypointt/codeWalkThroughStreaming.
Diffstat (limited to 'streaming')
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala          | 2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/receiver/BlockGenerator.scala  | 6
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/receiver/Receiver.scala        | 6
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala   | 3
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala | 2
5 files changed, 9 insertions, 10 deletions
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
index 01dcfcf24b..147e8c1290 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
@@ -52,7 +52,7 @@ import org.apache.spark.util.{CallSite, Utils}
* `join`. These operations are automatically available on any DStream of pairs
* (e.g., DStream[(Int, Int)] through implicit conversions.
*
- * DStreams internally is characterized by a few basic properties:
+ * A DStream internally is characterized by a few basic properties:
* - A list of other DStreams that the DStream depends on
* - A time interval at which the DStream generates an RDD
* - A function that is used to generate an RDD after each time interval
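The three properties listed in the scaladoc above correspond one-to-one to DStream's abstract members. As a minimal sketch (not part of this commit; `ConstantDStream` and its constructor parameters are hypothetical), a custom DStream exposing those properties might look like:

```scala
import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Duration, StreamingContext, Time}
import org.apache.spark.streaming.dstream.DStream

// Hypothetical sketch: a DStream that returns the same RDD every batch.
// Each override corresponds to one of the "basic properties" above.
class ConstantDStream[T: ClassTag](
    ssc_ : StreamingContext,
    batchInterval: Duration,
    rdd: RDD[T]) extends DStream[T](ssc_) {

  // A list of other DStreams that this DStream depends on (none here).
  override def dependencies: List[DStream[_]] = List()

  // The time interval at which this DStream generates an RDD.
  override def slideDuration: Duration = batchInterval

  // The function used to generate an RDD after each time interval.
  override def compute(validTime: Time): Option[RDD[T]] = Some(rdd)
}
```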
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/BlockGenerator.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/BlockGenerator.scala
index 4592e015ed..90309c0145 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/BlockGenerator.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/BlockGenerator.scala
@@ -86,13 +86,13 @@ private[streaming] class BlockGenerator(
/**
* The BlockGenerator can be in 5 possible states, in the order as follows.
*
- * - Initialized: Nothing has been started
+ * - Initialized: Nothing has been started.
* - Active: start() has been called, and it is generating blocks on added data.
* - StoppedAddingData: stop() has been called, the adding of data has been stopped,
* but blocks are still being generated and pushed.
* - StoppedGeneratingBlocks: Generating of blocks has been stopped, but
* they are still being pushed.
- * - StoppedAll: Everything has stopped, and the BlockGenerator object can be GCed.
+ * - StoppedAll: Everything has been stopped, and the BlockGenerator object can be GCed.
*/
private object GeneratorState extends Enumeration {
type GeneratorState = Value
@@ -148,7 +148,7 @@ private[streaming] class BlockGenerator(
blockIntervalTimer.stop(interruptTimer = false)
synchronized { state = StoppedGeneratingBlocks }
- // Wait for the queue to drain and mark generated as stopped
+ // Wait for the queue to drain and mark state as StoppedAll
logInfo("Waiting for block pushing thread to terminate")
blockPushingThread.join()
synchronized { state = StoppedAll }
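BlockGenerator and its GeneratorState enumeration are private[streaming], so the lifecycle documented above cannot be exercised directly from user code. A standalone sketch of the same five-state machine, under a hypothetical `GeneratorStateSketch` name, could be:

```scala
// Standalone sketch of the five-state lifecycle documented above.
// GeneratorStateSketch is a hypothetical stand-in, not the actual
// private[streaming] enumeration.
object GeneratorStateSketch extends Enumeration {
  type GeneratorState = Value
  val Initialized,             // nothing has been started
      Active,                  // start() called; blocks generated from added data
      StoppedAddingData,       // stop() called; blocks still generated and pushed
      StoppedGeneratingBlocks, // generation stopped; queued blocks still pushed
      StoppedAll = Value       // everything stopped; object can be GCed

  // States only ever advance in declaration order, which is why stop()
  // can join the pushing thread and then mark the state as StoppedAll.
  def isValidTransition(from: GeneratorState, to: GeneratorState): Boolean =
    to.id == from.id + 1
}
```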
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/Receiver.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/Receiver.scala
index 5157ca62dc..d91a64df32 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/Receiver.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/Receiver.scala
@@ -32,7 +32,7 @@ import org.apache.spark.storage.StorageLevel
* should define the setup steps necessary to start receiving data,
* and `onStop()` should define the cleanup steps necessary to stop receiving data.
* Exceptions while receiving can be handled either by restarting the receiver with `restart(...)`
- * or stopped completely by `stop(...)` or
+ * or stopped completely by `stop(...)`.
*
* A custom receiver in Scala would look like this.
*
@@ -45,7 +45,7 @@ import org.apache.spark.storage.StorageLevel
* // Call store(...) in those threads to store received data into Spark's memory.
*
* // Call stop(...), restart(...) or reportError(...) on any thread based on how
- * // different errors needs to be handled.
+ * // different errors need to be handled.
*
* // See corresponding method documentation for more details
* }
@@ -71,7 +71,7 @@ import org.apache.spark.storage.StorageLevel
* // Call store(...) in those threads to store received data into Spark's memory.
*
* // Call stop(...), restart(...) or reportError(...) on any thread based on how
- * // different errors needs to be handled.
+ * // different errors need to be handled.
*
* // See corresponding method documentation for more details
* }
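The commented skeleton in the scaladoc above can be fleshed out into a runnable receiver. Below is a rough sketch in the spirit of the standard socket-receiver example; `SocketLineReceiver` and its host/port parameters are illustrative, not part of this commit:

```scala
import java.io.{BufferedReader, InputStreamReader}
import java.net.Socket
import java.nio.charset.StandardCharsets

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver

// Illustrative custom receiver that reads newline-delimited records
// from a TCP socket, following the skeleton in the scaladoc above.
class SocketLineReceiver(host: String, port: Int)
  extends Receiver[String](StorageLevel.MEMORY_AND_DISK_2) {

  def onStart(): Unit = {
    // Start a thread that receives data; onStart() must return promptly.
    new Thread("Socket Line Receiver") {
      override def run(): Unit = receive()
    }.start()
  }

  def onStop(): Unit = {
    // The receiving thread checks isStopped() and exits on its own,
    // so no explicit cleanup is needed here.
  }

  private def receive(): Unit = {
    try {
      val socket = new Socket(host, port)
      val reader = new BufferedReader(
        new InputStreamReader(socket.getInputStream, StandardCharsets.UTF_8))
      var line = reader.readLine()
      while (!isStopped() && line != null) {
        store(line) // hand each received record to Spark's memory
        line = reader.readLine()
      }
      reader.close()
      socket.close()
      // Schedule a restart when the connection closes.
      restart("Trying to connect again")
    } catch {
      case t: Throwable => restart("Error receiving data", t)
    }
  }
}
```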
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala
index 8f9421fc09..19c88f1ee0 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala
@@ -19,7 +19,6 @@ package org.apache.spark.streaming.scheduler
import scala.util.{Failure, Success, Try}
-import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Checkpoint, CheckpointWriter, Time}
@@ -239,7 +238,7 @@ class JobGenerator(jobScheduler: JobScheduler) extends Logging {
logInfo("Restarted JobGenerator at " + restartTime)
}
- /** Generate jobs and perform checkpoint for the given `time`. */
+ /** Generate jobs and perform checkpointing for the given `time`. */
private def generateJobs(time: Time) {
// Checkpoint all RDDs marked for checkpointing to ensure their lineages are
// truncated periodically. Otherwise, we may run into stack overflows (SPARK-6847).
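The comment above about truncating lineages refers to core RDD checkpointing. A self-contained sketch (hypothetical paths and counts) of the stack-overflow scenario SPARK-6847 describes, and how periodic checkpointing avoids it:

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Sketch of why lineages must be truncated periodically (SPARK-6847):
// each map() adds one level to the RDD lineage, and serializing a very
// deep lineage can overflow the stack. checkpoint() cuts the chain.
object LineageTruncationSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("lineage-sketch").setMaster("local[2]"))
    sc.setCheckpointDir("/tmp/spark-checkpoints") // illustrative path

    var rdd = sc.parallelize(1 to 1000)
    for (i <- 1 to 1000) {
      rdd = rdd.map(_ + 1)
      if (i % 100 == 0) {
        rdd.checkpoint() // mark for checkpointing; lineage is cut here...
        rdd.count()      // ...once an action actually materializes the RDD
      }
    }
    println(rdd.toDebugString) // stays shallow instead of ~1000 levels deep
    sc.stop()
  }
}
```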
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala
index 9aa2f0bbb9..b9d898a723 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala
@@ -20,7 +20,7 @@ package org.apache.spark.streaming.scheduler
import java.util.concurrent.{CountDownLatch, TimeUnit}
import scala.collection.mutable.HashMap
-import scala.concurrent.{ExecutionContext, Future}
+import scala.concurrent.ExecutionContext
import scala.language.existentials
import scala.util.{Failure, Success}