author     Zheng RuiFeng <ruifengz@foxmail.com>  2016-05-26 22:39:14 -0700
committer  Reynold Xin <rxin@databricks.com>     2016-05-26 22:39:14 -0700
commit     6b1a6180e7bd45b0a0ec47de9f7c7956543f4dfa (patch)
tree       6dc232bdc5955f64ebbf327c57d95d4c8346524b /streaming/src
parent     ee3609a2ef55ae5a2797e5ffe06c2849cbd11e15 (diff)
[MINOR] Fix Typos 'a -> an'
## What changes were proposed in this pull request?

`a` -> `an`

I used a regex to generate potential error lines:
`grep -in ' a [aeiou]' mllib/src/main/scala/org/apache/spark/ml/*/*scala`
and reviewed them line by line.

## How was this patch tested?

Local build; `lint-java` checking.

Author: Zheng RuiFeng <ruifengz@foxmail.com>

Closes #13317 from zhengruifeng/a_an.
Diffstat (limited to 'streaming/src')
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala                          |  2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala                    | 10
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisor.scala         |  2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala     |  2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManager.scala |  2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala          |  2
-rw-r--r--  streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala                        |  2
7 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
index 6ececb1062..0b11026863 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
@@ -154,7 +154,7 @@ object Checkpoint extends Logging {
Utils.tryWithSafeFinally {
// ObjectInputStream uses the last defined user-defined class loader in the stack
- // to find classes, which maybe the wrong class loader. Hence, a inherited version
+ // to find classes, which maybe the wrong class loader. Hence, an inherited version
// of ObjectInputStream is used to explicitly use the current thread's default class
// loader to find and load classes. This is a well know Java issue and has popped up
// in other places (e.g., http://jira.codehaus.org/browse/GROOVY-1627)
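The fixed comment describes a well-known Java serialization pitfall. A minimal sketch of the pattern it refers to (not Spark's actual helper in Checkpoint.scala, which carries more logic): subclass ObjectInputStream and resolve classes against the current thread's context class loader.

```scala
import java.io.{InputStream, ObjectInputStream, ObjectStreamClass}

// Minimal sketch of the pattern described in the comment above; Spark's own
// helper differs in detail. Classes are resolved with the current thread's
// context class loader rather than the last user-defined loader on the stack.
class ContextClassLoaderObjectInputStream(in: InputStream)
    extends ObjectInputStream(in) {
  override def resolveClass(desc: ObjectStreamClass): Class[_] =
    Class.forName(desc.getName, false,
      Thread.currentThread().getContextClassLoader)
}
```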
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
index 928739a416..b524af9578 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
@@ -322,7 +322,7 @@ class StreamingContext private[streaming] (
}
/**
- * Create a input stream from network source hostname:port, where data is received
+ * Create an input stream from network source hostname:port, where data is received
* as serialized blocks (serialized using the Spark's serializer) that can be directly
* pushed into the block manager without deserializing them. This is the most efficient
* way to receive data.
@@ -341,7 +341,7 @@ class StreamingContext private[streaming] (
}
/**
- * Create a input stream that monitors a Hadoop-compatible filesystem
+ * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
@@ -359,7 +359,7 @@ class StreamingContext private[streaming] (
}
/**
- * Create a input stream that monitors a Hadoop-compatible filesystem
+ * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system.
@@ -379,7 +379,7 @@ class StreamingContext private[streaming] (
}
/**
- * Create a input stream that monitors a Hadoop-compatible filesystem
+ * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
@@ -403,7 +403,7 @@ class StreamingContext private[streaming] (
}
/**
- * Create a input stream that monitors a Hadoop-compatible filesystem
+ * Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as text files (using key as LongWritable, value
* as Text and input format as TextInputFormat). Files must be written to the
* monitored directory by "moving" them from another location within the same
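For context, a minimal usage sketch of the file-monitoring input streams whose docs are fixed above; the application name, directory path, and batch interval are placeholders, not values from this commit.

```scala
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Placeholder app name, master, path, and batch interval.
val conf = new SparkConf().setAppName("FileStreamSketch").setMaster("local[2]")
val ssc = new StreamingContext(conf, Seconds(10))

// Monitors a Hadoop-compatible directory for new files and reads them as
// text (key LongWritable, value Text, input format TextInputFormat),
// per the fixed doc comments. Files must be moved into the directory.
val lines = ssc.textFileStream("hdfs:///tmp/streaming-input")
lines.count().print()

ssc.start()
ssc.awaitTermination()
```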
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisor.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisor.scala
index 42fc84c19b..faf6db82d5 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisor.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisor.scala
@@ -79,7 +79,7 @@ private[streaming] abstract class ReceiverSupervisor(
optionalBlockId: Option[StreamBlockId]
): Unit
- /** Store a iterator of received data as a data block into Spark's memory. */
+ /** Store an iterator of received data as a data block into Spark's memory. */
def pushIterator(
iterator: Iterator[_],
optionalMetadata: Option[Any],
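pushIterator is what a custom receiver's store(Iterator) call eventually reaches. A hedged sketch of such a receiver (the class name and data are illustrative, not from this commit):

```scala
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver

// Illustrative receiver; store(Iterator) is routed to the supervisor's
// pushIterator, which stores the received data as one block in Spark's memory.
class StaticReceiver(data: Seq[String])
    extends Receiver[String](StorageLevel.MEMORY_ONLY) {
  override def onStart(): Unit = store(data.iterator)
  override def onStop(): Unit = ()
}
```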
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
index 4fb0f8caac..5ba09a54af 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
@@ -129,7 +129,7 @@ private[streaming] class ReceiverSupervisorImpl(
pushAndReportBlock(ArrayBufferBlock(arrayBuffer), metadataOption, blockIdOption)
}
- /** Store a iterator of received data as a data block into Spark's memory. */
+ /** Store an iterator of received data as a data block into Spark's memory. */
def pushIterator(
iterator: Iterator[_],
metadataOption: Option[Any],
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManager.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManager.scala
index f7b6584893..fb5587edec 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManager.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManager.scala
@@ -38,7 +38,7 @@ import org.apache.spark.util.{Clock, Utils}
* - Periodically take the average batch completion times and compare with the batch interval
* - If (avg. proc. time / batch interval) >= scaling up ratio, then request more executors.
* The number of executors requested is based on the ratio = (avg. proc. time / batch interval).
- * - If (avg. proc. time / batch interval) <= scaling down ratio, then try to kill a executor that
+ * - If (avg. proc. time / batch interval) <= scaling down ratio, then try to kill an executor that
* is not running a receiver.
*
* This features should ideally be used in conjunction with backpressure, as backpressure ensures
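A hypothetical sketch of the scaling rule described in the fixed comment; the names and the request-count heuristic are illustrative, not ExecutorAllocationManager's actual internals.

```scala
// Hypothetical sketch of the decision rule above; names and the rounding
// heuristic are illustrative, not the actual implementation.
def scalingDecision(avgProcTimeMs: Double, batchIntervalMs: Double,
                    scaleUpRatio: Double, scaleDownRatio: Double): String = {
  val ratio = avgProcTimeMs / batchIntervalMs
  if (ratio >= scaleUpRatio) {
    // Request executors in proportion to how far behind the batches run.
    s"request ${math.max(math.round(ratio).toInt, 1)} executor(s)"
  } else if (ratio <= scaleDownRatio) {
    "kill an executor that is not running a receiver"
  } else {
    "leave the allocation unchanged"
  }
}
```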
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
index 4f124a1356..8e1a090618 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/InputInfoTracker.scala
@@ -67,7 +67,7 @@ private[streaming] class InputInfoTracker(ssc: StreamingContext) extends Logging
if (inputInfos.contains(inputInfo.inputStreamId)) {
throw new IllegalStateException(s"Input stream ${inputInfo.inputStreamId} for batch" +
- s"$batchTime is already added into InputInfoTracker, this is a illegal state")
+ s"$batchTime is already added into InputInfoTracker, this is an illegal state")
}
inputInfos += ((inputInfo.inputStreamId, inputInfo))
}
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
index 1ef26d2f86..60122b4813 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala
@@ -86,7 +86,7 @@ private[ui] class BatchPage(parent: StreamingTab) extends WebUIPage("batch") {
/**
* Generate a row for a Spark Job. Because duplicated output op infos needs to be collapsed into
- * one cell, we use "rowspan" for the first row of a output op.
+ * one cell, we use "rowspan" for the first row of an output op.
*/
private def generateNormalJobRow(
outputOpData: OutputOperationUIData,
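The rowspan idea in the fixed comment, as a small hedged sketch using Scala XML literals (as Spark's UI pages do); the helper name and fields are made up for illustration.

```scala
// Illustrative sketch of the rowspan technique: the first row of an output
// op carries a cell spanning all of that op's job rows, so the duplicated
// op info collapses into one cell. Helper name and fields are hypothetical.
def outputOpCell(opDescription: String, numJobRows: Int): scala.xml.Elem =
  <td rowspan={numJobRows.toString}>{opDescription}</td>
```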