author    Dongjoon Hyun <dongjoon@apache.org>  2016-03-14 09:07:39 +0000
committer Sean Owen <sowen@cloudera.com>       2016-03-14 09:07:39 +0000
commit    acdf21970334cea9d6cfc287e4ccb8e72de9dee1 (patch)
tree      df8bcf3d80dc92ad74c5c27bd3618397205bcc86 /external
parent    e58fa19d17db5dd8a00551e20b46921f98b958f7 (diff)
[MINOR][DOCS] Fix more typos in comments/strings.
## What changes were proposed in this pull request?

This PR fixes 135 typos over 107 files:
* 121 typos in comments
* 11 typos in testcase names
* 3 typos in log messages

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11689 from dongjoon-hyun/fix_more_typos.
Diffstat (limited to 'external')
-rw-r--r--  external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/Logging.scala | 2
-rw-r--r--  external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeBatchFetcher.scala | 6
-rw-r--r--  external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala | 2
-rw-r--r--  external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala | 2
-rw-r--r--  external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala | 2
-rw-r--r--  external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala | 2
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/Logging.scala b/external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/Logging.scala
index aa530a7121..09d3fe91e4 100644
--- a/external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/Logging.scala
+++ b/external/flume-sink/src/main/scala/org/apache/spark/streaming/flume/sink/Logging.scala
@@ -101,7 +101,7 @@ private[sink] trait Logging {
   private def initializeLogging() {
     Logging.initialized = true
-    // Force a call into slf4j to initialize it. Avoids this happening from mutliple threads
+    // Force a call into slf4j to initialize it. Avoids this happening from multiple threads
     // and triggering this: http://mailman.qos.ch/pipermail/slf4j-dev/2010-April/002956.html
     log
   }
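For context on the fixed comment: slf4j's first initialization is not thread-safe, so the trait forces one logger lookup from a single synchronized path before anything else touches the log. A minimal sketch of that pattern, with illustrative names rather than the actual Spark trait:

import org.slf4j.{Logger, LoggerFactory}

object SimpleLogging {
  @volatile var initialized = false
  val initLock = new Object
}

trait SimpleLogging {
  @transient private var _log: Logger = null

  protected def log: Logger = {
    if (_log == null) {
      initializeIfNecessary()
      _log = LoggerFactory.getLogger(getClass.getName)
    }
    _log
  }

  private def initializeIfNecessary(): Unit = SimpleLogging.initLock.synchronized {
    if (!SimpleLogging.initialized) {
      SimpleLogging.initialized = true
      // Force a call into slf4j from exactly one thread so its one-time
      // setup does not race (see the slf4j-dev link in the comment above).
      LoggerFactory.getLogger(getClass.getName)
    }
  }
}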
diff --git a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeBatchFetcher.scala b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeBatchFetcher.scala
index b9d4e762ca..3555fa68b6 100644
--- a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeBatchFetcher.scala
+++ b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeBatchFetcher.scala
@@ -77,7 +77,7 @@ private[flume] class FlumeBatchFetcher(receiver: FlumePollingReceiver) extends R
   /**
    * Gets a batch of events from the specified client. This method does not handle any exceptions
-   * which will be propogated to the caller.
+   * which will be propagated to the caller.
    * @param client Client to get events from
    * @return [[Some]] which contains the event batch if Flume sent any events back, else [[None]]
    */
@@ -96,8 +96,8 @@ private[flume] class FlumeBatchFetcher(receiver: FlumePollingReceiver) extends R
   }
 
   /**
-   * Store the events in the buffer to Spark. This method will not propogate any exceptions,
-   * but will propogate any other errors.
+   * Store the events in the buffer to Spark. This method will not propagate any exceptions,
+   * but will propagate any other errors.
    * @param buffer The buffer to store
    * @return true if the data was stored without any exception being thrown, else false
    */
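The corrected Scaladoc describes a store step that swallows Exceptions but lets Errors propagate. A hedged sketch of that control flow; the receiver type here is a stand-in, not Spark's API:

import scala.collection.mutable.ArrayBuffer

object StoreSketch {
  // Stand-in for the real polling receiver; `store` simulates handoff to Spark.
  class StubReceiver {
    def store(events: ArrayBuffer[String]): Unit = println(s"stored ${events.size} events")
  }

  // Returns true if the data was stored without an Exception being thrown.
  // Only Exception is caught, so Errors (e.g. OutOfMemoryError) still propagate.
  def storeBatch(receiver: StubReceiver, buffer: ArrayBuffer[String]): Boolean = {
    try {
      receiver.store(buffer)
      true
    } catch {
      case e: Exception =>
        println(s"store failed, reporting failure: ${e.getMessage}")
        false
    }
  }
}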
diff --git a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
index 8a66621a31..726b5d8ec3 100644
--- a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
+++ b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
@@ -167,7 +167,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
     ): Either[Err, Map[TopicAndPartition, LeaderOffset]] = {
     getLeaderOffsets(topicAndPartitions, before, 1).right.map { r =>
       r.map { kv =>
-        // mapValues isnt serializable, see SI-7005
+        // mapValues isn't serializable, see SI-7005
         kv._1 -> kv._2.head
       }
     }
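The comment fixed here points at SI-7005: in Scala 2.x, Map.mapValues returns a lazy, non-serializable view, which breaks when the result is captured in a Spark closure. A short illustration of the workaround the code uses:

object MapValuesSketch extends App {
  val offsets = Map("topic-0" -> Seq(1L, 2L), "topic-1" -> Seq(3L))

  // Risky: mapValues yields a view that is not Serializable (SI-7005),
  // so shipping it inside a serialized closure can fail at runtime.
  val lazyHeads = offsets.mapValues(_.head)

  // Safe, strict alternative, as in the KafkaCluster code above.
  val strictHeads = offsets.map { kv => kv._1 -> kv._2.head }

  println(strictHeads) // Map(topic-0 -> 1, topic-1 -> 3)
}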
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala
index b5b76cb92d..23b74da642 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala
@@ -132,7 +132,7 @@ private[kinesis] object KinesisRecordProcessor extends Logging {
    * Retry the given amount of times with a random backoff time (millis) less than the
    * given maxBackOffMillis
    *
-   * @param expression expression to evalute
+   * @param expression expression to evaluate
    * @param numRetriesLeft number of retries left
    * @param maxBackOffMillis: max millis between retries
    *
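The fixed @param belongs to a retry helper that re-evaluates a by-name expression with a random sleep between attempts. A minimal sketch of that shape (a simplification, not the exact KinesisRecordProcessor.retryRandom):

import scala.util.{Failure, Random, Success, Try}

object RetrySketch {
  // Evaluate `expression`, retrying with a random backoff below
  // `maxBackOffMillis` until it succeeds or the retries are used up.
  def retryRandom[T](expression: => T, numRetriesLeft: Int, maxBackOffMillis: Int): T = {
    Try(expression) match {
      case Success(value) => value
      case Failure(e) if numRetriesLeft > 1 =>
        Thread.sleep(Random.nextInt(maxBackOffMillis).toLong)
        retryRandom(expression, numRetriesLeft - 1, maxBackOffMillis)
      case Failure(e) => throw e // retries exhausted: surface the final failure
    }
  }
}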
diff --git a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
index 2555332d22..905c33834d 100644
--- a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
+++ b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala
@@ -122,7 +122,7 @@ abstract class KinesisBackedBlockRDDTests(aggregateTestData: Boolean)
       testIsBlockValid = true)
   }
 
-  testIfEnabled("Test whether RDD is valid after removing blocks from block anager") {
+  testIfEnabled("Test whether RDD is valid after removing blocks from block manager") {
     testRDD(numPartitions = 2, numPartitionsInBM = 2, numPartitionsInKinesis = 2,
       testBlockRemove = true)
   }
diff --git a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala
index fd15b6ccdc..deac9090e2 100644
--- a/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala
+++ b/external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala
@@ -194,7 +194,7 @@ class KinesisReceiverSuite extends TestSuiteBase with Matchers with BeforeAndAft
     verify(checkpointerMock, times(1)).checkpoint()
   }
 
-  test("retry failed after exhausing all retries") {
+  test("retry failed after exhausting all retries") {
     val expectedErrorMessage = "final try error message"
     when(checkpointerMock.checkpoint())
       .thenThrow(new ThrottlingException("error message"))
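For context, the renamed test drives a mocked checkpointer until every retry is consumed and asserts that the final exception surfaces. A compact sketch of that shape, using a stand-in Checkpointer trait (not the actual KCL interface) and the RetrySketch helper above rather than the exact Spark suite:

import org.mockito.Mockito.{mock, times, verify, when}
import org.scalatest.funsuite.AnyFunSuite

trait Checkpointer { def checkpoint(): Unit } // stand-in for the KCL checkpointer

class RetryExhaustionSketch extends AnyFunSuite {
  test("retry failed after exhausting all retries") {
    val checkpointerMock = mock(classOf[Checkpointer])
    // Every attempt throws, so all retries get consumed.
    when(checkpointerMock.checkpoint()).thenThrow(new RuntimeException("final try error message"))

    val e = intercept[RuntimeException] {
      RetrySketch.retryRandom(checkpointerMock.checkpoint(), numRetriesLeft = 3, maxBackOffMillis = 10)
    }
    assert(e.getMessage === "final try error message")
    verify(checkpointerMock, times(3)).checkpoint() // initial try + 2 retries
  }
}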