author     Jacek Laskowski <jacek@japila.pl>    2016-01-11 11:29:15 -0800
committer  Shixiong Zhu <shixiong@databricks.com>    2016-01-11 11:29:15 -0800
commit     b313badaa049f847f33663c61cd70ee2f2cbebac (patch)
tree       078c8c02a13db3a9852292a71aef71511f6a726f /external
parent     9559ac5f74434cf4bf611bdcde9a216d39799826 (diff)
[STREAMING][MINOR] Typo fixes
Author: Jacek Laskowski <jacek@japila.pl>

Closes #10698 from jaceklaskowski/streaming-kafka-typo-fixes.
Diffstat (limited to 'external')
-rw-r--r--  external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala  2
-rw-r--r--  external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaRDD.scala  2
2 files changed, 2 insertions, 2 deletions
diff --git a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
index c4e18d92ee..d7885d7cc1 100644
--- a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
+++ b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
@@ -385,7 +385,7 @@ object KafkaCluster {
val seedBrokers: Array[(String, Int)] = brokers.split(",").map { hp =>
val hpa = hp.split(":")
if (hpa.size == 1) {
- throw new SparkException(s"Broker not the in correct format of <host>:<port> [$brokers]")
+ throw new SparkException(s"Broker not in the correct format of <host>:<port> [$brokers]")
}
(hpa(0), hpa(1).toInt)
}
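
For context, a minimal, self-contained sketch of the <host>:<port> parsing shown in the hunk above; the object name and the use of IllegalArgumentException in place of Spark's SparkException are illustrative stand-ins, not the actual KafkaCluster code.

// Illustrative sketch of the broker-list parsing from the hunk above.
// BrokerListParser and IllegalArgumentException are stand-ins, not Spark's code.
object BrokerListParser {
  def parse(brokers: String): Array[(String, Int)] =
    brokers.split(",").map { hp =>
      val hpa = hp.split(":")
      if (hpa.size == 1) {
        throw new IllegalArgumentException(
          s"Broker not in the correct format of <host>:<port> [$brokers]")
      }
      (hpa(0), hpa(1).toInt) // (host, port)
    }

  def main(args: Array[String]): Unit = {
    // Prints: List((kafka-1,9092), (kafka-2,9092))
    println(parse("kafka-1:9092,kafka-2:9092").toList)
    // parse("kafka-1") would throw with the corrected message shown above.
  }
}
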
diff --git a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaRDD.scala b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaRDD.scala
index 603be22818..4eb1556458 100644
--- a/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaRDD.scala
+++ b/external/kafka/src/main/scala/org/apache/spark/streaming/kafka/KafkaRDD.scala
@@ -156,7 +156,7 @@ class KafkaRDD[
var requestOffset = part.fromOffset
var iter: Iterator[MessageAndOffset] = null
- // The idea is to use the provided preferred host, except on task retry atttempts,
+ // The idea is to use the provided preferred host, except on task retry attempts,
// to minimize number of kafka metadata requests
private def connectLeader: SimpleConsumer = {
if (context.attemptNumber > 0) {
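
The comment corrected above describes the retry strategy in connectLeader. As a rough sketch only (Broker, findLeader, and chooseBroker are hypothetical stand-ins, not KafkaRDD's API), the logic amounts to favoring the preferred host on the first attempt and refreshing Kafka metadata on retries.

// Sketch only: use the preferred host on the first attempt, look the leader up
// again on retries, mirroring the comment in the hunk above.
object PreferredHostSketch {
  case class Broker(host: String, port: Int)

  def chooseBroker(preferred: Broker,
                   attemptNumber: Int,
                   findLeader: () => Broker): Broker =
    if (attemptNumber > 0) findLeader() // retry: the preferred host may no longer lead the partition
    else preferred                      // first attempt: skip the extra metadata request

  def main(args: Array[String]): Unit = {
    val preferred = Broker("kafka-1", 9092)
    val refreshed = () => Broker("kafka-2", 9092)
    println(chooseBroker(preferred, 0, refreshed)) // Broker(kafka-1,9092)
    println(chooseBroker(preferred, 1, refreshed)) // Broker(kafka-2,9092)
  }
}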