author    Liwei Lin <lwlin7@gmail.com>    2016-09-07 10:04:00 +0100
committer Sean Owen <sowen@cloudera.com>  2016-09-07 10:04:00 +0100
commit    3ce3a282c8463408f9a2db93c1748e8df8087e07 (patch)
tree      7814535174f3ef7294cfd20e4dfeae28fecd4693 /external
parent    9fccde4ff80fb0fd65a9e90eb3337965e4349de4 (diff)
download  spark-3ce3a282c8463408f9a2db93c1748e8df8087e07.tar.gz
          spark-3ce3a282c8463408f9a2db93c1748e8df8087e07.tar.bz2
          spark-3ce3a282c8463408f9a2db93c1748e8df8087e07.zip
[SPARK-17359][SQL][MLLIB] Use ArrayBuffer.+=(A) instead of ArrayBuffer.append(A) in performance critical paths
## What changes were proposed in this pull request?

We should generally use `ArrayBuffer.+=(A)` rather than `ArrayBuffer.append(A)`, because `append(A)` would involve extra boxing / unboxing.

## How was this patch tested?

N/A

Author: Liwei Lin <lwlin7@gmail.com>

Closes #14914 from lw-lin/append_to_plus_eq_v2.
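For context, a minimal standalone sketch (not part of the patch) contrasting the two call styles. It assumes Scala 2.11, where `ArrayBuffer.append` is declared with a varargs parameter (`elems: A*`), so every single-element call wraps its argument in a throwaway `Seq` before appending, while `+=` takes the element directly:

```scala
import scala.collection.mutable.ArrayBuffer

// Sketch: the two ways of adding one element to an ArrayBuffer.
object AppendVsPlusEq {
  def main(args: Array[String]): Unit = {
    val errs = new ArrayBuffer[Throwable]

    // append is declared as append(elems: A*), so this call first wraps
    // the exception in a varargs Seq before appending it.
    errs.append(new RuntimeException("slow path"))

    // += takes a single element directly and appends it in place,
    // skipping the per-call wrapper allocation -- preferred in hot loops.
    errs += new RuntimeException("fast path")

    println(errs.size) // prints 2; both styles are semantically equivalent
  }
}
```

Both calls leave the buffer in the same state; the difference is purely per-call allocation overhead, which only matters on performance-critical paths like the ones touched below.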
Diffstat (limited to 'external')
-rw-r--r--  external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
index 726b5d8ec3..35acb7b09f 100644
--- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
+++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
@@ -108,7 +108,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
} else {
val missing = topicAndPartitions.diff(leaderMap.keySet)
val err = new Err
- err.append(new SparkException(s"Couldn't find leaders for ${missing}"))
+ err += new SparkException(s"Couldn't find leaders for ${missing}")
Left(err)
}
}
@@ -139,7 +139,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
respErrs.foreach { m =>
val cause = ErrorMapping.exceptionFor(m.errorCode)
val msg = s"Error getting partition metadata for '${m.topic}'. Does the topic exist?"
- errs.append(new SparkException(msg, cause))
+ errs += new SparkException(msg, cause)
}
}
}
@@ -205,11 +205,11 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
LeaderOffset(consumer.host, consumer.port, off)
}
} else {
- errs.append(new SparkException(
- s"Empty offsets for ${tp}, is ${before} before log beginning?"))
+ errs += new SparkException(
+ s"Empty offsets for ${tp}, is ${before} before log beginning?")
}
} else {
- errs.append(ErrorMapping.exceptionFor(por.error))
+ errs += ErrorMapping.exceptionFor(por.error)
}
}
}
@@ -218,7 +218,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
}
}
val missing = topicAndPartitions.diff(result.keySet)
- errs.append(new SparkException(s"Couldn't find leader offsets for ${missing}"))
+ errs += new SparkException(s"Couldn't find leader offsets for ${missing}")
Left(errs)
}
}
@@ -274,7 +274,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
if (ome.error == ErrorMapping.NoError) {
result += tp -> ome
} else {
- errs.append(ErrorMapping.exceptionFor(ome.error))
+ errs += ErrorMapping.exceptionFor(ome.error)
}
}
}
@@ -283,7 +283,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
}
}
val missing = topicAndPartitions.diff(result.keySet)
- errs.append(new SparkException(s"Couldn't find consumer offsets for ${missing}"))
+ errs += new SparkException(s"Couldn't find consumer offsets for ${missing}")
Left(errs)
}
@@ -330,7 +330,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
if (err == ErrorMapping.NoError) {
result += tp -> err
} else {
- errs.append(ErrorMapping.exceptionFor(err))
+ errs += ErrorMapping.exceptionFor(err)
}
}
}
@@ -339,7 +339,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
}
}
val missing = topicAndPartitions.diff(result.keySet)
- errs.append(new SparkException(s"Couldn't set offsets for ${missing}"))
+ errs += new SparkException(s"Couldn't set offsets for ${missing}")
Left(errs)
}
@@ -353,7 +353,7 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
fn(consumer)
} catch {
case NonFatal(e) =>
- errs.append(e)
+ errs += e
} finally {
if (consumer != null) {
consumer.close()