author     Sean Owen <sowen@cloudera.com>    2016-09-01 12:13:07 -0700
committer  Josh Rosen <joshrosen@databricks.com>    2016-09-01 12:13:07 -0700
commit     3893e8c576cf1a6decc18701267ce7cd8caaf521 (patch)
tree       e7a7b61f13a348f52ae0a25162157b28203b58ca /core/src
parent     2be5f8d7e0819de03971d0af6fa310793d2d0e65 (diff)
[SPARK-17331][CORE][MLLIB] Avoid allocating 0-length arrays
## What changes were proposed in this pull request?

Avoid allocating some 0-length arrays, especially in UTF8String, and prefer Array.empty in Scala over Array[T]().

## How was this patch tested?

Jenkins

Author: Sean Owen <sowen@cloudera.com>

Closes #14895 from srowen/SPARK-17331.
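The Scala half of the change comes down to two micro-patterns: prefer `Array.empty[T]` (essentially a direct `new Array[T](0)` in the Scala versions Spark built against) over `Array[T]()`, which routes the empty varargs through `Array.apply` and its per-call overhead, and make a defensive copy with `clone()` rather than appending an existing array onto a freshly built empty one. The sketch below is illustrative only and is not part of the patch; `Status` is a hypothetical stand-in for `MapStatus` so the example is self-contained.

```scala
import scala.reflect.ClassTag

// Standalone sketch (not Spark code): Status is a hypothetical stand-in for MapStatus.
object EmptyArraySketch {
  final case class Status(id: Int)

  // Old form: Array[T]() is Array.apply(xs: T*), which passes the (empty) varargs
  // as a Seq and then builds the result array from it.
  def oldDefault[T: ClassTag](): Array[T] = Array[T]()

  // New form: Array.empty[T] creates the 0-length array directly, skipping the
  // varargs indirection; a shared constant can avoid even that per-call allocation.
  def newDefault[T: ClassTag](): Array[T] = Array.empty[T]

  def main(args: Array[String]): Unit = {
    val statuses = Array(Status(1), Status(2))

    // Old form in MapOutputTracker: build an empty array, then append everything onto it.
    val copiedViaConcat = Array[Status]() ++ statuses
    // New form: clone() makes the defensive copy in one step.
    val copiedViaClone = statuses.clone()

    println(oldDefault[Status]().length)                  // 0
    println(newDefault[Status]().length)                  // 0
    println(copiedViaConcat.sameElements(copiedViaClone)) // true: same contents
    println(copiedViaClone eq statuses)                   // false: still an independent copy
  }
}
```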
Diffstat (limited to 'core/src')
-rw-r--r--  core/src/main/scala/org/apache/spark/MapOutputTracker.scala       | 4 ++--
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala | 2 +-
2 files changed, 3 insertions, 3 deletions
diff --git a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
index 486d535da0..7f8f0f5131 100644
--- a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
+++ b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala
@@ -383,7 +383,7 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf,
/** Register multiple map output information for the given shuffle */
def registerMapOutputs(shuffleId: Int, statuses: Array[MapStatus], changeEpoch: Boolean = false) {
- mapStatuses.put(shuffleId, Array[MapStatus]() ++ statuses)
+ mapStatuses.put(shuffleId, statuses.clone())
if (changeEpoch) {
incrementEpoch()
}
@@ -535,7 +535,7 @@ private[spark] class MapOutputTrackerMaster(conf: SparkConf,
true
case None =>
logDebug("cached status not found for : " + shuffleId)
- statuses = mapStatuses.getOrElse(shuffleId, Array[MapStatus]())
+ statuses = mapStatuses.getOrElse(shuffleId, Array.empty[MapStatus])
epochGotten = epoch
false
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala
index 32931d59ac..b5738b9a95 100644
--- a/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/ZippedWithIndexRDD.scala
@@ -43,7 +43,7 @@ class ZippedWithIndexRDD[T: ClassTag](prev: RDD[T]) extends RDD[(T, Long)](prev)
@transient private val startIndices: Array[Long] = {
val n = prev.partitions.length
if (n == 0) {
- Array[Long]()
+ Array.empty
} else if (n == 1) {
Array(0L)
} else {