path: root/core/src
author    wangyang <wangyang@haizhi.com>    2016-06-10 13:10:03 -0700
committer Reynold Xin <rxin@databricks.com> 2016-06-10 13:10:03 -0700
commit    026eb90644be7685971dacaabae67a293edd0133 (patch)
tree      cacc1ed1fb398d122bb5a46fd20b94574203fd58 /core/src
parent    865ec32dd997e63aea01a871d1c7b4947f43c111 (diff)
[SPARK-15875] Try to use Seq.isEmpty and Seq.nonEmpty instead of Seq.length == 0 and Seq.length > 0
## What changes were proposed in this pull request?

In Scala, immutable.List.length is a linear-time operation (it traverses every cell of the list), so we should avoid Seq.length == 0 and Seq.length > 0 and use Seq.isEmpty and Seq.nonEmpty instead.

## How was this patch tested?

Existing tests.

Author: wangyang <wangyang@haizhi.com>

Closes #13601 from yangw1234/isEmpty.
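As a rough illustration of the cost difference (not part of the patch): on an immutable List, length must walk every cons cell to produce a count, while isEmpty and nonEmpty only inspect the head. A minimal standalone sketch:

object LengthVsIsEmpty {
  def main(args: Array[String]): Unit = {
    val xs: Seq[Int] = List.tabulate(1000000)(identity)
    val slow = xs.length > 0   // traverses all 1,000,000 cells just to compare with 0
    val fast = xs.nonEmpty     // pattern-matches on the first cell only
    println(s"slow=$slow fast=$fast")
  }
}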
Diffstat (limited to 'core/src')
-rw-r--r--  core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala        | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala          | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala | 6
3 files changed, 5 insertions, 5 deletions
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala b/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
index 3df87f62f2..6a5e6f7c5a 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala
@@ -235,7 +235,7 @@ private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String
}
private def cleanupIdleWorkers() {
- while (idleWorkers.length > 0) {
+ while (idleWorkers.nonEmpty) {
val worker = idleWorkers.dequeue()
try {
// the worker will exit after closing the socket
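For context, idleWorkers is a queue that the loop above drains one worker at a time. A standalone sketch of the same drain pattern, with illustrative names and a println standing in for the socket-close logic:

import scala.collection.mutable

val idle = mutable.Queue("w1", "w2", "w3")  // stand-ins for worker handles
while (idle.nonEmpty) {                     // states the intent; no element counting
  val worker = idle.dequeue()
  println(s"stopping $worker")              // stand-in for closing the worker's socket
}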
diff --git a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
index b6366f3e68..d744d67592 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
@@ -60,7 +60,7 @@ class PartitionerAwareUnionRDD[T: ClassTag](
sc: SparkContext,
var rdds: Seq[RDD[T]]
) extends RDD[T](sc, rdds.map(x => new OneToOneDependency(x))) {
- require(rdds.length > 0)
+ require(rdds.nonEmpty)
require(rdds.forall(_.partitioner.isDefined))
require(rdds.flatMap(_.partitioner).toSet.size == 1,
"Parent RDDs have different partitioners: " + rdds.flatMap(_.partitioner))
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index fc71f8365c..6ddc72afde 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -375,14 +375,14 @@ class ExternalAppendOnlyMap[K, V, C](
/**
* Return true if there exists an input stream that still has unvisited pairs.
*/
- override def hasNext: Boolean = mergeHeap.length > 0
+ override def hasNext: Boolean = mergeHeap.nonEmpty
/**
* Select a key with the minimum hash, then combine all values with the same key from all
* input streams.
*/
override def next(): (K, C) = {
- if (mergeHeap.length == 0) {
+ if (mergeHeap.isEmpty) {
throw new NoSuchElementException
}
// Select a key from the StreamBuffer that holds the lowest key hash
@@ -397,7 +397,7 @@ class ExternalAppendOnlyMap[K, V, C](
// For all other streams that may have this key (i.e. have the same minimum key hash),
// merge in the corresponding value (if any) from that stream
val mergedBuffers = ArrayBuffer[StreamBuffer](minBuffer)
- while (mergeHeap.length > 0 && mergeHeap.head.minKeyHash == minHash) {
+ while (mergeHeap.nonEmpty && mergeHeap.head.minKeyHash == minHash) {
val newBuffer = mergeHeap.dequeue()
minCombiner = mergeIfKeyExists(minKey, minCombiner, newBuffer)
mergedBuffers += newBuffer
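The loop above keeps popping buffers while the heap's head shares the current minimum key hash, so all streams that might hold the selected key get merged. A minimal sketch of that step, assuming a simplified StreamBuffer with only the hash field:

import scala.collection.mutable

case class Buf(minKeyHash: Int)  // simplified stand-in for StreamBuffer
val byHash: Ordering[Buf] = Ordering.by((b: Buf) => -b.minKeyHash)  // min-heap ordering

val mergeHeap = mutable.PriorityQueue(Buf(1), Buf(1), Buf(3))(byHash)
val minBuffer = mergeHeap.dequeue()
val mergedBuffers = mutable.ArrayBuffer(minBuffer)
while (mergeHeap.nonEmpty && mergeHeap.head.minKeyHash == minBuffer.minKeyHash) {
  mergedBuffers += mergeHeap.dequeue()  // same minimum hash: merge this stream too
}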