author     Josh Rosen <joshrosen@databricks.com>    2016-02-25 17:04:43 -0800
committer  Andrew Or <andrew@databricks.com>        2016-02-25 17:04:43 -0800
commit     f2cfafdfe0f4b18f31bc63969e2abced1a66e896 (patch)
tree       d55934686998ba9b5db47b7973b052e0bf5d5c64 /core
parent     7a6ee8a8fe0fad78416ed7e1ac694959de5c5314 (diff)
[SPARK-13501] Remove use of Guava Stopwatch
Our nightly doc snapshot builds are failing due to some issue involving the Guava Stopwatch constructor:

```
[error] /home/jenkins/workspace/spark-master-docs/spark/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala:496: constructor Stopwatch in class Stopwatch cannot be accessed in class CoarseMesosSchedulerBackend
[error]     val stopwatch = new Stopwatch()
[error]                     ^
```

This Stopwatch constructor was deprecated in newer versions of Guava (https://github.com/google/guava/commit/fd0cbc2c5c90e85fb22c8e86ea19630032090943), and it's possible that some classpath issue affecting Unidoc is causing this to trigger compilation failures.

In order to work around this issue, this patch removes this use of Stopwatch, since we don't use it anywhere else in the Spark codebase.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #11376 from JoshRosen/remove-stopwatch.
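For reference, the replacement pattern is a plain `System.nanoTime()` timeout loop rather than a Guava `Stopwatch`. The following is a minimal, self-contained sketch of that pattern; `waitUntil` and the condition used in `main` are illustrative stand-ins, not the actual fields of `CoarseMesosSchedulerBackend`:

```scala
// Sketch of a timeout-bounded wait loop using System.nanoTime(),
// the same idea this patch uses in place of Guava's Stopwatch.
object NanoTimeWaitExample {

  // Polls `condition` every 100 ms until it holds or `timeoutMs` elapses.
  // Returns whether the condition held when the loop exited.
  def waitUntil(condition: () => Boolean, timeoutMs: Long): Boolean = {
    val startTime = System.nanoTime()
    val timeoutNanos = timeoutMs * 1000L * 1000L
    while (!condition() && System.nanoTime() - startTime < timeoutNanos) {
      Thread.sleep(100)
    }
    condition()
  }

  def main(args: Array[String]): Unit = {
    // Example condition that becomes true after roughly 500 ms.
    val deadline = System.currentTimeMillis() + 500
    val satisfied = waitUntil(() => System.currentTimeMillis() >= deadline, 2000)
    println(s"condition satisfied: $satisfied")
  }
}
```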
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
index f803cc7a36..622f361ec2 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
@@ -19,14 +19,12 @@ package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, List => JList}
-import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{Buffer, HashMap, HashSet}
-import com.google.common.base.Stopwatch
import org.apache.mesos.{Scheduler => MScheduler, SchedulerDriver}
import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _}
@@ -493,12 +491,11 @@ private[spark] class CoarseMesosSchedulerBackend(
// Wait for executors to report done, or else mesosDriver.stop() will forcefully kill them.
// See SPARK-12330
- val stopwatch = new Stopwatch()
- stopwatch.start()
+ val startTime = System.nanoTime()
// slaveIdsWithExecutors has no memory barrier, so this is eventually consistent
while (numExecutors() > 0 &&
- stopwatch.elapsed(TimeUnit.MILLISECONDS) < shutdownTimeoutMS) {
+ System.nanoTime() - startTime < shutdownTimeoutMS * 1000L * 1000L) {
Thread.sleep(100)
}
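As a side note, the manual `* 1000L * 1000L` conversion in the new condition could equivalently be written with `TimeUnit.MILLISECONDS.toNanos`, at the cost of keeping the `java.util.concurrent.TimeUnit` import that this patch removes. A small illustrative snippet (the timeout value here is hypothetical; in the backend it comes from configuration):

```scala
import java.util.concurrent.TimeUnit

object TimeUnitConversionExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical timeout in milliseconds.
    val shutdownTimeoutMS = 10000L
    // Equivalent to shutdownTimeoutMS * 1000L * 1000L.
    val timeoutNanos = TimeUnit.MILLISECONDS.toNanos(shutdownTimeoutMS)
    println(s"$shutdownTimeoutMS ms == $timeoutNanos ns")
  }
}
```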