 core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
index 2ba63da881..398ac3d620 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala
@@ -391,17 +391,17 @@ private[spark] abstract class MockBackend(
* scheduling.
*/
override def reviveOffers(): Unit = {
- val newTaskDescriptions = taskScheduler.resourceOffers(generateOffers()).flatten
- // get the task now, since that requires a lock on TaskSchedulerImpl, to prevent individual
- // tests from introducing a race if they need it
- val newTasks = taskScheduler.synchronized {
- newTaskDescriptions.map { taskDescription =>
+ // Need a lock on the entire scheduler to protect freeCores -- otherwise, multiple threads
+ // may make offers at the same time, though they are using the same set of freeCores.
+ taskScheduler.synchronized {
+ val newTaskDescriptions = taskScheduler.resourceOffers(generateOffers()).flatten
+ // get the task now, since that requires a lock on TaskSchedulerImpl, to prevent individual
+ // tests from introducing a race if they need it.
+ val newTasks = newTaskDescriptions.map { taskDescription =>
val taskSet = taskScheduler.taskIdToTaskSetManager(taskDescription.taskId).taskSet
val task = taskSet.tasks(taskDescription.index)
(taskDescription, task)
}
- }
- synchronized {
newTasks.foreach { case (taskDescription, _) =>
executorIdToExecutor(taskDescription.executorId).freeCores -= taskScheduler.CPUS_PER_TASK
}