author     Matei Zaharia <matei@eecs.berkeley.edu>   2010-07-25 23:53:46 -0400
committer  Matei Zaharia <matei@eecs.berkeley.edu>   2010-07-25 23:53:46 -0400
commit     b56ed67553a946b2f4e94dab33382b8de9957a5b (patch)
tree       ce2cdc2deda0d42607b14e634cf688cc5af326a6 /src
parent     4239f76997df82242cbe69192d9525014b7da5f9 (diff)
Updated code to work with Nexus->Mesos name change
Diffstat (limited to 'src')
-rw-r--r--  src/scala/spark/Executor.scala                                                 10
-rw-r--r--  src/scala/spark/HdfsFile.scala                                                  2
-rw-r--r--  src/scala/spark/MesosScheduler.scala (renamed from src/scala/spark/NexusScheduler.scala)  18
-rw-r--r--  src/scala/spark/ParallelArray.scala                                             2
-rw-r--r--  src/scala/spark/RDD.scala                                                       2
-rw-r--r--  src/scala/spark/SparkContext.scala                                              4
-rw-r--r--  src/scala/spark/Task.scala                                                      2
7 files changed, 20 insertions(+), 20 deletions(-)
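
The change is a mechanical rename of the nexus package and Nexus-prefixed classes to their mesos/Mesos-prefixed equivalents. A representative before/after import pair, summarizing the pattern repeated across the files below (taken directly from the MesosScheduler.scala hunk):

    // Before (Nexus naming)
    import nexus.{Scheduler => NScheduler}
    import nexus._

    // After (Mesos naming)
    import mesos.{Scheduler => NScheduler}
    import mesos._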
diff --git a/src/scala/spark/Executor.scala b/src/scala/spark/Executor.scala
index 679a61f3c0..68f4cb8aae 100644
--- a/src/scala/spark/Executor.scala
+++ b/src/scala/spark/Executor.scala
@@ -2,14 +2,14 @@ package spark
import java.util.concurrent.{Executors, ExecutorService}
-import nexus.{ExecutorArgs, ExecutorDriver, NexusExecutorDriver}
-import nexus.{TaskDescription, TaskState, TaskStatus}
+import mesos.{ExecutorArgs, ExecutorDriver, MesosExecutorDriver}
+import mesos.{TaskDescription, TaskState, TaskStatus}
object Executor {
def main(args: Array[String]) {
- System.loadLibrary("nexus")
+ System.loadLibrary("mesos")
- val exec = new nexus.Executor() {
+ val exec = new mesos.Executor() {
var classLoader: ClassLoader = null
var threadPool: ExecutorService = null
@@ -66,6 +66,6 @@ object Executor {
}
}
- new NexusExecutorDriver(exec).run()
+ new MesosExecutorDriver(exec).run()
}
}
diff --git a/src/scala/spark/HdfsFile.scala b/src/scala/spark/HdfsFile.scala
index 6aa0e22338..e6c693c67c 100644
--- a/src/scala/spark/HdfsFile.scala
+++ b/src/scala/spark/HdfsFile.scala
@@ -1,6 +1,6 @@
package spark
-import nexus.SlaveOffer
+import mesos.SlaveOffer
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
diff --git a/src/scala/spark/NexusScheduler.scala b/src/scala/spark/MesosScheduler.scala
index 752df2b15c..081f720bbd 100644
--- a/src/scala/spark/NexusScheduler.scala
+++ b/src/scala/spark/MesosScheduler.scala
@@ -4,10 +4,10 @@ import java.io.File
import scala.collection.mutable.Map
-import nexus.{Scheduler => NScheduler}
-import nexus._
+import mesos.{Scheduler => NScheduler}
+import mesos._
-// The main Scheduler implementation, which talks to Nexus. Clients are expected
+// The main Scheduler implementation, which talks to Mesos. Clients are expected
// to first call start(), then submit tasks through the runTasks method.
//
// This implementation is currently a little quick and dirty. The following
@@ -18,7 +18,7 @@ import nexus._
// 2) Presenting a single slave in ParallelOperation.slaveOffer makes it
// difficult to balance tasks across nodes. It would be better to pass
// all the offers to the ParallelOperation and have it load-balance.
-private class NexusScheduler(
+private class MesosScheduler(
master: String, frameworkName: String, execArg: Array[Byte])
extends NScheduler with spark.Scheduler
{
@@ -41,15 +41,15 @@ extends NScheduler with spark.Scheduler
return id
}
- // Driver for talking to Nexus
+ // Driver for talking to Mesos
var driver: SchedulerDriver = null
override def start() {
new Thread("Spark scheduler") {
setDaemon(true)
override def run {
- val ns = NexusScheduler.this
- ns.driver = new NexusSchedulerDriver(ns, master)
+ val ns = MesosScheduler.this
+ ns.driver = new MesosSchedulerDriver(ns, master)
ns.driver.run()
}
}.start
@@ -142,7 +142,7 @@ extends NScheduler with spark.Scheduler
case e: Exception => e.printStackTrace
}
} else {
- val msg = "Nexus error: %s (error code: %d)".format(message, code)
+ val msg = "Mesos error: %s (error code: %d)".format(message, code)
System.err.println(msg)
System.exit(1)
}
@@ -166,7 +166,7 @@ trait ParallelOperation {
class SimpleParallelOperation[T: ClassManifest](
- sched: NexusScheduler, tasks: Array[Task[T]])
+ sched: MesosScheduler, tasks: Array[Task[T]])
extends ParallelOperation
{
// Maximum time to wait to run a task in a preferred location (in ms)
diff --git a/src/scala/spark/ParallelArray.scala b/src/scala/spark/ParallelArray.scala
index 39ca867cb9..c4df837eff 100644
--- a/src/scala/spark/ParallelArray.scala
+++ b/src/scala/spark/ParallelArray.scala
@@ -1,6 +1,6 @@
package spark
-import nexus.SlaveOffer
+import mesos.SlaveOffer
import java.util.concurrent.atomic.AtomicLong
diff --git a/src/scala/spark/RDD.scala b/src/scala/spark/RDD.scala
index f9a16ed782..073215ebea 100644
--- a/src/scala/spark/RDD.scala
+++ b/src/scala/spark/RDD.scala
@@ -7,7 +7,7 @@ import java.util.HashSet
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Map
-import nexus._
+import mesos._
import com.google.common.collect.MapMaker
diff --git a/src/scala/spark/SparkContext.scala b/src/scala/spark/SparkContext.scala
index 6b5a07cff1..bad4bf5721 100644
--- a/src/scala/spark/SparkContext.scala
+++ b/src/scala/spark/SparkContext.scala
@@ -28,8 +28,8 @@ class SparkContext(master: String, frameworkName: String) {
private var scheduler: Scheduler = master match {
case "local" => new LocalScheduler(1)
case LOCAL_REGEX(threads) => new LocalScheduler(threads.toInt)
- case _ => { System.loadLibrary("nexus");
- new NexusScheduler(master, frameworkName, createExecArg()) }
+ case _ => { System.loadLibrary("mesos");
+ new MesosScheduler(master, frameworkName, createExecArg()) }
}
private val local = scheduler.isInstanceOf[LocalScheduler]
diff --git a/src/scala/spark/Task.scala b/src/scala/spark/Task.scala
index efb864472d..6e94009f6e 100644
--- a/src/scala/spark/Task.scala
+++ b/src/scala/spark/Task.scala
@@ -1,6 +1,6 @@
package spark
-import nexus._
+import mesos._
@serializable
trait Task[T] {
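
A usage sketch only, not part of this commit: it shows how the scheduler selection in the SparkContext.scala hunk above is exercised after the rename. It assumes the 2010-era SparkContext API shown in this diff; the master address handling, slice count, and RDD operations are illustrative placeholders and may not match this exact snapshot.

    // Minimal sketch, assuming the 2010-era API in this diff.
    object MesosRenameExample {
      def main(args: Array[String]) {
        // "local" or "local[N]" selects LocalScheduler; any other master string
        // now loads the "mesos" native library and constructs a MesosScheduler
        // (previously NexusScheduler).
        val master = if (args.length > 0) args(0) else "local[2]"
        val sc = new SparkContext(master, "MesosRenameExample")

        // Hypothetical job body: distribute some numbers and sum them.
        val nums = sc.parallelize(1 to 100, 4)
        val sum = nums.map(_ * 2).reduce(_ + _)
        println("Sum of doubled values: " + sum)
      }
    }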