author    Reynold Xin <rxin@cs.berkeley.edu>  2012-10-02 15:45:25 -0700
committer Reynold Xin <rxin@cs.berkeley.edu>  2012-10-02 15:45:25 -0700
commit    79975856165099c74ee04e0e5443a9832014eaff (patch)
tree      02453ab141bd11aad11581f48a503ea10cf726af /core/src/main
parent    0898a21b95f5b3b190aa4d38a3a73c5a0c21eef3 (diff)
Added a check to make sure SPARK_MEM <= memoryPerSlave for local cluster mode.
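The local-cluster master URL packs the slave count, cores per slave, and memory per slave (in MB) into a single string, which LOCAL_CLUSTER_REGEX pulls apart. A minimal sketch of that matching, assuming a regex of this shape (the actual pattern in SparkContext may differ):

// Sketch only: the real LOCAL_CLUSTER_REGEX in spark.SparkContext may differ.
val LocalClusterRegex = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*\]""".r

"local-cluster[2,1,512]" match {
  case LocalClusterRegex(numSlaves, coresPerSlave, memoryPerSlave) =>
    // All three captures arrive as strings; SparkContext converts them with .toInt.
    println("slaves=" + numSlaves + ", cores=" + coresPerSlave + ", memoryMb=" + memoryPerSlave)
  case _ =>
    println("not a local-cluster master URL")
}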
Diffstat (limited to 'core/src/main')
-rw-r--r--  core/src/main/scala/spark/SparkContext.scala  |  15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index 0a0a5d772b..1ef1712c56 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -113,9 +113,20 @@ class SparkContext(
scheduler.initialize(backend)
scheduler
- case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerlave) =>
+ case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
+ // Check to make sure SPARK_MEM <= memoryPerSlave. Otherwise Spark will just hang.
+ val memoryPerSlaveInt = memoryPerSlave.toInt
+ val sparkMemEnv = System.getenv("SPARK_MEM")
+ val sparkMemEnvInt = if (sparkMemEnv != null) Utils.memoryStringToMb(sparkMemEnv) else 512
+ if (sparkMemEnvInt > memoryPerSlaveInt) {
+ throw new SparkException(
+ "Slave memory (%d MB) cannot be smaller than SPARK_MEM (%d MB)".format(
+ memoryPerSlaveInt, sparkMemEnvInt))
+ }
+
val scheduler = new ClusterScheduler(this)
- val localCluster = new LocalSparkCluster(numSlaves.toInt, coresPerSlave.toInt, memoryPerlave.toInt)
+ val localCluster = new LocalSparkCluster(
+ numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt)
val sparkUrl = localCluster.start()
val backend = new SparkDeploySchedulerBackend(scheduler, this, sparkUrl, frameworkName)
scheduler.initialize(backend)
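For illustration, the guard above can be exercised in isolation. The memoryStringToMb helper below is a hypothetical stand-in for Utils.memoryStringToMb (which parses -Xmx-style values such as "512m" or "1g"); the real implementation in spark.Utils may differ.

// Hypothetical stand-in for spark.Utils.memoryStringToMb; the real parser may differ.
def memoryStringToMb(str: String): Int = {
  val lower = str.trim.toLowerCase
  if (lower.endsWith("g")) lower.dropRight(1).toInt * 1024
  else if (lower.endsWith("m")) lower.dropRight(1).toInt
  else if (lower.endsWith("k")) (lower.dropRight(1).toLong / 1024).toInt
  else lower.toInt // assume a bare number is already in MB
}

// Mirrors the guard added in this commit: fail fast with a clear error
// instead of letting the local cluster hang.
def checkSlaveMemory(memoryPerSlave: String): Unit = {
  val memoryPerSlaveInt = memoryPerSlave.toInt
  val sparkMemEnv = System.getenv("SPARK_MEM")
  val sparkMemEnvInt = if (sparkMemEnv != null) memoryStringToMb(sparkMemEnv) else 512
  if (sparkMemEnvInt > memoryPerSlaveInt) {
    throw new RuntimeException( // SparkException in the real code
      "Slave memory (%d MB) cannot be smaller than SPARK_MEM (%d MB)".format(
        memoryPerSlaveInt, sparkMemEnvInt))
  }
}

With SPARK_MEM=1g, checkSlaveMemory("512") throws, while checkSlaveMemory("2048") passes; with SPARK_MEM unset, a default of 512 MB is assumed, matching the check in the diff.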