author    Matei Zaharia <matei@databricks.com>    2015-07-22 15:28:09 -0700
committer Matei Zaharia <matei@databricks.com>    2015-07-22 15:28:09 -0700
commit    fe26584a1f5b472fb2e87aa7259aec822a619a3b (patch)
tree      d568c3aeda422e91d2b3d1a9335605da55be73fa /repl/scala-2.10
parent    1aca9c13c144fa336af6afcfa666128bf77c49d4 (diff)
[SPARK-9244] Increase some memory defaults
There are a few memory limits that people hit often and that we could make
higher, especially now that memory sizes have grown.

- spark.akka.frameSize: This defaults to 10 (MB) but is often hit for map
  output statuses in large shuffles. This memory is not fully allocated
  up-front, so we can just make it larger without affecting jobs that never
  send a status that large. We increase it to 128.
- spark.executor.memory: Defaults to 512m, which is really small. We
  increase it to 1g.

Author: Matei Zaharia <matei@databricks.com>

Closes #7586 from mateiz/configs and squashes the following commits:

ce0038a [Matei Zaharia] [SPARK-9244] Increase some memory defaults
Diffstat (limited to 'repl/scala-2.10')
-rw-r--r--  repl/scala-2.10/src/test/scala/org/apache/spark/repl/ReplSuite.scala | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/repl/scala-2.10/src/test/scala/org/apache/spark/repl/ReplSuite.scala b/repl/scala-2.10/src/test/scala/org/apache/spark/repl/ReplSuite.scala
index f150fec7db..5674dcd669 100644
--- a/repl/scala-2.10/src/test/scala/org/apache/spark/repl/ReplSuite.scala
+++ b/repl/scala-2.10/src/test/scala/org/apache/spark/repl/ReplSuite.scala
@@ -211,7 +211,7 @@ class ReplSuite extends SparkFunSuite {
   }
 
   test("local-cluster mode") {
-    val output = runInterpreter("local-cluster[1,1,512]",
+    val output = runInterpreter("local-cluster[1,1,1024]",
       """
         |var v = 7
         |def getV() = v
@@ -233,7 +233,7 @@ class ReplSuite extends SparkFunSuite {
   }
 
   test("SPARK-1199 two instances of same class don't type check.") {
-    val output = runInterpreter("local-cluster[1,1,512]",
+    val output = runInterpreter("local-cluster[1,1,1024]",
       """
         |case class Sum(exp: String, exp2: String)
         |val a = Sum("A", "B")
@@ -256,7 +256,7 @@ class ReplSuite extends SparkFunSuite {
 
   test("SPARK-2576 importing SQLContext.implicits._") {
     // We need to use local-cluster to test this case.
-    val output = runInterpreter("local-cluster[1,1,512]",
+    val output = runInterpreter("local-cluster[1,1,1024]",
       """
         |val sqlContext = new org.apache.spark.sql.SQLContext(sc)
         |import sqlContext.implicits._
@@ -325,9 +325,9 @@ class ReplSuite extends SparkFunSuite {
     assertDoesNotContain("Exception", output)
     assertContains("ret: Array[Foo] = Array(Foo(1),", output)
   }
-
+
   test("collecting objects of class defined in repl - shuffling") {
-    val output = runInterpreter("local-cluster[1,1,512]",
+    val output = runInterpreter("local-cluster[1,1,1024]",
       """
         |case class Foo(i: Int)
         |val list = List((1, Foo(1)), (1, Foo(2)))