aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHossein <hossein@databricks.com>2014-07-26 01:04:56 -0700
committerMatei Zaharia <matei@databricks.com>2014-07-26 01:04:56 -0700
commit66f26a4610aede57322cb7e193a50aecb6c57d22 (patch)
treee45cb6dbf2a6970f0b8b341a0384352b2106122d
parentcf3e9fd84dc64f8a57ecbcfdd6b22f5492d41bd7 (diff)
downloadspark-66f26a4610aede57322cb7e193a50aecb6c57d22.tar.gz
spark-66f26a4610aede57322cb7e193a50aecb6c57d22.tar.bz2
spark-66f26a4610aede57322cb7e193a50aecb6c57d22.zip
[SPARK-2696] Reduce default value of spark.serializer.objectStreamReset
The current default value of spark.serializer.objectStreamReset is 10,000. When trying to re-partition (e.g., to 64 partitions) a large file (e.g., 500MB), containing 1MB records, the serializer will cache 10000 x 1MB x 64 ~= 640 GB, which will cause out-of-memory errors. This patch sets the default to a more reasonable value (100). Author: Hossein <hossein@databricks.com> Closes #1595 from falaki/objectStreamReset and squashes the following commits: 650a935 [Hossein] Updated documentation 1aa0df8 [Hossein] Reduce default value of spark.serializer.objectStreamReset
-rw-r--r--core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala2
-rw-r--r--docs/configuration.md4
2 files changed, 3 insertions, 3 deletions
diff --git a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
index 0a7e1ec539..a7fa057ee0 100644
--- a/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
+++ b/core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala
@@ -108,7 +108,7 @@ private[spark] class JavaSerializerInstance(counterReset: Int) extends Serialize
*/
@DeveloperApi
class JavaSerializer(conf: SparkConf) extends Serializer with Externalizable {
- private var counterReset = conf.getInt("spark.serializer.objectStreamReset", 10000)
+ private var counterReset = conf.getInt("spark.serializer.objectStreamReset", 100)
def newInstance(): SerializerInstance = new JavaSerializerInstance(counterReset)
diff --git a/docs/configuration.md b/docs/configuration.md
index dac8bb1d52..4e4b78153a 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -380,13 +380,13 @@ Apart from these, the following properties are also available, and may be useful
</tr>
<tr>
<td><code>spark.serializer.objectStreamReset</code></td>
- <td>10000</td>
+ <td>100</td>
<td>
When serializing using org.apache.spark.serializer.JavaSerializer, the serializer caches
objects to prevent writing redundant data; however, that stops garbage collection of those
objects. By calling 'reset' you flush that info from the serializer, and allow old
objects to be collected. To turn off this periodic reset, set it to a value &lt;= 0.
- By default it will reset the serializer every 10,000 objects.
+ By default it will reset the serializer every 100 objects.
</td>
</tr>
<tr>