author    JerryLead <JerryLead@163.com>  2014-12-02 23:53:29 -0800
committer Ankur Dave <ankurdave@gmail.com>  2014-12-02 23:53:29 -0800
commit    77be8b986fd21b7bbe28aa8db1042cb22bc74fe7 (patch)
tree      7532cdc65dc7f5fb01474a7bc122574251b5b496
parent    17c162f6682520e6e2790626e37da3a074471793 (diff)
[SPARK-4672][Core] Checkpoint() should clear f to shorten the serialization chain
The related JIRA is https://issues.apache.org/jira/browse/SPARK-4672

The `f` closure of `PartitionsRDD(ZippedPartitionsRDD2)` contains a `$outer` reference to the EdgeRDD/VertexRDD, which causes the task's serialization chain to become very long in iterative GraphX applications. As a result, a StackOverflowError occurs. If we set `f = null` in `clearDependencies()`, `checkpoint()` can cut off the long serialization chain. More details and explanation can be found in the JIRA.

Author: JerryLead <JerryLead@163.com>
Author: Lijie Xu <csxulijie@gmail.com>

Closes #3545 from JerryLead/my_core and squashes the following commits:

f7faea5 [JerryLead] checkpoint() should clear the f to avoid StackOverflow error
c0169da [JerryLead] Merge branch 'master' of https://github.com/apache/spark
52799e3 [Lijie Xu] Merge pull request #1 from apache/master
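To make the failure mode concrete, the sketch below (not part of this commit; the object name, checkpoint directory, and iteration counts are illustrative assumptions) shows the shape of an iterative job that stacks one `ZippedPartitionsRDD2` per iteration. Each `zipPartitions` call stores its closure in `f`; if that closure drags an `$outer` along, as GraphX's does, the serialized task graph keeps growing unless `checkpoint()` can truly sever it, which is what clearing `f` in `clearDependencies()` enables.

import org.apache.spark.{SparkConf, SparkContext}

// Hypothetical driver program; names and paths are assumptions, not from the patch.
object LineageGrowthSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("lineage-growth-sketch").setMaster("local[2]"))
    sc.setCheckpointDir("/tmp/spark-checkpoints") // assumed scratch directory

    val edges = sc.parallelize(1 to 1000, 4)
    var ranks = sc.parallelize(1 to 1000, 4)

    for (i <- 1 to 100) {
      // Each call builds a ZippedPartitionsRDD2 holding this closure in `f`.
      ranks = ranks.zipPartitions(edges) { (a, b) =>
        a.zip(b).map { case (r, e) => r + e }
      }
      if (i % 25 == 0) {
        ranks.checkpoint() // marks the RDD for checkpointing
        ranks.count()      // materializes it; clearDependencies() then runs,
                           // and with this patch `f` is dropped as well
      }
    }
    sc.stop()
  }
}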
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala
index 996f2cd3f3..95b2dd954e 100644
--- a/core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala
@@ -77,7 +77,7 @@ private[spark] abstract class ZippedPartitionsBaseRDD[V: ClassTag](
private[spark] class ZippedPartitionsRDD2[A: ClassTag, B: ClassTag, V: ClassTag](
sc: SparkContext,
- f: (Iterator[A], Iterator[B]) => Iterator[V],
+ var f: (Iterator[A], Iterator[B]) => Iterator[V],
var rdd1: RDD[A],
var rdd2: RDD[B],
preservesPartitioning: Boolean = false)
@@ -92,13 +92,14 @@ private[spark] class ZippedPartitionsRDD2[A: ClassTag, B: ClassTag, V: ClassTag]
super.clearDependencies()
rdd1 = null
rdd2 = null
+ f = null
}
}
private[spark] class ZippedPartitionsRDD3
[A: ClassTag, B: ClassTag, C: ClassTag, V: ClassTag](
sc: SparkContext,
- f: (Iterator[A], Iterator[B], Iterator[C]) => Iterator[V],
+ var f: (Iterator[A], Iterator[B], Iterator[C]) => Iterator[V],
var rdd1: RDD[A],
var rdd2: RDD[B],
var rdd3: RDD[C],
@@ -117,13 +118,14 @@ private[spark] class ZippedPartitionsRDD3
rdd1 = null
rdd2 = null
rdd3 = null
+ f = null
}
}
private[spark] class ZippedPartitionsRDD4
[A: ClassTag, B: ClassTag, C: ClassTag, D:ClassTag, V: ClassTag](
sc: SparkContext,
- f: (Iterator[A], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V],
+ var f: (Iterator[A], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V],
var rdd1: RDD[A],
var rdd2: RDD[B],
var rdd3: RDD[C],
@@ -145,5 +147,6 @@ private[spark] class ZippedPartitionsRDD4
rdd2 = null
rdd3 = null
rdd4 = null
+ f = null
}
}
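For completeness: when a checkpoint is materialized, the RDD internals of this era invoke `clearDependencies()` (via `markCheckpointed()`), so everything nulled there, the parent references and now the closure, becomes garbage-collectible and drops out of the serialized lineage. The same pattern generalizes to any RDD that stores a closure in a field; below is a minimal sketch (a hypothetical class, not part of this commit, assumed to live in org.apache.spark.rdd so it can reach the protected RDD API):

package org.apache.spark.rdd

import scala.reflect.ClassTag
import org.apache.spark.{Partition, TaskContext}

// Hypothetical example: the same clear-on-checkpoint pattern the diff
// applies to ZippedPartitionsRDD2/3/4.
private[spark] class MappedWithClosureRDD[T: ClassTag, U: ClassTag](
    var prev: RDD[T],
    var f: Iterator[T] => Iterator[U]) // `var`, so it can be nulled out
  extends RDD[U](prev) {

  override def getPartitions: Array[Partition] = firstParent[T].partitions

  // firstParent resolves through dependencies, so once the RDD is
  // checkpointed, reads go to the CheckpointRDD and compute() is bypassed;
  // f is never called after it has been cleared.
  override def compute(split: Partition, context: TaskContext): Iterator[U] =
    f(firstParent[T].iterator(split, context))

  override def clearDependencies() {
    super.clearDependencies()
    prev = null // drop the parent reference
    f = null    // drop the closure, and with it any captured $outer
  }
}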