about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
author    Ismael Juma <ismael@juma.me.uk>  2011-05-26 22:36:43 +0100
committer Ismael Juma <ismael@juma.me.uk>  2011-05-27 07:57:10 +0100
commit   164ef4c75157c328a680f5e9ef9bcbd05f2b93df (patch)
tree     d19bdb6932a16fe29b302a31b1b5657c41fed55d /core
parent   cfbe2da1a625b4d1132646580ce063ce4f7637c5 (diff)
download spark-164ef4c75157c328a680f5e9ef9bcbd05f2b93df.tar.gz
spark-164ef4c75157c328a680f5e9ef9bcbd05f2b93df.tar.bz2
spark-164ef4c75157c328a680f5e9ef9bcbd05f2b93df.zip
Use explicit asInstanceOf instead of misleading unchecked pattern matching.
Also enable -unchecked warnings in SBT build file.
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/spark/CacheTracker.scala | 2
-rw-r--r--  core/src/main/scala/spark/CoGroupedRDD.scala | 4
2 files changed, 3 insertions, 3 deletions
diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala
index 6826c7897c..223c5dc5f7 100644
--- a/core/src/main/scala/spark/CacheTracker.scala
+++ b/core/src/main/scala/spark/CacheTracker.scala
@@ -96,7 +96,7 @@ class CacheTracker(isMaster: Boolean, theCache: Cache) extends Logging {
// Get a snapshot of the currently known locations
def getLocationsSnapshot(): HashMap[Int, Array[List[String]]] = {
(trackerActor !? GetCacheLocations) match {
- case h: HashMap[Int, Array[List[String]]] => h
+ case h: HashMap[_, _] => h.asInstanceOf[HashMap[Int, Array[List[String]]]]
case _ => throw new SparkException(
"Internal error: CacheTrackerActor did not reply with a HashMap")
}
diff --git a/core/src/main/scala/spark/CoGroupedRDD.scala b/core/src/main/scala/spark/CoGroupedRDD.scala
index ea9e2d38a9..101c8c99d8 100644
--- a/core/src/main/scala/spark/CoGroupedRDD.scala
+++ b/core/src/main/scala/spark/CoGroupedRDD.scala
@@ -75,8 +75,8 @@ extends RDD[(K, Seq[Seq[_]])](rdds.head.context) with Logging {
for ((dep, depNum) <- split.deps.zipWithIndex) dep match {
case NarrowCoGroupSplitDep(rdd, itsSplit) => {
// Read them from the parent
- for ((k: K, v) <- rdd.iterator(itsSplit)) {
- getSeq(k)(depNum) += v
+ for ((k, v) <- rdd.iterator(itsSplit)) {
+ getSeq(k.asInstanceOf[K])(depNum) += v
}
}
case ShuffleCoGroupSplitDep(shuffleId) => {