author    Charles Reiss <charles@eecs.berkeley.edu>  2013-01-01 07:59:16 -0800
committer Charles Reiss <charles@eecs.berkeley.edu>  2013-01-01 08:07:44 -0800
commit    58072a7340e20251ed810457bc67a79f106bae42 (patch)
tree      53da9f03ed254690037abc1c5874a91fc7e21666 /core
parent    21636ee4faf30126b36ad568753788327e634857 (diff)
download  spark-58072a7340e20251ed810457bc67a79f106bae42.tar.gz
          spark-58072a7340e20251ed810457bc67a79f106bae42.tar.bz2
          spark-58072a7340e20251ed810457bc67a79f106bae42.zip
Remove some dead comments
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/spark/CacheTracker.scala | 6 ------
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala
index c8c4063cad..04c26b2e40 100644
--- a/core/src/main/scala/spark/CacheTracker.scala
+++ b/core/src/main/scala/spark/CacheTracker.scala
@@ -204,17 +204,11 @@ private[spark] class CacheTracker(actorSystem: ActorSystem, isMaster: Boolean, b
         }
         try {
           // If we got here, we have to load the split
-          // Tell the master that we're doing so
-          //val host = System.getProperty("spark.hostname", Utils.localHostName)
-          //val future = trackerActor !! AddedToCache(rdd.id, split.index, host)
-          // TODO: fetch any remote copy of the split that may be available
-          // TODO: also register a listener for when it unloads
           val elements = new ArrayBuffer[Any]
           logInfo("Computing partition " + split)
           elements ++= rdd.compute(split, context)
           // Try to put this block in the blockManager
           blockManager.put(key, elements, storageLevel, true)
-          //future.apply() // Wait for the reply from the cache tracker
           return elements.iterator.asInstanceOf[Iterator[T]]
         } finally {
           loading.synchronized {
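
The context above is CacheTracker's compute-and-cache path: when the block is missing from the BlockManager, the partition is computed, its elements are buffered, the buffer is stored under the block's key, and an iterator over the buffer is returned. Below is a minimal standalone sketch of that pattern, assuming a simple in-memory store; SimpleBlockStore and computeSplit are illustrative stand-ins, not the actual Spark APIs.

import scala.collection.mutable.ArrayBuffer

// Illustrative stand-in for the block manager: a key -> buffered-elements map.
class SimpleBlockStore {
  private val blocks = scala.collection.mutable.Map.empty[String, ArrayBuffer[Any]]

  def get(key: String): Option[Iterator[Any]] = blocks.get(key).map(_.iterator)

  def put(key: String, elements: ArrayBuffer[Any]): Unit = blocks(key) = elements
}

object CacheOrCompute {
  // Return the cached block for `key` if present; otherwise compute the split,
  // buffer and cache its elements, and return an iterator over the buffer.
  def getOrCompute(key: String, store: SimpleBlockStore)(computeSplit: => Iterator[Any]): Iterator[Any] = {
    store.get(key).getOrElse {
      val elements = new ArrayBuffer[Any]
      elements ++= computeSplit
      store.put(key, elements)
      elements.iterator
    }
  }
}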