author    Matei Zaharia <matei@eecs.berkeley.edu>  2011-05-26 23:53:10 -0700
committer Matei Zaharia <matei@eecs.berkeley.edu>  2011-05-26 23:53:10 -0700
commit    cfbe2da1a625b4d1132646580ce063ce4f7637c5 (patch)
tree      a29608e8a4aea8f04a4945bcc8bb6bc785ebe6cd
parent    f7078565223d930851f88f84b2f9955767fe0256 (diff)
parent    89c8ea2bb229a7599365b376b0d310fdc5daccea (diff)
Merge pull request #52 from ijuma/master
Fix deprecations when compiled with Scala 2.8.1
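All of the changes below replace APIs that the Scala 2.8 series deprecated with their supported equivalents, so the same few before/after pairs recur across the files. A minimal sketch of those pairs, with made-up values (arr, xs) rather than anything from the diff:

    // Compiles warning-free on Scala 2.8.1; each comment names the deprecated form replaced.
    object DeprecationSketch {
      val arr = Array(1, 2, 3)
      val xs  = Seq("a", "b")

      // Math.min / Math.exp (the capital-M Math object) -> the scala.math package object
      val smallest = math.min(10, arr.length)

      // Iterator.fromArray(arr) -> arr.iterator
      val it: Iterator[Int] = arr.iterator

      // Array.fromFunction(f)(n) -> Array.tabulate(n)(f), or Array.fill(n)(expr) when the index is unused
      val squares = Array.tabulate(3)(i => i * i)

      // xs.first -> xs.head
      val first = xs.head
    }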
 core/src/main/scala/spark/BitTorrentBroadcast.scala      | 12
 core/src/main/scala/spark/CacheTracker.scala             |  8
 core/src/main/scala/spark/ChainedBroadcast.scala         |  2
 core/src/main/scala/spark/CoGroupedRDD.scala             |  4
 core/src/main/scala/spark/DAGScheduler.scala             |  2
 core/src/main/scala/spark/RDD.scala                      |  2
 core/src/main/scala/spark/Stage.scala                    |  2
 core/src/main/scala/spark/repl/SparkCompletion.scala     |  2
 examples/src/main/scala/spark/examples/LocalALS.scala    |  4
 examples/src/main/scala/spark/examples/LocalFileLR.scala |  2
 examples/src/main/scala/spark/examples/LocalLR.scala     |  4
 examples/src/main/scala/spark/examples/SparkALS.scala    |  4
 examples/src/main/scala/spark/examples/SparkLR.scala     |  2
 project/plugins/SparkProjectPlugins.scala                |  2
 14 files changed, 26 insertions, 26 deletions
diff --git a/core/src/main/scala/spark/BitTorrentBroadcast.scala b/core/src/main/scala/spark/BitTorrentBroadcast.scala
index dba9d39abf..126e61dc7d 100644
--- a/core/src/main/scala/spark/BitTorrentBroadcast.scala
+++ b/core/src/main/scala/spark/BitTorrentBroadcast.scala
@@ -113,7 +113,7 @@ extends Broadcast[T] with Logging {
}
// In the beginning, this is the only known source to Guide
- listOfSources = listOfSources + masterSource
+ listOfSources += masterSource
// Register with the Tracker
BitTorrentBroadcast.registerValue (uuid,
@@ -203,7 +203,7 @@ extends Broadcast[T] with Logging {
var blockID = 0
for (i <- 0 until (byteArray.length, blockSize)) {
- val thisBlockSize = Math.min (blockSize, byteArray.length - i)
+ val thisBlockSize = math.min (blockSize, byteArray.length - i)
var tempByteArray = new Array[Byte] (thisBlockSize)
val hasRead = bais.read (tempByteArray, 0, thisBlockSize)
@@ -268,7 +268,7 @@ extends Broadcast[T] with Logging {
if (listOfSources.contains(newSourceInfo)) {
listOfSources = listOfSources - newSourceInfo
}
- listOfSources = listOfSources + newSourceInfo
+ listOfSources += newSourceInfo
}
}
@@ -435,7 +435,7 @@ extends Broadcast[T] with Logging {
while (hasBlocks < totalBlocks) {
var numThreadsToCreate =
- Math.min (listOfSources.size, BitTorrentBroadcast.MaxTxPeers) -
+ math.min (listOfSources.size, BitTorrentBroadcast.MaxTxPeers) -
threadPool.getActiveCount
while (hasBlocks < totalBlocks && numThreadsToCreate > 0) {
@@ -446,7 +446,7 @@ extends Broadcast[T] with Logging {
// Add to peersNowTalking. Remove in the thread. We have to do this
// ASAP, otherwise pickPeerToTalkTo picks the same peer more than once
peersNowTalking.synchronized {
- peersNowTalking = peersNowTalking + peerToTalkTo
+ peersNowTalking += peerToTalkTo
}
}
@@ -878,7 +878,7 @@ extends Broadcast[T] with Logging {
i = i - 1
}
- selectedSources = selectedSources + curPeer
+ selectedSources += curPeer
alreadyPicked.set (i)
picksLeft = picksLeft - 1
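The declarations of listOfSources and peersNowTalking sit outside this diff, but the `+=` spelling the patch adopts reads the same under either plausible type. A small sketch, assuming nothing about the Spark classes themselves:

    import scala.collection.mutable.ListBuffer

    object PlusEqualsSketch {
      // On a mutable collection, += appends in place; the non-mutating `+`
      // it replaces was deprecated for mutable collections in Scala 2.8.
      val buf = ListBuffer(1, 2)
      buf += 3                  // ListBuffer(1, 2, 3)

      // On a var holding an immutable collection, x += e desugars to
      // x = x + e, so the shorter form is equivalent there as well.
      var set = Set(1, 2)
      set += 3                  // Set(1, 2, 3)
    }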
diff --git a/core/src/main/scala/spark/CacheTracker.scala b/core/src/main/scala/spark/CacheTracker.scala
index 7040d4e147..6826c7897c 100644
--- a/core/src/main/scala/spark/CacheTracker.scala
+++ b/core/src/main/scala/spark/CacheTracker.scala
@@ -38,7 +38,7 @@ class CacheTrackerActor extends DaemonActor with Logging {
case DroppedFromCache(rddId, partition, host) =>
logInfo("Cache entry removed: (%s, %s) on %s".format(rddId, partition, host))
- locs(rddId)(partition) -= host
+ locs(rddId)(partition) = locs(rddId)(partition).filterNot(_ == host)
case MemoryCacheLost(host) =>
logInfo("Memory cache lost on " + host)
@@ -111,7 +111,7 @@ class CacheTracker(isMaster: Boolean, theCache: Cache) extends Logging {
if (cachedVal != null) {
// Split is in cache, so just return its values
logInfo("Found partition in cache!")
- return Iterator.fromArray(cachedVal.asInstanceOf[Array[T]])
+ return cachedVal.asInstanceOf[Array[T]].iterator
} else {
// Mark the split as loading (unless someone else marks it first)
loading.synchronized {
@@ -119,7 +119,7 @@ class CacheTracker(isMaster: Boolean, theCache: Cache) extends Logging {
while (loading.contains(key)) {
try {loading.wait()} catch {case _ =>}
}
- return Iterator.fromArray(cache.get(key).asInstanceOf[Array[T]])
+ return cache.get(key).asInstanceOf[Array[T]].iterator
} else {
loading.add(key)
}
@@ -138,7 +138,7 @@ class CacheTracker(isMaster: Boolean, theCache: Cache) extends Logging {
loading.notifyAll()
}
future.apply() // Wait for the reply from the cache tracker
- return Iterator.fromArray(array)
+ return array.iterator
}
}
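Iterator.fromArray was deprecated in 2.8 in favor of the iterator method available directly on arrays, which is what all three CacheTracker call sites switch to. A one-line sketch with a hypothetical array:

    object IteratorSketch {
      val cached = Array(1, 2, 3)
      val it: Iterator[Int] = cached.iterator   // old form: Iterator.fromArray(cached)
      it.foreach(println)                       // prints 1, 2, 3
    }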
diff --git a/core/src/main/scala/spark/ChainedBroadcast.scala b/core/src/main/scala/spark/ChainedBroadcast.scala
index 8021f5da06..6f2cc3f6f0 100644
--- a/core/src/main/scala/spark/ChainedBroadcast.scala
+++ b/core/src/main/scala/spark/ChainedBroadcast.scala
@@ -166,7 +166,7 @@ extends Broadcast[T] with Logging {
var blockID = 0
for (i <- 0 until (byteArray.length, blockSize)) {
- val thisBlockSize = Math.min (blockSize, byteArray.length - i)
+ val thisBlockSize = math.min (blockSize, byteArray.length - i)
var tempByteArray = new Array[Byte] (thisBlockSize)
val hasRead = bais.read (tempByteArray, 0, thisBlockSize)
diff --git a/core/src/main/scala/spark/CoGroupedRDD.scala b/core/src/main/scala/spark/CoGroupedRDD.scala
index dbd5d45157..ea9e2d38a9 100644
--- a/core/src/main/scala/spark/CoGroupedRDD.scala
+++ b/core/src/main/scala/spark/CoGroupedRDD.scala
@@ -26,7 +26,7 @@ class CoGroupAggregator extends Aggregator[Any, Any, ArrayBuffer[Any]] (
)
class CoGroupedRDD[K](rdds: Seq[RDD[(_, _)]], part: Partitioner)
-extends RDD[(K, Seq[Seq[_]])](rdds.first.context) with Logging {
+extends RDD[(K, Seq[Seq[_]])](rdds.head.context) with Logging {
val aggr = new CoGroupAggregator
override val dependencies = {
@@ -45,7 +45,7 @@ extends RDD[(K, Seq[Seq[_]])](rdds.first.context) with Logging {
}
@transient val splits_ : Array[Split] = {
- val firstRdd = rdds.first
+ val firstRdd = rdds.head
val array = new Array[Split](part.numPartitions)
for (i <- 0 until array.size) {
array(i) = new CoGroupSplit(i, rdds.zipWithIndex.map { case (r, j) =>
diff --git a/core/src/main/scala/spark/DAGScheduler.scala b/core/src/main/scala/spark/DAGScheduler.scala
index d49047d74a..a970fb6526 100644
--- a/core/src/main/scala/spark/DAGScheduler.scala
+++ b/core/src/main/scala/spark/DAGScheduler.scala
@@ -237,7 +237,7 @@ private trait DAGScheduler extends Scheduler with Logging {
if (stage.shuffleDep != None) {
mapOutputTracker.registerMapOutputs(
stage.shuffleDep.get.shuffleId,
- stage.outputLocs.map(_.first).toArray)
+ stage.outputLocs.map(_.head).toArray)
}
updateCacheLocs()
val newlyRunnable = new ArrayBuffer[Stage]
diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala
index 6334896cb6..590106388a 100644
--- a/core/src/main/scala/spark/RDD.scala
+++ b/core/src/main/scala/spark/RDD.scala
@@ -178,7 +178,7 @@ class SplitRDD[T: ClassManifest](prev: RDD[T])
extends RDD[Array[T]](prev.context) {
override def splits = prev.splits
override val dependencies = List(new OneToOneDependency(prev))
- override def compute(split: Split) = Iterator.fromArray(Array(prev.iterator(split).toArray))
+ override def compute(split: Split) = Array(prev.iterator(split).toArray).iterator
}
diff --git a/core/src/main/scala/spark/Stage.scala b/core/src/main/scala/spark/Stage.scala
index 82b70ce60d..401b33bd16 100644
--- a/core/src/main/scala/spark/Stage.scala
+++ b/core/src/main/scala/spark/Stage.scala
@@ -22,7 +22,7 @@ class Stage(val id: Int, val rdd: RDD[_], val shuffleDep: Option[ShuffleDependen
def removeOutputLoc(partition: Int, host: String) {
val prevList = outputLocs(partition)
- val newList = prevList - host
+ val newList = prevList.filterNot(_ == host)
outputLocs(partition) = newList
if (prevList != Nil && newList == Nil)
numAvailableOutputs -= 1
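This removeOutputLoc change and the DroppedFromCache one in CacheTracker above both replace List's deprecated removal operator `-` with an explicit filterNot, which keeps exactly the elements failing the predicate. A minimal sketch with a hypothetical host list:

    object FilterNotSketch {
      var hosts: List[String] = List("host1", "host2", "host1")
      hosts = hosts.filterNot(_ == "host1")   // List("host2"); old form: hosts - "host1"
    }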
diff --git a/core/src/main/scala/spark/repl/SparkCompletion.scala b/core/src/main/scala/spark/repl/SparkCompletion.scala
index 9fa41736f3..c6ed1860f0 100644
--- a/core/src/main/scala/spark/repl/SparkCompletion.scala
+++ b/core/src/main/scala/spark/repl/SparkCompletion.scala
@@ -107,7 +107,7 @@ class SparkCompletion(val repl: SparkInterpreter) extends SparkCompletionOutput
class TypeMemberCompletion(val tp: Type) extends CompletionAware with CompilerCompletion {
def excludeEndsWith: List[String] = Nil
def excludeStartsWith: List[String] = List("<") // <byname>, <repeated>, etc.
- def excludeNames: List[String] = anyref.methodNames -- anyRefMethodsToShow ++ List("_root_")
+ def excludeNames: List[String] = anyref.methodNames.filterNot(anyRefMethodsToShow.contains) ++ List("_root_")
def methodSignatureString(sym: Symbol) = {
def asString = new MethodSymbolOutput(sym).methodString()
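The list-difference operator `--` was deprecated along with `-`; filtering through a contains check gives the same subtraction. A sketch with made-up method-name lists standing in for anyref.methodNames and anyRefMethodsToShow:

    object ListDiffSketch {
      val methodNames = List("toString", "hashCode", "equals")
      val toShow      = List("toString")
      // Old form: methodNames -- toShow ++ List("_root_")
      val excluded = methodNames.filterNot(toShow.contains) ++ List("_root_")
      // excluded == List("hashCode", "equals", "_root_")
    }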
diff --git a/examples/src/main/scala/spark/examples/LocalALS.scala b/examples/src/main/scala/spark/examples/LocalALS.scala
index 10360dab3d..10e03359c9 100644
--- a/examples/src/main/scala/spark/examples/LocalALS.scala
+++ b/examples/src/main/scala/spark/examples/LocalALS.scala
@@ -106,8 +106,8 @@ object LocalALS {
val R = generateR()
// Initialize m and u randomly
- var ms = Array.fromFunction(_ => factory1D.random(F))(M)
- var us = Array.fromFunction(_ => factory1D.random(F))(U)
+ var ms = Array.fill(M)(factory1D.random(F))
+ var us = Array.fill(U)(factory1D.random(F))
// Iteratively update movies then users
for (iter <- 1 to ITERATIONS) {
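The Array.fromFunction call sites in the ALS examples become Array.fill, while the LR examples below use Array.tabulate; the difference is whether the initializer needs its index. A sketch of the distinction, with illustrative values:

    import scala.util.Random

    object FillVsTabulate {
      val rand = new Random(42)
      // fill: the expression is re-evaluated once per slot, no index involved
      val factors = Array.fill(5)(rand.nextGaussian)
      // tabulate: each slot receives its index
      val doubled = Array.tabulate(5)(i => 2 * i)   // Array(0, 2, 4, 6, 8)
    }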
diff --git a/examples/src/main/scala/spark/examples/LocalFileLR.scala b/examples/src/main/scala/spark/examples/LocalFileLR.scala
index cc14aa7090..2e94ccbec2 100644
--- a/examples/src/main/scala/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/spark/examples/LocalFileLR.scala
@@ -27,7 +27,7 @@ object LocalFileLR {
println("On iteration " + i)
var gradient = Vector.zeros(D)
for (p <- points) {
- val scale = (1 / (1 + Math.exp(-p.y * (w dot p.x))) - 1) * p.y
+ val scale = (1 / (1 + math.exp(-p.y * (w dot p.x))) - 1) * p.y
gradient += scale * p.x
}
w -= gradient
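Only the spelling of the exp call changes in these LR hunks; the arithmetic is untouched. For the record, the scale factor is the per-point gradient coefficient of the logistic loss for a label y in {-1, +1}:

    \nabla_w \log\left(1 + e^{-y\,(w \cdot x)}\right)
        = \left(\frac{1}{1 + e^{-y\,(w \cdot x)}} - 1\right) y\,x

which is exactly scale * p.x with scale = (1 / (1 + math.exp(-p.y * (w dot p.x))) - 1) * p.y.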
diff --git a/examples/src/main/scala/spark/examples/LocalLR.scala b/examples/src/main/scala/spark/examples/LocalLR.scala
index 3fd3f88fa8..72c5009109 100644
--- a/examples/src/main/scala/spark/examples/LocalLR.scala
+++ b/examples/src/main/scala/spark/examples/LocalLR.scala
@@ -18,7 +18,7 @@ object LocalLR {
val x = Vector(D, _ => rand.nextGaussian + y * R)
DataPoint(x, y)
}
- Array.fromFunction(generatePoint _)(N)
+ Array.tabulate(N)(generatePoint)
}
def main(args: Array[String]) {
@@ -32,7 +32,7 @@ object LocalLR {
println("On iteration " + i)
var gradient = Vector.zeros(D)
for (p <- data) {
- val scale = (1 / (1 + Math.exp(-p.y * (w dot p.x))) - 1) * p.y
+ val scale = (1 / (1 + math.exp(-p.y * (w dot p.x))) - 1) * p.y
gradient += scale * p.x
}
w -= gradient
diff --git a/examples/src/main/scala/spark/examples/SparkALS.scala b/examples/src/main/scala/spark/examples/SparkALS.scala
index 08e0420371..8ee1418067 100644
--- a/examples/src/main/scala/spark/examples/SparkALS.scala
+++ b/examples/src/main/scala/spark/examples/SparkALS.scala
@@ -117,8 +117,8 @@ object SparkALS {
val R = generateR()
// Initialize m and u randomly
- var ms = Array.fromFunction(_ => factory1D.random(F))(M)
- var us = Array.fromFunction(_ => factory1D.random(F))(U)
+ var ms = Array.fill(M)(factory1D.random(F))
+ var us = Array.fill(U)(factory1D.random(F))
// Iteratively update movies then users
val Rc = spark.broadcast(R)
diff --git a/examples/src/main/scala/spark/examples/SparkLR.scala b/examples/src/main/scala/spark/examples/SparkLR.scala
index d08f5d3f01..faa8471824 100644
--- a/examples/src/main/scala/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/spark/examples/SparkLR.scala
@@ -20,7 +20,7 @@ object SparkLR {
val x = Vector(D, _ => rand.nextGaussian + y * R)
DataPoint(x, y)
}
- Array.fromFunction(generatePoint _)(N)
+ Array.tabulate(N)(generatePoint)
}
def main(args: Array[String]) {
diff --git a/project/plugins/SparkProjectPlugins.scala b/project/plugins/SparkProjectPlugins.scala
index b07dfafcfb..565f160829 100644
--- a/project/plugins/SparkProjectPlugins.scala
+++ b/project/plugins/SparkProjectPlugins.scala
@@ -4,7 +4,7 @@ class SparkProjectPlugins(info: ProjectInfo) extends PluginDefinition(info) {
val eclipse = "de.element34" % "sbt-eclipsify" % "0.7.0"
val sbtIdeaRepo = "sbt-idea-repo" at "http://mpeltonen.github.com/maven/"
- val sbtIdea = "com.github.mpeltonen" % "sbt-idea-plugin" % "0.2.0"
+ val sbtIdea = "com.github.mpeltonen" % "sbt-idea-plugin" % "0.4.0"
val codaRepo = "Coda Hale's Repository" at "http://repo.codahale.com/"
val assemblySBT = "com.codahale" % "assembly-sbt" % "0.1.1"