author     Paul Phillips <paulp@improving.org>  2012-03-14 10:00:30 -0700
committer  Paul Phillips <paulp@improving.org>  2012-03-14 10:08:36 -0700
commit     3ab383ae01a66208df4955bf2117dd2ea8eb2afe (patch)
tree       ac83e7e89310955ccbc317f57f34165f9781117c /src/library/scala
parent     b6dde2b6cf5f66e96fc92c09c1fbe3b4a8dc348c (diff)
parent     5dca64cefeed4bc3289e641949b103e5e806aa32 (diff)
Merge branch 'master' into merge-inline
Conflicts:
	lib/scala-compiler.jar.desired.sha1
	lib/scala-library-src.jar.desired.sha1
	lib/scala-library.jar.desired.sha1
	src/compiler/scala/reflect/internal/Definitions.scala
	src/compiler/scala/reflect/internal/Importers.scala
	src/compiler/scala/reflect/internal/Symbols.scala
	src/compiler/scala/reflect/internal/Trees.scala
	src/compiler/scala/reflect/internal/Types.scala
	src/compiler/scala/tools/nsc/Global.scala
	src/compiler/scala/tools/nsc/transform/Erasure.scala
	src/compiler/scala/tools/nsc/transform/LiftCode.scala
	src/compiler/scala/tools/nsc/transform/UnCurry.scala
	src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
	src/compiler/scala/tools/nsc/typechecker/Typers.scala
	test/files/run/programmatic-main.check
	test/files/speclib/instrumented.jar.desired.sha1
Diffstat (limited to 'src/library/scala')
-rw-r--r--  src/library/scala/AnyValCompanion.scala | 2
-rw-r--r--  src/library/scala/Enumeration.scala | 6
-rw-r--r--  src/library/scala/Function0.scala | 10
-rw-r--r--  src/library/scala/Function1.scala | 10
-rw-r--r--  src/library/scala/Function10.scala | 4
-rw-r--r--  src/library/scala/Function11.scala | 4
-rw-r--r--  src/library/scala/Function12.scala | 4
-rw-r--r--  src/library/scala/Function13.scala | 4
-rw-r--r--  src/library/scala/Function14.scala | 4
-rw-r--r--  src/library/scala/Function15.scala | 4
-rw-r--r--  src/library/scala/Function16.scala | 4
-rw-r--r--  src/library/scala/Function17.scala | 4
-rw-r--r--  src/library/scala/Function18.scala | 4
-rw-r--r--  src/library/scala/Function19.scala | 4
-rw-r--r--  src/library/scala/Function2.scala | 12
-rw-r--r--  src/library/scala/Function20.scala | 4
-rw-r--r--  src/library/scala/Function21.scala | 4
-rw-r--r--  src/library/scala/Function22.scala | 4
-rw-r--r--  src/library/scala/Function3.scala | 4
-rw-r--r--  src/library/scala/Function4.scala | 4
-rw-r--r--  src/library/scala/Function5.scala | 4
-rw-r--r--  src/library/scala/Function6.scala | 4
-rw-r--r--  src/library/scala/Function7.scala | 4
-rw-r--r--  src/library/scala/Function8.scala | 4
-rw-r--r--  src/library/scala/Function9.scala | 4
-rw-r--r--  src/library/scala/PartialFunction.scala | 10
-rw-r--r--  src/library/scala/Predef.scala | 3
-rw-r--r--  src/library/scala/Specializable.scala | 29
-rw-r--r--  src/library/scala/SpecializableCompanion.scala | 1
-rw-r--r--  src/library/scala/StringContext.scala | 29
-rw-r--r--  src/library/scala/Tuple2.scala | 2
-rw-r--r--  src/library/scala/annotation/elidable.scala | 55
-rw-r--r--  src/library/scala/collection/GenTraversableLike.scala | 2
-rw-r--r--  src/library/scala/collection/JavaConversions.scala | 12
-rw-r--r--  src/library/scala/collection/SeqLike.scala | 2
-rw-r--r--  src/library/scala/collection/generic/MutableSortedSetFactory.scala | 6
-rw-r--r--  src/library/scala/collection/immutable/BitSet.scala | 2
-rw-r--r--  src/library/scala/collection/immutable/HashMap.scala | 19
-rw-r--r--  src/library/scala/collection/immutable/IntMap.scala | 8
-rw-r--r--  src/library/scala/collection/immutable/List.scala | 20
-rw-r--r--  src/library/scala/collection/immutable/LongMap.scala | 8
-rw-r--r--  src/library/scala/collection/immutable/Range.scala | 13
-rw-r--r--  src/library/scala/collection/immutable/RedBlack.scala | 7
-rw-r--r--  src/library/scala/collection/immutable/RedBlackTree.scala | 485
-rw-r--r--  src/library/scala/collection/immutable/TreeMap.scala | 101
-rw-r--r--  src/library/scala/collection/immutable/TreeSet.scala | 91
-rw-r--r--  src/library/scala/collection/mutable/AVLTree.scala | 26
-rw-r--r--  src/library/scala/collection/mutable/BasicNode.java | 20
-rw-r--r--  src/library/scala/collection/mutable/CNodeBase.java | 35
-rw-r--r--  src/library/scala/collection/mutable/Ctrie.scala | 1075
-rw-r--r--  src/library/scala/collection/mutable/FlatHashTable.scala | 40
-rw-r--r--  src/library/scala/collection/mutable/Gen.java | 18
-rw-r--r--  src/library/scala/collection/mutable/HashTable.scala | 30
-rw-r--r--  src/library/scala/collection/mutable/INodeBase.java | 35
-rw-r--r--  src/library/scala/collection/mutable/ListBuffer.scala | 16
-rw-r--r--  src/library/scala/collection/mutable/MainNode.java | 40
-rw-r--r--  src/library/scala/collection/mutable/SortedSet.scala | 10
-rw-r--r--  src/library/scala/collection/mutable/TreeSet.scala | 14
-rw-r--r--  src/library/scala/collection/parallel/Combiner.scala | 33
-rw-r--r--  src/library/scala/collection/parallel/ParIterableLike.scala | 469
-rw-r--r--  src/library/scala/collection/parallel/ParIterableViewLike.scala | 3
-rw-r--r--  src/library/scala/collection/parallel/ParMapLike.scala | 2
-rw-r--r--  src/library/scala/collection/parallel/ParSeqLike.scala | 135
-rw-r--r--  src/library/scala/collection/parallel/ParSeqViewLike.scala | 3
-rw-r--r--  src/library/scala/collection/parallel/RemainsIterator.scala | 61
-rw-r--r--  src/library/scala/collection/parallel/TaskSupport.scala | 37
-rw-r--r--  src/library/scala/collection/parallel/Tasks.scala | 244
-rw-r--r--  src/library/scala/collection/parallel/immutable/ParHashMap.scala | 31
-rw-r--r--  src/library/scala/collection/parallel/immutable/ParHashSet.scala | 30
-rw-r--r--  src/library/scala/collection/parallel/immutable/ParRange.scala | 18
-rw-r--r--  src/library/scala/collection/parallel/immutable/ParVector.scala | 7
-rw-r--r--  src/library/scala/collection/parallel/immutable/package.scala | 12
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParArray.scala | 35
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParCtrie.scala | 193
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParHashMap.scala | 27
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParHashSet.scala | 21
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParHashTable.scala | 2
-rw-r--r--  src/library/scala/collection/parallel/mutable/ResizableParArrayCombiner.scala | 12
-rw-r--r--  src/library/scala/collection/parallel/mutable/UnrolledParArrayCombiner.scala | 8
-rw-r--r--  src/library/scala/collection/parallel/package.scala | 31
-rw-r--r--  src/library/scala/concurrent/Awaitable.scala | 24
-rw-r--r--  src/library/scala/concurrent/Channel.scala | 1
-rw-r--r--  src/library/scala/concurrent/ConcurrentPackageObject.scala | 111
-rw-r--r--  src/library/scala/concurrent/DelayedLazyVal.scala | 5
-rw-r--r--  src/library/scala/concurrent/ExecutionContext.scala | 132
-rw-r--r--  src/library/scala/concurrent/Future.scala | 492
-rw-r--r--  src/library/scala/concurrent/FutureTaskRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/JavaConversions.scala | 7
-rw-r--r--  src/library/scala/concurrent/ManagedBlocker.scala | 1
-rw-r--r--  src/library/scala/concurrent/Promise.scala | 132
-rw-r--r--  src/library/scala/concurrent/Scheduler.scala | 54
-rw-r--r--  src/library/scala/concurrent/Task.scala | 13
-rw-r--r--  src/library/scala/concurrent/TaskRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/TaskRunners.scala | 1
-rw-r--r--  src/library/scala/concurrent/ThreadPoolRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/default/SchedulerImpl.scala.disabled | 44
-rw-r--r--  src/library/scala/concurrent/default/TaskImpl.scala.disabled | 313
-rw-r--r--  src/library/scala/concurrent/impl/AbstractPromise.java | 21
-rw-r--r--  src/library/scala/concurrent/impl/ExecutionContextImpl.scala | 139
-rw-r--r--  src/library/scala/concurrent/impl/Future.scala | 89
-rw-r--r--  src/library/scala/concurrent/impl/Promise.scala | 258
-rw-r--r--  src/library/scala/concurrent/ops.scala | 1
-rw-r--r--  src/library/scala/concurrent/package.scala | 57
-rw-r--r--  src/library/scala/concurrent/package.scala.disabled | 108
-rw-r--r--  src/library/scala/package.scala | 6
-rw-r--r--  src/library/scala/reflect/ClassManifest.scala | 4
-rw-r--r--  src/library/scala/reflect/Code.scala | 23
-rw-r--r--  src/library/scala/reflect/Manifest.scala | 25
-rw-r--r--  src/library/scala/reflect/ReflectionUtils.scala | 4
-rw-r--r--  src/library/scala/reflect/api/Mirror.scala | 36
-rw-r--r--  src/library/scala/reflect/api/Modifier.scala | 83
-rwxr-xr-x  src/library/scala/reflect/api/Names.scala | 15
-rwxr-xr-x  src/library/scala/reflect/api/StandardDefinitions.scala | 19
-rw-r--r--  src/library/scala/reflect/api/StandardNames.scala | 21
-rwxr-xr-x  src/library/scala/reflect/api/Symbols.scala | 103
-rw-r--r--  src/library/scala/reflect/api/TreeBuildUtil.scala | 14
-rw-r--r--  src/library/scala/reflect/api/TreePrinters.scala | 24
-rw-r--r--  src/library/scala/reflect/api/Trees.scala | 142
-rwxr-xr-x  src/library/scala/reflect/api/Types.scala | 24
-rwxr-xr-x  src/library/scala/reflect/api/Universe.scala | 3
-rw-r--r--  src/library/scala/reflect/macro/Context.scala | 25
-rw-r--r--  src/library/scala/runtime/AbstractFunction1.scala | 2
-rw-r--r--  src/library/scala/runtime/NonLocalReturnControl.scala | 4
-rw-r--r--  src/library/scala/runtime/ScalaRunTime.scala | 11
-rw-r--r--  src/library/scala/specialized.scala | 13
-rw-r--r--  src/library/scala/sys/process/BasicIO.scala | 128
-rw-r--r--  src/library/scala/sys/process/Process.scala | 67
-rw-r--r--  src/library/scala/sys/process/ProcessBuilder.scala | 306
-rw-r--r--  src/library/scala/sys/process/ProcessIO.scala | 49
-rw-r--r--  src/library/scala/sys/process/ProcessLogger.scala | 26
-rw-r--r--  src/library/scala/sys/process/package.scala | 212
-rw-r--r--  src/library/scala/util/Duration.scala | 485
-rw-r--r--  src/library/scala/util/Properties.scala | 2
-rw-r--r--  src/library/scala/util/Timeout.scala | 33
-rw-r--r--  src/library/scala/util/Try.scala | 165
-rw-r--r--  src/library/scala/util/parsing/combinator/Parsers.scala | 7
136 files changed, 6608 insertions, 1277 deletions
diff --git a/src/library/scala/AnyValCompanion.scala b/src/library/scala/AnyValCompanion.scala
index d6cb498185..47555938a0 100644
--- a/src/library/scala/AnyValCompanion.scala
+++ b/src/library/scala/AnyValCompanion.scala
@@ -18,4 +18,4 @@ package scala
* }}}
*
*/
-private[scala] trait AnyValCompanion extends SpecializableCompanion { }
+private[scala] trait AnyValCompanion extends Specializable { }
diff --git a/src/library/scala/Enumeration.scala b/src/library/scala/Enumeration.scala
index 3d85f2f52f..80571943e5 100644
--- a/src/library/scala/Enumeration.scala
+++ b/src/library/scala/Enumeration.scala
@@ -55,7 +55,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
thisenum =>
def this() = this(0)
-
+
@deprecated("Names should be specified individually or discovered via reflection", "2.10.0")
def this(initial: Int, names: String*) = {
this(initial)
@@ -201,7 +201,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
case _ => false
}
override def hashCode: Int = id.##
-
+
/** Create a ValueSet which contains this value and another one */
def + (v: Value) = ValueSet(this, v)
}
@@ -266,7 +266,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
* new array of longs */
def toBitMask: Array[Long] = nnIds.toBitMask
}
-
+
/** A factory object for value sets */
object ValueSet {
import generic.CanBuildFrom
diff --git a/src/library/scala/Function0.scala b/src/library/scala/Function0.scala
index f68bbcc454..dceed26439 100644
--- a/src/library/scala/Function0.scala
+++ b/src/library/scala/Function0.scala
@@ -6,7 +6,7 @@
**                          |/                                          **
\*                                                                      */
// GENERATED CODE: DO NOT EDIT.
-// genprod generated these sources at: Sun Jul 31 00:37:30 CEST 2011
+// genprod generated these sources at: Tue Feb 14 16:49:03 PST 2012
package scala
@@ -17,7 +17,7 @@ package scala
* shorthand for the anonymous class definition anonfun0:
*
* {{{
- * object Main extends Application {
+ * object Main extends App {
* val javaVersion = () => sys.props("java.version")
*
* val anonfun0 = new Function0[String] {
@@ -26,6 +26,12 @@ package scala
* assert(javaVersion() == anonfun0())
* }
* }}}
+ *
+ * Note that `Function1` does not define a total function, as might
+ * be suggested by the existence of [[scala.PartialFunction]]. The only
+ * distinction between `Function1` and `PartialFunction` is that the
+ * latter can specify inputs which it will not handle.
+
*/
trait Function0[@specialized +R] extends AnyRef { self =>
/** Apply the body of this function to the arguments.
diff --git a/src/library/scala/Function1.scala b/src/library/scala/Function1.scala
index 7517e6604b..8995ef912b 100644
--- a/src/library/scala/Function1.scala
+++ b/src/library/scala/Function1.scala
@@ -16,7 +16,7 @@ package scala
* shorthand for the anonymous class definition anonfun1:
*
* {{{
- * object Main extends Application {
+ * object Main extends App {
* val succ = (x: Int) => x + 1
* val anonfun1 = new Function1[Int, Int] {
* def apply(x: Int): Int = x + 1
@@ -29,13 +29,11 @@ package scala
* be suggested by the existence of [[scala.PartialFunction]]. The only
* distinction between `Function1` and `PartialFunction` is that the
* latter can specify inputs which it will not handle.
- *
+
*/
@annotation.implicitNotFound(msg = "No implicit view available from ${T1} => ${R}.")
-trait Function1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double) -T1, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double) +R] extends AnyRef { self =>
- /** Apply the body of this function to the argument. It may throw an
- * exception.
- *
+trait Function1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double, scala.AnyRef) -T1, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double, scala.AnyRef) +R] extends AnyRef { self =>
+ /** Apply the body of this function to the argument.
* @return the result of function application.
*/
def apply(v1: T1): R
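
(The widened `@specialized` lists above extend `Function1` specialization from the four primitive argument types to `AnyRef` variants as well, so more call sites can avoid generic boxing. A minimal script-style sketch of what specialization buys; the class names in the comment describe the 2.10-era compilation scheme and are not part of this patch:

    // For an Int => Int literal the compiler emits a subclass of the
    // Int/Int-specialized variant, so each call below dispatches to
    // apply$mcII$sp and boxes neither the argument nor the result.
    val succ: Int => Int = (x: Int) => x + 1

    var sum = 0
    var i = 0
    while (i < 1000) { sum = succ(sum); i += 1 }   // boxing-free applies
    println(sum)
)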
diff --git a/src/library/scala/Function10.scala b/src/library/scala/Function10.scala
index 6f17606afd..9e107fc53d 100644
--- a/src/library/scala/Function10.scala
+++ b/src/library/scala/Function10.scala
@@ -18,12 +18,10 @@ trait Function10[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, +R] extends
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)).curried
}
diff --git a/src/library/scala/Function11.scala b/src/library/scala/Function11.scala
index 7a73bd35bf..783a86ab5d 100644
--- a/src/library/scala/Function11.scala
+++ b/src/library/scala/Function11.scala
@@ -18,12 +18,10 @@ trait Function11[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, +R] ex
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11)).curried
}
diff --git a/src/library/scala/Function12.scala b/src/library/scala/Function12.scala
index c099c0436a..7f4dee6216 100644
--- a/src/library/scala/Function12.scala
+++ b/src/library/scala/Function12.scala
@@ -18,12 +18,10 @@ trait Function12[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)).curried
}
diff --git a/src/library/scala/Function13.scala b/src/library/scala/Function13.scala
index f13db28f30..23853dde69 100644
--- a/src/library/scala/Function13.scala
+++ b/src/library/scala/Function13.scala
@@ -18,12 +18,10 @@ trait Function13[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13)).curried
}
diff --git a/src/library/scala/Function14.scala b/src/library/scala/Function14.scala
index d0345cc552..372f1cfafb 100644
--- a/src/library/scala/Function14.scala
+++ b/src/library/scala/Function14.scala
@@ -18,12 +18,10 @@ trait Function14[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14)).curried
}
diff --git a/src/library/scala/Function15.scala b/src/library/scala/Function15.scala
index 69ff039f5b..47c7309695 100644
--- a/src/library/scala/Function15.scala
+++ b/src/library/scala/Function15.scala
@@ -18,12 +18,10 @@ trait Function15[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15)).curried
}
diff --git a/src/library/scala/Function16.scala b/src/library/scala/Function16.scala
index d544d89303..8eea42de5b 100644
--- a/src/library/scala/Function16.scala
+++ b/src/library/scala/Function16.scala
@@ -18,12 +18,10 @@ trait Function16[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16)).curried
}
diff --git a/src/library/scala/Function17.scala b/src/library/scala/Function17.scala
index 16c71e7ada..2d93af34f2 100644
--- a/src/library/scala/Function17.scala
+++ b/src/library/scala/Function17.scala
@@ -18,12 +18,10 @@ trait Function17[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17)).curried
}
diff --git a/src/library/scala/Function18.scala b/src/library/scala/Function18.scala
index dfd70c2353..ffca98c443 100644
--- a/src/library/scala/Function18.scala
+++ b/src/library/scala/Function18.scala
@@ -18,12 +18,10 @@ trait Function18[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17, v18: T18): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17)(x18) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18)).curried
}
diff --git a/src/library/scala/Function19.scala b/src/library/scala/Function19.scala
index 63decd03ad..f661ea7707 100644
--- a/src/library/scala/Function19.scala
+++ b/src/library/scala/Function19.scala
@@ -18,12 +18,10 @@ trait Function19[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17, v18: T18, v19: T19): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17)(x18)(x19) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19)).curried
}
diff --git a/src/library/scala/Function2.scala b/src/library/scala/Function2.scala
index a4ad87fa97..cacb96ef5d 100644
--- a/src/library/scala/Function2.scala
+++ b/src/library/scala/Function2.scala
@@ -16,7 +16,7 @@ package scala
* shorthand for the anonymous class definition anonfun2:
*
* {{{
- * object Main extends Application {
+ * object Main extends App {
* val max = (x: Int, y: Int) => if (x < y) y else x
*
* val anonfun2 = new Function2[Int, Int, Int] {
@@ -25,18 +25,22 @@ package scala
* assert(max(0, 1) == anonfun2(0, 1))
* }
* }}}
+ *
+ * Note that `Function1` does not define a total function, as might
+ * be suggested by the existence of [[scala.PartialFunction]]. The only
+ * distinction between `Function1` and `PartialFunction` is that the
+ * latter can specify inputs which it will not handle.
+
*/
trait Function2[@specialized(scala.Int, scala.Long, scala.Double) -T1, @specialized(scala.Int, scala.Long, scala.Double) -T2, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double) +R] extends AnyRef { self =>
/** Apply the body of this function to the arguments.
* @return the result of function application.
*/
def apply(v1: T1, v2: T2): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2) == apply(x1, x2)`
- */
- def curried: T1 => T2 => R = {
+ */ def curried: T1 => T2 => R = {
(x1: T1) => (x2: T2) => apply(x1, x2)
}
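
(All of the `curried` reshufflings in these FunctionN files preserve the same contract: `f.curried(x1)(x2)...(xn) == f(x1, ..., xn)`. A short usage sketch of that contract, with illustrative values:

    val max: (Int, Int) => Int = (x, y) => if (x < y) y else x

    val curriedMax: Int => Int => Int = max.curried
    val atLeastZero: Int => Int = curriedMax(0)    // partial application

    assert(atLeastZero(-5) == 0)
    assert(atLeastZero(3) == 3)
)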
diff --git a/src/library/scala/Function20.scala b/src/library/scala/Function20.scala
index 7219c9be81..e4fb9f280c 100644
--- a/src/library/scala/Function20.scala
+++ b/src/library/scala/Function20.scala
@@ -18,12 +18,10 @@ trait Function20[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17, v18: T18, v19: T19, v20: T20): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17)(x18)(x19)(x20) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20)).curried
}
diff --git a/src/library/scala/Function21.scala b/src/library/scala/Function21.scala
index c7d55960db..9823386856 100644
--- a/src/library/scala/Function21.scala
+++ b/src/library/scala/Function21.scala
@@ -18,12 +18,10 @@ trait Function21[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17, v18: T18, v19: T19, v20: T20, v21: T21): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17)(x18)(x19)(x20)(x21) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => T21 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => T21 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20, x21: T21) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21)).curried
}
diff --git a/src/library/scala/Function22.scala b/src/library/scala/Function22.scala
index 196421c830..e708f7f49a 100644
--- a/src/library/scala/Function22.scala
+++ b/src/library/scala/Function22.scala
@@ -18,12 +18,10 @@ trait Function22[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12,
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9, v10: T10, v11: T11, v12: T12, v13: T13, v14: T14, v15: T15, v16: T16, v17: T17, v18: T18, v19: T19, v20: T20, v21: T21, v22: T22): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)(x13)(x14)(x15)(x16)(x17)(x18)(x19)(x20)(x21)(x22) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => T21 => T22 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => T13 => T14 => T15 => T16 => T17 => T18 => T19 => T20 => T21 => T22 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12, x13: T13, x14: T14, x15: T15, x16: T16, x17: T17, x18: T18, x19: T19, x20: T20, x21: T21, x22: T22) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22)).curried
}
diff --git a/src/library/scala/Function3.scala b/src/library/scala/Function3.scala
index 09a5aa5828..62a997c1b5 100644
--- a/src/library/scala/Function3.scala
+++ b/src/library/scala/Function3.scala
@@ -18,12 +18,10 @@ trait Function3[-T1, -T2, -T3, +R] extends AnyRef { self =>
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3) == apply(x1, x2, x3)`
- */
- def curried: T1 => T2 => T3 => R = {
+ */ def curried: T1 => T2 => T3 => R = {
(x1: T1) => (x2: T2) => (x3: T3) => apply(x1, x2, x3)
}
diff --git a/src/library/scala/Function4.scala b/src/library/scala/Function4.scala
index 00da84636a..86d2faeac8 100644
--- a/src/library/scala/Function4.scala
+++ b/src/library/scala/Function4.scala
@@ -18,12 +18,10 @@ trait Function4[-T1, -T2, -T3, -T4, +R] extends AnyRef { self =>
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4) == apply(x1, x2, x3, x4)`
- */
- def curried: T1 => T2 => T3 => T4 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => R = {
(x1: T1) => (x2: T2) => (x3: T3) => (x4: T4) => apply(x1, x2, x3, x4)
}
diff --git a/src/library/scala/Function5.scala b/src/library/scala/Function5.scala
index 3915048906..bd9af77f12 100644
--- a/src/library/scala/Function5.scala
+++ b/src/library/scala/Function5.scala
@@ -18,12 +18,10 @@ trait Function5[-T1, -T2, -T3, -T4, -T5, +R] extends AnyRef { self =>
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5) == apply(x1, x2, x3, x4, x5)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5) => self.apply(x1, x2, x3, x4, x5)).curried
}
diff --git a/src/library/scala/Function6.scala b/src/library/scala/Function6.scala
index 183a7332e1..4f601a468c 100644
--- a/src/library/scala/Function6.scala
+++ b/src/library/scala/Function6.scala
@@ -18,12 +18,10 @@ trait Function6[-T1, -T2, -T3, -T4, -T5, -T6, +R] extends AnyRef { self =>
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6) == apply(x1, x2, x3, x4, x5, x6)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6) => self.apply(x1, x2, x3, x4, x5, x6)).curried
}
diff --git a/src/library/scala/Function7.scala b/src/library/scala/Function7.scala
index 10f8e9b599..6978b6545d 100644
--- a/src/library/scala/Function7.scala
+++ b/src/library/scala/Function7.scala
@@ -18,12 +18,10 @@ trait Function7[-T1, -T2, -T3, -T4, -T5, -T6, -T7, +R] extends AnyRef { self =>
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7) == apply(x1, x2, x3, x4, x5, x6, x7)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7) => self.apply(x1, x2, x3, x4, x5, x6, x7)).curried
}
diff --git a/src/library/scala/Function8.scala b/src/library/scala/Function8.scala
index 8144b36101..903551d939 100644
--- a/src/library/scala/Function8.scala
+++ b/src/library/scala/Function8.scala
@@ -18,12 +18,10 @@ trait Function8[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, +R] extends AnyRef { sel
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8) == apply(x1, x2, x3, x4, x5, x6, x7, x8)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8)).curried
}
diff --git a/src/library/scala/Function9.scala b/src/library/scala/Function9.scala
index ee04ed0915..0c273ba929 100644
--- a/src/library/scala/Function9.scala
+++ b/src/library/scala/Function9.scala
@@ -18,12 +18,10 @@ trait Function9[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, +R] extends AnyRef
* @return the result of function application.
*/
def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7, v8: T8, v9: T9): R
-
/** Creates a curried version of this function.
*
* @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9) == apply(x1, x2, x3, x4, x5, x6, x7, x8, x9)`
- */
- def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => R = {
+ */ def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => R = {
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9)).curried
}
diff --git a/src/library/scala/PartialFunction.scala b/src/library/scala/PartialFunction.scala
index 70caff0221..3c5d6d0d23 100644
--- a/src/library/scala/PartialFunction.scala
+++ b/src/library/scala/PartialFunction.scala
@@ -26,18 +26,18 @@ package scala
*
* {{{
* val sample = 1 to 10
- * val isEven: PartialFunction[Int, String] = {
- * case x if x % 2 == 0 => x+" is even"
+ * val isEven: PartialFunction[Int, String] = {
+ * case x if x % 2 == 0 => x+" is even"
* }
*
* // the method collect can use isDefinedAt to select which members to collect
* val evenNumbers = sample collect isEven
*
- * val isOdd: PartialFunction[Int, String] = {
- * case x if x % 2 == 1 => x+" is odd"
+ * val isOdd: PartialFunction[Int, String] = {
+ * case x if x % 2 == 1 => x+" is odd"
* }
*
- * // the method orElse allows chaining another partial function to handle
+ * // the method orElse allows chaining another partial function to handle
* // input outside the declared domain
* val numbers = sample map (isEven orElse isOdd)
* }}}
diff --git a/src/library/scala/Predef.scala b/src/library/scala/Predef.scala
index 824e048e73..a2ee76500c 100644
--- a/src/library/scala/Predef.scala
+++ b/src/library/scala/Predef.scala
@@ -95,7 +95,8 @@ object Predef extends LowPriorityImplicits {
type Set[A] = immutable.Set[A]
val Map = immutable.Map
val Set = immutable.Set
- val AnyRef = new SpecializableCompanion {} // a dummy used by the specialization annotation
+ // @deprecated("Use scala.AnyRef instead", "2.10.0")
+ // def AnyRef = scala.AnyRef
// Manifest types, companions, and incantations for summoning
type ClassManifest[T] = scala.reflect.ClassManifest[T]
diff --git a/src/library/scala/Specializable.scala b/src/library/scala/Specializable.scala
new file mode 100644
index 0000000000..67126b3069
--- /dev/null
+++ b/src/library/scala/Specializable.scala
@@ -0,0 +1,29 @@
+/*                     __                                               *\
+**     ________ ___   / /  ___     Scala API                            **
+**    / __/ __// _ | / /  / _ |    (c) 2002-2011, LAMP/EPFL             **
+**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
+** /____/\___/_/ |_/____/_/ | |                                         **
+**                          |/                                          **
+\*                                                                      */
+
+package scala
+
+/** A common supertype for companions of specializable types.
+ * Should not be extended in user code.
+ */
+trait Specializable extends SpecializableCompanion
+
+object Specializable {
+ // No type parameter in @specialized annotation.
+ trait SpecializedGroup { }
+
+ // Smuggle a list of types by way of a tuple upon which Group is parameterized.
+ class Group[T >: Null](value: T) extends SpecializedGroup { }
+
+ final val Primitives = new Group(Byte, Short, Int, Long, Char, Float, Double, Boolean, Unit)
+ final val Everything = new Group(Byte, Short, Int, Long, Char, Float, Double, Boolean, Unit, AnyRef)
+ final val Bits32AndUp = new Group(Int, Long, Float, Double)
+ final val Integral = new Group(Byte, Short, Int, Long, Char)
+ final val AllNumeric = new Group(Byte, Short, Int, Long, Char, Float, Double)
+ final val BestOfBreed = new Group(Int, Double, Boolean, Unit, AnyRef)
+}
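
(The `Group` values above are meant to be passed directly to `@specialized` in place of an explicit type list. A sketch of the intended use, assuming the accompanying change to `scala.specialized` that accepts a group; the `Box` class is illustrative:

    import scala.Specializable._

    // Equivalent to @specialized(Int, Long, Float, Double), but the set
    // is named once in the library instead of repeated at every use site.
    class Box[@specialized(Bits32AndUp) T](val value: T)

    val b = new Box(42)   // picks the Int-specialized subclass; no boxing
)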
diff --git a/src/library/scala/SpecializableCompanion.scala b/src/library/scala/SpecializableCompanion.scala
index fbdf42fd0b..ec797c1f15 100644
--- a/src/library/scala/SpecializableCompanion.scala
+++ b/src/library/scala/SpecializableCompanion.scala
@@ -10,4 +10,5 @@ package scala
/** A common supertype for companion classes which specialization takes into account.
*/
+@deprecated("Use Specializable instead", "2.10.0")
private[scala] trait SpecializableCompanion
diff --git a/src/library/scala/StringContext.scala b/src/library/scala/StringContext.scala
index 6116547aa2..1b01355108 100644
--- a/src/library/scala/StringContext.scala
+++ b/src/library/scala/StringContext.scala
@@ -13,7 +13,7 @@ import collection.mutable.ArrayBuffer
/** A class to support string interpolation.
* This class supports string interpolation as outlined in Scala SIP-11.
* It needs to be fully documented once the SIP is accepted.
- *
+ *
* @param parts The parts that make up the interpolated string,
* without the expressions that get inserted by interpolation.
*/
@@ -26,13 +26,13 @@ case class StringContext(parts: String*) {
* @param `args` The arguments to be checked.
* @throws An `IllegalArgumentException` if this is not the case.
*/
- def checkLengths(args: Any*): Unit =
+ def checkLengths(args: Any*): Unit =
if (parts.length != args.length + 1)
throw new IllegalArgumentException("wrong number of arguments for interpolated string")
/** The simple string interpolator.
- *
+ *
* It inserts its arguments between corresponding parts of the string context.
* It also treats standard escape sequences as defined in the Scala specification.
* @param `args` The arguments to be inserted into the resulting string.
@@ -46,30 +46,30 @@ case class StringContext(parts: String*) {
checkLengths(args: _*)
val pi = parts.iterator
val ai = args.iterator
- val bldr = new java.lang.StringBuilder(treatEscapes(pi.next))
+ val bldr = new java.lang.StringBuilder(treatEscapes(pi.next()))
while (ai.hasNext) {
bldr append ai.next
- bldr append treatEscapes(pi.next)
+ bldr append treatEscapes(pi.next())
}
bldr.toString
}
/** The formatted string interpolator.
- *
+ *
* It inserts its arguments between corresponding parts of the string context.
* It also treats standard escape sequences as defined in the Scala specification.
* Finally, if an interpolated expression is followed by a `parts` string
* that starts with a formatting specifier, the expression is formatted according to that
* specifier. All specifiers allowed in Java format strings are handled, and in the same
* way they are treated in Java.
- *
+ *
* @param `args` The arguments to be inserted into the resulting string.
* @throws An `IllegalArgumentException`
* if the number of `parts` in the enclosing `StringContext` does not exceed
* the number of arguments `arg` by exactly 1.
* @throws A `StringContext.InvalidEscapeException` if a `parts` string contains a backslash (`\`) character
* that does not start a valid escape sequence.
- *
+ *
* Note: The `f` method works by assembling a format string from all the `parts` strings and using
* `java.lang.String.format` to format all arguments with that format string. The format string is
* obtained by concatenating all `parts` strings, and performing two transformations:
@@ -89,11 +89,12 @@ case class StringContext(parts: String*) {
val bldr = new java.lang.StringBuilder
val args1 = new ArrayBuffer[Any]
def copyString(first: Boolean): Unit = {
- val str = treatEscapes(pi.next)
+ val str = treatEscapes(pi.next())
+ val strIsEmpty = str.length == 0
var start = 0
var idx = 0
if (!first) {
- if ((str charAt 0) != '%')
+ if (strIsEmpty || (str charAt 0) != '%')
bldr append "%s"
idx = 1
}
@@ -106,11 +107,11 @@ case class StringContext(parts: String*) {
}
idx += 1
}
- bldr append (str substring (start, idx))
+ if (!strIsEmpty) bldr append (str substring (start, idx))
}
copyString(first = true)
while (pi.hasNext) {
- args1 += ai.next
+ args1 += ai.next()
copyString(first = false)
}
bldr.toString format (args1: _*)
@@ -125,14 +126,14 @@ object StringContext {
* @param idx The index of the offending backslash character in `str`.
*/
class InvalidEscapeException(str: String, idx: Int)
- extends IllegalArgumentException("invalid escape character at index "+idx+" in \""+str+"\"")
+ extends IllegalArgumentException("invalid escape character at index "+idx+" in \""+str+"\"")
/** Expands standard Scala escape sequences in a string.
* Escape sequences are:
* control: `\b`, `\t`, `\n`, `\f`, `\r`
* escape: `\\`, `\"`, `\'`
* octal: `\d` `\dd` `\ddd` where `d` is an octal digit between `0` and `7`.
- *
+ *
* @param A string that may contain escape sequences
* @return The string with all escape sequences expanded.
*/
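
(For orientation, `StringContext` is the runtime target of SIP-11 string interpolation: a literal like `s"..."` desugars to a `StringContext(parts...)` call whose `s` or `f` method receives the interpolated arguments. A sketch of the surface syntax these methods back, once the SIP-11 syntax is available; the values are illustrative:

    val name = "world"

    // Desugars to: StringContext("hello, ", "!").s(name)
    val greeting = s"hello, $name!"

    // f routes through java.lang.String.format; the empty-part guard
    // added above covers adjacent interpolations such as f"$x$y".
    val approx = f"pi is roughly ${math.Pi}%.2f"
)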
diff --git a/src/library/scala/Tuple2.scala b/src/library/scala/Tuple2.scala
index dd6ac0cfd2..684d2266e8 100644
--- a/src/library/scala/Tuple2.scala
+++ b/src/library/scala/Tuple2.scala
@@ -19,7 +19,7 @@ import scala.collection.generic.{ CanBuildFrom => CBF }
* @param _1 Element 1 of this Tuple2
* @param _2 Element 2 of this Tuple2
*/
-case class Tuple2[@specialized(Int, Long, Double) +T1, @specialized(Int, Long, Double) +T2](_1: T1, _2: T2)
+case class Tuple2[@specialized(Int, Long, Double, Char, Boolean, AnyRef) +T1, @specialized(Int, Long, Double, Char, Boolean, AnyRef) +T2](_1: T1, _2: T2)
extends Product2[T1, T2]
{
override def toString() = "(" + _1 + "," + _2 + ")"
diff --git a/src/library/scala/annotation/elidable.scala b/src/library/scala/annotation/elidable.scala
index 8dc180d7ab..880b645daa 100644
--- a/src/library/scala/annotation/elidable.scala
+++ b/src/library/scala/annotation/elidable.scala
@@ -10,22 +10,53 @@ package scala.annotation
import java.util.logging.Level
-/** An annotation for methods for which invocations might
- * be removed in the generated code.
+/** An annotation for methods whose bodies may be excluded
+ * from compiler-generated bytecode.
*
* Behavior is influenced by passing `-Xelide-below <arg>` to `scalac`.
- * Methods marked elidable will be omitted from generated code if the
- * priority given the annotation is lower than to the command line argument.
- * Examples:
- * {{{
- * import annotation.elidable._
+ * Calls to methods marked elidable (as well as the method body) will
+ * be omitted from generated code if the priority given the annotation
+ * is lower than that given on the command line.
*
- * @elidable(WARNING) def foo = log("foo")
- * @elidable(FINE) def bar = log("bar")
+ * @elidable(123) // annotation priority
+ * scalac -Xelide-below 456 // command line priority
*
- * scalac -Xelide-below=1000
- * }}}
- * @since 2.8
+ * The method call will be replaced with an expression which depends on
+ * the type of the elided expression. In decreasing order of precedence:
+ *
+ * Unit ()
+ * Boolean false
+ * T <: AnyVal 0
+ * T >: Null null
+ * T >: Nothing Predef.???
+ *
+ * Complete example:
+ {{{
+ import annotation._, elidable._
+ object Test extends App {
+ def expensiveComputation(): Int = { Thread.sleep(1000) ; 172 }
+
+ @elidable(WARNING) def warning(msg: String) = println(msg)
+ @elidable(FINE) def debug(msg: String) = println(msg)
+ @elidable(FINE) def computedValue = expensiveComputation()
+
+ warning("Warning! Danger! Warning!")
+ debug("Debug! Danger! Debug!")
+ println("I computed a value: " + computedValue)
+ }
+ % scalac example.scala && scala Test
+ Warning! Danger! Warning!
+ Debug! Danger! Debug!
+ I computed a value: 172
+
+ // INFO lies between WARNING and FINE
+ % scalac -Xelide-below INFO example.scala && scala Test
+ Warning! Danger! Warning!
+ I computed a value: 0
+ }}}
+ *
+ * @author Paul Phillips
+ * @since 2.8
*/
final class elidable(final val level: Int) extends annotation.StaticAnnotation {}
diff --git a/src/library/scala/collection/GenTraversableLike.scala b/src/library/scala/collection/GenTraversableLike.scala
index c837775cf9..1dcc0bdac7 100644
--- a/src/library/scala/collection/GenTraversableLike.scala
+++ b/src/library/scala/collection/GenTraversableLike.scala
@@ -318,7 +318,7 @@ trait GenTraversableLike[+A, +Repr] extends GenTraversableOnce[A] with Paralleli
* $orderDependent
*
* @param from the lowest index to include from this $coll.
- * @param until the highest index to EXCLUDE from this $coll.
+ * @param until the lowest index to EXCLUDE from this $coll.
* @return a $coll containing the elements greater than or equal to
* index `from` extending up to (but not including) index `until`
* of this $coll.
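
(The corrected wording pins down `slice` semantics: both bounds are indices, `from` inclusive and `until` exclusive. One line makes it concrete:

    assert((1 to 10).slice(2, 5) == Seq(3, 4, 5))   // indices 2, 3, 4
)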
diff --git a/src/library/scala/collection/JavaConversions.scala b/src/library/scala/collection/JavaConversions.scala
index d5011fc6aa..50919e506a 100644
--- a/src/library/scala/collection/JavaConversions.scala
+++ b/src/library/scala/collection/JavaConversions.scala
@@ -69,7 +69,7 @@ object JavaConversions {
* @return A Java Iterator view of the argument.
*/
implicit def asJavaIterator[A](it: Iterator[A]): ju.Iterator[A] = it match {
- case JIteratorWrapper(wrapped) => wrapped
+ case JIteratorWrapper(wrapped) => wrapped.asInstanceOf[ju.Iterator[A]]
case _ => IteratorWrapper(it)
}
@@ -87,7 +87,7 @@ object JavaConversions {
* @return A Java Enumeration view of the argument.
*/
implicit def asJavaEnumeration[A](it: Iterator[A]): ju.Enumeration[A] = it match {
- case JEnumerationWrapper(wrapped) => wrapped
+ case JEnumerationWrapper(wrapped) => wrapped.asInstanceOf[ju.Enumeration[A]]
case _ => IteratorWrapper(it)
}
@@ -105,7 +105,7 @@ object JavaConversions {
* @return A Java Iterable view of the argument.
*/
implicit def asJavaIterable[A](i: Iterable[A]): jl.Iterable[A] = i match {
- case JIterableWrapper(wrapped) => wrapped
+ case JIterableWrapper(wrapped) => wrapped.asInstanceOf[jl.Iterable[A]]
case _ => IterableWrapper(i)
}
@@ -121,7 +121,7 @@ object JavaConversions {
* @return A Java Collection view of the argument.
*/
implicit def asJavaCollection[A](it: Iterable[A]): ju.Collection[A] = it match {
- case JCollectionWrapper(wrapped) => wrapped
+ case JCollectionWrapper(wrapped) => wrapped.asInstanceOf[ju.Collection[A]]
case _ => new IterableWrapper(it)
}
@@ -179,7 +179,7 @@ object JavaConversions {
* @return A Java List view of the argument.
*/
implicit def seqAsJavaList[A](seq: Seq[A]): ju.List[A] = seq match {
- case JListWrapper(wrapped) => wrapped
+ case JListWrapper(wrapped) => wrapped.asInstanceOf[ju.List[A]]
case _ => new SeqWrapper(seq)
}
@@ -286,7 +286,7 @@ object JavaConversions {
*/
implicit def mapAsJavaMap[A, B](m: Map[A, B]): ju.Map[A, B] = m match {
//case JConcurrentMapWrapper(wrapped) => wrapped
- case JMapWrapper(wrapped) => wrapped
+ case JMapWrapper(wrapped) => wrapped.asInstanceOf[ju.Map[A, B]]
case _ => new MapWrapper(m)
}
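
(Each hunk above replaces a bare unwrap with an `asInstanceOf` cast to recover the element type the pattern match cannot prove; the observable behaviour is unchanged round-tripping, i.e. converting back yields the original Java object rather than a wrapper around a wrapper. A sketch:

    import scala.collection.JavaConversions._

    val javaList = new java.util.ArrayList[String]()
    javaList.add("a")

    val asScala: Seq[String] = javaList               // wraps (JListWrapper)
    val backAgain: java.util.List[String] = asScala   // unwraps via the match above

    assert(backAgain eq javaList)   // same underlying object, not double-wrapped
)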
diff --git a/src/library/scala/collection/SeqLike.scala b/src/library/scala/collection/SeqLike.scala
index 02298ef096..b51a37cf9e 100644
--- a/src/library/scala/collection/SeqLike.scala
+++ b/src/library/scala/collection/SeqLike.scala
@@ -151,7 +151,7 @@ trait SeqLike[+A, +Repr] extends IterableLike[A, Repr] with GenSeqLike[A, Repr]
def next(): Repr = {
if (!hasNext)
Iterator.empty.next
-
+
val forcedElms = new mutable.ArrayBuffer[A](elms.size) ++= elms
val result = (self.newBuilder ++= forcedElms).result
var i = idxs.length - 2
diff --git a/src/library/scala/collection/generic/MutableSortedSetFactory.scala b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
index b235379575..cbbedc0231 100644
--- a/src/library/scala/collection/generic/MutableSortedSetFactory.scala
+++ b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
@@ -11,12 +11,12 @@ package generic
import scala.collection.mutable.{ Builder, GrowingBuilder }
-/**
+/**
* @define Coll mutable.SortedSet
* @define coll mutable sorted
*
* @author Lucien Pereira
- *
+ *
*/
abstract class MutableSortedSetFactory[CC[A] <: mutable.SortedSet[A] with SortedSetLike[A, CC[A]] with mutable.Set[A] with mutable.SetLike[A, CC[A]]] extends SortedSetFactory[CC] {
@@ -26,7 +26,7 @@ abstract class MutableSortedSetFactory[CC[A] <: mutable.SortedSet[A] with Sorted
* is evaluated elems is cloned (which is O(n)).
*
* Fortunately GrowingBuilder comes to rescue.
- *
+ *
*/
override def newBuilder[A](implicit ord: Ordering[A]): Builder[A, CC[A]] = new GrowingBuilder[A, CC[A]](empty)
diff --git a/src/library/scala/collection/immutable/BitSet.scala b/src/library/scala/collection/immutable/BitSet.scala
index abccd91f9c..870d5534dc 100644
--- a/src/library/scala/collection/immutable/BitSet.scala
+++ b/src/library/scala/collection/immutable/BitSet.scala
@@ -131,7 +131,7 @@ object BitSet extends BitSetFactory[BitSet] {
* the capacity of two long values). The constructor wraps an existing
* bit mask without copying, thus exposing a mutable part of the internal
* implementation. Care needs to be taken not to modify the exposed
- * array.
+ * array.
*/
class BitSetN(val elems: Array[Long]) extends BitSet {
protected def nwords = elems.length
diff --git a/src/library/scala/collection/immutable/HashMap.scala b/src/library/scala/collection/immutable/HashMap.scala
index 9cde20f1df..6b11371bec 100644
--- a/src/library/scala/collection/immutable/HashMap.scala
+++ b/src/library/scala/collection/immutable/HashMap.scala
@@ -138,8 +138,10 @@ object HashMap extends ImmutableMapFactory[HashMap] with BitOperations.Int {
override def updated0[B1 >: B](key: A, hash: Int, level: Int, value: B1, kv: (A, B1), merger: Merger[B1]): HashMap[A, B1] =
if (hash == this.hash && key == this.key ) {
- if (merger eq null) new HashMap1(key, hash, value, kv)
- else new HashMap1(key, hash, value, merger(this.kv, kv))
+ if (merger eq null) {
+ if(this.value.asInstanceOf[AnyRef] eq value.asInstanceOf[AnyRef]) this
+ else new HashMap1(key, hash, value, kv)
+ } else new HashMap1(key, hash, value, merger(this.kv, kv))
} else {
var thatindex = (hash >>> level) & 0x1f
var thisindex = (this.hash >>> level) & 0x1f
@@ -271,13 +273,15 @@ object HashMap extends ImmutableMapFactory[HashMap] with BitOperations.Int {
val mask = (1 << index)
val offset = Integer.bitCount(bitmap & (mask-1))
if ((bitmap & mask) != 0) {
- val elemsNew = new Array[HashMap[A,B1]](elems.length)
- Array.copy(elems, 0, elemsNew, 0, elems.length)
val sub = elems(offset)
// TODO: might be worth checking if sub is HashTrieMap (-> monomorphic call site)
val subNew = sub.updated0(key, hash, level + 5, value, kv, merger)
- elemsNew(offset) = subNew
- new HashTrieMap(bitmap, elemsNew, size + (subNew.size - sub.size))
+ if(subNew eq sub) this else {
+ val elemsNew = new Array[HashMap[A,B1]](elems.length)
+ Array.copy(elems, 0, elemsNew, 0, elems.length)
+ elemsNew(offset) = subNew
+ new HashTrieMap(bitmap, elemsNew, size + (subNew.size - sub.size))
+ }
} else {
val elemsNew = new Array[HashMap[A,B1]](elems.length + 1)
Array.copy(elems, 0, elemsNew, 0, offset)
@@ -295,7 +299,8 @@ object HashMap extends ImmutableMapFactory[HashMap] with BitOperations.Int {
val sub = elems(offset)
// TODO: might be worth checking if sub is HashTrieMap (-> monomorphic call site)
val subNew = sub.removed0(key, hash, level + 5)
- if (subNew.isEmpty) {
+ if (subNew eq sub) this
+ else if (subNew.isEmpty) {
val bitmapNew = bitmap ^ mask
if (bitmapNew != 0) {
val elemsNew = new Array[HashMap[A,B]](elems.length - 1)
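
(The hunks above add identity short-circuits: when an update or removal leaves a node reference-identical (`eq`) to the old one, the original trie is returned instead of copying the path from the root. The observable effect, with this patch applied and illustrative values:

    import scala.collection.immutable.HashMap

    val v = "value"
    val m = HashMap("key" -> v)

    // Re-inserting a reference-identical value returns the map itself
    // instead of rebuilding the spine down to the leaf.
    assert(m.updated("key", v) eq m)
)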
diff --git a/src/library/scala/collection/immutable/IntMap.scala b/src/library/scala/collection/immutable/IntMap.scala
index dd6b066878..3c9c0c2f24 100644
--- a/src/library/scala/collection/immutable/IntMap.scala
+++ b/src/library/scala/collection/immutable/IntMap.scala
@@ -353,19 +353,19 @@ extends AbstractMap[Int, T]
def unionWith[S >: T](that : IntMap[S], f : (Int, S, S) => S) : IntMap[S] = (this, that) match{
case (IntMap.Bin(p1, m1, l1, r1), that@(IntMap.Bin(p2, m2, l2, r2))) =>
if (shorter(m1, m2)) {
- if (!hasMatch(p2, p1, m1)) join(p1, this, p2, that);
+ if (!hasMatch(p2, p1, m1)) join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
else if (zero(p2, m1)) IntMap.Bin(p1, m1, l1.unionWith(that, f), r1);
else IntMap.Bin(p1, m1, l1, r1.unionWith(that, f));
} else if (shorter(m2, m1)){
- if (!hasMatch(p1, p2, m2)) join(p1, this, p2, that);
+ if (!hasMatch(p1, p2, m2)) join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
else if (zero(p1, m2)) IntMap.Bin(p2, m2, this.unionWith(l2, f), r2);
else IntMap.Bin(p2, m2, l2, this.unionWith(r2, f));
}
else {
if (p1 == p2) IntMap.Bin(p1, m1, l1.unionWith(l2,f), r1.unionWith(r2, f));
- else join(p1, this, p2, that);
+ else join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
}
- case (IntMap.Tip(key, value), x) => x.updateWith(key, value, (x, y) => f(key, y, x));
+ case (IntMap.Tip(key, value), x) => x.updateWith[S](key, value, (x, y) => f(key, y, x));
case (x, IntMap.Tip(key, value)) => x.updateWith[S](key, value, (x, y) => f(key, x, y));
case (IntMap.Nil, x) => x;
case (x, IntMap.Nil) => x;
diff --git a/src/library/scala/collection/immutable/List.scala b/src/library/scala/collection/immutable/List.scala
index 5f3f9b717f..381fcf3117 100644
--- a/src/library/scala/collection/immutable/List.scala
+++ b/src/library/scala/collection/immutable/List.scala
@@ -205,6 +205,16 @@ sealed abstract class List[+A] extends AbstractSeq[A]
these
}
+ /**
+ * @example {{{
+ * // Given a list
+ * val letters = List('a','b','c','d','e')
+ *
+ * // `slice` returns all elements beginning at index `from` and afterwards,
+ * // up until index `until` (excluding index `until`.)
+ * letters.slice(1,3) // Returns List('b','c')
+ * }}}
+ */
override def slice(from: Int, until: Int): List[A] = {
val lo = math.max(from, 0)
if (until <= lo || isEmpty) Nil
@@ -316,13 +326,13 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
override def head : B = hd
override def tail : List[B] = tl
override def isEmpty: Boolean = false
-
+
private def writeObject(out: ObjectOutputStream) {
out.writeObject(ListSerializeStart) // needed to differentiate with the legacy `::` serialization
out.writeObject(this.hd)
out.writeObject(this.tl)
}
-
+
private def readObject(in: ObjectInputStream) {
val obj = in.readObject()
if (obj == ListSerializeStart) {
@@ -330,7 +340,7 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
this.tl = in.readObject().asInstanceOf[List[B]]
} else oldReadObject(in, obj)
}
-
+
/* The oldReadObject method exists here for compatibility reasons.
* :: objects used to be serialized by serializing all the elements to
* the output stream directly, but this was broken (see SI-5374).
@@ -349,13 +359,13 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
current = list
}
}
-
+
private def oldWriteObject(out: ObjectOutputStream) {
var xs: List[B] = this
while (!xs.isEmpty) { out.writeObject(xs.head); xs = xs.tail }
out.writeObject(ListSerializeEnd)
}
-
+
}
/** $factoryInfo
diff --git a/src/library/scala/collection/immutable/LongMap.scala b/src/library/scala/collection/immutable/LongMap.scala
index 963ddac762..11b5d1e311 100644
--- a/src/library/scala/collection/immutable/LongMap.scala
+++ b/src/library/scala/collection/immutable/LongMap.scala
@@ -349,19 +349,19 @@ extends AbstractMap[Long, T]
def unionWith[S >: T](that : LongMap[S], f : (Long, S, S) => S) : LongMap[S] = (this, that) match{
case (LongMap.Bin(p1, m1, l1, r1), that@(LongMap.Bin(p2, m2, l2, r2))) =>
if (shorter(m1, m2)) {
- if (!hasMatch(p2, p1, m1)) join(p1, this, p2, that);
+ if (!hasMatch(p2, p1, m1)) join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
else if (zero(p2, m1)) LongMap.Bin(p1, m1, l1.unionWith(that, f), r1);
else LongMap.Bin(p1, m1, l1, r1.unionWith(that, f));
} else if (shorter(m2, m1)){
- if (!hasMatch(p1, p2, m2)) join(p1, this, p2, that);
+ if (!hasMatch(p1, p2, m2)) join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
else if (zero(p1, m2)) LongMap.Bin(p2, m2, this.unionWith(l2, f), r2);
else LongMap.Bin(p2, m2, l2, this.unionWith(r2, f));
}
else {
if (p1 == p2) LongMap.Bin(p1, m1, l1.unionWith(l2,f), r1.unionWith(r2, f));
- else join(p1, this, p2, that);
+ else join[S](p1, this, p2, that); // TODO: remove [S] when SI-5548 is fixed
}
- case (LongMap.Tip(key, value), x) => x.updateWith(key, value, (x, y) => f(key, y, x));
+ case (LongMap.Tip(key, value), x) => x.updateWith[S](key, value, (x, y) => f(key, y, x)); // TODO: remove [S] when SI-5548 is fixed
case (x, LongMap.Tip(key, value)) => x.updateWith[S](key, value, (x, y) => f(key, x, y));
case (LongMap.Nil, x) => x;
case (x, LongMap.Nil) => x;
diff --git a/src/library/scala/collection/immutable/Range.scala b/src/library/scala/collection/immutable/Range.scala
index 7537558f0b..b72d83f896 100644
--- a/src/library/scala/collection/immutable/Range.scala
+++ b/src/library/scala/collection/immutable/Range.scala
@@ -77,9 +77,9 @@ extends collection.AbstractSeq[Int]
}
final val lastElement = start + (numRangeElements - 1) * step
final val terminalElement = start + numRangeElements * step
-
+
override def last = if (isEmpty) Nil.last else lastElement
-
+
protected def copy(start: Int, end: Int, step: Int): Range = new Range(start, end, step)
/** Create a new range with the `start` and `end` values of this range and
@@ -93,14 +93,14 @@ extends collection.AbstractSeq[Int]
override def size = length
override def length = if (numRangeElements < 0) fail() else numRangeElements
-
+
private def description = "%d %s %d by %s".format(start, if (isInclusive) "to" else "until", end, step)
private def fail() = throw new IllegalArgumentException(description + ": seqs cannot contain more than Int.MaxValue elements.")
private def validateMaxLength() {
if (numRangeElements < 0)
fail()
}
-
+
def validateRangeBoundaries(f: Int => Any): Boolean = {
validateMaxLength()
@@ -121,7 +121,7 @@ extends collection.AbstractSeq[Int]
if (idx < 0 || idx >= numRangeElements) throw new IndexOutOfBoundsException(idx.toString)
else start + (step * idx)
}
-
+
@inline final override def foreach[@specialized(Unit) U](f: Int => U) {
if (validateRangeBoundaries(f)) {
var i = start
@@ -309,7 +309,7 @@ object Range {
// number of full-sized jumps.
val hasStub = isInclusive || (gap % step != 0)
val result: Long = jumps + ( if (hasStub) 1 else 0 )
-
+
if (result > scala.Int.MaxValue) -1
else result.toInt
}
@@ -405,4 +405,3 @@ object Range {
// super.foreach(f)
}
}
- \ No newline at end of file
diff --git a/src/library/scala/collection/immutable/RedBlack.scala b/src/library/scala/collection/immutable/RedBlack.scala
index 9906c9896e..83eeaa45ee 100644
--- a/src/library/scala/collection/immutable/RedBlack.scala
+++ b/src/library/scala/collection/immutable/RedBlack.scala
@@ -11,10 +11,13 @@
package scala.collection
package immutable
-/** A base class containing the implementations for `TreeMaps` and `TreeSets`.
+/** Old base class that was used by previous implementations of `TreeMaps` and `TreeSets`.
+ *
+ * Deprecated due to various performance bugs (see [[https://issues.scala-lang.org/browse/SI-5331 SI-5331]] for more information).
*
* @since 2.3
*/
+@deprecated("use `TreeMap` or `TreeSet` instead", "2.10")
@SerialVersionUID(8691885935445612921L)
abstract class RedBlack[A] extends Serializable {
@@ -287,5 +290,3 @@ abstract class RedBlack[A] extends Serializable {
def isBlack = true
}
}
-
-
diff --git a/src/library/scala/collection/immutable/RedBlackTree.scala b/src/library/scala/collection/immutable/RedBlackTree.scala
new file mode 100644
index 0000000000..0f28c4997b
--- /dev/null
+++ b/src/library/scala/collection/immutable/RedBlackTree.scala
@@ -0,0 +1,485 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2005-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+
+
+package scala.collection
+package immutable
+
+import annotation.tailrec
+import annotation.meta.getter
+
+/** An object containing the RedBlack tree implementation used by for `TreeMaps` and `TreeSets`.
+ *
+ * Implementation note: since efficiency is important for data structures this implementation
+ * uses <code>null</code> to represent empty trees. This also means pattern matching cannot
+ * easily be used. The API represented by the RedBlackTree object tries to hide these
+ * optimizations behind a reasonably clean API.
+ *
+ * @since 2.10
+ */
+private[immutable]
+object RedBlackTree {
+
+ def isEmpty(tree: Tree[_, _]): Boolean = tree eq null
+
+ def contains[A](tree: Tree[A, _], x: A)(implicit ordering: Ordering[A]): Boolean = lookup(tree, x) ne null
+ def get[A, B](tree: Tree[A, B], x: A)(implicit ordering: Ordering[A]): Option[B] = lookup(tree, x) match {
+ case null => None
+ case tree => Some(tree.value)
+ }
+
+ @tailrec
+ def lookup[A, B](tree: Tree[A, B], x: A)(implicit ordering: Ordering[A]): Tree[A, B] = if (tree eq null) null else {
+ val cmp = ordering.compare(x, tree.key)
+ if (cmp < 0) lookup(tree.left, x)
+ else if (cmp > 0) lookup(tree.right, x)
+ else tree
+ }
+
+ def count(tree: Tree[_, _]) = if (tree eq null) 0 else tree.count
+ def update[A, B, B1 >: B](tree: Tree[A, B], k: A, v: B1)(implicit ordering: Ordering[A]): Tree[A, B1] = blacken(upd(tree, k, v))
+ def delete[A, B](tree: Tree[A, B], k: A)(implicit ordering: Ordering[A]): Tree[A, B] = blacken(del(tree, k))
+ def rangeImpl[A: Ordering, B](tree: Tree[A, B], from: Option[A], until: Option[A]): Tree[A, B] = (from, until) match {
+ case (Some(from), Some(until)) => this.range(tree, from, until)
+ case (Some(from), None) => this.from(tree, from)
+ case (None, Some(until)) => this.until(tree, until)
+ case (None, None) => tree
+ }
+ def range[A: Ordering, B](tree: Tree[A, B], from: A, until: A): Tree[A, B] = blacken(doRange(tree, from, until))
+ def from[A: Ordering, B](tree: Tree[A, B], from: A): Tree[A, B] = blacken(doFrom(tree, from))
+ def to[A: Ordering, B](tree: Tree[A, B], to: A): Tree[A, B] = blacken(doTo(tree, to))
+ def until[A: Ordering, B](tree: Tree[A, B], key: A): Tree[A, B] = blacken(doUntil(tree, key))
+
+ def drop[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = blacken(doDrop(tree, n))
+ def take[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = blacken(doTake(tree, n))
+ def slice[A: Ordering, B](tree: Tree[A, B], from: Int, until: Int): Tree[A, B] = blacken(doSlice(tree, from, until))
+
+ def smallest[A, B](tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) throw new NoSuchElementException("empty map")
+ var result = tree
+ while (result.left ne null) result = result.left
+ result
+ }
+ def greatest[A, B](tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) throw new NoSuchElementException("empty map")
+ var result = tree
+ while (result.right ne null) result = result.right
+ result
+ }
+
+ def foreach[A, B, U](tree: Tree[A, B], f: ((A, B)) => U): Unit = if (tree ne null) {
+ if (tree.left ne null) foreach(tree.left, f)
+ f((tree.key, tree.value))
+ if (tree.right ne null) foreach(tree.right, f)
+ }
+ def foreachKey[A, U](tree: Tree[A, _], f: A => U): Unit = if (tree ne null) {
+ if (tree.left ne null) foreachKey(tree.left, f)
+ f(tree.key)
+ if (tree.right ne null) foreachKey(tree.right, f)
+ }
+
+ def iterator[A, B](tree: Tree[A, B]): Iterator[(A, B)] = new EntriesIterator(tree)
+ def keysIterator[A, _](tree: Tree[A, _]): Iterator[A] = new KeysIterator(tree)
+ def valuesIterator[_, B](tree: Tree[_, B]): Iterator[B] = new ValuesIterator(tree)
+
+ @tailrec
+ def nth[A, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ val count = this.count(tree.left)
+ if (n < count) nth(tree.left, n)
+ else if (n > count) nth(tree.right, n - count - 1)
+ else tree
+ }
+
+ def isBlack(tree: Tree[_, _]) = (tree eq null) || isBlackTree(tree)
+
+ private[this] def isRedTree(tree: Tree[_, _]) = tree.isInstanceOf[RedTree[_, _]]
+ private[this] def isBlackTree(tree: Tree[_, _]) = tree.isInstanceOf[BlackTree[_, _]]
+
+ private[this] def blacken[A, B](t: Tree[A, B]): Tree[A, B] = if (t eq null) null else t.black
+
+ private[this] def mkTree[A, B](isBlack: Boolean, k: A, v: B, l: Tree[A, B], r: Tree[A, B]) =
+ if (isBlack) BlackTree(k, v, l, r) else RedTree(k, v, l, r)
+
+ private[this] def balanceLeft[A, B, B1 >: B](isBlack: Boolean, z: A, zv: B, l: Tree[A, B1], d: Tree[A, B1]): Tree[A, B1] = {
+ if (isRedTree(l) && isRedTree(l.left))
+ RedTree(l.key, l.value, BlackTree(l.left.key, l.left.value, l.left.left, l.left.right), BlackTree(z, zv, l.right, d))
+ else if (isRedTree(l) && isRedTree(l.right))
+ RedTree(l.right.key, l.right.value, BlackTree(l.key, l.value, l.left, l.right.left), BlackTree(z, zv, l.right.right, d))
+ else
+ mkTree(isBlack, z, zv, l, d)
+ }
+ private[this] def balanceRight[A, B, B1 >: B](isBlack: Boolean, x: A, xv: B, a: Tree[A, B1], r: Tree[A, B1]): Tree[A, B1] = {
+ if (isRedTree(r) && isRedTree(r.left))
+ RedTree(r.left.key, r.left.value, BlackTree(x, xv, a, r.left.left), BlackTree(r.key, r.value, r.left.right, r.right))
+ else if (isRedTree(r) && isRedTree(r.right))
+ RedTree(r.key, r.value, BlackTree(x, xv, a, r.left), BlackTree(r.right.key, r.right.value, r.right.left, r.right.right))
+ else
+ mkTree(isBlack, x, xv, a, r)
+ }
+ private[this] def upd[A, B, B1 >: B](tree: Tree[A, B], k: A, v: B1)(implicit ordering: Ordering[A]): Tree[A, B1] = if (tree eq null) {
+ RedTree(k, v, null, null)
+ } else {
+ val cmp = ordering.compare(k, tree.key)
+ if (cmp < 0) balanceLeft(isBlackTree(tree), tree.key, tree.value, upd(tree.left, k, v), tree.right)
+ else if (cmp > 0) balanceRight(isBlackTree(tree), tree.key, tree.value, tree.left, upd(tree.right, k, v))
+ else mkTree(isBlackTree(tree), k, v, tree.left, tree.right)
+ }
+
+ // Based on Stefan Kahrs' Haskell version of Okasaki's Red&Black Trees
+ // http://www.cse.unsw.edu.au/~dons/data/RedBlackTree.html
+ private[this] def del[A, B](tree: Tree[A, B], k: A)(implicit ordering: Ordering[A]): Tree[A, B] = if (tree eq null) null else {
+ def balance(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tl)) {
+ if (isRedTree(tr)) {
+ RedTree(x, xv, tl.black, tr.black)
+ } else if (isRedTree(tl.left)) {
+ RedTree(tl.key, tl.value, tl.left.black, BlackTree(x, xv, tl.right, tr))
+ } else if (isRedTree(tl.right)) {
+ RedTree(tl.right.key, tl.right.value, BlackTree(tl.key, tl.value, tl.left, tl.right.left), BlackTree(x, xv, tl.right.right, tr))
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ } else if (isRedTree(tr)) {
+ if (isRedTree(tr.right)) {
+ RedTree(tr.key, tr.value, BlackTree(x, xv, tl, tr.left), tr.right.black)
+ } else if (isRedTree(tr.left)) {
+ RedTree(tr.left.key, tr.left.value, BlackTree(x, xv, tl, tr.left.left), BlackTree(tr.key, tr.value, tr.left.right, tr.right))
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ def subl(t: Tree[A, B]) =
+ if (t.isInstanceOf[BlackTree[_, _]]) t.red
+ else sys.error("Defect: invariance violation; expected black, got "+t)
+
+ def balLeft(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tl)) {
+ RedTree(x, xv, tl.black, tr)
+ } else if (isBlackTree(tr)) {
+ balance(x, xv, tl, tr.red)
+ } else if (isRedTree(tr) && isBlackTree(tr.left)) {
+ RedTree(tr.left.key, tr.left.value, BlackTree(x, xv, tl, tr.left.left), balance(tr.key, tr.value, tr.left.right, subl(tr.right)))
+ } else {
+ sys.error("Defect: invariance violation")
+ }
+ def balRight(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tr)) {
+ RedTree(x, xv, tl, tr.black)
+ } else if (isBlackTree(tl)) {
+ balance(x, xv, tl.red, tr)
+ } else if (isRedTree(tl) && isBlackTree(tl.right)) {
+ RedTree(tl.right.key, tl.right.value, balance(tl.key, tl.value, subl(tl.left), tl.right.left), BlackTree(x, xv, tl.right.right, tr))
+ } else {
+ sys.error("Defect: invariance violation")
+ }
+ def delLeft = if (isBlackTree(tree.left)) balLeft(tree.key, tree.value, del(tree.left, k), tree.right) else RedTree(tree.key, tree.value, del(tree.left, k), tree.right)
+ def delRight = if (isBlackTree(tree.right)) balRight(tree.key, tree.value, tree.left, del(tree.right, k)) else RedTree(tree.key, tree.value, tree.left, del(tree.right, k))
+ def append(tl: Tree[A, B], tr: Tree[A, B]): Tree[A, B] = if (tl eq null) {
+ tr
+ } else if (tr eq null) {
+ tl
+ } else if (isRedTree(tl) && isRedTree(tr)) {
+ val bc = append(tl.right, tr.left)
+ if (isRedTree(bc)) {
+ RedTree(bc.key, bc.value, RedTree(tl.key, tl.value, tl.left, bc.left), RedTree(tr.key, tr.value, bc.right, tr.right))
+ } else {
+ RedTree(tl.key, tl.value, tl.left, RedTree(tr.key, tr.value, bc, tr.right))
+ }
+ } else if (isBlackTree(tl) && isBlackTree(tr)) {
+ val bc = append(tl.right, tr.left)
+ if (isRedTree(bc)) {
+ RedTree(bc.key, bc.value, BlackTree(tl.key, tl.value, tl.left, bc.left), BlackTree(tr.key, tr.value, bc.right, tr.right))
+ } else {
+ balLeft(tl.key, tl.value, tl.left, BlackTree(tr.key, tr.value, bc, tr.right))
+ }
+ } else if (isRedTree(tr)) {
+ RedTree(tr.key, tr.value, append(tl, tr.left), tr.right)
+ } else if (isRedTree(tl)) {
+ RedTree(tl.key, tl.value, tl.left, append(tl.right, tr))
+ } else {
+ sys.error("unmatched tree on append: " + tl + ", " + tr)
+ }
+
+ val cmp = ordering.compare(k, tree.key)
+ if (cmp < 0) delLeft
+ else if (cmp > 0) delRight
+ else append(tree.left, tree.right)
+ }
+
+ private[this] def doFrom[A, B](tree: Tree[A, B], from: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(tree.key, from)) return doFrom(tree.right, from)
+ val newLeft = doFrom(tree.left, from)
+ if (newLeft eq tree.left) tree
+ else if (newLeft eq null) upd(tree.right, tree.key, tree.value)
+ else rebalance(tree, newLeft, tree.right)
+ }
+ private[this] def doTo[A, B](tree: Tree[A, B], to: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(to, tree.key)) return doTo(tree.left, to)
+ val newRight = doTo(tree.right, to)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doUntil[A, B](tree: Tree[A, B], until: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lteq(until, tree.key)) return doUntil(tree.left, until)
+ val newRight = doUntil(tree.right, until)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doRange[A, B](tree: Tree[A, B], from: A, until: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(tree.key, from)) return doRange(tree.right, from, until);
+ if (ordering.lteq(until, tree.key)) return doRange(tree.left, from, until);
+ val newLeft = doFrom(tree.left, from)
+ val newRight = doUntil(tree.right, until)
+ if ((newLeft eq tree.left) && (newRight eq tree.right)) tree
+ else if (newLeft eq null) upd(newRight, tree.key, tree.value);
+ else if (newRight eq null) upd(newLeft, tree.key, tree.value);
+ else rebalance(tree, newLeft, newRight)
+ }
+
+ private[this] def doDrop[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ if (n <= 0) return tree
+ if (n >= this.count(tree)) return null
+ val count = this.count(tree.left)
+ if (n > count) return doDrop(tree.right, n - count - 1)
+ val newLeft = doDrop(tree.left, n)
+ if (newLeft eq tree.left) tree
+ else if (newLeft eq null) upd(tree.right, tree.key, tree.value)
+ else rebalance(tree, newLeft, tree.right)
+ }
+ private[this] def doTake[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ if (n <= 0) return null
+ if (n >= this.count(tree)) return tree
+ val count = this.count(tree.left)
+ if (n <= count) return doTake(tree.left, n)
+ val newRight = doTake(tree.right, n - count - 1)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doSlice[A: Ordering, B](tree: Tree[A, B], from: Int, until: Int): Tree[A, B] = {
+ if (tree eq null) return null
+ val count = this.count(tree.left)
+ if (from > count) return doSlice(tree.right, from - count - 1, until - count - 1)
+ if (until <= count) return doSlice(tree.left, from, until)
+ val newLeft = doDrop(tree.left, from)
+ val newRight = doTake(tree.right, until - count - 1)
+ if ((newLeft eq tree.left) && (newRight eq tree.right)) tree
+ else if (newLeft eq null) upd(newRight, tree.key, tree.value)
+ else if (newRight eq null) upd(newLeft, tree.key, tree.value)
+ else rebalance(tree, newLeft, newRight)
+ }
+
+ // The zipper returned might have been traversed left-most (always the left child)
+ // or right-most (always the right child). Left trees are traversed right-most,
+ // and right trees are traversed leftmost.
+
+ // Returns the zipper for the side with deepest black nodes depth, a flag
+ // indicating whether the trees were unbalanced at all, and a flag indicating
+ // whether the zipper was traversed left-most or right-most.
+
+ // If the trees were balanced, returns an empty zipper
+ private[this] def compareDepth[A, B](left: Tree[A, B], right: Tree[A, B]): (List[Tree[A, B]], Boolean, Boolean, Int) = {
+ // Once a side is found to be deeper, unzip it to the bottom
+ def unzip(zipper: List[Tree[A, B]], leftMost: Boolean): List[Tree[A, B]] = {
+ val next = if (leftMost) zipper.head.left else zipper.head.right
+ next match {
+ case null => zipper
+ case node => unzip(node :: zipper, leftMost)
+ }
+ }
+
+ // Unzip left tree on the rightmost side and right tree on the leftmost side until one is
+ // found to be deeper, or the bottom is reached
+ def unzipBoth(left: Tree[A, B],
+ right: Tree[A, B],
+ leftZipper: List[Tree[A, B]],
+ rightZipper: List[Tree[A, B]],
+ smallerDepth: Int): (List[Tree[A, B]], Boolean, Boolean, Int) = {
+ if (isBlackTree(left) && isBlackTree(right)) {
+ unzipBoth(left.right, right.left, left :: leftZipper, right :: rightZipper, smallerDepth + 1)
+ } else if (isRedTree(left) && isRedTree(right)) {
+ unzipBoth(left.right, right.left, left :: leftZipper, right :: rightZipper, smallerDepth)
+ } else if (isRedTree(right)) {
+ unzipBoth(left, right.left, leftZipper, right :: rightZipper, smallerDepth)
+ } else if (isRedTree(left)) {
+ unzipBoth(left.right, right, left :: leftZipper, rightZipper, smallerDepth)
+ } else if ((left eq null) && (right eq null)) {
+ (Nil, true, false, smallerDepth)
+ } else if ((left eq null) && isBlackTree(right)) {
+ val leftMost = true
+ (unzip(right :: rightZipper, leftMost), false, leftMost, smallerDepth)
+ } else if (isBlackTree(left) && (right eq null)) {
+ val leftMost = false
+ (unzip(left :: leftZipper, leftMost), false, leftMost, smallerDepth)
+ } else {
+ sys.error("unmatched trees in unzip: " + left + ", " + right)
+ }
+ }
+ unzipBoth(left, right, Nil, Nil, 0)
+ }
+
+ private[this] def rebalance[A, B](tree: Tree[A, B], newLeft: Tree[A, B], newRight: Tree[A, B]) = {
+ // This is like drop(n-1), but only counting black nodes
+ def findDepth(zipper: List[Tree[A, B]], depth: Int): List[Tree[A, B]] = zipper match {
+ case head :: tail if isBlackTree(head) =>
+ if (depth == 1) zipper else findDepth(tail, depth - 1)
+ case _ :: tail => findDepth(tail, depth)
+ case Nil => sys.error("Defect: unexpected empty zipper while computing range")
+ }
+
+ // Blackening the smaller tree avoids balancing problems on union;
+ // this can't be done later, though, or it would change the result of compareDepth
+ val blkNewLeft = blacken(newLeft)
+ val blkNewRight = blacken(newRight)
+ val (zipper, levelled, leftMost, smallerDepth) = compareDepth(blkNewLeft, blkNewRight)
+
+ if (levelled) {
+ BlackTree(tree.key, tree.value, blkNewLeft, blkNewRight)
+ } else {
+ val zipFrom = findDepth(zipper, smallerDepth)
+ val union = if (leftMost) {
+ RedTree(tree.key, tree.value, blkNewLeft, zipFrom.head)
+ } else {
+ RedTree(tree.key, tree.value, zipFrom.head, blkNewRight)
+ }
+ val zippedTree = zipFrom.tail.foldLeft(union: Tree[A, B]) { (tree, node) =>
+ if (leftMost)
+ balanceLeft(isBlackTree(node), node.key, node.value, tree, node.right)
+ else
+ balanceRight(isBlackTree(node), node.key, node.value, node.left, tree)
+ }
+ zippedTree
+ }
+ }
+
+ /*
+ * Forcing direct fields access using the @inline annotation helps speed up
+ * various operations (especially smallest/greatest and update/delete).
+ *
+ * Unfortunately the direct field access is not guaranteed to work (but
+ * works on the current implementation of the Scala compiler).
+ *
+ * An alternative is to implement the these classes using plain old Java code...
+ */
+ sealed abstract class Tree[A, +B](
+ @(inline @getter) final val key: A,
+ @(inline @getter) final val value: B,
+ @(inline @getter) final val left: Tree[A, B],
+ @(inline @getter) final val right: Tree[A, B])
+ extends Serializable {
+ final val count: Int = 1 + RedBlackTree.count(left) + RedBlackTree.count(right)
+ def black: Tree[A, B]
+ def red: Tree[A, B]
+ }
+ final class RedTree[A, +B](key: A,
+ value: B,
+ left: Tree[A, B],
+ right: Tree[A, B]) extends Tree[A, B](key, value, left, right) {
+ override def black: Tree[A, B] = BlackTree(key, value, left, right)
+ override def red: Tree[A, B] = this
+ override def toString: String = "RedTree(" + key + ", " + value + ", " + left + ", " + right + ")"
+ }
+ final class BlackTree[A, +B](key: A,
+ value: B,
+ left: Tree[A, B],
+ right: Tree[A, B]) extends Tree[A, B](key, value, left, right) {
+ override def black: Tree[A, B] = this
+ override def red: Tree[A, B] = RedTree(key, value, left, right)
+ override def toString: String = "BlackTree(" + key + ", " + value + ", " + left + ", " + right + ")"
+ }
+
+ object RedTree {
+ @inline def apply[A, B](key: A, value: B, left: Tree[A, B], right: Tree[A, B]) = new RedTree(key, value, left, right)
+ def unapply[A, B](t: RedTree[A, B]) = Some((t.key, t.value, t.left, t.right))
+ }
+ object BlackTree {
+ @inline def apply[A, B](key: A, value: B, left: Tree[A, B], right: Tree[A, B]) = new BlackTree(key, value, left, right)
+ def unapply[A, B](t: BlackTree[A, B]) = Some((t.key, t.value, t.left, t.right))
+ }
+
+ private[this] abstract class TreeIterator[A, B, R](tree: Tree[A, B]) extends Iterator[R] {
+ protected[this] def nextResult(tree: Tree[A, B]): R
+
+ override def hasNext: Boolean = next ne null
+
+ override def next: R = next match {
+ case null =>
+ throw new NoSuchElementException("next on empty iterator")
+ case tree =>
+ next = findNext(tree.right)
+ nextResult(tree)
+ }
+
+ @tailrec
+ private[this] def findNext(tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) popPath()
+ else if (tree.left eq null) tree
+ else {
+ pushPath(tree)
+ findNext(tree.left)
+ }
+ }
+
+ private[this] def pushPath(tree: Tree[A, B]) {
+ try {
+ path(index) = tree
+ index += 1
+ } catch {
+ case _: ArrayIndexOutOfBoundsException =>
+ /*
+ * Either the tree became unbalanced or we calculated the maximum height incorrectly.
+ * To avoid crashing the iterator we expand the path array. Obviously this should never
+ * happen...
+ *
+ * An exception handler is used instead of an if-condition to optimize the normal path.
+ * This makes a large difference in iteration speed!
+ */
+ assert(index >= path.length)
+ path :+= null
+ pushPath(tree)
+ }
+ }
+ private[this] def popPath(): Tree[A, B] = if (index == 0) null else {
+ index -= 1
+ path(index)
+ }
+
+ private[this] var path = if (tree eq null) null else {
+ /*
+ * According to "Ralf Hinze. Constructing red-black trees" [http://www.cs.ox.ac.uk/ralf.hinze/publications/#P5]
+ * the maximum height of a red-black tree is 2*log_2(n + 2) - 2.
+ *
+ * According to {@see Integer#numberOfLeadingZeros} ceil(log_2(n)) = (32 - Integer.numberOfLeadingZeros(n - 1))
+ *
+ * We also don't store the deepest nodes in the path so the maximum path length is further reduced by one.
+ */
+ val maximumHeight = 2 * (32 - Integer.numberOfLeadingZeros(tree.count + 2 - 1)) - 2 - 1
+ new Array[Tree[A, B]](maximumHeight)
+ }
+ private[this] var index = 0
+ private[this] var next: Tree[A, B] = findNext(tree)
+ }
+
+ private[this] class EntriesIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, (A, B)](tree) {
+ override def nextResult(tree: Tree[A, B]) = (tree.key, tree.value)
+ }
+
+ private[this] class KeysIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, A](tree) {
+ override def nextResult(tree: Tree[A, B]) = tree.key
+ }
+
+ private[this] class ValuesIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, B](tree) {
+ override def nextResult(tree: Tree[A, B]) = tree.value
+ }
+}
diff --git a/src/library/scala/collection/immutable/TreeMap.scala b/src/library/scala/collection/immutable/TreeMap.scala
index ef0eac3701..dc4f79be35 100644
--- a/src/library/scala/collection/immutable/TreeMap.scala
+++ b/src/library/scala/collection/immutable/TreeMap.scala
@@ -12,6 +12,7 @@ package scala.collection
package immutable
import generic._
+import immutable.{RedBlackTree => RB}
import mutable.Builder
import annotation.bridge
@@ -23,7 +24,6 @@ object TreeMap extends ImmutableSortedMapFactory[TreeMap] {
def empty[A, B](implicit ord: Ordering[A]) = new TreeMap[A, B]()(ord)
/** $sortedMapCanBuildFromInfo */
implicit def canBuildFrom[A, B](implicit ord: Ordering[A]): CanBuildFrom[Coll, (A, B), TreeMap[A, B]] = new SortedMapCanBuildFrom[A, B]
- private def make[A, B](s: Int, t: RedBlack[A]#Tree[B])(implicit ord: Ordering[A]) = new TreeMap[A, B](s, t)(ord)
}
/** This class implements immutable maps using a tree.
@@ -46,31 +46,79 @@ object TreeMap extends ImmutableSortedMapFactory[TreeMap] {
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
-class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit val ordering: Ordering[A])
- extends RedBlack[A]
- with SortedMap[A, B]
+class TreeMap[A, +B] private (tree: RB.Tree[A, B])(implicit val ordering: Ordering[A])
+ extends SortedMap[A, B]
with SortedMapLike[A, B, TreeMap[A, B]]
with MapLike[A, B, TreeMap[A, B]]
with Serializable {
+ @deprecated("use `ordering.lt` instead", "2.10")
def isSmaller(x: A, y: A) = ordering.lt(x, y)
override protected[this] def newBuilder : Builder[(A, B), TreeMap[A, B]] =
TreeMap.newBuilder[A, B]
- def this()(implicit ordering: Ordering[A]) = this(0, null)(ordering)
+ override def size = RB.count(tree)
- protected val tree: RedBlack[A]#Tree[B] = if (size == 0) Empty else t
+ def this()(implicit ordering: Ordering[A]) = this(null)(ordering)
- override def rangeImpl(from : Option[A], until : Option[A]): TreeMap[A,B] = {
- val ntree = tree.range(from,until)
- new TreeMap[A,B](ntree.count, ntree)
- }
+ override def rangeImpl(from: Option[A], until: Option[A]): TreeMap[A, B] = new TreeMap[A, B](RB.rangeImpl(tree, from, until))
+ override def range(from: A, until: A): TreeMap[A, B] = new TreeMap[A, B](RB.range(tree, from, until))
+ override def from(from: A): TreeMap[A, B] = new TreeMap[A, B](RB.from(tree, from))
+ override def to(to: A): TreeMap[A, B] = new TreeMap[A, B](RB.to(tree, to))
+ override def until(until: A): TreeMap[A, B] = new TreeMap[A, B](RB.until(tree, until))
- override def firstKey = t.first
- override def lastKey = t.last
+ override def firstKey = RB.smallest(tree).key
+ override def lastKey = RB.greatest(tree).key
override def compare(k0: A, k1: A): Int = ordering.compare(k0, k1)
+ override def head = {
+ val smallest = RB.smallest(tree)
+ (smallest.key, smallest.value)
+ }
+ override def headOption = if (RB.isEmpty(tree)) None else Some(head)
+ override def last = {
+ val greatest = RB.greatest(tree)
+ (greatest.key, greatest.value)
+ }
+ override def lastOption = if (RB.isEmpty(tree)) None else Some(last)
+
+ override def tail = new TreeMap(RB.delete(tree, firstKey))
+ override def init = new TreeMap(RB.delete(tree, lastKey))
+
+ override def drop(n: Int) = {
+ if (n <= 0) this
+ else if (n >= size) empty
+ else new TreeMap(RB.drop(tree, n))
+ }
+
+ override def take(n: Int) = {
+ if (n <= 0) empty
+ else if (n >= size) this
+ else new TreeMap(RB.take(tree, n))
+ }
+
+ override def slice(from: Int, until: Int) = {
+ if (until <= from) empty
+ else if (from <= 0) take(until)
+ else if (until >= size) drop(from)
+ else new TreeMap(RB.slice(tree, from, until))
+ }
+
+ override def dropRight(n: Int) = take(size - n)
+ override def takeRight(n: Int) = drop(size - n)
+ override def splitAt(n: Int) = (take(n), drop(n))
+
+ private[this] def countWhile(p: ((A, B)) => Boolean): Int = {
+ var result = 0
+ val it = iterator
+ while (it.hasNext && p(it.next)) result += 1
+ result
+ }
+ override def dropWhile(p: ((A, B)) => Boolean) = drop(countWhile(p))
+ override def takeWhile(p: ((A, B)) => Boolean) = take(countWhile(p))
+ override def span(p: ((A, B)) => Boolean) = splitAt(countWhile(p))
+
/** A factory to create empty maps of the same type of keys.
*/
override def empty: TreeMap[A, B] = TreeMap.empty[A, B](ordering)
@@ -84,10 +132,7 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @param value the value to be associated with `key`
* @return a new $coll with the updated binding
*/
- override def updated [B1 >: B](key: A, value: B1): TreeMap[A, B1] = {
- val newsize = if (tree.lookup(key).isEmpty) size + 1 else size
- TreeMap.make(newsize, tree.update(key, value))
- }
+ override def updated [B1 >: B](key: A, value: B1): TreeMap[A, B1] = new TreeMap(RB.update(tree, key, value))
/** Add a key/value pair to this map.
* @tparam B1 type of the value of the new binding, a supertype of `B`
@@ -128,14 +173,13 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @return a new $coll with the inserted binding, if it wasn't present in the map
*/
def insert [B1 >: B](key: A, value: B1): TreeMap[A, B1] = {
- assert(tree.lookup(key).isEmpty)
- TreeMap.make(size + 1, tree.update(key, value))
+ assert(!RB.contains(tree, key))
+ new TreeMap(RB.update(tree, key, value))
}
def - (key:A): TreeMap[A, B] =
- if (tree.lookup(key).isEmpty) this
- else if (size == 1) empty
- else TreeMap.make(size - 1, tree.delete(key))
+ if (!RB.contains(tree, key)) this
+ else new TreeMap(RB.delete(tree, key))
/** Check if this map maps `key` to a value and return the
* value if it exists.
@@ -143,21 +187,22 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @param key the key of the mapping of interest
* @return the value of the mapping, if it exists
*/
- override def get(key: A): Option[B] = tree.lookup(key) match {
- case n: NonEmpty[b] => Some(n.value)
- case _ => None
- }
+ override def get(key: A): Option[B] = RB.get(tree, key)
/** Creates a new iterator over all elements contained in this
* object.
*
* @return the new iterator
*/
- def iterator: Iterator[(A, B)] = tree.toStream.iterator
+ override def iterator: Iterator[(A, B)] = RB.iterator(tree)
+
+ override def keysIterator: Iterator[A] = RB.keysIterator(tree)
+ override def valuesIterator: Iterator[B] = RB.valuesIterator(tree)
- override def toStream: Stream[(A, B)] = tree.toStream
+ override def contains(key: A): Boolean = RB.contains(tree, key)
+ override def isDefinedAt(key: A): Boolean = RB.contains(tree, key)
- override def foreach[U](f : ((A,B)) => U) = tree foreach { case (x, y) => f(x, y) }
+ override def foreach[U](f : ((A,B)) => U) = RB.foreach(tree, f)
}
diff --git a/src/library/scala/collection/immutable/TreeSet.scala b/src/library/scala/collection/immutable/TreeSet.scala
index 8b90ece143..1b3d72ceb7 100644
--- a/src/library/scala/collection/immutable/TreeSet.scala
+++ b/src/library/scala/collection/immutable/TreeSet.scala
@@ -12,6 +12,7 @@ package scala.collection
package immutable
import generic._
+import immutable.{RedBlackTree => RB}
import mutable.{ Builder, SetBuilder }
/** $factoryInfo
@@ -46,20 +47,61 @@ object TreeSet extends ImmutableSortedSetFactory[TreeSet] {
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
-@SerialVersionUID(-234066569443569402L)
-class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
- (implicit val ordering: Ordering[A])
- extends RedBlack[A] with SortedSet[A] with SortedSetLike[A, TreeSet[A]] with Serializable {
+@SerialVersionUID(-5685982407650748405L)
+class TreeSet[A] private (tree: RB.Tree[A, Unit])(implicit val ordering: Ordering[A])
+ extends SortedSet[A] with SortedSetLike[A, TreeSet[A]] with Serializable {
override def stringPrefix = "TreeSet"
- def isSmaller(x: A, y: A) = compare(x,y) < 0
+ override def size = RB.count(tree)
+
+ override def head = RB.smallest(tree).key
+ override def headOption = if (RB.isEmpty(tree)) None else Some(head)
+ override def last = RB.greatest(tree).key
+ override def lastOption = if (RB.isEmpty(tree)) None else Some(last)
+
+ override def tail = new TreeSet(RB.delete(tree, firstKey))
+ override def init = new TreeSet(RB.delete(tree, lastKey))
+
+ override def drop(n: Int) = {
+ if (n <= 0) this
+ else if (n >= size) empty
+ else newSet(RB.drop(tree, n))
+ }
- def this()(implicit ordering: Ordering[A]) = this(0, null)(ordering)
+ override def take(n: Int) = {
+ if (n <= 0) empty
+ else if (n >= size) this
+ else newSet(RB.take(tree, n))
+ }
- protected val tree: RedBlack[A]#Tree[Unit] = if (size == 0) Empty else t
+ override def slice(from: Int, until: Int) = {
+ if (until <= from) empty
+ else if (from <= 0) take(until)
+ else if (until >= size) drop(from)
+ else newSet(RB.slice(tree, from, until))
+ }
- private def newSet(s: Int, t: RedBlack[A]#Tree[Unit]) = new TreeSet[A](s, t)
+ override def dropRight(n: Int) = take(size - n)
+ override def takeRight(n: Int) = drop(size - n)
+ override def splitAt(n: Int) = (take(n), drop(n))
+
+ private[this] def countWhile(p: A => Boolean): Int = {
+ var result = 0
+ val it = iterator
+ while (it.hasNext && p(it.next)) result += 1
+ result
+ }
+ override def dropWhile(p: A => Boolean) = drop(countWhile(p))
+ override def takeWhile(p: A => Boolean) = take(countWhile(p))
+ override def span(p: A => Boolean) = splitAt(countWhile(p))
+
+ @deprecated("use `ordering.lt` instead", "2.10")
+ def isSmaller(x: A, y: A) = compare(x,y) < 0
+
+ def this()(implicit ordering: Ordering[A]) = this(null)(ordering)
+
+ private def newSet(t: RB.Tree[A, Unit]) = new TreeSet[A](t)
/** A factory to create empty sets of the same type of keys.
*/
@@ -70,10 +112,7 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @param elem a new element to add.
* @return a new $coll containing `elem` and all the elements of this $coll.
*/
- def + (elem: A): TreeSet[A] = {
- val newsize = if (tree.lookup(elem).isEmpty) size + 1 else size
- newSet(newsize, tree.update(elem, ()))
- }
+ def + (elem: A): TreeSet[A] = newSet(RB.update(tree, elem, ()))
/** A new `TreeSet` with the entry added is returned,
* assuming that elem is <em>not</em> in the TreeSet.
@@ -82,8 +121,8 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @return a new $coll containing `elem` and all the elements of this $coll.
*/
def insert(elem: A): TreeSet[A] = {
- assert(tree.lookup(elem).isEmpty)
- newSet(size + 1, tree.update(elem, ()))
+ assert(!RB.contains(tree, elem))
+ newSet(RB.update(tree, elem, ()))
}
/** Creates a new `TreeSet` with the entry removed.
@@ -92,31 +131,31 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @return a new $coll containing all the elements of this $coll except `elem`.
*/
def - (elem:A): TreeSet[A] =
- if (tree.lookup(elem).isEmpty) this
- else newSet(size - 1, tree delete elem)
+ if (!RB.contains(tree, elem)) this
+ else newSet(RB.delete(tree, elem))
/** Checks if this set contains element `elem`.
*
* @param elem the element to check for membership.
* @return true, iff `elem` is contained in this set.
*/
- def contains(elem: A): Boolean = !tree.lookup(elem).isEmpty
+ def contains(elem: A): Boolean = RB.contains(tree, elem)
/** Creates a new iterator over all elements contained in this
* object.
*
* @return the new iterator
*/
- def iterator: Iterator[A] = tree.toStream.iterator map (_._1)
+ def iterator: Iterator[A] = RB.keysIterator(tree)
- override def toStream: Stream[A] = tree.toStream map (_._1)
+ override def foreach[U](f: A => U) = RB.foreachKey(tree, f)
- override def foreach[U](f: A => U) = tree foreach { (x, y) => f(x) }
+ override def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] = newSet(RB.rangeImpl(tree, from, until))
+ override def range(from: A, until: A): TreeSet[A] = newSet(RB.range(tree, from, until))
+ override def from(from: A): TreeSet[A] = newSet(RB.from(tree, from))
+ override def to(to: A): TreeSet[A] = newSet(RB.to(tree, to))
+ override def until(until: A): TreeSet[A] = newSet(RB.until(tree, until))
- override def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] = {
- val tree = this.tree.range(from, until)
- newSet(tree.count, tree)
- }
- override def firstKey = tree.first
- override def lastKey = tree.last
+ override def firstKey = head
+ override def lastKey = last
}
diff --git a/src/library/scala/collection/mutable/AVLTree.scala b/src/library/scala/collection/mutable/AVLTree.scala
index ba2af8f120..9aea25f330 100644
--- a/src/library/scala/collection/mutable/AVLTree.scala
+++ b/src/library/scala/collection/mutable/AVLTree.scala
@@ -12,9 +12,9 @@ package mutable
/**
* An immutable AVL Tree implementation used by mutable.TreeSet
- *
+ *
* @author Lucien Pereira
- *
+ *
*/
private[mutable] sealed trait AVLTree[+A] extends Serializable {
def balance: Int
@@ -28,28 +28,28 @@ private[mutable] sealed trait AVLTree[+A] extends Serializable {
/**
* Returns a new tree containing the given element.
* Thows an IllegalArgumentException if element is already present.
- *
+ *
*/
def insert[B >: A](value: B, ordering: Ordering[B]): AVLTree[B] = Node(value, Leaf, Leaf)
/**
* Return a new tree which not contains given element.
- *
+ *
*/
def remove[B >: A](value: B, ordering: Ordering[B]): AVLTree[A] =
throw new NoSuchElementException(String.valueOf(value))
-
+
/**
* Return a tuple containing the smallest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
def removeMin[B >: A]: (B, AVLTree[B]) = sys.error("Should not happen.")
-
+
/**
* Return a tuple containing the biggest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
def removeMax[B >: A]: (B, AVLTree[B]) = sys.error("Should not happen.")
@@ -90,7 +90,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Returns a new tree containing the given element.
* Thows an IllegalArgumentException if element is already present.
- *
+ *
*/
override def insert[B >: A](value: B, ordering: Ordering[B]) = {
val ord = ordering.compare(value, data)
@@ -104,7 +104,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Return a new tree which not contains given element.
- *
+ *
*/
override def remove[B >: A](value: B, ordering: Ordering[B]): AVLTree[A] = {
val ord = ordering.compare(value, data)
@@ -130,7 +130,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Return a tuple containing the smallest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
override def removeMin[B >: A]: (B, AVLTree[B]) = {
if (Leaf == left)
@@ -144,7 +144,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Return a tuple containing the biggest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
override def removeMax[B >: A]: (B, AVLTree[B]) = {
if (Leaf == right)
@@ -154,7 +154,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
(max, Node(data, left, newRight).rebalance)
}
}
-
+
override def rebalance[B >: A] = {
if (-2 == balance) {
if (1 == left.balance)
diff --git a/src/library/scala/collection/mutable/BasicNode.java b/src/library/scala/collection/mutable/BasicNode.java
new file mode 100644
index 0000000000..c05009470a
--- /dev/null
+++ b/src/library/scala/collection/mutable/BasicNode.java
@@ -0,0 +1,20 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.mutable;
+
+
+
+
+
+
+public abstract class BasicNode {
+
+ public abstract String string(int lev);
+
+} \ No newline at end of file
diff --git a/src/library/scala/collection/mutable/CNodeBase.java b/src/library/scala/collection/mutable/CNodeBase.java
new file mode 100644
index 0000000000..4374943b8d
--- /dev/null
+++ b/src/library/scala/collection/mutable/CNodeBase.java
@@ -0,0 +1,35 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.mutable;
+
+
+
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+
+
+
+abstract class CNodeBase<K, V> extends MainNode<K, V> {
+
+ public static final AtomicIntegerFieldUpdater<CNodeBase> updater = AtomicIntegerFieldUpdater.newUpdater(CNodeBase.class, "csize");
+
+ public volatile int csize = -1;
+
+ public boolean CAS_SIZE(int oldval, int nval) {
+ return updater.compareAndSet(this, oldval, nval);
+ }
+
+ public void WRITE_SIZE(int nval) {
+ updater.set(this, nval);
+ }
+
+ public int READ_SIZE() {
+ return updater.get(this);
+ }
+
+} \ No newline at end of file
diff --git a/src/library/scala/collection/mutable/Ctrie.scala b/src/library/scala/collection/mutable/Ctrie.scala
new file mode 100644
index 0000000000..cbec118aa9
--- /dev/null
+++ b/src/library/scala/collection/mutable/Ctrie.scala
@@ -0,0 +1,1075 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection
+package mutable
+
+
+
+import java.util.concurrent.atomic._
+import collection.immutable.{ ListMap => ImmutableListMap }
+import collection.parallel.mutable.ParCtrie
+import generic._
+import annotation.tailrec
+import annotation.switch
+
+
+
+private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends INodeBase[K, V](g) {
+ import INodeBase._
+
+ WRITE(bn)
+
+ def this(g: Gen) = this(null, g)
+
+ @inline final def WRITE(nval: MainNode[K, V]) = INodeBase.updater.set(this, nval)
+
+ @inline final def CAS(old: MainNode[K, V], n: MainNode[K, V]) = INodeBase.updater.compareAndSet(this, old, n)
+
+ final def gcasRead(ct: Ctrie[K, V]): MainNode[K, V] = GCAS_READ(ct)
+
+ @inline final def GCAS_READ(ct: Ctrie[K, V]): MainNode[K, V] = {
+ val m = /*READ*/mainnode
+ val prevval = /*READ*/m.prev
+ if (prevval eq null) m
+ else GCAS_Complete(m, ct)
+ }
+
+ @tailrec private def GCAS_Complete(m: MainNode[K, V], ct: Ctrie[K, V]): MainNode[K, V] = if (m eq null) null else {
+ // complete the GCAS
+ val prev = /*READ*/m.prev
+ val ctr = ct.readRoot(true)
+
+ prev match {
+ case null =>
+ m
+ case fn: FailedNode[_, _] => // try to commit to previous value
+ if (CAS(m, fn.prev)) fn.prev
+ else GCAS_Complete(/*READ*/mainnode, ct)
+ case vn: MainNode[_, _] =>
+ // Assume that you've read the root from the generation G.
+ // Assume that the snapshot algorithm is correct.
+ // ==> you can only reach nodes in generations <= G.
+ // ==> `gen` is <= G.
+ // We know that `ctr.gen` is >= G.
+ // ==> if `ctr.gen` = `gen` then they are both equal to G.
+ // ==> otherwise, we know that either `ctr.gen` > G, `gen` < G,
+ // or both
+ if ((ctr.gen eq gen) && ct.nonReadOnly) {
+ // try to commit
+ if (m.CAS_PREV(prev, null)) m
+ else GCAS_Complete(m, ct)
+ } else {
+ // try to abort
+ m.CAS_PREV(prev, new FailedNode(prev))
+ GCAS_Complete(/*READ*/mainnode, ct)
+ }
+ }
+ }
+
+ @inline final def GCAS(old: MainNode[K, V], n: MainNode[K, V], ct: Ctrie[K, V]): Boolean = {
+ n.WRITE_PREV(old)
+ if (CAS(old, n)) {
+ GCAS_Complete(n, ct)
+ /*READ*/n.prev eq null
+ } else false
+ }
+
+ @inline private def inode(cn: MainNode[K, V]) = {
+ val nin = new INode[K, V](gen)
+ nin.WRITE(cn)
+ nin
+ }
+
+ final def copyToGen(ngen: Gen, ct: Ctrie[K, V]) = {
+ val nin = new INode[K, V](ngen)
+ val main = GCAS_READ(ct)
+ nin.WRITE(main)
+ nin
+ }
+
+ /** Inserts a key value pair, overwriting the old pair if the keys match.
+ *
+ * @return true if successful, false otherwise
+ */
+ @tailrec final def rec_insert(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Boolean = {
+ val m = GCAS_READ(ct) // use -Yinline!
+
+ m match {
+ case cn: CNode[K, V] => // 1) a multiway node
+ val idx = (hc >>> lev) & 0x1f
+ val flag = 1 << idx
+ val bmp = cn.bitmap
+ val mask = flag - 1
+ val pos = Integer.bitCount(bmp & mask)
+ if ((bmp & flag) != 0) {
+ // 1a) insert below
+ cn.array(pos) match {
+ case in: INode[K, V] =>
+ if (startgen eq in.gen) in.rec_insert(k, v, hc, lev + 5, this, startgen, ct)
+ else {
+ if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_insert(k, v, hc, lev, parent, startgen, ct)
+ else false
+ }
+ case sn: SNode[K, V] =>
+ if (sn.hc == hc && sn.k == k) GCAS(cn, cn.updatedAt(pos, new SNode(k, v, hc), gen), ct)
+ else {
+ val rn = if (cn.gen eq gen) cn else cn.renewed(gen, ct)
+ val nn = rn.updatedAt(pos, inode(CNode.dual(sn, sn.hc, new SNode(k, v, hc), hc, lev + 5, gen)), gen)
+ GCAS(cn, nn, ct)
+ }
+ }
+ } else {
+ val rn = if (cn.gen eq gen) cn else cn.renewed(gen, ct)
+ val ncnode = rn.insertedAt(pos, flag, new SNode(k, v, hc), gen)
+ GCAS(cn, ncnode, ct)
+ }
+ case tn: TNode[K, V] =>
+ clean(parent, ct, lev - 5)
+ false
+ case ln: LNode[K, V] => // 3) an l-node
+ val nn = ln.inserted(k, v)
+ GCAS(ln, nn, ct)
+ }
+ }
+
+ /** Inserts a new key value pair, given that a specific condition is met.
+ *
+ * @param cond null - don't care if the key was there; KEY_ABSENT - key wasn't there; KEY_PRESENT - key was there; other value `v` - key must be bound to `v`
+ * @return null if unsuccessful, Option[V] otherwise (indicating previous value bound to the key)
+ */
+ @tailrec final def rec_insertif(k: K, v: V, hc: Int, cond: AnyRef, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
+ val m = GCAS_READ(ct) // use -Yinline!
+
+ m match {
+ case cn: CNode[K, V] => // 1) a multiway node
+ val idx = (hc >>> lev) & 0x1f
+ val flag = 1 << idx
+ val bmp = cn.bitmap
+ val mask = flag - 1
+ val pos = Integer.bitCount(bmp & mask)
+ if ((bmp & flag) != 0) {
+ // 1a) insert below
+ cn.array(pos) match {
+ case in: INode[K, V] =>
+ if (startgen eq in.gen) in.rec_insertif(k, v, hc, cond, lev + 5, this, startgen, ct)
+ else {
+ if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_insertif(k, v, hc, cond, lev, parent, startgen, ct)
+ else null
+ }
+ case sn: SNode[K, V] => cond match {
+ case null =>
+ if (sn.hc == hc && sn.k == k) {
+ if (GCAS(cn, cn.updatedAt(pos, new SNode(k, v, hc), gen), ct)) Some(sn.v) else null
+ } else {
+ val rn = if (cn.gen eq gen) cn else cn.renewed(gen, ct)
+ val nn = rn.updatedAt(pos, inode(CNode.dual(sn, sn.hc, new SNode(k, v, hc), hc, lev + 5, gen)), gen)
+ if (GCAS(cn, nn, ct)) None
+ else null
+ }
+ case INode.KEY_ABSENT =>
+ if (sn.hc == hc && sn.k == k) Some(sn.v)
+ else {
+ val rn = if (cn.gen eq gen) cn else cn.renewed(gen, ct)
+ val nn = rn.updatedAt(pos, inode(CNode.dual(sn, sn.hc, new SNode(k, v, hc), hc, lev + 5, gen)), gen)
+ if (GCAS(cn, nn, ct)) None
+ else null
+ }
+ case INode.KEY_PRESENT =>
+ if (sn.hc == hc && sn.k == k) {
+ if (GCAS(cn, cn.updatedAt(pos, new SNode(k, v, hc), gen), ct)) Some(sn.v) else null
+ } else None
+ case otherv: V =>
+ if (sn.hc == hc && sn.k == k && sn.v == otherv) {
+ if (GCAS(cn, cn.updatedAt(pos, new SNode(k, v, hc), gen), ct)) Some(sn.v) else null
+ } else None
+ }
+ }
+ } else cond match {
+ case null | INode.KEY_ABSENT =>
+ val rn = if (cn.gen eq gen) cn else cn.renewed(gen, ct)
+ val ncnode = rn.insertedAt(pos, flag, new SNode(k, v, hc), gen)
+ if (GCAS(cn, ncnode, ct)) None else null
+ case INode.KEY_PRESENT => None
+ case otherv: V => None
+ }
+ case sn: TNode[K, V] =>
+ clean(parent, ct, lev - 5)
+ null
+ case ln: LNode[K, V] => // 3) an l-node
+ @inline def insertln() = {
+ val nn = ln.inserted(k, v)
+ GCAS(ln, nn, ct)
+ }
+ cond match {
+ case null =>
+ val optv = ln.get(k)
+ if (insertln()) optv else null
+ case INode.KEY_ABSENT =>
+ ln.get(k) match {
+ case None => if (insertln()) None else null
+ case optv => optv
+ }
+ case INode.KEY_PRESENT =>
+ ln.get(k) match {
+ case Some(v0) => if (insertln()) Some(v0) else null
+ case None => None
+ }
+ case otherv: V =>
+ ln.get(k) match {
+ case Some(v0) if v0 == otherv => if (insertln()) Some(otherv) else null
+ case _ => None
+ }
+ }
+ }
+ }
+
+ /** Looks up the value associated with the key.
+ *
+ * @return null if no value has been found, RESTART if the operation wasn't successful, or any other value otherwise
+ */
+ @tailrec final def rec_lookup(k: K, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): AnyRef = {
+ val m = GCAS_READ(ct) // use -Yinline!
+
+ m match {
+ case cn: CNode[K, V] => // 1) a multinode
+ val idx = (hc >>> lev) & 0x1f
+ val flag = 1 << idx
+ val bmp = cn.bitmap
+ if ((bmp & flag) == 0) null // 1a) bitmap shows no binding
+ else { // 1b) bitmap contains a value - descend
+ val pos = if (bmp == 0xffffffff) idx else Integer.bitCount(bmp & (flag - 1))
+ val sub = cn.array(pos)
+ sub match {
+ case in: INode[K, V] =>
+ if (ct.isReadOnly || (startgen eq in.gen)) in.rec_lookup(k, hc, lev + 5, this, startgen, ct)
+ else {
+ if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_lookup(k, hc, lev, parent, startgen, ct)
+ else return RESTART // used to be throw RestartException
+ }
+ case sn: SNode[K, V] => // 2) singleton node
+ if (sn.hc == hc && sn.k == k) sn.v.asInstanceOf[AnyRef]
+ else null
+ }
+ }
+ case tn: TNode[K, V] => // 3) non-live node
+ def cleanReadOnly(tn: TNode[K, V]) = if (ct.nonReadOnly) {
+ clean(parent, ct, lev - 5)
+ RESTART // used to be throw RestartException
+ } else {
+ if (tn.hc == hc && tn.k == k) tn.v.asInstanceOf[AnyRef]
+ else null
+ }
+ cleanReadOnly(tn)
+ case ln: LNode[K, V] => // 5) an l-node
+ ln.get(k).asInstanceOf[Option[AnyRef]].orNull
+ }
+ }
+
+ /** Removes the key associated with the given value.
+ *
+ * @param v if null, will remove the key irregardless of the value; otherwise removes only if binding contains that exact key and value
+ * @return null if not successful, an Option[V] indicating the previous value otherwise
+ */
+ final def rec_remove(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
+ val m = GCAS_READ(ct) // use -Yinline!
+
+ m match {
+ case cn: CNode[K, V] =>
+ val idx = (hc >>> lev) & 0x1f
+ val bmp = cn.bitmap
+ val flag = 1 << idx
+ if ((bmp & flag) == 0) None
+ else {
+ val pos = Integer.bitCount(bmp & (flag - 1))
+ val sub = cn.array(pos)
+ val res = sub match {
+ case in: INode[K, V] =>
+ if (startgen eq in.gen) in.rec_remove(k, v, hc, lev + 5, this, startgen, ct)
+ else {
+ if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_remove(k, v, hc, lev, parent, startgen, ct)
+ else null
+ }
+ case sn: SNode[K, V] =>
+ if (sn.hc == hc && sn.k == k && (v == null || sn.v == v)) {
+ val ncn = cn.removedAt(pos, flag, gen).toContracted(lev)
+ if (GCAS(cn, ncn, ct)) Some(sn.v) else null
+ } else None
+ }
+
+ if (res == None || (res eq null)) res
+ else {
+ @tailrec def cleanParent(nonlive: AnyRef) {
+ val pm = parent.GCAS_READ(ct)
+ pm match {
+ case cn: CNode[K, V] =>
+ val idx = (hc >>> (lev - 5)) & 0x1f
+ val bmp = cn.bitmap
+ val flag = 1 << idx
+ if ((bmp & flag) == 0) {} // somebody already removed this i-node, we're done
+ else {
+ val pos = Integer.bitCount(bmp & (flag - 1))
+ val sub = cn.array(pos)
+ if (sub eq this) nonlive match {
+ case tn: TNode[K, V] =>
+ val ncn = cn.updatedAt(pos, tn.copyUntombed, gen).toContracted(lev - 5)
+ if (!parent.GCAS(cn, ncn, ct))
+ if (ct.readRoot().gen == startgen) cleanParent(nonlive)
+ }
+ }
+ case _ => // parent is no longer a cnode, we're done
+ }
+ }
+
+ if (parent ne null) { // never tomb at root
+ val n = GCAS_READ(ct)
+ if (n.isInstanceOf[TNode[_, _]])
+ cleanParent(n)
+ }
+
+ res
+ }
+ }
+ case tn: TNode[K, V] =>
+ clean(parent, ct, lev - 5)
+ null
+ case ln: LNode[K, V] =>
+ if (v == null) {
+ val optv = ln.get(k)
+ val nn = ln.removed(k)
+ if (GCAS(ln, nn, ct)) optv else null
+ } else ln.get(k) match {
+ case optv @ Some(v0) if v0 == v =>
+ val nn = ln.removed(k)
+ if (GCAS(ln, nn, ct)) optv else null
+ case _ => None
+ }
+ }
+ }
+
+ private def clean(nd: INode[K, V], ct: Ctrie[K, V], lev: Int) {
+ val m = nd.GCAS_READ(ct)
+ m match {
+ case cn: CNode[K, V] => nd.GCAS(cn, cn.toCompressed(ct, lev, gen), ct)
+ case _ =>
+ }
+ }
+
+ final def isNullInode(ct: Ctrie[K, V]) = GCAS_READ(ct) eq null
+
+ final def cachedSize(ct: Ctrie[K, V]): Int = {
+ val m = GCAS_READ(ct)
+ m.cachedSize(ct)
+ }
+
+ /* this is a quiescent method! */
+ def string(lev: Int) = "%sINode -> %s".format(" " * lev, mainnode match {
+ case null => "<null>"
+ case tn: TNode[_, _] => "TNode(%s, %s, %d, !)".format(tn.k, tn.v, tn.hc)
+ case cn: CNode[_, _] => cn.string(lev)
+ case ln: LNode[_, _] => ln.string(lev)
+ case x => "<elem: %s>".format(x)
+ })
+
+}
+
+
+private[mutable] object INode {
+ val KEY_PRESENT = new AnyRef
+ val KEY_ABSENT = new AnyRef
+
+ def newRootNode[K, V] = {
+ val gen = new Gen
+ val cn = new CNode[K, V](0, new Array(0), gen)
+ new INode[K, V](cn, gen)
+ }
+}
+
+
+private[mutable] final class FailedNode[K, V](p: MainNode[K, V]) extends MainNode[K, V] {
+ WRITE_PREV(p)
+
+ def string(lev: Int) = throw new UnsupportedOperationException
+
+ def cachedSize(ct: AnyRef): Int = throw new UnsupportedOperationException
+
+ override def toString = "FailedNode(%s)".format(p)
+}
+
+
+private[mutable] trait KVNode[K, V] {
+ def kvPair: (K, V)
+}
+
+
+private[collection] final class SNode[K, V](final val k: K, final val v: V, final val hc: Int)
+extends BasicNode with KVNode[K, V] {
+ final def copy = new SNode(k, v, hc)
+ final def copyTombed = new TNode(k, v, hc)
+ final def copyUntombed = new SNode(k, v, hc)
+ final def kvPair = (k, v)
+ final def string(lev: Int) = (" " * lev) + "SNode(%s, %s, %x)".format(k, v, hc)
+}
+
+
+private[collection] final class TNode[K, V](final val k: K, final val v: V, final val hc: Int)
+extends MainNode[K, V] with KVNode[K, V] {
+ final def copy = new TNode(k, v, hc)
+ final def copyTombed = new TNode(k, v, hc)
+ final def copyUntombed = new SNode(k, v, hc)
+ final def kvPair = (k, v)
+ final def cachedSize(ct: AnyRef): Int = 1
+ final def string(lev: Int) = (" " * lev) + "TNode(%s, %s, %x, !)".format(k, v, hc)
+}
+
+
+private[collection] final class LNode[K, V](final val listmap: ImmutableListMap[K, V])
+extends MainNode[K, V] {
+ def this(k: K, v: V) = this(ImmutableListMap(k -> v))
+ def this(k1: K, v1: V, k2: K, v2: V) = this(ImmutableListMap(k1 -> v1, k2 -> v2))
+ def inserted(k: K, v: V) = new LNode(listmap + ((k, v)))
+ def removed(k: K): MainNode[K, V] = {
+ val updmap = listmap - k
+ if (updmap.size > 1) new LNode(updmap)
+ else {
+ val (k, v) = updmap.iterator.next
+ new TNode(k, v, Ctrie.computeHash(k)) // create it tombed so that it gets compressed on subsequent accesses
+ }
+ }
+ def get(k: K) = listmap.get(k)
+ def cachedSize(ct: AnyRef): Int = listmap.size
+ def string(lev: Int) = (" " * lev) + "LNode(%s)".format(listmap.mkString(", "))
+}
+
+
+private[collection] final class CNode[K, V](final val bitmap: Int, final val array: Array[BasicNode], final val gen: Gen)
+extends CNodeBase[K, V] {
+
+ // this should only be called from within read-only snapshots
+ final def cachedSize(ct: AnyRef) = {
+ val currsz = READ_SIZE()
+ if (currsz != -1) currsz
+ else {
+ val sz = computeSize(ct.asInstanceOf[Ctrie[K, V]])
+ while (READ_SIZE() == -1) CAS_SIZE(-1, sz)
+ READ_SIZE()
+ }
+ }
+
+  // lends itself to parallelization by choosing
+  // a random starting offset in the array
+  // => if there are concurrent size computations, they start
+  //    at different positions, so they are more likely
+  //    to be independent
+ private def computeSize(ct: Ctrie[K, V]): Int = {
+ var i = 0
+ var sz = 0
+ val offset = math.abs(util.Random.nextInt()) % array.length
+ while (i < array.length) {
+ val pos = (i + offset) % array.length
+ array(pos) match {
+ case sn: SNode[_, _] => sz += 1
+ case in: INode[K, V] => sz += in.cachedSize(ct)
+ }
+ i += 1
+ }
+ sz
+ }
+
+ final def updatedAt(pos: Int, nn: BasicNode, gen: Gen) = {
+ val len = array.length
+ val narr = new Array[BasicNode](len)
+ Array.copy(array, 0, narr, 0, len)
+ narr(pos) = nn
+ new CNode[K, V](bitmap, narr, gen)
+ }
+
+ final def removedAt(pos: Int, flag: Int, gen: Gen) = {
+ val arr = array
+ val len = arr.length
+ val narr = new Array[BasicNode](len - 1)
+ Array.copy(arr, 0, narr, 0, pos)
+ Array.copy(arr, pos + 1, narr, pos, len - pos - 1)
+ new CNode[K, V](bitmap ^ flag, narr, gen)
+ }
+
+ final def insertedAt(pos: Int, flag: Int, nn: BasicNode, gen: Gen) = {
+ val len = array.length
+ val bmp = bitmap
+ val narr = new Array[BasicNode](len + 1)
+ Array.copy(array, 0, narr, 0, pos)
+ narr(pos) = nn
+ Array.copy(array, pos, narr, pos + 1, len - pos)
+ new CNode[K, V](bmp | flag, narr, gen)
+ }
+
+ /** Returns a copy of this cnode such that all the i-nodes below it are copied
+ * to the specified generation `ngen`.
+ */
+ final def renewed(ngen: Gen, ct: Ctrie[K, V]) = {
+ var i = 0
+ val arr = array
+ val len = arr.length
+ val narr = new Array[BasicNode](len)
+ while (i < len) {
+ arr(i) match {
+ case in: INode[K, V] => narr(i) = in.copyToGen(ngen, ct)
+ case bn: BasicNode => narr(i) = bn
+ }
+ i += 1
+ }
+ new CNode[K, V](bitmap, narr, ngen)
+ }
+
+ private def resurrect(inode: INode[K, V], inodemain: AnyRef): BasicNode = inodemain match {
+ case tn: TNode[_, _] => tn.copyUntombed
+ case _ => inode
+ }
+
+ final def toContracted(lev: Int): MainNode[K, V] = if (array.length == 1 && lev > 0) array(0) match {
+ case sn: SNode[K, V] => sn.copyTombed
+ case _ => this
+ } else this
+
+ // - if the branching factor is 1 for this CNode, and the child
+ // is a tombed SNode, returns its tombed version
+ // - otherwise, if there is at least one non-null node below,
+ // returns the version of this node with at least some null-inodes
+ // removed (those existing when the op began)
+ // - if there are only null-i-nodes below, returns null
+ final def toCompressed(ct: Ctrie[K, V], lev: Int, gen: Gen) = {
+ var bmp = bitmap
+ var i = 0
+ val arr = array
+ val tmparray = new Array[BasicNode](arr.length)
+ while (i < arr.length) { // construct new bitmap
+ val sub = arr(i)
+ sub match {
+ case in: INode[K, V] =>
+ val inodemain = in.gcasRead(ct)
+ assert(inodemain ne null)
+ tmparray(i) = resurrect(in, inodemain)
+ case sn: SNode[K, V] =>
+ tmparray(i) = sn
+ }
+ i += 1
+ }
+
+ new CNode[K, V](bmp, tmparray, gen).toContracted(lev)
+ }
+
+ private[mutable] def string(lev: Int): String = "CNode %x\n%s".format(bitmap, array.map(_.string(lev + 1)).mkString("\n"))
+
+  /* quiescently consistent - don't call concurrently with anything involving a GCAS!! */
+ protected def collectElems: Seq[(K, V)] = array flatMap {
+ case sn: SNode[K, V] => Some(sn.kvPair)
+ case in: INode[K, V] => in.mainnode match {
+ case tn: TNode[K, V] => Some(tn.kvPair)
+ case ln: LNode[K, V] => ln.listmap.toList
+ case cn: CNode[K, V] => cn.collectElems
+ }
+ }
+
+ protected def collectLocalElems: Seq[String] = array flatMap {
+ case sn: SNode[K, V] => Some(sn.kvPair._2.toString)
+ case in: INode[K, V] => Some(in.toString.drop(14) + "(" + in.gen + ")")
+ }
+
+ override def toString = {
+ val elems = collectLocalElems
+ "CNode(sz: %d; %s)".format(elems.size, elems.sorted.mkString(", "))
+ }
+}
+
+
+private[mutable] object CNode {
+
+ def dual[K, V](x: SNode[K, V], xhc: Int, y: SNode[K, V], yhc: Int, lev: Int, gen: Gen): MainNode[K, V] = if (lev < 35) {
+ val xidx = (xhc >>> lev) & 0x1f
+ val yidx = (yhc >>> lev) & 0x1f
+ val bmp = (1 << xidx) | (1 << yidx)
+ if (xidx == yidx) {
+ val subinode = new INode[K, V](gen)//(Ctrie.inodeupdater)
+ subinode.mainnode = dual(x, xhc, y, yhc, lev + 5, gen)
+ new CNode(bmp, Array(subinode), gen)
+ } else {
+ if (xidx < yidx) new CNode(bmp, Array(x, y), gen)
+ else new CNode(bmp, Array(y, x), gen)
+ }
+ } else {
+ new LNode(x.k, x.v, y.k, y.v)
+ }
+
+}
+
+
+private[mutable] case class RDCSS_Descriptor[K, V](old: INode[K, V], expectedmain: MainNode[K, V], nv: INode[K, V]) {
+ @volatile var committed = false
+}
+
+
+/** A concurrent hash-trie or Ctrie is a concurrent thread-safe lock-free
+ * implementation of a hash array mapped trie. It is used to implement the
+ * concurrent map abstraction. It has particularly scalable concurrent insert
+ * and remove operations and is memory-efficient. It supports O(1), atomic,
+ * lock-free snapshots which are used to implement linearizable lock-free size,
+ * iterator and clear operations. The cost of evaluating the (lazy) snapshot is
+ * distributed across subsequent updates, thus making snapshot evaluation horizontally scalable.
+ *
+ * For details, see: http://lampwww.epfl.ch/~prokopec/ctries-snapshot.pdf
+ *
+ * @author Aleksandar Prokopec
+ * @since 2.10
+ */
+@SerialVersionUID(0L - 6402774413839597105L)
+final class Ctrie[K, V] private (r: AnyRef, rtupd: AtomicReferenceFieldUpdater[Ctrie[K, V], AnyRef])
+extends ConcurrentMap[K, V]
+ with MapLike[K, V, Ctrie[K, V]]
+ with CustomParallelizable[(K, V), ParCtrie[K, V]]
+ with Serializable
+{
+ import Ctrie.computeHash
+
+ private var rootupdater = rtupd
+ @volatile var root = r
+
+ def this() = this(
+ INode.newRootNode,
+ AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
+ )
+
+ /* internal methods */
+
+ private def writeObject(out: java.io.ObjectOutputStream) {
+ val it = iterator
+ while (it.hasNext) {
+ val (k, v) = it.next()
+ out.writeObject(k)
+ out.writeObject(v)
+ }
+ out.writeObject(CtrieSerializationEnd)
+ }
+
+ private def readObject(in: java.io.ObjectInputStream) {
+ root = INode.newRootNode
+ rootupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
+
+ var obj: AnyRef = null
+ do {
+ obj = in.readObject()
+ if (obj != CtrieSerializationEnd) {
+ val k = obj.asInstanceOf[K]
+ val v = in.readObject().asInstanceOf[V]
+ update(k, v)
+ }
+ } while (obj != CtrieSerializationEnd)
+ }
+
+ @inline final def CAS_ROOT(ov: AnyRef, nv: AnyRef) = rootupdater.compareAndSet(this, ov, nv)
+
+ final def readRoot(abort: Boolean = false): INode[K, V] = RDCSS_READ_ROOT(abort)
+
+ @inline final def RDCSS_READ_ROOT(abort: Boolean = false): INode[K, V] = {
+ val r = /*READ*/root
+ r match {
+ case in: INode[K, V] => in
+ case desc: RDCSS_Descriptor[K, V] => RDCSS_Complete(abort)
+ }
+ }
+
+ @tailrec private def RDCSS_Complete(abort: Boolean): INode[K, V] = {
+ val v = /*READ*/root
+ v match {
+ case in: INode[K, V] => in
+ case desc: RDCSS_Descriptor[K, V] =>
+ val RDCSS_Descriptor(ov, exp, nv) = desc
+ if (abort) {
+ if (CAS_ROOT(desc, ov)) ov
+ else RDCSS_Complete(abort)
+ } else {
+ val oldmain = ov.gcasRead(this)
+ if (oldmain eq exp) {
+ if (CAS_ROOT(desc, nv)) {
+ desc.committed = true
+ nv
+ } else RDCSS_Complete(abort)
+ } else {
+ if (CAS_ROOT(desc, ov)) ov
+ else RDCSS_Complete(abort)
+ }
+ }
+ }
+ }
+
+ private def RDCSS_ROOT(ov: INode[K, V], expectedmain: MainNode[K, V], nv: INode[K, V]): Boolean = {
+ val desc = RDCSS_Descriptor(ov, expectedmain, nv)
+ if (CAS_ROOT(ov, desc)) {
+ RDCSS_Complete(false)
+ /*READ*/desc.committed
+ } else false
+ }
+
+ @tailrec private def inserthc(k: K, hc: Int, v: V) {
+ val r = RDCSS_READ_ROOT()
+ if (!r.rec_insert(k, v, hc, 0, null, r.gen, this)) inserthc(k, hc, v)
+ }
+
+ @tailrec private def insertifhc(k: K, hc: Int, v: V, cond: AnyRef): Option[V] = {
+ val r = RDCSS_READ_ROOT()
+
+ val ret = r.rec_insertif(k, v, hc, cond, 0, null, r.gen, this)
+ if (ret eq null) insertifhc(k, hc, v, cond)
+ else ret
+ }
+
+ @tailrec private def lookuphc(k: K, hc: Int): AnyRef = {
+ val r = RDCSS_READ_ROOT()
+ val res = r.rec_lookup(k, hc, 0, null, r.gen, this)
+ if (res eq INodeBase.RESTART) lookuphc(k, hc)
+ else res
+ }
+
+ /* slower:
+ //@tailrec
+ private def lookuphc(k: K, hc: Int): AnyRef = {
+ val r = RDCSS_READ_ROOT()
+ try {
+ r.rec_lookup(k, hc, 0, null, r.gen, this)
+ } catch {
+ case RestartException =>
+ lookuphc(k, hc)
+ }
+ }
+ */
+
+ @tailrec private def removehc(k: K, v: V, hc: Int): Option[V] = {
+ val r = RDCSS_READ_ROOT()
+ val res = r.rec_remove(k, v, hc, 0, null, r.gen, this)
+ if (res ne null) res
+ else removehc(k, v, hc)
+ }
+
+ def string = RDCSS_READ_ROOT().string(0)
+
+ /* public methods */
+
+ override def seq = this
+
+ override def par = new ParCtrie(this)
+
+ override def empty: Ctrie[K, V] = new Ctrie[K, V]
+
+ final def isReadOnly = rootupdater eq null
+
+ final def nonReadOnly = rootupdater ne null
+
+ /** Returns a snapshot of this Ctrie.
+ * This operation is lock-free and linearizable.
+ *
+ * The snapshot is lazily updated - the first time some branch
+   * in the snapshot or in this Ctrie is accessed, it is rewritten.
+ * This means that the work of rebuilding both the snapshot and this
+ * Ctrie is distributed across all the threads doing updates or accesses
+ * subsequent to the snapshot creation.
+ */
+ @tailrec final def snapshot(): Ctrie[K, V] = {
+ val r = RDCSS_READ_ROOT()
+ val expmain = r.gcasRead(this)
+ if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r.copyToGen(new Gen, this), rootupdater)
+ else snapshot()
+ }
+
+ /** Returns a read-only snapshot of this Ctrie.
+ * This operation is lock-free and linearizable.
+ *
+ * The snapshot is lazily updated - the first time some branch
+   * of this Ctrie is accessed, it is rewritten. The work of creating
+   * the snapshot is thus distributed across subsequent updates
+   * and accesses on this Ctrie by all threads.
+   * Note that, unlike with the `snapshot` method, the snapshot itself
+   * is never rewritten, but the obtained snapshot cannot be modified.
+ *
+ * This method is used by other methods such as `size` and `iterator`.
+ */
+ @tailrec final def readOnlySnapshot(): collection.Map[K, V] = {
+ val r = RDCSS_READ_ROOT()
+ val expmain = r.gcasRead(this)
+ if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r, null)
+ else readOnlySnapshot()
+ }
+
+ @tailrec final override def clear() {
+ val r = RDCSS_READ_ROOT()
+ if (!RDCSS_ROOT(r, r.gcasRead(this), INode.newRootNode[K, V])) clear()
+ }
+
+ final def lookup(k: K): V = {
+ val hc = computeHash(k)
+ lookuphc(k, hc).asInstanceOf[V]
+ }
+
+ final override def apply(k: K): V = {
+ val hc = computeHash(k)
+ val res = lookuphc(k, hc)
+ if (res eq null) throw new NoSuchElementException
+ else res.asInstanceOf[V]
+ }
+
+ final def get(k: K): Option[V] = {
+ val hc = computeHash(k)
+ Option(lookuphc(k, hc)).asInstanceOf[Option[V]]
+ }
+
+ override def put(key: K, value: V): Option[V] = {
+ val hc = computeHash(key)
+ insertifhc(key, hc, value, null)
+ }
+
+ final override def update(k: K, v: V) {
+ val hc = computeHash(k)
+ inserthc(k, hc, v)
+ }
+
+ final def +=(kv: (K, V)) = {
+ update(kv._1, kv._2)
+ this
+ }
+
+ final override def remove(k: K): Option[V] = {
+ val hc = computeHash(k)
+ removehc(k, null.asInstanceOf[V], hc)
+ }
+
+ final def -=(k: K) = {
+ remove(k)
+ this
+ }
+
+ def putIfAbsent(k: K, v: V): Option[V] = {
+ val hc = computeHash(k)
+ insertifhc(k, hc, v, INode.KEY_ABSENT)
+ }
+
+ def remove(k: K, v: V): Boolean = {
+ val hc = computeHash(k)
+ removehc(k, v, hc).nonEmpty
+ }
+
+ def replace(k: K, oldvalue: V, newvalue: V): Boolean = {
+ val hc = computeHash(k)
+ insertifhc(k, hc, newvalue, oldvalue.asInstanceOf[AnyRef]).nonEmpty
+ }
+
+ def replace(k: K, v: V): Option[V] = {
+ val hc = computeHash(k)
+ insertifhc(k, hc, v, INode.KEY_PRESENT)
+ }
+
+ def iterator: Iterator[(K, V)] =
+ if (nonReadOnly) readOnlySnapshot().iterator
+ else new CtrieIterator(0, this)
+
+ private def cachedSize() = {
+ val r = RDCSS_READ_ROOT()
+ r.cachedSize(this)
+ }
+
+ override def size: Int =
+ if (nonReadOnly) readOnlySnapshot().size
+ else cachedSize()
+
+ override def stringPrefix = "Ctrie"
+
+}
+
+
+object Ctrie extends MutableMapFactory[Ctrie] {
+ val inodeupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[INodeBase[_, _]], classOf[MainNode[_, _]], "mainnode")
+
+ implicit def canBuildFrom[K, V]: CanBuildFrom[Coll, (K, V), Ctrie[K, V]] = new MapCanBuildFrom[K, V]
+
+ def empty[K, V]: Ctrie[K, V] = new Ctrie[K, V]
+
+ @inline final def computeHash[K](k: K): Int = {
+ var hcode = k.hashCode
+ hcode = hcode * 0x9e3775cd
+ hcode = java.lang.Integer.reverseBytes(hcode)
+ hcode * 0x9e3775cd
+ }
+
+}
+
+
+private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ctrie[K, V], mustInit: Boolean = true) extends Iterator[(K, V)] {
+ var stack = new Array[Array[BasicNode]](7)
+ var stackpos = new Array[Int](7)
+ var depth = -1
+ var subiter: Iterator[(K, V)] = null
+ var current: KVNode[K, V] = null
+
+ if (mustInit) initialize()
+
+ def hasNext = (current ne null) || (subiter ne null)
+
+ def next() = if (hasNext) {
+ var r: (K, V) = null
+ if (subiter ne null) {
+ r = subiter.next()
+ checkSubiter()
+ } else {
+ r = current.kvPair
+ advance()
+ }
+ r
+ } else Iterator.empty.next()
+
+ private def readin(in: INode[K, V]) = in.gcasRead(ct) match {
+ case cn: CNode[K, V] =>
+ depth += 1
+ stack(depth) = cn.array
+ stackpos(depth) = -1
+ advance()
+ case tn: TNode[K, V] =>
+ current = tn
+ case ln: LNode[K, V] =>
+ subiter = ln.listmap.iterator
+ checkSubiter()
+ case null =>
+ current = null
+ }
+
+ @inline private def checkSubiter() = if (!subiter.hasNext) {
+ subiter = null
+ advance()
+ }
+
+ @inline private def initialize() {
+ assert(ct.isReadOnly)
+
+ val r = ct.RDCSS_READ_ROOT()
+ readin(r)
+ }
+
+ def advance(): Unit = if (depth >= 0) {
+ val npos = stackpos(depth) + 1
+ if (npos < stack(depth).length) {
+ stackpos(depth) = npos
+ stack(depth)(npos) match {
+ case sn: SNode[K, V] =>
+ current = sn
+ case in: INode[K, V] =>
+ readin(in)
+ }
+ } else {
+ depth -= 1
+ advance()
+ }
+ } else current = null
+
+ protected def newIterator(_lev: Int, _ct: Ctrie[K, V], _mustInit: Boolean) = new CtrieIterator[K, V](_lev, _ct, _mustInit)
+
+ protected def dupTo(it: CtrieIterator[K, V]) = {
+ it.level = this.level
+ it.ct = this.ct
+ it.depth = this.depth
+ it.current = this.current
+
+ // these need a deep copy
+ Array.copy(this.stack, 0, it.stack, 0, 7)
+ Array.copy(this.stackpos, 0, it.stackpos, 0, 7)
+
+ // this one needs to be evaluated
+ if (this.subiter == null) it.subiter = null
+ else {
+ val lst = this.subiter.toList
+ this.subiter = lst.iterator
+ it.subiter = lst.iterator
+ }
+ }
+
+ /** Returns a sequence of iterators over subsets of this iterator.
+ * It's used to ease the implementation of splitters for a parallel version of the Ctrie.
+ */
+ protected def subdivide(): Seq[Iterator[(K, V)]] = if (subiter ne null) {
+ // the case where an LNode is being iterated
+ val it = subiter
+ subiter = null
+ advance()
+ this.level += 1
+ Seq(it, this)
+ } else if (depth == -1) {
+ this.level += 1
+ Seq(this)
+ } else {
+ var d = 0
+ while (d <= depth) {
+ val rem = stack(d).length - 1 - stackpos(d)
+ if (rem > 0) {
+ val (arr1, arr2) = stack(d).drop(stackpos(d) + 1).splitAt(rem / 2)
+ stack(d) = arr1
+ stackpos(d) = -1
+ val it = newIterator(level + 1, ct, false)
+ it.stack(0) = arr2
+ it.stackpos(0) = -1
+ it.depth = 0
+ it.advance() // <-- fix it
+ this.level += 1
+ return Seq(this, it)
+ }
+ d += 1
+ }
+ this.level += 1
+ Seq(this)
+ }
+
+ def printDebug {
+ println("ctrie iterator")
+ println(stackpos.mkString(","))
+ println("depth: " + depth)
+ println("curr.: " + current)
+ println(stack.mkString("\n"))
+ }
+
+}
+
+
+private[mutable] object RestartException extends util.control.ControlThrowable
+
+
+/** Only used for ctrie serialization. */
+@SerialVersionUID(0L - 7237891413820527142L)
+private[mutable] case object CtrieSerializationEnd
+
+
+private[mutable] object Debug {
+ import collection._
+
+ lazy val logbuffer = new java.util.concurrent.ConcurrentLinkedQueue[AnyRef]
+
+ def log(s: AnyRef) = logbuffer.add(s)
+
+ def flush() {
+ for (s <- JavaConversions.asScalaIterator(logbuffer.iterator())) Console.out.println(s.toString)
+ logbuffer.clear()
+ }
+
+ def clear() {
+ logbuffer.clear()
+ }
+
+}
+
+
+
+
+
+
+
+
+
+
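A minimal usage sketch of the Ctrie added above (illustrative only, not part of the patch; the method names are those defined in this file). It shows the point of the O(1) snapshot: updates to the original after `snapshot()` do not leak into the snapshot, and `size`/`iterator` observe a consistent view.

    import scala.collection.mutable.Ctrie

    object CtrieDemo {
      def main(args: Array[String]) {
        val ct = new Ctrie[String, Int]
        ct.update("a", 1)            // lock-free insert
        ct.putIfAbsent("b", 2)       // None: "b" was absent
        val snap = ct.snapshot()     // O(1), atomic; rebuilt lazily
        ct.remove("a")               // does not affect the snapshot
        println(snap.get("a"))       // Some(1)
        println(ct.get("a"))         // None
        println(ct.size)             // linearizable, via a read-only snapshot
      }
    }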
diff --git a/src/library/scala/collection/mutable/FlatHashTable.scala b/src/library/scala/collection/mutable/FlatHashTable.scala
index f3fb6738eb..ee6d4d1d22 100644
--- a/src/library/scala/collection/mutable/FlatHashTable.scala
+++ b/src/library/scala/collection/mutable/FlatHashTable.scala
@@ -43,19 +43,19 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
/** The array keeping track of number of elements in 32 element blocks.
*/
@transient protected var sizemap: Array[Int] = null
-
+
@transient var seedvalue: Int = tableSizeSeed
-
+
import HashTable.powerOfTwo
-
+
protected def capacity(expectedSize: Int) = if (expectedSize == 0) 1 else powerOfTwo(expectedSize)
-
+
private def initialCapacity = capacity(initialSize)
-
+
protected def randomSeed = seedGenerator.get.nextInt()
-
+
protected def tableSizeSeed = Integer.bitCount(table.length - 1)
-
+
/**
* Initializes the collection from the input stream. `f` will be called for each element
* read from the input stream in the order determined by the stream. This is useful for
@@ -65,22 +65,22 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
*/
private[collection] def init(in: java.io.ObjectInputStream, f: A => Unit) {
in.defaultReadObject
-
+
_loadFactor = in.readInt()
assert(_loadFactor > 0)
-
+
val size = in.readInt()
tableSize = 0
assert(size >= 0)
-
+
table = new Array(capacity(sizeForThreshold(size, _loadFactor)))
threshold = newThreshold(_loadFactor, table.size)
-
+
seedvalue = in.readInt()
-
+
val smDefined = in.readBoolean()
if (smDefined) sizeMapInit(table.length) else sizemap = null
-
+
var index = 0
while (index < size) {
val elem = in.readObject().asInstanceOf[A]
@@ -295,12 +295,12 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
protected final def index(hcode: Int) = {
// version 1 (no longer used - did not work with parallel hash tables)
// improve(hcode) & (table.length - 1)
-
+
// version 2 (allows for parallel hash table construction)
val improved = improve(hcode, seedvalue)
val ones = table.length - 1
(improved >>> (32 - java.lang.Integer.bitCount(ones))) & ones
-
+
// version 3 (solves SI-5293 in most cases, but such a case would still arise for parallel hash tables)
// val hc = improve(hcode)
// val bbp = blockbitpos
@@ -345,17 +345,17 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
private[collection] object FlatHashTable {
-
+
 /** Creates a specific seed to improve the hash code of a hash table instance
  * and to ensure that iteration order vulnerabilities are not 'felt' in other
  * hash tables.
- *
+ *
* See SI-5293.
*/
final def seedGenerator = new ThreadLocal[util.Random] {
override def initialValue = new util.Random
}
-
+
/** The load factor for the hash table; must be < 500 (0.5)
*/
def defaultLoadFactor: Int = 450
@@ -396,11 +396,11 @@ private[collection] object FlatHashTable {
//h = h ^ (h >>> 14)
//h = h + (h << 4)
//h ^ (h >>> 10)
-
+
var i = hcode * 0x9e3775cd
i = java.lang.Integer.reverseBytes(i)
val improved = i * 0x9e3775cd
-
+
// for the remainder, see SI-5293
// to ensure that different bits are used for different hash tables, we have to rotate based on the seed
val rotation = seed % 32
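For reference, the seeded improvement above as one standalone function (a sketch mirroring the hunk, not a new API): multiply by a constant, reverse the byte order, multiply again, then rotate by the per-table seed so that two tables holding the same keys do not expose the same iteration order (SI-5293).

    def improve(hcode: Int, seed: Int): Int = {
      var i = hcode * 0x9e3775cd
      i = java.lang.Integer.reverseBytes(i)
      i = i * 0x9e3775cd
      // rotate based on the per-table seed (see SI-5293)
      val rotation = seed % 32
      (i >>> rotation) | (i << (32 - rotation))
    }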
diff --git a/src/library/scala/collection/mutable/Gen.java b/src/library/scala/collection/mutable/Gen.java
new file mode 100644
index 0000000000..0c9a30d198
--- /dev/null
+++ b/src/library/scala/collection/mutable/Gen.java
@@ -0,0 +1,18 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.mutable;
+
+
+
+
+
+
+final class Gen {
+}
+
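`Gen` is intentionally empty: generation objects carry no data and are compared only by reference identity. Taking a snapshot allocates a fresh `Gen`, and an i-node counts as current only while its `gen` is reference-equal to the root's; otherwise it must be renewed via `copyToGen`. A sketch of the identity check (assuming code inside `scala.collection.mutable`, where `Gen` is visible):

    val g1 = new Gen
    val g2 = new Gen      // e.g. allocated when a snapshot is taken
    println(g1 eq g1)     // true  - the node belongs to the current trie
    println(g1 eq g2)     // false - the node is stale and must be copied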
diff --git a/src/library/scala/collection/mutable/HashTable.scala b/src/library/scala/collection/mutable/HashTable.scala
index cdf1b78f29..cc0aed6963 100644
--- a/src/library/scala/collection/mutable/HashTable.scala
+++ b/src/library/scala/collection/mutable/HashTable.scala
@@ -52,6 +52,10 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
*/
@transient protected var sizemap: Array[Int] = null
+ @transient var seedvalue: Int = tableSizeSeed
+
+ protected def tableSizeSeed = Integer.bitCount(table.length - 1)
+
protected def initialSize: Int = HashTable.initialSize
private def lastPopulatedIndex = {
@@ -70,14 +74,16 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
private[collection] def init[B](in: java.io.ObjectInputStream, f: (A, B) => Entry) {
in.defaultReadObject
- _loadFactor = in.readInt
+ _loadFactor = in.readInt()
assert(_loadFactor > 0)
- val size = in.readInt
+ val size = in.readInt()
tableSize = 0
assert(size >= 0)
- val smDefined = in.readBoolean
+ seedvalue = in.readInt()
+
+ val smDefined = in.readBoolean()
table = new Array(capacity(sizeForThreshold(_loadFactor, size)))
threshold = newThreshold(_loadFactor, table.size)
@@ -86,7 +92,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
var index = 0
while (index < size) {
- addEntry(f(in.readObject.asInstanceOf[A], in.readObject.asInstanceOf[B]))
+ addEntry(f(in.readObject().asInstanceOf[A], in.readObject().asInstanceOf[B]))
index += 1
}
}
@@ -103,6 +109,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
out.defaultWriteObject
out.writeInt(_loadFactor)
out.writeInt(tableSize)
+ out.writeInt(seedvalue)
out.writeBoolean(isSizeMapDefined)
foreachEntry { entry =>
out.writeObject(entry.key)
@@ -314,7 +321,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
// this is of crucial importance when populating the table in parallel
protected final def index(hcode: Int) = {
val ones = table.length - 1
- val improved = improve(hcode)
+ val improved = improve(hcode, seedvalue)
val shifted = (improved >> (32 - java.lang.Integer.bitCount(ones))) & ones
shifted
}
@@ -325,6 +332,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
table = c.table
tableSize = c.tableSize
threshold = c.threshold
+ seedvalue = c.seedvalue
sizemap = c.sizemap
}
if (alwaysInitSizeMap && sizemap == null) sizeMapInitAndRebuild
@@ -335,6 +343,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
table,
tableSize,
threshold,
+ seedvalue,
sizemap
)
}
@@ -368,7 +377,7 @@ private[collection] object HashTable {
protected def elemHashCode(key: KeyType) = key.##
- protected final def improve(hcode: Int) = {
+ protected final def improve(hcode: Int, seed: Int) = {
/* Murmur hash
* m = 0x5bd1e995
* r = 24
@@ -396,7 +405,7 @@ private[collection] object HashTable {
* */
var i = hcode * 0x9e3775cd
i = java.lang.Integer.reverseBytes(i)
- i * 0x9e3775cd
+ i = i * 0x9e3775cd
// a slower alternative for byte reversal:
// i = (i << 16) | (i >> 16)
// i = ((i >> 8) & 0x00ff00ff) | ((i << 8) & 0xff00ff00)
@@ -420,6 +429,11 @@ private[collection] object HashTable {
// h = h ^ (h >>> 14)
// h = h + (h << 4)
// h ^ (h >>> 10)
+
+ // the rest of the computation is due to SI-5293
+ val rotation = seed % 32
+ val rotated = (i >>> rotation) | (i << (32 - rotation))
+ rotated
}
}
@@ -442,6 +456,7 @@ private[collection] object HashTable {
val table: Array[HashEntry[A, Entry]],
val tableSize: Int,
val threshold: Int,
+ val seedvalue: Int,
val sizemap: Array[Int]
) {
import collection.DebugUtils._
@@ -452,6 +467,7 @@ private[collection] object HashTable {
append("Table: [" + arrayString(table, 0, table.length) + "]")
append("Table size: " + tableSize)
append("Load factor: " + loadFactor)
+ append("Seedvalue: " + seedvalue)
append("Threshold: " + threshold)
append("Sizemap: [" + arrayString(sizemap, 0, sizemap.length) + "]")
}
diff --git a/src/library/scala/collection/mutable/INodeBase.java b/src/library/scala/collection/mutable/INodeBase.java
new file mode 100644
index 0000000000..487b5cfc28
--- /dev/null
+++ b/src/library/scala/collection/mutable/INodeBase.java
@@ -0,0 +1,35 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.mutable;
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+
+
+abstract class INodeBase<K, V> extends BasicNode {
+
+ public static final AtomicReferenceFieldUpdater<INodeBase, MainNode> updater = AtomicReferenceFieldUpdater.newUpdater(INodeBase.class, MainNode.class, "mainnode");
+
+ public static final Object RESTART = new Object();
+
+ public volatile MainNode<K, V> mainnode = null;
+
+ public final Gen gen;
+
+ public INodeBase(Gen generation) {
+ gen = generation;
+ }
+
+ public BasicNode prev() {
+ return null;
+ }
+
+}
\ No newline at end of file
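`INodeBase` funnels all writes to the volatile `mainnode` field through one shared `AtomicReferenceFieldUpdater`, giving CAS semantics without allocating a separate atomic box per node. A minimal Scala sketch of the same CAS pattern (hypothetical `Node` class; it uses `AtomicReference` for brevity, whereas the patch deliberately avoids that per-node allocation by using the field updater):

    import java.util.concurrent.atomic.AtomicReference

    class Node {
      private val main = new AtomicReference[AnyRef](null)
      def READ: AnyRef = main.get
      def CAS(ov: AnyRef, nv: AnyRef): Boolean = main.compareAndSet(ov, nv)
    }

    val n = new Node
    println(n.CAS(null, "committed"))  // true  - the field was null
    println(n.CAS(null, "again"))      // false - lost the race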
diff --git a/src/library/scala/collection/mutable/ListBuffer.scala b/src/library/scala/collection/mutable/ListBuffer.scala
index 53c876ec08..037f3b2939 100644
--- a/src/library/scala/collection/mutable/ListBuffer.scala
+++ b/src/library/scala/collection/mutable/ListBuffer.scala
@@ -62,22 +62,22 @@ final class ListBuffer[A]
private var len = 0
protected def underlying: immutable.Seq[A] = start
-
+
private def writeObject(out: ObjectOutputStream) {
// write start
var xs: List[A] = start
while (!xs.isEmpty) { out.writeObject(xs.head); xs = xs.tail }
out.writeObject(ListSerializeEnd)
-
+
// no need to write last0
-
+
// write if exported
out.writeBoolean(exported)
-
+
// write the length
out.writeInt(len)
}
-
+
private def readObject(in: ObjectInputStream) {
// read start, set last0 appropriately
var elem: A = in.readObject.asInstanceOf[A]
@@ -97,14 +97,14 @@ final class ListBuffer[A]
last0 = current
start
}
-
+
// read if exported
exported = in.readBoolean()
-
+
// read the length
len = in.readInt()
}
-
+
/** The current length of the buffer.
*
* This operation takes constant time.
diff --git a/src/library/scala/collection/mutable/MainNode.java b/src/library/scala/collection/mutable/MainNode.java
new file mode 100644
index 0000000000..0578de676d
--- /dev/null
+++ b/src/library/scala/collection/mutable/MainNode.java
@@ -0,0 +1,40 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.mutable;
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+
+
+abstract class MainNode<K, V> extends BasicNode {
+
+ public static final AtomicReferenceFieldUpdater<MainNode, MainNode> updater = AtomicReferenceFieldUpdater.newUpdater(MainNode.class, MainNode.class, "prev");
+
+ public volatile MainNode<K, V> prev = null;
+
+ public abstract int cachedSize(Object ct);
+
+ public boolean CAS_PREV(MainNode<K, V> oldval, MainNode<K, V> nval) {
+ return updater.compareAndSet(this, oldval, nval);
+ }
+
+ public void WRITE_PREV(MainNode<K, V> nval) {
+ updater.set(this, nval);
+ }
+
+ // do we need this? unclear in the javadocs...
+ // apparently not - volatile reads are supposed to be safe
+  // regardless of whether there are concurrent ARFU updates
+ public MainNode<K, V> READ_PREV() {
+ return updater.get(this);
+ }
+
+}
\ No newline at end of file
diff --git a/src/library/scala/collection/mutable/SortedSet.scala b/src/library/scala/collection/mutable/SortedSet.scala
index d87fc0b4a2..f41a51d3ef 100644
--- a/src/library/scala/collection/mutable/SortedSet.scala
+++ b/src/library/scala/collection/mutable/SortedSet.scala
@@ -13,12 +13,12 @@ import generic._
/**
* Base trait for mutable sorted set.
- *
+ *
* @define Coll mutable.SortedSet
* @define coll mutable sorted set
*
* @author Lucien Pereira
- *
+ *
*/
trait SortedSet[A] extends collection.SortedSet[A] with collection.SortedSetLike[A,SortedSet[A]]
with mutable.Set[A] with mutable.SetLike[A, SortedSet[A]] {
@@ -39,11 +39,11 @@ trait SortedSet[A] extends collection.SortedSet[A] with collection.SortedSetLike
* Standard `CanBuildFrom` instance for sorted sets.
*
* @author Lucien Pereira
- *
+ *
*/
object SortedSet extends MutableSortedSetFactory[SortedSet] {
implicit def canBuildFrom[A](implicit ord: Ordering[A]): CanBuildFrom[Coll, A, SortedSet[A]] = new SortedSetCanBuildFrom[A]
-
+
def empty[A](implicit ord: Ordering[A]): SortedSet[A] = TreeSet.empty[A]
-
+
}
diff --git a/src/library/scala/collection/mutable/TreeSet.scala b/src/library/scala/collection/mutable/TreeSet.scala
index e0f1c3adfe..02ee811193 100644
--- a/src/library/scala/collection/mutable/TreeSet.scala
+++ b/src/library/scala/collection/mutable/TreeSet.scala
@@ -11,14 +11,14 @@ package mutable
import generic._
-/**
+/**
* @define Coll mutable.TreeSet
* @define coll mutable tree set
* @factoryInfo
* Companion object of TreeSet providing factory related utilities.
- *
+ *
* @author Lucien Pereira
- *
+ *
*/
object TreeSet extends MutableSortedSetFactory[TreeSet] {
/**
@@ -32,7 +32,7 @@ object TreeSet extends MutableSortedSetFactory[TreeSet] {
* A mutable SortedSet using an immutable AVL Tree as underlying data structure.
*
* @author Lucien Pereira
- *
+ *
*/
class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with SetLike[A, TreeSet[A]]
with SortedSetLike[A, TreeSet[A]] with Set[A] with Serializable {
@@ -67,7 +67,7 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
* Cardinality store the set size, unfortunately a
* set view (given by rangeImpl)
* cannot take advantage of this optimisation
- *
+ *
*/
override def size: Int = base.map(_ => super.size).getOrElse(cardinality)
@@ -101,7 +101,7 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
* Thanks to the immutable nature of the
* underlying AVL Tree, we can share it with
* the clone. So clone complexity in time is O(1).
- *
+ *
*/
override def clone: TreeSet[A] = {
val clone = new TreeSet[A](base, from, until)
@@ -119,5 +119,5 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
override def iterator: Iterator[A] = resolve.avl.iterator
.dropWhile(e => !isLeftAcceptable(from, ordering)(e))
.takeWhile(e => isRightAcceptable(until, ordering)(e))
-
+
}
diff --git a/src/library/scala/collection/parallel/Combiner.scala b/src/library/scala/collection/parallel/Combiner.scala
index d1453c9ce9..6afe901258 100644
--- a/src/library/scala/collection/parallel/Combiner.scala
+++ b/src/library/scala/collection/parallel/Combiner.scala
@@ -33,9 +33,21 @@ import scala.collection.generic.Sizing
* @since 2.9
*/
trait Combiner[-Elem, +To] extends Builder[Elem, To] with Sizing with Parallel {
-//self: EnvironmentPassingCombiner[Elem, To] =>
- private[collection] final val tasksupport = getTaskSupport
-
+
+ @transient
+ @volatile
+ var _combinerTaskSupport = defaultTaskSupport
+
+ def combinerTaskSupport = {
+ val cts = _combinerTaskSupport
+ if (cts eq null) {
+ _combinerTaskSupport = defaultTaskSupport
+ defaultTaskSupport
+ } else cts
+ }
+
+ def combinerTaskSupport_=(cts: TaskSupport) = _combinerTaskSupport = cts
+
/** Combines the contents of the receiver builder and the `other` builder,
* producing a new builder containing both their elements.
*
@@ -63,6 +75,21 @@ trait Combiner[-Elem, +To] extends Builder[Elem, To] with Sizing with Parallel {
*/
def combine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]): Combiner[N, NewTo]
+ /** Returns `true` if this combiner has a thread-safe `+=` and is meant to be shared
+ * across several threads constructing the collection.
+ *
+ * By default, this method returns `false`.
+ */
+ def canBeShared: Boolean = false
+
+  /** Constructs the result and, if applicable, sets the appropriate
+   *  task support object on the resulting collection.
+   */
+ def resultWithTaskSupport: To = {
+ val res = result
+ setTaskSupport(res, combinerTaskSupport)
+ }
+
}
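A combiner is a builder that can additionally `combine` with another combiner; the members added above let a thread-safe combiner advertise itself via `canBeShared` and carry its own task support. A self-contained sketch of the contract under hypothetical names (`MiniCombiner` and `ListCombiner` are illustrations, not library types, and the task support plumbing is omitted):

    import scala.collection.mutable.ArrayBuffer

    trait MiniCombiner[-Elem, +To] {
      def +=(elem: Elem): this.type
      def combine[N <: Elem, NewTo >: To](other: MiniCombiner[N, NewTo]): MiniCombiner[N, NewTo]
      def result: To
      def canBeShared: Boolean = false   // true only if += is thread-safe
    }

    class ListCombiner[T] extends MiniCombiner[T, List[T]] {
      private val buf = new ArrayBuffer[T]
      def +=(elem: T): this.type = { buf += elem; this }
      def combine[N <: T, NewTo >: List[T]](other: MiniCombiner[N, NewTo]) = other match {
        case that: ListCombiner[T] => buf ++= that.buf; this   // cheap merge
        case _ => throw new UnsupportedOperationException("unknown combiner")
      }
      def result: List[T] = buf.toList
    }

    val c1 = new ListCombiner[Int]; c1 += 1; c1 += 2
    val c2 = new ListCombiner[Int]; c2 += 3
    println(c1.combine(c2).result)   // List(1, 2, 3)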
diff --git a/src/library/scala/collection/parallel/ParIterableLike.scala b/src/library/scala/collection/parallel/ParIterableLike.scala
index 390bd72ab5..5e6bf8c1a3 100644
--- a/src/library/scala/collection/parallel/ParIterableLike.scala
+++ b/src/library/scala/collection/parallel/ParIterableLike.scala
@@ -28,7 +28,7 @@ import immutable.HashMapCombiner
import java.util.concurrent.atomic.AtomicBoolean
import annotation.unchecked.uncheckedVariance
-
+import annotation.unchecked.uncheckedStable
/** A template trait for parallel collections of type `ParIterable[T]`.
@@ -96,17 +96,6 @@ import annotation.unchecked.uncheckedVariance
* The combination of methods `toMap`, `toSeq` or `toSet` along with `par` and `seq` is a flexible
* way to change between different collection types.
*
- * The method:
- *
- * {{{
- * def threshold(sz: Int, p: Int): Int
- * }}}
- *
- * provides an estimate on the minimum number of elements the collection has before
- * the splitting stops and depends on the number of elements in the collection. A rule of the
- * thumb is the number of elements divided by 8 times the parallelism level. This method may
- * be overridden in concrete implementations if necessary.
- *
* Since this trait extends the `Iterable` trait, methods like `size` must also
* be implemented in concrete collections, while `iterator` forwards to `splitter` by
* default.
@@ -165,49 +154,25 @@ extends GenIterableLike[T, Repr]
with HasNewCombiner[T, Repr]
{
self: ParIterableLike[T, Repr, Sequential] =>
-
- import tasksupport._
-
+
+ @transient
+ @volatile
+ private var _tasksupport = defaultTaskSupport
+
+ def tasksupport = {
+ val ts = _tasksupport
+ if (ts eq null) {
+ _tasksupport = defaultTaskSupport
+ defaultTaskSupport
+ } else ts
+ }
+
+ def tasksupport_=(ts: TaskSupport) = _tasksupport = ts
+
def seq: Sequential
def repr: Repr = this.asInstanceOf[Repr]
- /** Parallel iterators are split iterators that have additional accessor and
- * transformer methods defined in terms of methods `next` and `hasNext`.
- * When creating a new parallel collection, one might want to override these
- * new methods to make them more efficient.
- *
- * Parallel iterators are augmented with signalling capabilities. This means
- * that a signalling object can be assigned to them as needed.
- *
- * The self-type ensures that signal context passing behaviour gets mixed in
- * a concrete object instance.
- */
- trait ParIterator extends IterableSplitter[T] {
- me: SignalContextPassingIterator[ParIterator] =>
- var signalDelegate: Signalling = IdleSignalling
- def repr = self.repr
- def split: Seq[IterableSplitter[T]]
- }
-
- /** A stackable modification that ensures signal contexts get passed along the iterators.
- * A self-type requirement in `ParIterator` ensures that this trait gets mixed into
- * concrete iterators.
- */
- trait SignalContextPassingIterator[+IterRepr <: ParIterator] extends ParIterator {
- // Note: This functionality must be factored out to this inner trait to avoid boilerplate.
- // Also, one could omit the cast below. However, this leads to return type inconsistencies,
- // due to inability to override the return type of _abstract overrides_.
- // Be aware that this stackable modification has to be subclassed, so it shouldn't be rigid
- // on the type of iterators it splits.
- // The alternative is some boilerplate - better to tradeoff some type safety to avoid it here.
- abstract override def split: Seq[IterRepr] = {
- val pits = super.split
- pits foreach { _.signalDelegate = signalDelegate }
- pits.asInstanceOf[Seq[IterRepr]]
- }
- }
-
def hasDefiniteSize = true
def nonEmpty = size != 0
@@ -242,18 +207,6 @@ self: ParIterableLike[T, Repr, Sequential] =>
*/
def isStrictSplitterCollection = true
- /** Some minimal number of elements after which this collection should be handled
- * sequentially by different processors.
- *
- * This method depends on the size of the collection and the parallelism level, which
- * are both specified as arguments.
- *
- * @param sz the size based on which to compute the threshold
- * @param p the parallelism level based on which to compute the threshold
- * @return the maximum number of elements for performing operations sequentially
- */
- def threshold(sz: Int, p: Int): Int = thresholdFromSize(sz, p)
-
/** The `newBuilder` operation returns a parallel builder assigned to this collection's fork/join pool.
* This method forwards the call to `newCombiner`.
*/
@@ -365,7 +318,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* if this $coll is empty.
*/
def reduce[U >: T](op: (U, U) => U): U = {
- executeAndWaitResult(new Reduce(op, splitter) mapResult { _.get })
+ tasksupport.executeAndWaitResult(new Reduce(op, splitter) mapResult { _.get })
}
/** Optionally reduces the elements of this sequence using the specified associative binary operator.
@@ -400,7 +353,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* @return the result of applying fold operator `op` between all the elements and `z`
*/
def fold[U >: T](z: U)(op: (U, U) => U): U = {
- executeAndWaitResult(new Fold(z, op, splitter))
+ tasksupport.executeAndWaitResult(new Fold(z, op, splitter))
}
/** Aggregates the results of applying an operator to subsequent elements.
@@ -432,13 +385,9 @@ self: ParIterableLike[T, Repr, Sequential] =>
* @param combop an associative operator used to combine results from different partitions
*/
def aggregate[S](z: S)(seqop: (S, T) => S, combop: (S, S) => S): S = {
- executeAndWaitResult(new Aggregate(z, seqop, combop, splitter))
+ tasksupport.executeAndWaitResult(new Aggregate(z, seqop, combop, splitter))
}
- def /:[S](z: S)(op: (S, T) => S): S = foldLeft(z)(op)
-
- def :\[S](z: S)(op: (T, S) => S): S = foldRight(z)(op)
-
def foldLeft[S](z: S)(op: (S, T) => S): S = seq.foldLeft(z)(op)
def foldRight[S](z: S)(op: (T, S) => S): S = seq.foldRight(z)(op)
@@ -451,47 +400,33 @@ self: ParIterableLike[T, Repr, Sequential] =>
def reduceRightOption[U >: T](op: (T, U) => U): Option[U] = seq.reduceRightOption(op)
- /*
- /** Applies a function `f` to all the elements of $coll. Does so in a nondefined order,
- * and in parallel.
- *
- * $undefinedorder
- *
- * @tparam U the result type of the function applied to each element, which is always discarded
- * @param f function applied to each element
- */
- def pareach[U](f: T => U): Unit = {
- executeAndWaitResult(new Foreach(f, splitter))
- }
- */
-
/** Applies a function `f` to all the elements of $coll in a sequential order.
*
* @tparam U the result type of the function applied to each element, which is always discarded
* @param f function applied to each element
*/
def foreach[U](f: T => U) = {
- executeAndWaitResult(new Foreach(f, splitter))
+ tasksupport.executeAndWaitResult(new Foreach(f, splitter))
}
def count(p: T => Boolean): Int = {
- executeAndWaitResult(new Count(p, splitter))
+ tasksupport.executeAndWaitResult(new Count(p, splitter))
}
def sum[U >: T](implicit num: Numeric[U]): U = {
- executeAndWaitResult(new Sum[U](num, splitter))
+ tasksupport.executeAndWaitResult(new Sum[U](num, splitter))
}
def product[U >: T](implicit num: Numeric[U]): U = {
- executeAndWaitResult(new Product[U](num, splitter))
+ tasksupport.executeAndWaitResult(new Product[U](num, splitter))
}
def min[U >: T](implicit ord: Ordering[U]): T = {
- executeAndWaitResult(new Min(ord, splitter) mapResult { _.get }).asInstanceOf[T]
+ tasksupport.executeAndWaitResult(new Min(ord, splitter) mapResult { _.get }).asInstanceOf[T]
}
def max[U >: T](implicit ord: Ordering[U]): T = {
- executeAndWaitResult(new Max(ord, splitter) mapResult { _.get }).asInstanceOf[T]
+ tasksupport.executeAndWaitResult(new Max(ord, splitter) mapResult { _.get }).asInstanceOf[T]
}
def maxBy[S](f: T => S)(implicit cmp: Ordering[S]): T = {
@@ -507,24 +442,24 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
def map[S, That](f: T => S)(implicit bf: CanBuildFrom[Repr, S, That]): That = if (bf(repr).isCombiner) {
- executeAndWaitResult(new Map[S, That](f, () => bf(repr).asCombiner, splitter) mapResult { _.result })
- } else seq.map(f)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(new Map[S, That](f, combinerFactory(() => bf(repr).asCombiner), splitter) mapResult { _.resultWithTaskSupport })
+ } else setTaskSupport(seq.map(f)(bf2seq(bf)), tasksupport)
/*bf ifParallel { pbf =>
- executeAndWaitResult(new Map[S, That](f, pbf, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new Map[S, That](f, pbf, splitter) mapResult { _.result })
} otherwise seq.map(f)(bf2seq(bf))*/
def collect[S, That](pf: PartialFunction[T, S])(implicit bf: CanBuildFrom[Repr, S, That]): That = if (bf(repr).isCombiner) {
- executeAndWaitResult(new Collect[S, That](pf, () => bf(repr).asCombiner, splitter) mapResult { _.result })
- } else seq.collect(pf)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(new Collect[S, That](pf, combinerFactory(() => bf(repr).asCombiner), splitter) mapResult { _.resultWithTaskSupport })
+ } else setTaskSupport(seq.collect(pf)(bf2seq(bf)), tasksupport)
/*bf ifParallel { pbf =>
- executeAndWaitResult(new Collect[S, That](pf, pbf, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new Collect[S, That](pf, pbf, splitter) mapResult { _.result })
} otherwise seq.collect(pf)(bf2seq(bf))*/
def flatMap[S, That](f: T => GenTraversableOnce[S])(implicit bf: CanBuildFrom[Repr, S, That]): That = if (bf(repr).isCombiner) {
- executeAndWaitResult(new FlatMap[S, That](f, () => bf(repr).asCombiner, splitter) mapResult { _.result })
- } else seq.flatMap(f)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(new FlatMap[S, That](f, combinerFactory(() => bf(repr).asCombiner), splitter) mapResult { _.resultWithTaskSupport })
+ } else setTaskSupport(seq.flatMap(f)(bf2seq(bf)), tasksupport)
/*bf ifParallel { pbf =>
- executeAndWaitResult(new FlatMap[S, That](f, pbf, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new FlatMap[S, That](f, pbf, splitter) mapResult { _.result })
} otherwise seq.flatMap(f)(bf2seq(bf))*/
/** Tests whether a predicate holds for all elements of this $coll.
@@ -535,7 +470,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* @return true if `p` holds for all elements, false otherwise
*/
def forall(pred: T => Boolean): Boolean = {
- executeAndWaitResult(new Forall(pred, splitter assign new DefaultSignalling with VolatileAbort))
+ tasksupport.executeAndWaitResult(new Forall(pred, splitter assign new DefaultSignalling with VolatileAbort))
}
/** Tests whether a predicate holds for some element of this $coll.
@@ -546,7 +481,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* @return true if `p` holds for some element, false otherwise
*/
def exists(pred: T => Boolean): Boolean = {
- executeAndWaitResult(new Exists(pred, splitter assign new DefaultSignalling with VolatileAbort))
+ tasksupport.executeAndWaitResult(new Exists(pred, splitter assign new DefaultSignalling with VolatileAbort))
}
/** Finds some element in the collection for which the predicate holds, if such
@@ -561,19 +496,52 @@ self: ParIterableLike[T, Repr, Sequential] =>
* @return an option value with the element if such an element exists, or `None` otherwise
*/
def find(pred: T => Boolean): Option[T] = {
- executeAndWaitResult(new Find(pred, splitter assign new DefaultSignalling with VolatileAbort))
+ tasksupport.executeAndWaitResult(new Find(pred, splitter assign new DefaultSignalling with VolatileAbort))
+ }
+
+ /** Creates a combiner factory. Each combiner factory instance is used
+ * once per invocation of a parallel transformer method for a single
+ * collection.
+ *
+ * The default combiner factory creates a new combiner every time it
+ * is requested, unless the combiner is thread-safe as indicated by its
+ * `canBeShared` method. In this case, the method returns a factory which
+   * returns the same combiner each time. This is typically done for
+   * concurrent parallel collections, whose combiners allow
+   * thread-safe access.
+ */
+ protected[this] def combinerFactory = {
+ val combiner = newCombiner
+ combiner.combinerTaskSupport = tasksupport
+ if (combiner.canBeShared) new CombinerFactory[T, Repr] {
+ val shared = combiner
+ def apply() = shared
+ def doesShareCombiners = true
+ } else new CombinerFactory[T, Repr] {
+ def apply() = newCombiner
+ def doesShareCombiners = false
+ }
}
- protected[this] def cbfactory ={
- () => newCombiner
+ protected[this] def combinerFactory[S, That](cbf: () => Combiner[S, That]) = {
+ val combiner = cbf()
+ combiner.combinerTaskSupport = tasksupport
+ if (combiner.canBeShared) new CombinerFactory[S, That] {
+ val shared = combiner
+ def apply() = shared
+ def doesShareCombiners = true
+ } else new CombinerFactory[S, That] {
+ def apply() = cbf()
+ def doesShareCombiners = false
+ }
}
def filter(pred: T => Boolean): Repr = {
- executeAndWaitResult(new Filter(pred, cbfactory, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new Filter(pred, combinerFactory, splitter) mapResult { _.resultWithTaskSupport })
}
def filterNot(pred: T => Boolean): Repr = {
- executeAndWaitResult(new FilterNot(pred, cbfactory, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new FilterNot(pred, combinerFactory, splitter) mapResult { _.resultWithTaskSupport })
}
def ++[U >: T, That](that: GenTraversableOnce[U])(implicit bf: CanBuildFrom[Repr, U, That]): That = {
@@ -581,49 +549,54 @@ self: ParIterableLike[T, Repr, Sequential] =>
// println("case both are parallel")
val other = that.asParIterable
val pbf = bf.asParallel
- val copythis = new Copy(() => pbf(repr), splitter)
+ val cfactory = combinerFactory(() => pbf(repr))
+ val copythis = new Copy(cfactory, splitter)
val copythat = wrap {
- val othtask = new other.Copy(() => pbf(self.repr), other.splitter)
+ val othtask = new other.Copy(cfactory, other.splitter)
tasksupport.executeAndWaitResult(othtask)
}
val task = (copythis parallel copythat) { _ combine _ } mapResult {
- _.result
+ _.resultWithTaskSupport
}
- executeAndWaitResult(task)
- } else if (bf.isParallel) {
+ tasksupport.executeAndWaitResult(task)
+ } else if (bf(repr).isCombiner) {
// println("case parallel builder, `that` not parallel")
- val pbf = bf.asParallel
- val copythis = new Copy(() => pbf(repr), splitter)
+ val copythis = new Copy(combinerFactory(() => bf(repr).asCombiner), splitter)
val copythat = wrap {
- val cb = pbf(repr)
+ val cb = bf(repr).asCombiner
for (elem <- that.seq) cb += elem
cb
}
- executeAndWaitResult((copythis parallel copythat) { _ combine _ } mapResult { _.result })
+ tasksupport.executeAndWaitResult((copythis parallel copythat) { _ combine _ } mapResult { _.resultWithTaskSupport })
} else {
// println("case not a parallel builder")
val b = bf(repr)
this.splitter.copy2builder[U, That, Builder[U, That]](b)
for (elem <- that.seq) b += elem
- b.result
+ setTaskSupport(b.result, tasksupport)
}
}
def partition(pred: T => Boolean): (Repr, Repr) = {
- executeAndWaitResult(new Partition(pred, cbfactory, splitter) mapResult { p => (p._1.result, p._2.result) })
+ tasksupport.executeAndWaitResult(
+ new Partition(pred, combinerFactory, combinerFactory, splitter) mapResult {
+ p => (p._1.resultWithTaskSupport, p._2.resultWithTaskSupport)
+ }
+ )
}
def groupBy[K](f: T => K): immutable.ParMap[K, Repr] = {
- executeAndWaitResult(new GroupBy(f, () => HashMapCombiner[K, T], splitter) mapResult {
- rcb => rcb.groupByKey(cbfactory)
+ val r = tasksupport.executeAndWaitResult(new GroupBy(f, () => HashMapCombiner[K, T], splitter) mapResult {
+ rcb => rcb.groupByKey(() => combinerFactory())
})
+ setTaskSupport(r, tasksupport)
}
def take(n: Int): Repr = {
val actualn = if (size > n) n else size
if (actualn < MIN_FOR_COPY) take_sequential(actualn)
- else executeAndWaitResult(new Take(actualn, cbfactory, splitter) mapResult {
- _.result
+ else tasksupport.executeAndWaitResult(new Take(actualn, combinerFactory, splitter) mapResult {
+ _.resultWithTaskSupport
})
}
@@ -636,13 +609,13 @@ self: ParIterableLike[T, Repr, Sequential] =>
cb += it.next
left -= 1
}
- cb.result
+ cb.resultWithTaskSupport
}
def drop(n: Int): Repr = {
val actualn = if (size > n) n else size
if ((size - actualn) < MIN_FOR_COPY) drop_sequential(actualn)
- else executeAndWaitResult(new Drop(actualn, cbfactory, splitter) mapResult { _.result })
+ else tasksupport.executeAndWaitResult(new Drop(actualn, combinerFactory, splitter) mapResult { _.resultWithTaskSupport })
}
private def drop_sequential(n: Int) = {
@@ -650,14 +623,14 @@ self: ParIterableLike[T, Repr, Sequential] =>
val cb = newCombiner
cb.sizeHint(size - n)
while (it.hasNext) cb += it.next
- cb.result
+ cb.resultWithTaskSupport
}
override def slice(unc_from: Int, unc_until: Int): Repr = {
val from = unc_from min size max 0
val until = unc_until min size max from
if ((until - from) <= MIN_FOR_COPY) slice_sequential(from, until)
- else executeAndWaitResult(new Slice(from, until, cbfactory, splitter) mapResult { _.result })
+ else tasksupport.executeAndWaitResult(new Slice(from, until, combinerFactory, splitter) mapResult { _.resultWithTaskSupport })
}
private def slice_sequential(from: Int, until: Int): Repr = {
@@ -668,11 +641,15 @@ self: ParIterableLike[T, Repr, Sequential] =>
cb += it.next
left -= 1
}
- cb.result
+ cb.resultWithTaskSupport
}
def splitAt(n: Int): (Repr, Repr) = {
- executeAndWaitResult(new SplitAt(n, cbfactory, splitter) mapResult { p => (p._1.result, p._2.result) })
+ tasksupport.executeAndWaitResult(
+ new SplitAt(n, combinerFactory, combinerFactory, splitter) mapResult {
+ p => (p._1.resultWithTaskSupport, p._2.resultWithTaskSupport)
+ }
+ )
}
/** Computes a prefix scan of the elements of the collection.
@@ -690,20 +667,19 @@ self: ParIterableLike[T, Repr, Sequential] =>
*
* @return a new $coll containing the prefix scan of the elements in this $coll
*/
- def scan[U >: T, That](z: U)(op: (U, U) => U)(implicit bf: CanBuildFrom[Repr, U, That]): That = if (bf.isParallel) {
- val cbf = bf.asParallel
- if (parallelismLevel > 1) {
- if (size > 0) executeAndWaitResult(new CreateScanTree(0, size, z, op, splitter) mapResult {
- tree => executeAndWaitResult(new FromScanTree(tree, z, op, cbf) mapResult {
- cb => cb.result
+ def scan[U >: T, That](z: U)(op: (U, U) => U)(implicit bf: CanBuildFrom[Repr, U, That]): That = if (bf(repr).isCombiner) {
+ if (tasksupport.parallelismLevel > 1) {
+ if (size > 0) tasksupport.executeAndWaitResult(new CreateScanTree(0, size, z, op, splitter) mapResult {
+ tree => tasksupport.executeAndWaitResult(new FromScanTree(tree, z, op, combinerFactory(() => bf(repr).asCombiner)) mapResult {
+ cb => cb.resultWithTaskSupport
})
- }) else (cbf(self.repr) += z).result
- } else seq.scan(z)(op)(bf2seq(bf))
- } else seq.scan(z)(op)(bf2seq(bf))
+ }) else setTaskSupport((bf(repr) += z).result, tasksupport)
+ } else setTaskSupport(seq.scan(z)(op)(bf2seq(bf)), tasksupport)
+ } else setTaskSupport(seq.scan(z)(op)(bf2seq(bf)), tasksupport)
- def scanLeft[S, That](z: S)(op: (S, T) => S)(implicit bf: CanBuildFrom[Repr, S, That]) = seq.scanLeft(z)(op)(bf2seq(bf))
+ def scanLeft[S, That](z: S)(op: (S, T) => S)(implicit bf: CanBuildFrom[Repr, S, That]) = setTaskSupport(seq.scanLeft(z)(op)(bf2seq(bf)), tasksupport)
- def scanRight[S, That](z: S)(op: (T, S) => S)(implicit bf: CanBuildFrom[Repr, S, That]) = seq.scanRight(z)(op)(bf2seq(bf))
+ def scanRight[S, That](z: S)(op: (T, S) => S)(implicit bf: CanBuildFrom[Repr, S, That]) = setTaskSupport(seq.scanRight(z)(op)(bf2seq(bf)), tasksupport)
/** Takes the longest prefix of elements that satisfy the predicate.
*
@@ -714,9 +690,19 @@ self: ParIterableLike[T, Repr, Sequential] =>
   * @return the longest prefix of this $coll of elements that satisfy the predicate `pred`
*/
def takeWhile(pred: T => Boolean): Repr = {
- val cntx = new DefaultSignalling with AtomicIndexFlag
- cntx.setIndexFlag(Int.MaxValue)
- executeAndWaitResult(new TakeWhile(0, pred, cbfactory, splitter assign cntx) mapResult { _._1.result })
+ val cbf = combinerFactory
+ if (cbf.doesShareCombiners) {
+ val parseqspan = toSeq.takeWhile(pred)
+ tasksupport.executeAndWaitResult(new Copy(combinerFactory, parseqspan.splitter) mapResult {
+ _.resultWithTaskSupport
+ })
+ } else {
+ val cntx = new DefaultSignalling with AtomicIndexFlag
+ cntx.setIndexFlag(Int.MaxValue)
+ tasksupport.executeAndWaitResult(new TakeWhile(0, pred, combinerFactory, splitter assign cntx) mapResult {
+ _._1.resultWithTaskSupport
+ })
+ }
}
/** Splits this $coll into a prefix/suffix pair according to a predicate.
@@ -729,11 +715,22 @@ self: ParIterableLike[T, Repr, Sequential] =>
* the elements satisfy `pred`, and the rest of the collection
*/
def span(pred: T => Boolean): (Repr, Repr) = {
- val cntx = new DefaultSignalling with AtomicIndexFlag
- cntx.setIndexFlag(Int.MaxValue)
- executeAndWaitResult(new Span(0, pred, cbfactory, splitter assign cntx) mapResult {
- p => (p._1.result, p._2.result)
- })
+ val cbf = combinerFactory
+ if (cbf.doesShareCombiners) {
+ val (xs, ys) = toSeq.span(pred)
+ val copyxs = new Copy(combinerFactory, xs.splitter) mapResult { _.resultWithTaskSupport }
+ val copyys = new Copy(combinerFactory, ys.splitter) mapResult { _.resultWithTaskSupport }
+ val copyall = (copyxs parallel copyys) {
+ (xr, yr) => (xr, yr)
+ }
+ tasksupport.executeAndWaitResult(copyall)
+ } else {
+ val cntx = new DefaultSignalling with AtomicIndexFlag
+ cntx.setIndexFlag(Int.MaxValue)
+ tasksupport.executeAndWaitResult(new Span(0, pred, combinerFactory, combinerFactory, splitter assign cntx) mapResult {
+ p => (p._1.resultWithTaskSupport, p._2.resultWithTaskSupport)
+ })
+ }
}
/** Drops all elements in the longest prefix of elements that satisfy the predicate,
@@ -749,7 +746,11 @@ self: ParIterableLike[T, Repr, Sequential] =>
def dropWhile(pred: T => Boolean): Repr = {
val cntx = new DefaultSignalling with AtomicIndexFlag
cntx.setIndexFlag(Int.MaxValue)
- executeAndWaitResult(new Span(0, pred, cbfactory, splitter assign cntx) mapResult { _._2.result })
+ tasksupport.executeAndWaitResult(
+ new Span(0, pred, combinerFactory, combinerFactory, splitter assign cntx) mapResult {
+ _._2.resultWithTaskSupport
+ }
+ )
}
def copyToArray[U >: T](xs: Array[U]) = copyToArray(xs, 0)
@@ -757,31 +758,33 @@ self: ParIterableLike[T, Repr, Sequential] =>
def copyToArray[U >: T](xs: Array[U], start: Int) = copyToArray(xs, start, xs.length - start)
def copyToArray[U >: T](xs: Array[U], start: Int, len: Int) = if (len > 0) {
- executeAndWaitResult(new CopyToArray(start, len, xs, splitter))
+ tasksupport.executeAndWaitResult(new CopyToArray(start, len, xs, splitter))
}
def sameElements[U >: T](that: GenIterable[U]) = seq.sameElements(that)
- def zip[U >: T, S, That](that: GenIterable[S])(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf.isParallel && that.isParSeq) {
- val pbf = bf.asParallel
+ def zip[U >: T, S, That](that: GenIterable[S])(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf(repr).isCombiner && that.isParSeq) {
val thatseq = that.asParSeq
- executeAndWaitResult(new Zip(pbf, splitter, thatseq.splitter) mapResult { _.result });
- } else seq.zip(that)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(new Zip(combinerFactory(() => bf(repr).asCombiner), splitter, thatseq.splitter) mapResult { _.resultWithTaskSupport });
+ } else setTaskSupport(seq.zip(that)(bf2seq(bf)), tasksupport)
def zipWithIndex[U >: T, That](implicit bf: CanBuildFrom[Repr, (U, Int), That]): That = this zip immutable.ParRange(0, size, 1, false)
- def zipAll[S, U >: T, That](that: GenIterable[S], thisElem: U, thatElem: S)(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf.isParallel && that.isParSeq) {
- val pbf = bf.asParallel
+ def zipAll[S, U >: T, That](that: GenIterable[S], thisElem: U, thatElem: S)(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf(repr).isCombiner && that.isParSeq) {
val thatseq = that.asParSeq
- executeAndWaitResult(new ZipAll(size max thatseq.length, thisElem, thatElem, pbf, splitter, thatseq.splitter) mapResult { _.result });
- } else seq.zipAll(that, thisElem, thatElem)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(
+ new ZipAll(size max thatseq.length, thisElem, thatElem, combinerFactory(() => bf(repr).asCombiner), splitter, thatseq.splitter) mapResult {
+ _.resultWithTaskSupport
+ }
+ );
+ } else setTaskSupport(seq.zipAll(that, thisElem, thatElem)(bf2seq(bf)), tasksupport)
protected def toParCollection[U >: T, That](cbf: () => Combiner[U, That]): That = {
- executeAndWaitResult(new ToParCollection(cbf, splitter) mapResult { _.result });
+ tasksupport.executeAndWaitResult(new ToParCollection(combinerFactory(cbf), splitter) mapResult { _.resultWithTaskSupport });
}
protected def toParMap[K, V, That](cbf: () => Combiner[(K, V), That])(implicit ev: T <:< (K, V)): That = {
- executeAndWaitResult(new ToParMap(cbf, splitter)(ev) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new ToParMap(combinerFactory(cbf), splitter)(ev) mapResult { _.resultWithTaskSupport })
}
def view = new ParIterableView[T, Repr, Sequential] {
@@ -838,8 +841,8 @@ self: ParIterableLike[T, Repr, Sequential] =>
extends StrictSplitterCheckTask[R, Tp] {
protected[this] val pit: IterableSplitter[T]
protected[this] def newSubtask(p: IterableSplitter[T]): Accessor[R, Tp]
- def shouldSplitFurther = pit.remaining > threshold(size, parallelismLevel)
- def split = pit.split.map(newSubtask(_)) // default split procedure
+ def shouldSplitFurther = pit.shouldSplitFurther(self.repr, tasksupport.parallelismLevel)
+ def split = pit.splitWithSignalling.map(newSubtask(_)) // default split procedure
private[parallel] override def signalAbort = pit.abort
override def toString = this.getClass.getSimpleName + "(" + pit.toString + ")(" + result + ")(supername: " + super.toString + ")"
}
@@ -869,22 +872,22 @@ self: ParIterableLike[T, Repr, Sequential] =>
/** Sequentially performs one task after another. */
protected[this] abstract class SeqComposite[FR, SR, R, First <: StrictSplitterCheckTask[FR, _], Second <: StrictSplitterCheckTask[SR, _]]
- (f: First, s: Second)
+ (f: First, s: Second)
extends Composite[FR, SR, R, First, Second](f, s) {
def leaf(prevr: Option[R]) = {
- executeAndWaitResult(ft)
- executeAndWaitResult(st)
+ tasksupport.executeAndWaitResult(ft)
+ tasksupport.executeAndWaitResult(st)
mergeSubtasks
}
}
/** Performs two tasks in parallel, and waits for both to finish. */
protected[this] abstract class ParComposite[FR, SR, R, First <: StrictSplitterCheckTask[FR, _], Second <: StrictSplitterCheckTask[SR, _]]
- (f: First, s: Second)
+ (f: First, s: Second)
extends Composite[FR, SR, R, First, Second](f, s) {
def leaf(prevr: Option[R]) = {
- val ftfuture = execute(ft)
- executeAndWaitResult(st)
+ val ftfuture = tasksupport.execute(ft)
+ tasksupport.executeAndWaitResult(st)
ftfuture()
mergeSubtasks
}
@@ -895,7 +898,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
@volatile var result: R1 = null.asInstanceOf[R1]
def map(r: R): R1
def leaf(prevr: Option[R1]) = {
- val initialResult = executeAndWaitResult(inner)
+ val initialResult = tasksupport.executeAndWaitResult(inner)
result = map(initialResult)
}
private[parallel] override def signalAbort() {
@@ -906,13 +909,15 @@ self: ParIterableLike[T, Repr, Sequential] =>
protected trait Transformer[R, Tp] extends Accessor[R, Tp]
- protected[this] class Foreach[S](op: T => S, protected[this] val pit: IterableSplitter[T]) extends Accessor[Unit, Foreach[S]] {
+ protected[this] class Foreach[S](op: T => S, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Unit, Foreach[S]] {
@volatile var result: Unit = ()
def leaf(prevr: Option[Unit]) = pit.foreach(op)
protected[this] def newSubtask(p: IterableSplitter[T]) = new Foreach[S](op, p)
}
- protected[this] class Count(pred: T => Boolean, protected[this] val pit: IterableSplitter[T]) extends Accessor[Int, Count] {
+ protected[this] class Count(pred: T => Boolean, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Int, Count] {
// val pittxt = pit.toString
@volatile var result: Int = 0
def leaf(prevr: Option[Int]) = result = pit.count(pred)
@@ -921,7 +926,8 @@ self: ParIterableLike[T, Repr, Sequential] =>
// override def toString = "CountTask(" + pittxt + ")"
}
- protected[this] class Reduce[U >: T](op: (U, U) => U, protected[this] val pit: IterableSplitter[T]) extends Accessor[Option[U], Reduce[U]] {
+ protected[this] class Reduce[U >: T](op: (U, U) => U, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Option[U], Reduce[U]] {
@volatile var result: Option[U] = None
def leaf(prevr: Option[Option[U]]) = if (pit.remaining > 0) result = Some(pit.reduce(op))
protected[this] def newSubtask(p: IterableSplitter[T]) = new Reduce(op, p)
@@ -931,7 +937,8 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Fold[U >: T](z: U, op: (U, U) => U, protected[this] val pit: IterableSplitter[T]) extends Accessor[U, Fold[U]] {
+ protected[this] class Fold[U >: T](z: U, op: (U, U) => U, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[U, Fold[U]] {
@volatile var result: U = null.asInstanceOf[U]
def leaf(prevr: Option[U]) = result = pit.fold(z)(op)
protected[this] def newSubtask(p: IterableSplitter[T]) = new Fold(z, op, p)
@@ -946,21 +953,24 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: Aggregate[S]) = result = combop(result, that.result)
}
- protected[this] class Sum[U >: T](num: Numeric[U], protected[this] val pit: IterableSplitter[T]) extends Accessor[U, Sum[U]] {
+ protected[this] class Sum[U >: T](num: Numeric[U], protected[this] val pit: IterableSplitter[T])
+ extends Accessor[U, Sum[U]] {
@volatile var result: U = null.asInstanceOf[U]
def leaf(prevr: Option[U]) = result = pit.sum(num)
protected[this] def newSubtask(p: IterableSplitter[T]) = new Sum(num, p)
override def merge(that: Sum[U]) = result = num.plus(result, that.result)
}
- protected[this] class Product[U >: T](num: Numeric[U], protected[this] val pit: IterableSplitter[T]) extends Accessor[U, Product[U]] {
+ protected[this] class Product[U >: T](num: Numeric[U], protected[this] val pit: IterableSplitter[T])
+ extends Accessor[U, Product[U]] {
@volatile var result: U = null.asInstanceOf[U]
def leaf(prevr: Option[U]) = result = pit.product(num)
protected[this] def newSubtask(p: IterableSplitter[T]) = new Product(num, p)
override def merge(that: Product[U]) = result = num.times(result, that.result)
}
- protected[this] class Min[U >: T](ord: Ordering[U], protected[this] val pit: IterableSplitter[T]) extends Accessor[Option[U], Min[U]] {
+ protected[this] class Min[U >: T](ord: Ordering[U], protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Option[U], Min[U]] {
@volatile var result: Option[U] = None
def leaf(prevr: Option[Option[U]]) = if (pit.remaining > 0) result = Some(pit.min(ord))
protected[this] def newSubtask(p: IterableSplitter[T]) = new Min(ord, p)
@@ -970,7 +980,8 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Max[U >: T](ord: Ordering[U], protected[this] val pit: IterableSplitter[T]) extends Accessor[Option[U], Max[U]] {
+ protected[this] class Max[U >: T](ord: Ordering[U], protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Option[U], Max[U]] {
@volatile var result: Option[U] = None
def leaf(prevr: Option[Option[U]]) = if (pit.remaining > 0) result = Some(pit.max(ord))
protected[this] def newSubtask(p: IterableSplitter[T]) = new Max(ord, p)
@@ -980,16 +991,16 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Map[S, That](f: T => S, pbf: () => Combiner[S, That], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Map[S, That](f: T => S, cbf: CombinerFactory[S, That], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[S, That], Map[S, That]] {
@volatile var result: Combiner[S, That] = null
- def leaf(prev: Option[Combiner[S, That]]) = result = pit.map2combiner(f, reuse(prev, pbf()))
- protected[this] def newSubtask(p: IterableSplitter[T]) = new Map(f, pbf, p)
+ def leaf(prev: Option[Combiner[S, That]]) = result = pit.map2combiner(f, reuse(prev, cbf()))
+ protected[this] def newSubtask(p: IterableSplitter[T]) = new Map(f, cbf, p)
override def merge(that: Map[S, That]) = result = result combine that.result
}
protected[this] class Collect[S, That]
- (pf: PartialFunction[T, S], pbf: () => Combiner[S, That], protected[this] val pit: IterableSplitter[T])
+ (pf: PartialFunction[T, S], pbf: CombinerFactory[S, That], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[S, That], Collect[S, That]] {
@volatile var result: Combiner[S, That] = null
def leaf(prev: Option[Combiner[S, That]]) = result = pit.collect2combiner[S, That](pf, pbf())
@@ -998,7 +1009,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
protected[this] class FlatMap[S, That]
- (f: T => GenTraversableOnce[S], pbf: () => Combiner[S, That], protected[this] val pit: IterableSplitter[T])
+ (f: T => GenTraversableOnce[S], pbf: CombinerFactory[S, That], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[S, That], FlatMap[S, That]] {
@volatile var result: Combiner[S, That] = null
def leaf(prev: Option[Combiner[S, That]]) = result = pit.flatmap2combiner(f, pbf())
@@ -1010,28 +1021,31 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
}
- protected[this] class Forall(pred: T => Boolean, protected[this] val pit: IterableSplitter[T]) extends Accessor[Boolean, Forall] {
+ protected[this] class Forall(pred: T => Boolean, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Boolean, Forall] {
@volatile var result: Boolean = true
def leaf(prev: Option[Boolean]) = { if (!pit.isAborted) result = pit.forall(pred); if (result == false) pit.abort }
protected[this] def newSubtask(p: IterableSplitter[T]) = new Forall(pred, p)
override def merge(that: Forall) = result = result && that.result
}
- protected[this] class Exists(pred: T => Boolean, protected[this] val pit: IterableSplitter[T]) extends Accessor[Boolean, Exists] {
+ protected[this] class Exists(pred: T => Boolean, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Boolean, Exists] {
@volatile var result: Boolean = false
def leaf(prev: Option[Boolean]) = { if (!pit.isAborted) result = pit.exists(pred); if (result == true) pit.abort }
protected[this] def newSubtask(p: IterableSplitter[T]) = new Exists(pred, p)
override def merge(that: Exists) = result = result || that.result
}
- protected[this] class Find[U >: T](pred: T => Boolean, protected[this] val pit: IterableSplitter[T]) extends Accessor[Option[U], Find[U]] {
+ protected[this] class Find[U >: T](pred: T => Boolean, protected[this] val pit: IterableSplitter[T])
+ extends Accessor[Option[U], Find[U]] {
@volatile var result: Option[U] = None
def leaf(prev: Option[Option[U]]) = { if (!pit.isAborted) result = pit.find(pred); if (result != None) pit.abort }
protected[this] def newSubtask(p: IterableSplitter[T]) = new Find(pred, p)
override def merge(that: Find[U]) = if (this.result == None) result = that.result
}
- protected[this] class Filter[U >: T, This >: Repr](pred: T => Boolean, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Filter[U >: T, This >: Repr](pred: T => Boolean, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, This], Filter[U, This]] {
@volatile var result: Combiner[U, This] = null
def leaf(prev: Option[Combiner[U, This]]) = {
@@ -1041,7 +1055,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: Filter[U, This]) = result = result combine that.result
}
- protected[this] class FilterNot[U >: T, This >: Repr](pred: T => Boolean, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class FilterNot[U >: T, This >: Repr](pred: T => Boolean, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, This], FilterNot[U, This]] {
@volatile var result: Combiner[U, This] = null
def leaf(prev: Option[Combiner[U, This]]) = {
@@ -1051,7 +1065,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: FilterNot[U, This]) = result = result combine that.result
}
- protected class Copy[U >: T, That](cfactory: () => Combiner[U, That], protected[this] val pit: IterableSplitter[T])
+ protected class Copy[U >: T, That](cfactory: CombinerFactory[U, That], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, That], Copy[U, That]] {
@volatile var result: Combiner[U, That] = null
def leaf(prev: Option[Combiner[U, That]]) = result = pit.copy2builder[U, That, Combiner[U, That]](reuse(prev, cfactory()))
@@ -1059,11 +1073,12 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: Copy[U, That]) = result = result combine that.result
}
- protected[this] class Partition[U >: T, This >: Repr](pred: T => Boolean, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Partition[U >: T, This >: Repr]
+ (pred: T => Boolean, cbfTrue: CombinerFactory[U, This], cbfFalse: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[(Combiner[U, This], Combiner[U, This]), Partition[U, This]] {
@volatile var result: (Combiner[U, This], Combiner[U, This]) = null
- def leaf(prev: Option[(Combiner[U, This], Combiner[U, This])]) = result = pit.partition2combiners(pred, reuse(prev.map(_._1), cbf()), reuse(prev.map(_._2), cbf()))
- protected[this] def newSubtask(p: IterableSplitter[T]) = new Partition(pred, cbf, p)
+ def leaf(prev: Option[(Combiner[U, This], Combiner[U, This])]) = result = pit.partition2combiners(pred, reuse(prev.map(_._1), cbfTrue()), reuse(prev.map(_._2), cbfFalse()))
+ protected[this] def newSubtask(p: IterableSplitter[T]) = new Partition(pred, cbfTrue, cbfFalse, p)
override def merge(that: Partition[U, This]) = result = (result._1 combine that.result._1, result._2 combine that.result._2)
}
@@ -1090,7 +1105,8 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
}
- protected[this] class Take[U >: T, This >: Repr](n: Int, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Take[U >: T, This >: Repr]
+ (n: Int, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, This], Take[U, This]] {
@volatile var result: Combiner[U, This] = null
def leaf(prev: Option[Combiner[U, This]]) = {
@@ -1098,7 +1114,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.scanLeft(0)(_ + _.remaining)
for ((p, untilp) <- pits zip sizes; if untilp <= n) yield {
if (untilp + p.remaining < n) new Take(p.remaining, cbf, p)
@@ -1109,13 +1125,14 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Drop[U >: T, This >: Repr](n: Int, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Drop[U >: T, This >: Repr]
+ (n: Int, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, This], Drop[U, This]] {
@volatile var result: Combiner[U, This] = null
def leaf(prev: Option[Combiner[U, This]]) = result = pit.drop2combiner(n, reuse(prev, cbf()))
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.scanLeft(0)(_ + _.remaining)
for ((p, withp) <- pits zip sizes.tail; if withp >= n) yield {
if (withp - p.remaining > n) new Drop(0, cbf, p)
@@ -1126,13 +1143,14 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Slice[U >: T, This >: Repr](from: Int, until: Int, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class Slice[U >: T, This >: Repr]
+ (from: Int, until: Int, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, This], Slice[U, This]] {
@volatile var result: Combiner[U, This] = null
def leaf(prev: Option[Combiner[U, This]]) = result = pit.slice2combiner(from, until, reuse(prev, cbf()))
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.scanLeft(0)(_ + _.remaining)
for ((p, untilp) <- pits zip sizes; if untilp + p.remaining >= from || untilp <= until) yield {
val f = (from max untilp) - untilp
@@ -1144,22 +1162,23 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class SplitAt[U >: T, This >: Repr](at: Int, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ protected[this] class SplitAt[U >: T, This >: Repr]
+ (at: Int, cbfBefore: CombinerFactory[U, This], cbfAfter: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[(Combiner[U, This], Combiner[U, This]), SplitAt[U, This]] {
@volatile var result: (Combiner[U, This], Combiner[U, This]) = null
- def leaf(prev: Option[(Combiner[U, This], Combiner[U, This])]) = result = pit.splitAt2combiners(at, reuse(prev.map(_._1), cbf()), reuse(prev.map(_._2), cbf()))
+ def leaf(prev: Option[(Combiner[U, This], Combiner[U, This])]) = result = pit.splitAt2combiners(at, reuse(prev.map(_._1), cbfBefore()), reuse(prev.map(_._2), cbfAfter()))
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.scanLeft(0)(_ + _.remaining)
- for ((p, untilp) <- pits zip sizes) yield new SplitAt((at max untilp min (untilp + p.remaining)) - untilp, cbf, p)
+ for ((p, untilp) <- pits zip sizes) yield new SplitAt((at max untilp min (untilp + p.remaining)) - untilp, cbfBefore, cbfAfter, p)
}
override def merge(that: SplitAt[U, This]) = result = (result._1 combine that.result._1, result._2 combine that.result._2)
override def requiresStrictSplitters = true
}
protected[this] class TakeWhile[U >: T, This >: Repr]
- (pos: Int, pred: T => Boolean, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ (pos: Int, pred: T => Boolean, cbf: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[(Combiner[U, This], Boolean), TakeWhile[U, This]] {
@volatile var result: (Combiner[U, This], Boolean) = null
def leaf(prev: Option[(Combiner[U, This], Boolean)]) = if (pos < pit.indexFlag) {
@@ -1168,7 +1187,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
} else result = (reuse(prev.map(_._1), cbf()), false)
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining)) yield new TakeWhile(pos + untilp, pred, cbf, p)
}
override def merge(that: TakeWhile[U, This]) = if (result._2) {
@@ -1178,23 +1197,23 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
protected[this] class Span[U >: T, This >: Repr]
- (pos: Int, pred: T => Boolean, cbf: () => Combiner[U, This], protected[this] val pit: IterableSplitter[T])
+ (pos: Int, pred: T => Boolean, cbfBefore: CombinerFactory[U, This], cbfAfter: CombinerFactory[U, This], protected[this] val pit: IterableSplitter[T])
extends Transformer[(Combiner[U, This], Combiner[U, This]), Span[U, This]] {
@volatile var result: (Combiner[U, This], Combiner[U, This]) = null
def leaf(prev: Option[(Combiner[U, This], Combiner[U, This])]) = if (pos < pit.indexFlag) {
// val lst = pit.toList
// val pa = mutable.ParArray(lst: _*)
// val str = "At leaf we will iterate: " + pa.splitter.toList
- result = pit.span2combiners(pred, cbf(), cbf()) // do NOT reuse old combiners here, lest ye be surprised
+ result = pit.span2combiners(pred, cbfBefore(), cbfAfter()) // do NOT reuse old combiners here, lest ye be surprised
// println("\nAt leaf result is: " + result)
if (result._2.size > 0) pit.setIndexFlagIfLesser(pos)
} else {
- result = (reuse(prev.map(_._2), cbf()), pit.copy2builder[U, This, Combiner[U, This]](reuse(prev.map(_._2), cbf())))
+ result = (reuse(prev.map(_._2), cbfBefore()), pit.copy2builder[U, This, Combiner[U, This]](reuse(prev.map(_._2), cbfAfter())))
}
protected[this] def newSubtask(p: IterableSplitter[T]) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
- for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining)) yield new Span(pos + untilp, pred, cbf, p)
+ val pits = pit.splitWithSignalling
+ for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining)) yield new Span(pos + untilp, pred, cbfBefore, cbfAfter, p)
}
override def merge(that: Span[U, This]) = result = if (result._2.size == 0) {
(result._1 combine that.result._1, that.result._2)
@@ -1204,15 +1223,15 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class Zip[U >: T, S, That](pbf: CanCombineFrom[Repr, (U, S), That], protected[this] val pit: IterableSplitter[T], val othpit: SeqSplitter[S])
+ protected[this] class Zip[U >: T, S, That](pbf: CombinerFactory[(U, S), That], protected[this] val pit: IterableSplitter[T], val othpit: SeqSplitter[S])
extends Transformer[Combiner[(U, S), That], Zip[U, S, That]] {
@volatile var result: Result = null
- def leaf(prev: Option[Result]) = result = pit.zip2combiner[U, S, That](othpit, pbf(self.repr))
+ def leaf(prev: Option[Result]) = result = pit.zip2combiner[U, S, That](othpit, pbf())
protected[this] def newSubtask(p: IterableSplitter[T]) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.map(_.remaining)
- val opits = othpit.psplit(sizes: _*)
+ val opits = othpit.psplitWithSignalling(sizes: _*)
(pits zip opits) map { p => new Zip(pbf, p._1, p._2) }
}
override def merge(that: Zip[U, S, That]) = result = result combine that.result
@@ -1220,18 +1239,18 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
protected[this] class ZipAll[U >: T, S, That]
- (len: Int, thiselem: U, thatelem: S, pbf: CanCombineFrom[Repr, (U, S), That], protected[this] val pit: IterableSplitter[T], val othpit: SeqSplitter[S])
+ (len: Int, thiselem: U, thatelem: S, pbf: CombinerFactory[(U, S), That], protected[this] val pit: IterableSplitter[T], val othpit: SeqSplitter[S])
extends Transformer[Combiner[(U, S), That], ZipAll[U, S, That]] {
@volatile var result: Result = null
- def leaf(prev: Option[Result]) = result = pit.zipAll2combiner[U, S, That](othpit, thiselem, thatelem, pbf(self.repr))
+ def leaf(prev: Option[Result]) = result = pit.zipAll2combiner[U, S, That](othpit, thiselem, thatelem, pbf())
protected[this] def newSubtask(p: IterableSplitter[T]) = unsupported
override def split = if (pit.remaining <= len) {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
val sizes = pits.map(_.remaining)
- val opits = othpit.psplit(sizes: _*)
+ val opits = othpit.psplitWithSignalling(sizes: _*)
((pits zip opits) zip sizes) map { t => new ZipAll(t._2, thiselem, thatelem, pbf, t._1._1, t._1._2) }
} else {
- val opits = othpit.psplit(pit.remaining)
+ val opits = othpit.psplitWithSignalling(pit.remaining)
val diff = len - pit.remaining
Seq(
new ZipAll(pit.remaining, thiselem, thatelem, pbf, pit, opits(0)), // nothing wrong will happen with the cast below - elem T is never accessed
@@ -1248,7 +1267,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
def leaf(prev: Option[Unit]) = pit.copyToArray(array, from, len)
protected[this] def newSubtask(p: IterableSplitter[T]) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining); if untilp < len) yield {
val plen = p.remaining min (len - untilp)
new CopyToArray[U, This](from + untilp, plen, array, p)
@@ -1257,7 +1276,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def requiresStrictSplitters = true
}
- protected[this] class ToParCollection[U >: T, That](cbf: () => Combiner[U, That], protected[this] val pit: IterableSplitter[T])
+ protected[this] class ToParCollection[U >: T, That](cbf: CombinerFactory[U, That], protected[this] val pit: IterableSplitter[T])
extends Transformer[Combiner[U, That], ToParCollection[U, That]] {
@volatile var result: Result = null
def leaf(prev: Option[Combiner[U, That]]) {
@@ -1268,7 +1287,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: ToParCollection[U, That]) = result = result combine that.result
}
- protected[this] class ToParMap[K, V, That](cbf: () => Combiner[(K, V), That], protected[this] val pit: IterableSplitter[T])(implicit ev: T <:< (K, V))
+ protected[this] class ToParMap[K, V, That](cbf: CombinerFactory[(K, V), That], protected[this] val pit: IterableSplitter[T])(implicit ev: T <:< (K, V))
extends Transformer[Combiner[(K, V), That], ToParMap[K, V, That]] {
@volatile var result: Result = null
def leaf(prev: Option[Combiner[(K, V), That]]) {
@@ -1305,7 +1324,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
} else trees(from)
protected[this] def newSubtask(pit: IterableSplitter[T]) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(from)(_ + _.remaining)) yield {
new CreateScanTree(untilp, p.remaining, z, op, p)
}
@@ -1317,11 +1336,11 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
protected[this] class FromScanTree[U >: T, That]
- (tree: ScanTree[U], z: U, op: (U, U) => U, cbf: CanCombineFrom[Repr, U, That])
+ (tree: ScanTree[U], z: U, op: (U, U) => U, cbf: CombinerFactory[U, That])
extends StrictSplitterCheckTask[Combiner[U, That], FromScanTree[U, That]] {
@volatile var result: Combiner[U, That] = null
def leaf(prev: Option[Combiner[U, That]]) {
- val cb = reuse(prev, cbf(self.repr))
+ val cb = reuse(prev, cbf())
iterate(tree, cb)
result = cb
}
@@ -1351,7 +1370,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
/* scan tree */
- protected[this] def scanBlockSize = (threshold(size, parallelismLevel) / 2) max 1
+ protected[this] def scanBlockSize = (thresholdFromSize(size, tasksupport.parallelismLevel) / 2) max 1
protected[this] trait ScanTree[U >: T] {
def beginsAt: Int
@@ -1392,6 +1411,12 @@ self: ParIterableLike[T, Repr, Sequential] =>
def print(depth: Int) = println((" " * depth) + this)
}
+ /* alias methods */
+
+ def /:[S](z: S)(op: (S, T) => S): S = foldLeft(z)(op);
+
+ def :\[S](z: S)(op: (T, S) => S): S = foldRight(z)(op);
+
/* debug information */
private[parallel] def debugInformation = "Parallel collection: " + this.getClass
diff --git a/src/library/scala/collection/parallel/ParIterableViewLike.scala b/src/library/scala/collection/parallel/ParIterableViewLike.scala
index 1d7659922c..536139c812 100644
--- a/src/library/scala/collection/parallel/ParIterableViewLike.scala
+++ b/src/library/scala/collection/parallel/ParIterableViewLike.scala
@@ -47,7 +47,6 @@ extends GenIterableView[T, Coll]
with ParIterableLike[T, This, ThisSeq]
{
self =>
- import tasksupport._
override def foreach[U](f: T => U): Unit = super[ParIterableLike].foreach(f)
override protected[this] def newCombiner: Combiner[T, This] = throw new UnsupportedOperationException(this + ".newCombiner");
@@ -135,7 +134,7 @@ self =>
newZippedAllTryParSeq(that, thisElem, thatElem).asInstanceOf[That]
override def force[U >: T, That](implicit bf: CanBuildFrom[Coll, U, That]) = bf ifParallel { pbf =>
- executeAndWaitResult(new Force(pbf, splitter).mapResult(_.result).asInstanceOf[Task[That, ResultMapping[_, Force[U, That], That]]])
+ tasksupport.executeAndWaitResult(new Force(pbf, splitter).mapResult(_.result).asInstanceOf[Task[That, ResultMapping[_, Force[U, That], That]]])
} otherwise {
val b = bf(underlying)
b ++= this.iterator
diff --git a/src/library/scala/collection/parallel/ParMapLike.scala b/src/library/scala/collection/parallel/ParMapLike.scala
index beb50a41e1..afd1f30903 100644
--- a/src/library/scala/collection/parallel/ParMapLike.scala
+++ b/src/library/scala/collection/parallel/ParMapLike.scala
@@ -66,7 +66,6 @@ self =>
new IterableSplitter[K] {
i =>
val iter = s
- var signalDelegate: Signalling = IdleSignalling
def hasNext = iter.hasNext
def next() = iter.next._1
def split = {
@@ -84,7 +83,6 @@ self =>
new IterableSplitter[V] {
i =>
val iter = s
- var signalDelegate: Signalling = IdleSignalling
def hasNext = iter.hasNext
def next() = iter.next._2
def split = {
diff --git a/src/library/scala/collection/parallel/ParSeqLike.scala b/src/library/scala/collection/parallel/ParSeqLike.scala
index d0f38b30dc..9f28a286ca 100644
--- a/src/library/scala/collection/parallel/ParSeqLike.scala
+++ b/src/library/scala/collection/parallel/ParSeqLike.scala
@@ -44,39 +44,9 @@ trait ParSeqLike[+T, +Repr <: ParSeq[T], +Sequential <: Seq[T] with SeqLike[T, S
extends scala.collection.GenSeqLike[T, Repr]
with ParIterableLike[T, Repr, Sequential] {
self =>
- import tasksupport._
-
+
type SuperParIterator = IterableSplitter[T]
- /** An iterator that can be split into arbitrary subsets of iterators.
- * The self-type requirement ensures that the signal context passing behaviour gets mixed in
- * the concrete iterator instance in some concrete collection.
- *
- * '''Note:''' In concrete collection classes, collection implementers might want to override the iterator
- * `reverse2builder` method to ensure higher efficiency.
- */
- trait ParIterator extends SeqSplitter[T] with super.ParIterator {
- me: SignalContextPassingIterator[ParIterator] =>
- def split: Seq[ParIterator]
- def psplit(sizes: Int*): Seq[ParIterator]
- }
-
- /** A stackable modification that ensures signal contexts get passed along the iterators.
- * A self-type requirement in `ParIterator` ensures that this trait gets mixed into
- * concrete iterators.
- */
- trait SignalContextPassingIterator[+IterRepr <: ParIterator]
- extends ParIterator with super.SignalContextPassingIterator[IterRepr] {
- // Note: See explanation in `ParallelIterableLike.this.SignalContextPassingIterator`
- // to understand why we do the cast here, and have a type parameter.
- // Bottomline: avoiding boilerplate and fighting against inability to override stackable modifications.
- abstract override def psplit(sizes: Int*): Seq[IterRepr] = {
- val pits = super.psplit(sizes: _*)
- pits foreach { _.signalDelegate = signalDelegate }
- pits.asInstanceOf[Seq[IterRepr]]
- }
- }
-
/** A more refined version of the iterator found in the `ParallelIterable` trait,
* this iterator can be split into arbitrary subsets of iterators.
*
@@ -89,9 +59,7 @@ self =>
override def size = length
/** Used to iterate elements using indices */
- protected abstract class Elements(start: Int, val end: Int) extends ParIterator with BufferedIterator[T] {
- me: SignalContextPassingIterator[ParIterator] =>
-
+ protected abstract class Elements(start: Int, val end: Int) extends SeqSplitter[T] with BufferedIterator[T] {
private var i = start
def hasNext = i < end
@@ -106,14 +74,14 @@ self =>
final def remaining = end - i
- def dup = new Elements(i, end) with SignalContextPassingIterator[ParIterator]
+ def dup = new Elements(i, end) {}
def split = psplit(remaining / 2, remaining - remaining / 2)
def psplit(sizes: Int*) = {
val incr = sizes.scanLeft(0)(_ + _)
for ((from, until) <- incr.init zip incr.tail) yield {
- new Elements(start + from, (start + until) min end) with SignalContextPassingIterator[ParIterator]
+ new Elements(start + from, (start + until) min end) {}
}
}
@@ -138,7 +106,7 @@ self =>
val realfrom = if (from < 0) 0 else from
val ctx = new DefaultSignalling with AtomicIndexFlag
ctx.setIndexFlag(Int.MaxValue)
- executeAndWaitResult(new SegmentLength(p, 0, splitter.psplit(realfrom, length - realfrom)(1) assign ctx))._1
+ tasksupport.executeAndWaitResult(new SegmentLength(p, 0, splitter.psplitWithSignalling(realfrom, length - realfrom)(1) assign ctx))._1
}
/** Finds the first element satisfying some predicate.
@@ -156,7 +124,7 @@ self =>
val realfrom = if (from < 0) 0 else from
val ctx = new DefaultSignalling with AtomicIndexFlag
ctx.setIndexFlag(Int.MaxValue)
- executeAndWaitResult(new IndexWhere(p, realfrom, splitter.psplit(realfrom, length - realfrom)(1) assign ctx))
+ tasksupport.executeAndWaitResult(new IndexWhere(p, realfrom, splitter.psplitWithSignalling(realfrom, length - realfrom)(1) assign ctx))
}
/** Finds the last element satisfying some predicate.
@@ -174,18 +142,20 @@ self =>
val until = if (end >= length) length else end + 1
val ctx = new DefaultSignalling with AtomicIndexFlag
ctx.setIndexFlag(Int.MinValue)
- executeAndWaitResult(new LastIndexWhere(p, 0, splitter.psplit(until, length - until)(0) assign ctx))
+ tasksupport.executeAndWaitResult(new LastIndexWhere(p, 0, splitter.psplitWithSignalling(until, length - until)(0) assign ctx))
}
def reverse: Repr = {
- executeAndWaitResult(new Reverse(() => newCombiner, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new Reverse(() => newCombiner, splitter) mapResult { _.resultWithTaskSupport })
}
def reverseMap[S, That](f: T => S)(implicit bf: CanBuildFrom[Repr, S, That]): That = if (bf(repr).isCombiner) {
- executeAndWaitResult(new ReverseMap[S, That](f, () => bf(repr).asCombiner, splitter) mapResult { _.result })
- } else seq.reverseMap(f)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(
+ new ReverseMap[S, That](f, () => bf(repr).asCombiner, splitter) mapResult { _.resultWithTaskSupport }
+ )
+ } else setTaskSupport(seq.reverseMap(f)(bf2seq(bf)), tasksupport)
/*bf ifParallel { pbf =>
- executeAndWaitResult(new ReverseMap[S, That](f, pbf, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new ReverseMap[S, That](f, pbf, splitter) mapResult { _.result })
} otherwise seq.reverseMap(f)(bf2seq(bf))*/
/** Tests whether this $coll contains the given sequence at a given index.
@@ -203,13 +173,15 @@ self =>
else if (pthat.length > length - offset) false
else {
val ctx = new DefaultSignalling with VolatileAbort
- executeAndWaitResult(new SameElements(splitter.psplit(offset, pthat.length)(1) assign ctx, pthat.splitter))
+ tasksupport.executeAndWaitResult(
+ new SameElements(splitter.psplitWithSignalling(offset, pthat.length)(1) assign ctx, pthat.splitter)
+ )
}
} otherwise seq.startsWith(that, offset)
override def sameElements[U >: T](that: GenIterable[U]): Boolean = that ifParSeq { pthat =>
val ctx = new DefaultSignalling with VolatileAbort
- length == pthat.length && executeAndWaitResult(new SameElements(splitter assign ctx, pthat.splitter))
+ length == pthat.length && tasksupport.executeAndWaitResult(new SameElements(splitter assign ctx, pthat.splitter))
} otherwise seq.sameElements(that)
/** Tests whether this $coll ends with the given parallel sequence.
@@ -226,24 +198,24 @@ self =>
else {
val ctx = new DefaultSignalling with VolatileAbort
val tlen = that.length
- executeAndWaitResult(new SameElements(splitter.psplit(length - tlen, tlen)(1) assign ctx, pthat.splitter))
+ tasksupport.executeAndWaitResult(new SameElements(splitter.psplitWithSignalling(length - tlen, tlen)(1) assign ctx, pthat.splitter))
}
} otherwise seq.endsWith(that)
def patch[U >: T, That](from: Int, patch: GenSeq[U], replaced: Int)(implicit bf: CanBuildFrom[Repr, U, That]): That = {
val realreplaced = replaced min (length - from)
- if (patch.isParSeq && bf.isParallel && (size - realreplaced + patch.size) > MIN_FOR_COPY) {
+ if (patch.isParSeq && bf(repr).isCombiner && (size - realreplaced + patch.size) > MIN_FOR_COPY) {
val that = patch.asParSeq
- val pbf = bf.asParallel
- val pits = splitter.psplit(from, replaced, length - from - realreplaced)
- val copystart = new Copy[U, That](() => pbf(repr), pits(0))
+ val pits = splitter.psplitWithSignalling(from, replaced, length - from - realreplaced)
+ val cfactory = combinerFactory(() => bf(repr).asCombiner)
+ val copystart = new Copy[U, That](cfactory, pits(0))
val copymiddle = wrap {
- val tsk = new that.Copy[U, That](() => pbf(repr), that.splitter)
+ val tsk = new that.Copy[U, That](cfactory, that.splitter)
tasksupport.executeAndWaitResult(tsk)
}
- val copyend = new Copy[U, That](() => pbf(repr), pits(2))
- executeAndWaitResult(((copystart parallel copymiddle) { _ combine _ } parallel copyend) { _ combine _ } mapResult {
- _.result
+ val copyend = new Copy[U, That](cfactory, pits(2))
+ tasksupport.executeAndWaitResult(((copystart parallel copymiddle) { _ combine _ } parallel copyend) { _ combine _ } mapResult {
+ _.resultWithTaskSupport
})
} else patch_sequential(from, patch.seq, replaced)
}
@@ -252,18 +224,22 @@ self =>
val from = 0 max fromarg
val b = bf(repr)
val repl = (r min (length - from)) max 0
- val pits = splitter.psplit(from, repl, length - from - repl)
+ val pits = splitter.psplitWithSignalling(from, repl, length - from - repl)
b ++= pits(0)
b ++= patch
b ++= pits(2)
- b.result
+ setTaskSupport(b.result, tasksupport)
}
def updated[U >: T, That](index: Int, elem: U)(implicit bf: CanBuildFrom[Repr, U, That]): That = if (bf(repr).isCombiner) {
- executeAndWaitResult(new Updated(index, elem, () => bf(repr).asCombiner, splitter) mapResult { _.result })
- } else seq.updated(index, elem)(bf2seq(bf))
+ tasksupport.executeAndWaitResult(
+ new Updated(index, elem, combinerFactory(() => bf(repr).asCombiner), splitter) mapResult {
+ _.resultWithTaskSupport
+ }
+ )
+ } else setTaskSupport(seq.updated(index, elem)(bf2seq(bf)), tasksupport)
/*bf ifParallel { pbf =>
- executeAndWaitResult(new Updated(index, elem, pbf, splitter) mapResult { _.result })
+ tasksupport.executeAndWaitResult(new Updated(index, elem, pbf, splitter) mapResult { _.result })
} otherwise seq.updated(index, elem)(bf2seq(bf))*/
def +:[U >: T, That](elem: U)(implicit bf: CanBuildFrom[Repr, U, That]): That = {
@@ -278,10 +254,13 @@ self =>
patch(length, new immutable.Repetition(elem, len - length), 0)
} else patch(length, Nil, 0);
- override def zip[U >: T, S, That](that: GenIterable[S])(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf.isParallel && that.isParSeq) {
- val pbf = bf.asParallel
+ override def zip[U >: T, S, That](that: GenIterable[S])(implicit bf: CanBuildFrom[Repr, (U, S), That]): That = if (bf(repr).isCombiner && that.isParSeq) {
val thatseq = that.asParSeq
- executeAndWaitResult(new Zip(length min thatseq.length, pbf, splitter, thatseq.splitter) mapResult { _.result });
+ tasksupport.executeAndWaitResult(
+ new Zip(length min thatseq.length, combinerFactory(() => bf(repr).asCombiner), splitter, thatseq.splitter) mapResult {
+ _.resultWithTaskSupport
+ }
+ );
} else super.zip(that)(bf)
/** Tests whether every element of this $coll relates to the
@@ -298,7 +277,7 @@ self =>
*/
def corresponds[S](that: GenSeq[S])(p: (T, S) => Boolean): Boolean = that ifParSeq { pthat =>
val ctx = new DefaultSignalling with VolatileAbort
- length == pthat.length && executeAndWaitResult(new Corresponds(p, splitter assign ctx, pthat.splitter))
+ length == pthat.length && tasksupport.executeAndWaitResult(new Corresponds(p, splitter assign ctx, pthat.splitter))
} otherwise seq.corresponds(that)(p)
def diff[U >: T](that: GenSeq[U]): Repr = sequentially {
@@ -372,7 +351,7 @@ self =>
} else result = (0, false)
protected[this] def newSubtask(p: SuperParIterator) = throw new UnsupportedOperationException
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining)) yield new SegmentLength(pred, from + untilp, p)
}
override def merge(that: SegmentLength) = if (result._2) result = (result._1 + that.result._1, that.result._2)
@@ -391,7 +370,7 @@ self =>
}
protected[this] def newSubtask(p: SuperParIterator) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(from)(_ + _.remaining)) yield new IndexWhere(pred, untilp, p)
}
override def merge(that: IndexWhere) = result = if (result == -1) that.result else {
@@ -412,7 +391,7 @@ self =>
}
protected[this] def newSubtask(p: SuperParIterator) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(pos)(_ + _.remaining)) yield new LastIndexWhere(pred, untilp, p)
}
override def merge(that: LastIndexWhere) = result = if (result == -1) that.result else {
@@ -437,7 +416,7 @@ self =>
override def merge(that: ReverseMap[S, That]) = result = that.result combine result
}
- protected[this] class SameElements[U >: T](protected[this] val pit: SeqSplitter[T], val otherpit: PreciseSplitter[U])
+ protected[this] class SameElements[U >: T](protected[this] val pit: SeqSplitter[T], val otherpit: SeqSplitter[U])
extends Accessor[Boolean, SameElements[U]] {
@volatile var result: Boolean = true
def leaf(prev: Option[Boolean]) = if (!pit.isAborted) {
@@ -448,44 +427,44 @@ self =>
override def split = {
val fp = pit.remaining / 2
val sp = pit.remaining - fp
- for ((p, op) <- pit.psplit(fp, sp) zip otherpit.psplit(fp, sp)) yield new SameElements(p, op)
+ for ((p, op) <- pit.psplitWithSignalling(fp, sp) zip otherpit.psplitWithSignalling(fp, sp)) yield new SameElements(p, op)
}
override def merge(that: SameElements[U]) = result = result && that.result
override def requiresStrictSplitters = true
}
- protected[this] class Updated[U >: T, That](pos: Int, elem: U, pbf: () => Combiner[U, That], protected[this] val pit: SeqSplitter[T])
+ protected[this] class Updated[U >: T, That](pos: Int, elem: U, pbf: CombinerFactory[U, That], protected[this] val pit: SeqSplitter[T])
extends Transformer[Combiner[U, That], Updated[U, That]] {
@volatile var result: Combiner[U, That] = null
def leaf(prev: Option[Combiner[U, That]]) = result = pit.updated2combiner(pos, elem, pbf())
protected[this] def newSubtask(p: SuperParIterator) = unsupported
override def split = {
- val pits = pit.split
+ val pits = pit.splitWithSignalling
for ((p, untilp) <- pits zip pits.scanLeft(0)(_ + _.remaining)) yield new Updated(pos - untilp, elem, pbf, p)
}
override def merge(that: Updated[U, That]) = result = result combine that.result
override def requiresStrictSplitters = true
}
- protected[this] class Zip[U >: T, S, That](len: Int, pbf: CanCombineFrom[Repr, (U, S), That], protected[this] val pit: SeqSplitter[T], val otherpit: SeqSplitter[S])
+ protected[this] class Zip[U >: T, S, That](len: Int, cf: CombinerFactory[(U, S), That], protected[this] val pit: SeqSplitter[T], val otherpit: SeqSplitter[S])
extends Transformer[Combiner[(U, S), That], Zip[U, S, That]] {
@volatile var result: Result = null
- def leaf(prev: Option[Result]) = result = pit.zip2combiner[U, S, That](otherpit, pbf(self.repr))
+ def leaf(prev: Option[Result]) = result = pit.zip2combiner[U, S, That](otherpit, cf())
protected[this] def newSubtask(p: SuperParIterator) = unsupported
override def split = {
val fp = len / 2
val sp = len - len / 2
- val pits = pit.psplit(fp, sp)
- val opits = otherpit.psplit(fp, sp)
+ val pits = pit.psplitWithSignalling(fp, sp)
+ val opits = otherpit.psplitWithSignalling(fp, sp)
Seq(
- new Zip(fp, pbf, pits(0), opits(0)),
- new Zip(sp, pbf, pits(1), opits(1))
+ new Zip(fp, cf, pits(0), opits(0)),
+ new Zip(sp, cf, pits(1), opits(1))
)
}
override def merge(that: Zip[U, S, That]) = result = result combine that.result
}
- protected[this] class Corresponds[S](corr: (T, S) => Boolean, protected[this] val pit: SeqSplitter[T], val otherpit: PreciseSplitter[S])
+ protected[this] class Corresponds[S](corr: (T, S) => Boolean, protected[this] val pit: SeqSplitter[T], val otherpit: SeqSplitter[S])
extends Accessor[Boolean, Corresponds[S]] {
@volatile var result: Boolean = true
def leaf(prev: Option[Boolean]) = if (!pit.isAborted) {
@@ -496,7 +475,7 @@ self =>
override def split = {
val fp = pit.remaining / 2
val sp = pit.remaining - fp
- for ((p, op) <- pit.psplit(fp, sp) zip otherpit.psplit(fp, sp)) yield new Corresponds(corr, p, op)
+ for ((p, op) <- pit.psplitWithSignalling(fp, sp) zip otherpit.psplitWithSignalling(fp, sp)) yield new Corresponds(corr, p, op)
}
override def merge(that: Corresponds[S]) = result = result && that.result
override def requiresStrictSplitters = true
diff --git a/src/library/scala/collection/parallel/ParSeqViewLike.scala b/src/library/scala/collection/parallel/ParSeqViewLike.scala
index 6fdc181793..e0d1a7d6ff 100644
--- a/src/library/scala/collection/parallel/ParSeqViewLike.scala
+++ b/src/library/scala/collection/parallel/ParSeqViewLike.scala
@@ -38,7 +38,6 @@ extends GenSeqView[T, Coll]
with ParSeqLike[T, This, ThisSeq]
{
self =>
- import tasksupport._
trait Transformed[+S] extends ParSeqView[S, Coll, CollSeq]
with super[ParIterableView].Transformed[S] with super[GenSeqViewLike].Transformed[S] {
@@ -170,7 +169,7 @@ self =>
override def scanRight[S, That](z: S)(op: (T, S) => S)(implicit bf: CanBuildFrom[This, S, That]): That = newForced(thisParSeq.scanRight(z)(op)).asInstanceOf[That]
override def groupBy[K](f: T => K): immutable.ParMap[K, This] = thisParSeq.groupBy(f).map(kv => (kv._1, newForced(kv._2).asInstanceOf[This]))
override def force[U >: T, That](implicit bf: CanBuildFrom[Coll, U, That]) = bf ifParallel { pbf =>
- executeAndWaitResult(new Force(pbf, splitter).mapResult(_.result).asInstanceOf[Task[That, _]])
+ tasksupport.executeAndWaitResult(new Force(pbf, splitter).mapResult(_.result).asInstanceOf[Task[That, _]])
} otherwise {
val b = bf(underlying)
b ++= this.iterator
diff --git a/src/library/scala/collection/parallel/RemainsIterator.scala b/src/library/scala/collection/parallel/RemainsIterator.scala
index e04e0e9c72..c5910ff2c8 100644
--- a/src/library/scala/collection/parallel/RemainsIterator.scala
+++ b/src/library/scala/collection/parallel/RemainsIterator.scala
@@ -14,6 +14,7 @@ package scala.collection.parallel
import scala.collection.Parallel
import scala.collection.generic.Signalling
import scala.collection.generic.DelegatedSignalling
+import scala.collection.generic.IdleSignalling
import scala.collection.generic.CanCombineFrom
import scala.collection.mutable.Builder
import scala.collection.Iterator.empty
@@ -27,6 +28,11 @@ private[collection] trait RemainsIterator[+T] extends Iterator[T] {
* This method doesn't change the state of the iterator.
*/
def remaining: Int
+
+ /** For most collections, this is a cheap operation.
+ * Exceptions can override this method.
+ */
+ def isRemainingCheap = true
}
@@ -111,7 +117,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
def map2combiner[S, That](f: T => S, cb: Combiner[S, That]): Combiner[S, That] = {
//val cb = pbf(repr)
- cb.sizeHint(remaining)
+ if (isRemainingCheap) cb.sizeHint(remaining)
while (hasNext) cb += f(next)
cb
}
@@ -136,7 +142,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
}
def copy2builder[U >: T, Coll, Bld <: Builder[U, Coll]](b: Bld): Bld = {
- b.sizeHint(remaining)
+ if (isRemainingCheap) b.sizeHint(remaining)
while (hasNext) b += next
b
}
@@ -178,7 +184,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
def drop2combiner[U >: T, This](n: Int, cb: Combiner[U, This]): Combiner[U, This] = {
drop(n)
- cb.sizeHint(remaining)
+ if (isRemainingCheap) cb.sizeHint(remaining)
while (hasNext) cb += next
cb
}
@@ -196,7 +202,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
def splitAt2combiners[U >: T, This](at: Int, before: Combiner[U, This], after: Combiner[U, This]) = {
before.sizeHint(at)
- after.sizeHint(remaining - at)
+ if (isRemainingCheap) after.sizeHint(remaining - at)
var left = at
while (left > 0) {
before += next
@@ -222,7 +228,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
val curr = next
if (p(curr)) before += curr
else {
- after.sizeHint(remaining + 1)
+ if (isRemainingCheap) after.sizeHint(remaining + 1)
after += curr
isBefore = false
}
@@ -262,7 +268,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
}
def zip2combiner[U >: T, S, That](otherpit: RemainsIterator[S], cb: Combiner[(U, S), That]): Combiner[(U, S), That] = {
- cb.sizeHint(remaining min otherpit.remaining)
+ if (isRemainingCheap && otherpit.isRemainingCheap) cb.sizeHint(remaining min otherpit.remaining)
while (hasNext && otherpit.hasNext) {
cb += ((next, otherpit.next))
}
@@ -270,7 +276,7 @@ private[collection] trait AugmentedIterableIterator[+T] extends RemainsIterator[
}
def zipAll2combiner[U >: T, S, That](that: RemainsIterator[S], thiselem: U, thatelem: S, cb: Combiner[(U, S), That]): Combiner[(U, S), That] = {
- cb.sizeHint(remaining max that.remaining)
+ if (isRemainingCheap && that.isRemainingCheap) cb.sizeHint(remaining max that.remaining)
while (this.hasNext && that.hasNext) cb += ((this.next, that.next))
while (this.hasNext) cb += ((this.next, thatelem))
while (that.hasNext) cb += ((thiselem, that.next))
@@ -329,7 +335,7 @@ private[collection] trait AugmentedSeqIterator[+T] extends AugmentedIterableIter
/* transformers */
def reverse2combiner[U >: T, This](cb: Combiner[U, This]): Combiner[U, This] = {
- cb.sizeHint(remaining)
+ if (isRemainingCheap) cb.sizeHint(remaining)
var lst = List[T]()
while (hasNext) lst ::= next
while (lst != Nil) {
@@ -341,7 +347,7 @@ private[collection] trait AugmentedSeqIterator[+T] extends AugmentedIterableIter
def reverseMap2combiner[S, That](f: T => S, cb: Combiner[S, That]): Combiner[S, That] = {
//val cb = cbf(repr)
- cb.sizeHint(remaining)
+ if (isRemainingCheap) cb.sizeHint(remaining)
var lst = List[S]()
while (hasNext) lst ::= f(next)
while (lst != Nil) {
@@ -353,7 +359,7 @@ private[collection] trait AugmentedSeqIterator[+T] extends AugmentedIterableIter
def updated2combiner[U >: T, That](index: Int, elem: U, cb: Combiner[U, That]): Combiner[U, That] = {
//val cb = cbf(repr)
- cb.sizeHint(remaining)
+ if (isRemainingCheap) cb.sizeHint(remaining)
var j = 0
while (hasNext) {
if (j == index) {
@@ -381,11 +387,21 @@ extends AugmentedIterableIterator[T]
{
self =>
+ var signalDelegate: Signalling = IdleSignalling
+
/** Creates a copy of this iterator. */
def dup: IterableSplitter[T]
def split: Seq[IterableSplitter[T]]
+ def splitWithSignalling: Seq[IterableSplitter[T]] = {
+ val pits = split
+ pits foreach { _.signalDelegate = signalDelegate }
+ pits
+ }
+
+ def shouldSplitFurther[S](coll: ParIterable[S], parallelismLevel: Int) = remaining > thresholdFromSize(coll.size, parallelismLevel)
+
/** The number of elements this iterator has yet to traverse. This method
* doesn't change the state of the iterator.
*
@@ -421,7 +437,6 @@ self =>
/* iterator transformers */
class Taken(taken: Int) extends IterableSplitter[T] {
- var signalDelegate = self.signalDelegate
var remaining = taken min self.remaining
def hasNext = remaining > 0
def next = { remaining -= 1; self.next }
@@ -450,7 +465,7 @@ self =>
override def slice(from1: Int, until1: Int): IterableSplitter[T] = newSliceInternal(newTaken(until1), from1)
class Mapped[S](f: T => S) extends IterableSplitter[S] {
- var signalDelegate = self.signalDelegate
+ signalDelegate = self.signalDelegate
def hasNext = self.hasNext
def next = f(self.next)
def remaining = self.remaining
@@ -461,7 +476,7 @@ self =>
override def map[S](f: T => S) = new Mapped(f)
class Appended[U >: T, PI <: IterableSplitter[U]](protected val that: PI) extends IterableSplitter[U] {
- var signalDelegate = self.signalDelegate
+ signalDelegate = self.signalDelegate
protected var curr: IterableSplitter[U] = self
def hasNext = if (curr.hasNext) true else if (curr eq self) {
curr = that
@@ -480,7 +495,7 @@ self =>
def appendParIterable[U >: T, PI <: IterableSplitter[U]](that: PI) = new Appended[U, PI](that)
class Zipped[S](protected val that: SeqSplitter[S]) extends IterableSplitter[(T, S)] {
- var signalDelegate = self.signalDelegate
+ signalDelegate = self.signalDelegate
def hasNext = self.hasNext && that.hasNext
def next = (self.next, that.next)
def remaining = self.remaining min that.remaining
@@ -497,7 +512,7 @@ self =>
class ZippedAll[U >: T, S](protected val that: SeqSplitter[S], protected val thiselem: U, protected val thatelem: S)
extends IterableSplitter[(U, S)] {
- var signalDelegate = self.signalDelegate
+ signalDelegate = self.signalDelegate
def hasNext = self.hasNext || that.hasNext
def next = if (self.hasNext) {
if (that.hasNext) (self.next, that.next)
@@ -534,6 +549,18 @@ self =>
def split: Seq[SeqSplitter[T]]
def psplit(sizes: Int*): Seq[SeqSplitter[T]]
+ override def splitWithSignalling: Seq[SeqSplitter[T]] = {
+ val pits = split
+ pits foreach { _.signalDelegate = signalDelegate }
+ pits
+ }
+
+ def psplitWithSignalling(sizes: Int*): Seq[SeqSplitter[T]] = {
+ val pits = psplit(sizes: _*)
+ pits foreach { _.signalDelegate = signalDelegate }
+ pits
+ }
+
/** The number of elements this iterator has yet to traverse. This method
* doesn't change the state of the iterator. Unlike the version of this method in the supertrait,
* method `remaining` in `ParSeqLike.this.ParIterator` must return an exact number
@@ -626,13 +653,13 @@ self =>
def reverse: SeqSplitter[T] = {
val pa = mutable.ParArray.fromTraversables(self).reverse
- new pa.ParArrayIterator with pa.SCPI {
+ new pa.ParArrayIterator {
override def reverse = self
}
}
class Patched[U >: T](from: Int, patch: SeqSplitter[U], replaced: Int) extends SeqSplitter[U] {
- var signalDelegate = self.signalDelegate
+ signalDelegate = self.signalDelegate
private[this] val trio = {
val pits = self.psplit(from, replaced, self.remaining - from - replaced)
(pits(0).appendParSeq[U, SeqSplitter[U]](patch)) appendParSeq pits(2)
diff --git a/src/library/scala/collection/parallel/TaskSupport.scala b/src/library/scala/collection/parallel/TaskSupport.scala
index 20800250b4..59b75f523f 100644
--- a/src/library/scala/collection/parallel/TaskSupport.scala
+++ b/src/library/scala/collection/parallel/TaskSupport.scala
@@ -11,15 +11,46 @@ package scala.collection.parallel
+import java.util.concurrent.ThreadPoolExecutor
+import scala.concurrent.forkjoin.ForkJoinPool
+import scala.concurrent.ExecutionContext
-
+/** A trait implementing the scheduling of
+ * a parallel collection operation.
+ *
+ * Task support objects handle how a task is split and
+ * distributed across processors. A task support object can be
+ * changed in a parallel collection after it has been created,
+ * but only during a quiescent period, i.e. while there are no
+ * concurrent invocations to parallel collection methods.
+ */
trait TaskSupport extends Tasks
-private[collection] class ForkJoinTaskSupport extends TaskSupport with AdaptiveWorkStealingForkJoinTasks
-private[collection] class ThreadPoolTaskSupport extends TaskSupport with AdaptiveWorkStealingThreadPoolTasks
+/** A task support that uses a fork join pool to schedule tasks */
+class ForkJoinTaskSupport(val environment: ForkJoinPool = ForkJoinTasks.defaultForkJoinPool)
+extends TaskSupport with AdaptiveWorkStealingForkJoinTasks
+
+
+/** A task support that uses a thread pool executor to schedule tasks */
+class ThreadPoolTaskSupport(val environment: ThreadPoolExecutor = ThreadPoolTasks.defaultThreadPool)
+extends TaskSupport with AdaptiveWorkStealingThreadPoolTasks
+
+
+/** A task support that uses an execution context to schedule tasks.
+ *
+ * It can be used with the default execution context implementation in the `scala.concurrent` package.
+ * It internally forwards calls to either a fork/join-based task support or a thread-pool-based one,
+ * depending on which executor the execution context uses.
+ *
+ * By default, parallel collections are parametrized with this task support object, so they
+ * share the same execution context backend as the rest of the `scala.concurrent` package.
+ */
+class ExecutionContextTaskSupport(val environment: ExecutionContext = scala.concurrent.executionContext)
+extends TaskSupport with ExecutionContextTasks
+
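For illustration, here is a minimal usage sketch of the task support API added above. It assumes the `tasksupport` setter on parallel collections introduced elsewhere in this patch; the collection and the pool size are illustrative.

    import scala.collection.parallel.ForkJoinTaskSupport
    import scala.concurrent.forkjoin.ForkJoinPool

    val pc = (0 until 10000).toArray.par
    // swap in a fork/join pool with 4 workers; per the scaladoc above, the
    // assignment must happen during a quiescent period, i.e. while no
    // parallel operation on `pc` is in flight
    pc.tasksupport = new ForkJoinTaskSupport(new ForkJoinPool(4))
    pc map { _ * 2 }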
diff --git a/src/library/scala/collection/parallel/Tasks.scala b/src/library/scala/collection/parallel/Tasks.scala
index b705909cad..60a8bb1ed6 100644
--- a/src/library/scala/collection/parallel/Tasks.scala
+++ b/src/library/scala/collection/parallel/Tasks.scala
@@ -6,109 +6,109 @@
** |/ **
\* */
-
package scala.collection.parallel
+import java.util.concurrent.ThreadPoolExecutor
import scala.concurrent.forkjoin._
+import scala.concurrent.ExecutionContext
import scala.util.control.Breaks._
-
import annotation.unchecked.uncheckedVariance
+trait Task[R, +Tp] {
+ type Result = R
-/** A trait that declares task execution capabilities used
- * by parallel collections.
- */
-trait Tasks {
-
- private[parallel] val debugMessages = collection.mutable.ArrayBuffer[String]()
-
- private[parallel] def debuglog(s: String) = synchronized {
- debugMessages += s
- }
-
- trait Task[R, +Tp] {
- type Result = R
+ def repr = this.asInstanceOf[Tp]
- def repr = this.asInstanceOf[Tp]
-
- /** Body of the task - non-divisible unit of work done by this task.
- * It is optionally provided with the result of the previously completed task,
- * or `None` if there was no previous task (or the previous task is incomplete or unknown).
- */
- def leaf(result: Option[R])
+ /** Body of the task - non-divisible unit of work done by this task.
+ * It is optionally provided with the result of the previously completed task,
+ * or `None` if there was no previous task (or the previous task is incomplete or unknown).
+ */
+ def leaf(result: Option[R])
- /** A result that can be accessed once the task is completed. */
- var result: R
+ /** A result that can be accessed once the task is completed. */
+ var result: R
- /** Decides whether or not this task should be split further. */
- def shouldSplitFurther: Boolean
+ /** Decides whether or not this task should be split further. */
+ def shouldSplitFurther: Boolean
- /** Splits this task into a list of smaller tasks. */
- private[parallel] def split: Seq[Task[R, Tp]]
+ /** Splits this task into a list of smaller tasks. */
+ private[parallel] def split: Seq[Task[R, Tp]]
- /** Reads the results of `that` task and merges them into the results of this one. */
- private[parallel] def merge(that: Tp @uncheckedVariance) {}
+ /** Reads the results of `that` task and merges them into the results of this one. */
+ private[parallel] def merge(that: Tp @uncheckedVariance) {}
- // exception handling mechanism
- @volatile var throwable: Throwable = null
- def forwardThrowable() = if (throwable != null) throw throwable
+ // exception handling mechanism
+ @volatile var throwable: Throwable = null
+ def forwardThrowable() = if (throwable != null) throw throwable
- // tries to do the leaf computation, storing the possible exception
- private[parallel] def tryLeaf(lastres: Option[R]) {
- try {
- tryBreakable {
- leaf(lastres)
- result = result // ensure that effects of `leaf` are visible to readers of `result`
- } catchBreak {
- signalAbort
- }
- } catch {
- case thr: Exception =>
- result = result // ensure that effects of `leaf` are visible
- throwable = thr
- signalAbort
+ // tries to do the leaf computation, storing the possible exception
+ private[parallel] def tryLeaf(lastres: Option[R]) {
+ try {
+ tryBreakable {
+ leaf(lastres)
+ result = result // ensure that effects of `leaf` are visible to readers of `result`
+ } catchBreak {
+ signalAbort
}
+ } catch {
+ case thr: Exception =>
+ result = result // ensure that effects of `leaf` are visible
+ throwable = thr
+ signalAbort
}
+ }
- private[parallel] def tryMerge(t: Tp @uncheckedVariance) {
- val that = t.asInstanceOf[Task[R, Tp]]
- val local = result // ensure that any effects of modifying `result` are detected
- // checkMerge(that)
- if (this.throwable == null && that.throwable == null) merge(t)
- mergeThrowables(that)
- }
+ private[parallel] def tryMerge(t: Tp @uncheckedVariance) {
+ val that = t.asInstanceOf[Task[R, Tp]]
+ val local = result // ensure that any effects of modifying `result` are detected
+ // checkMerge(that)
+ if (this.throwable == null && that.throwable == null) merge(t)
+ mergeThrowables(that)
+ }
- private def checkMerge(that: Task[R, Tp] @uncheckedVariance) {
- if (this.throwable == null && that.throwable == null && (this.result == null || that.result == null)) {
- println("This: " + this + ", thr=" + this.throwable + "; merged with " + that + ", thr=" + that.throwable)
- } else if (this.throwable != null || that.throwable != null) {
- println("merging this: " + this + " with thr: " + this.throwable + " with " + that + ", thr=" + that.throwable)
- }
+ private def checkMerge(that: Task[R, Tp] @uncheckedVariance) {
+ if (this.throwable == null && that.throwable == null && (this.result == null || that.result == null)) {
+ println("This: " + this + ", thr=" + this.throwable + "; merged with " + that + ", thr=" + that.throwable)
+ } else if (this.throwable != null || that.throwable != null) {
+ println("merging this: " + this + " with thr: " + this.throwable + " with " + that + ", thr=" + that.throwable)
}
+ }
- private[parallel] def mergeThrowables(that: Task[_, _]) {
- if (this.throwable != null && that.throwable != null) {
- // merge exceptions, since there were multiple exceptions
- this.throwable = this.throwable alongWith that.throwable
- } else if (that.throwable != null) this.throwable = that.throwable
+ private[parallel] def mergeThrowables(that: Task[_, _]) {
+ if (this.throwable != null && that.throwable != null) {
+ // merge exceptions, since there were multiple exceptions
+ this.throwable = this.throwable alongWith that.throwable
+ } else if (that.throwable != null) this.throwable = that.throwable
else this.throwable = this.throwable
- }
+ }
+
+ // override in concrete task implementations to signal abort to other tasks
+ private[parallel] def signalAbort() {}
+}
+
+
+/** A trait that declares task execution capabilities used
+ * by parallel collections.
+ */
+trait Tasks {
- // override in concrete task implementations to signal abort to other tasks
- private[parallel] def signalAbort() {}
+ private[parallel] val debugMessages = collection.mutable.ArrayBuffer[String]()
+
+ private[parallel] def debuglog(s: String) = synchronized {
+ debugMessages += s
}
- trait TaskImpl[R, +Tp] {
+ trait WrappedTask[R, +Tp] {
/** the body of this task - what it executes, how it gets split and how results are merged. */
val body: Task[R, Tp]
- def split: Seq[TaskImpl[R, Tp]]
+ def split: Seq[WrappedTask[R, Tp]]
/** Code that gets called after the task gets started - it may spawn other tasks instead of calling `leaf`. */
def compute()
/** Start task. */
@@ -129,13 +129,10 @@ trait Tasks {
def release() {}
}
- protected def newTaskImpl[R, Tp](b: Task[R, Tp]): TaskImpl[R, Tp]
-
/* task control */
- // safe to assume it will always have the same type,
- // because the `tasksupport` in parallel iterable is final
- var environment: AnyRef
+ /** The type of the environment is more specific in the implementations. */
+ val environment: AnyRef
/** Executes a task and returns a future. Forwards an exception if some task threw it. */
def execute[R, Tp](fjtask: Task[R, Tp]): () => R
@@ -155,11 +152,11 @@ trait Tasks {
*/
trait AdaptiveWorkStealingTasks extends Tasks {
- trait TaskImpl[R, Tp] extends super.TaskImpl[R, Tp] {
- @volatile var next: TaskImpl[R, Tp] = null
+ trait WrappedTask[R, Tp] extends super.WrappedTask[R, Tp] {
+ @volatile var next: WrappedTask[R, Tp] = null
@volatile var shouldWaitFor = true
- def split: Seq[TaskImpl[R, Tp]]
+ def split: Seq[WrappedTask[R, Tp]]
def compute() = if (body.shouldSplitFurther) {
internal()
@@ -171,12 +168,12 @@ trait AdaptiveWorkStealingTasks extends Tasks {
def internal() = {
var last = spawnSubtasks()
-
+
last.body.tryLeaf(None)
last.release()
body.result = last.body.result
body.throwable = last.body.throwable
-
+
while (last.next != null) {
// val lastresult = Option(last.body.result)
val beforelast = last
@@ -193,10 +190,10 @@ trait AdaptiveWorkStealingTasks extends Tasks {
body.tryMerge(last.body.repr)
}
}
-
+
def spawnSubtasks() = {
- var last: TaskImpl[R, Tp] = null
- var head: TaskImpl[R, Tp] = this
+ var last: WrappedTask[R, Tp] = null
+ var head: WrappedTask[R, Tp] = this
do {
val subtasks = head.split
head = subtasks.head
@@ -222,7 +219,7 @@ trait AdaptiveWorkStealingTasks extends Tasks {
}
// specialize ctor
- protected def newTaskImpl[R, Tp](b: Task[R, Tp]): TaskImpl[R, Tp]
+ protected def newWrappedTask[R, Tp](b: Task[R, Tp]): WrappedTask[R, Tp]
}
@@ -231,13 +228,13 @@ trait AdaptiveWorkStealingTasks extends Tasks {
trait ThreadPoolTasks extends Tasks {
import java.util.concurrent._
- trait TaskImpl[R, +Tp] extends Runnable with super.TaskImpl[R, Tp] {
+ trait WrappedTask[R, +Tp] extends Runnable with super.WrappedTask[R, Tp] {
// initially, this is null
// once the task is started, this future is set and used for `sync`
// utb: var future: Future[_] = null
@volatile var owned = false
@volatile var completed = false
-
+
def start() = synchronized {
// debuglog("Starting " + body)
// utb: future = executor.submit(this)
@@ -293,9 +290,9 @@ trait ThreadPoolTasks extends Tasks {
}
}
- protected def newTaskImpl[R, Tp](b: Task[R, Tp]): TaskImpl[R, Tp]
+ protected def newWrappedTask[R, Tp](b: Task[R, Tp]): WrappedTask[R, Tp]
- var environment: AnyRef = ThreadPoolTasks.defaultThreadPool
+ val environment: ThreadPoolExecutor
def executor = environment.asInstanceOf[ThreadPoolExecutor]
def queue = executor.getQueue.asInstanceOf[LinkedBlockingQueue[Runnable]]
@volatile var totaltasks = 0
@@ -309,7 +306,7 @@ trait ThreadPoolTasks extends Tasks {
}
def execute[R, Tp](task: Task[R, Tp]): () => R = {
- val t = newTaskImpl(task)
+ val t = newWrappedTask(task)
// debuglog("-----------> Executing without wait: " + task)
t.start()
@@ -322,11 +319,11 @@ trait ThreadPoolTasks extends Tasks {
}
def executeAndWaitResult[R, Tp](task: Task[R, Tp]): R = {
- val t = newTaskImpl(task)
+ val t = newWrappedTask(task)
// debuglog("-----------> Executing with wait: " + task)
t.start()
-
+
t.sync()
t.body.forwardThrowable
t.body.result
@@ -362,10 +359,11 @@ object ThreadPoolTasks {
/** An implementation of task objects based on the Java thread pooling API and synchronization using futures. */
+@deprecated("This implementation is not used.")
trait FutureThreadPoolTasks extends Tasks {
import java.util.concurrent._
- trait TaskImpl[R, +Tp] extends Runnable with super.TaskImpl[R, Tp] {
+ trait WrappedTask[R, +Tp] extends Runnable with super.WrappedTask[R, Tp] {
@volatile var future: Future[_] = null
def start() = {
@@ -380,13 +378,13 @@ trait FutureThreadPoolTasks extends Tasks {
}
}
- protected def newTaskImpl[R, Tp](b: Task[R, Tp]): TaskImpl[R, Tp]
+ protected def newWrappedTask[R, Tp](b: Task[R, Tp]): WrappedTask[R, Tp]
- var environment: AnyRef = FutureThreadPoolTasks.defaultThreadPool
+ val environment: AnyRef = FutureThreadPoolTasks.defaultThreadPool
def executor = environment.asInstanceOf[ThreadPoolExecutor]
def execute[R, Tp](task: Task[R, Tp]): () => R = {
- val t = newTaskImpl(task)
+ val t = newWrappedTask(task)
// debuglog("-----------> Executing without wait: " + task)
t.start
@@ -399,7 +397,7 @@ trait FutureThreadPoolTasks extends Tasks {
}
def executeAndWaitResult[R, Tp](task: Task[R, Tp]): R = {
- val t = newTaskImpl(task)
+ val t = newWrappedTask(task)
// debuglog("-----------> Executing with wait: " + task)
t.start
@@ -441,26 +439,26 @@ trait HavingForkJoinPool {
*/
trait ForkJoinTasks extends Tasks with HavingForkJoinPool {
- trait TaskImpl[R, +Tp] extends RecursiveAction with super.TaskImpl[R, Tp] {
+ trait WrappedTask[R, +Tp] extends RecursiveAction with super.WrappedTask[R, Tp] {
def start() = fork
def sync() = join
def tryCancel = tryUnfork
}
// specialize ctor
- protected def newTaskImpl[R, Tp](b: Task[R, Tp]): TaskImpl[R, Tp]
+ protected def newWrappedTask[R, Tp](b: Task[R, Tp]): WrappedTask[R, Tp]
/** The fork/join pool of this collection.
*/
def forkJoinPool: ForkJoinPool = environment.asInstanceOf[ForkJoinPool]
- var environment: AnyRef = ForkJoinTasks.defaultForkJoinPool
+ val environment: ForkJoinPool
/** Executes a task and does not wait for it to finish - instead returns a future.
*
* $fjdispatch
*/
def execute[R, Tp](task: Task[R, Tp]): () => R = {
- val fjtask = newTaskImpl(task)
+ val fjtask = newWrappedTask(task)
if (Thread.currentThread.isInstanceOf[ForkJoinWorkerThread]) {
fjtask.fork
@@ -483,7 +481,7 @@ trait ForkJoinTasks extends Tasks with HavingForkJoinPool {
* @return the result of the task
*/
def executeAndWaitResult[R, Tp](task: Task[R, Tp]): R = {
- val fjtask = newTaskImpl(task)
+ val fjtask = newWrappedTask(task)
if (Thread.currentThread.isInstanceOf[ForkJoinWorkerThread]) {
fjtask.fork
@@ -513,25 +511,50 @@ object ForkJoinTasks {
*/
trait AdaptiveWorkStealingForkJoinTasks extends ForkJoinTasks with AdaptiveWorkStealingTasks {
- class TaskImpl[R, Tp](val body: Task[R, Tp])
- extends super[ForkJoinTasks].TaskImpl[R, Tp] with super[AdaptiveWorkStealingTasks].TaskImpl[R, Tp] {
- def split = body.split.map(b => newTaskImpl(b))
+ class WrappedTask[R, Tp](val body: Task[R, Tp])
+ extends super[ForkJoinTasks].WrappedTask[R, Tp] with super[AdaptiveWorkStealingTasks].WrappedTask[R, Tp] {
+ def split = body.split.map(b => newWrappedTask(b))
}
- def newTaskImpl[R, Tp](b: Task[R, Tp]) = new TaskImpl[R, Tp](b)
+ def newWrappedTask[R, Tp](b: Task[R, Tp]) = new WrappedTask[R, Tp](b)
}
trait AdaptiveWorkStealingThreadPoolTasks extends ThreadPoolTasks with AdaptiveWorkStealingTasks {
- class TaskImpl[R, Tp](val body: Task[R, Tp])
- extends super[ThreadPoolTasks].TaskImpl[R, Tp] with super[AdaptiveWorkStealingTasks].TaskImpl[R, Tp] {
- def split = body.split.map(b => newTaskImpl(b))
+ class WrappedTask[R, Tp](val body: Task[R, Tp])
+ extends super[ThreadPoolTasks].WrappedTask[R, Tp] with super[AdaptiveWorkStealingTasks].WrappedTask[R, Tp] {
+ def split = body.split.map(b => newWrappedTask(b))
}
- def newTaskImpl[R, Tp](b: Task[R, Tp]) = new TaskImpl[R, Tp](b)
+ def newWrappedTask[R, Tp](b: Task[R, Tp]) = new WrappedTask[R, Tp](b)
+
+}
+
+trait ExecutionContextTasks extends Tasks {
+
+ def executionContext = environment
+
+ val environment: ExecutionContext
+
+ // this part is a hack which allows switching the underlying task
+ // support implementation based on the execution context's executor
+ val driver: Tasks = executionContext match {
+ case eci: scala.concurrent.impl.ExecutionContextImpl => eci.executorService match {
+ case fjp: ForkJoinPool => new ForkJoinTaskSupport(fjp)
+ case tpe: ThreadPoolExecutor => new ThreadPoolTaskSupport(tpe)
+ case _ => ???
+ }
+ case _ => ???
+ }
+
+ def execute[R, Tp](task: Task[R, Tp]): () => R = driver execute task
+
+ def executeAndWaitResult[R, Tp](task: Task[R, Tp]): R = driver executeAndWaitResult task
+
+ def parallelismLevel = driver.parallelismLevel
+
}
@@ -541,3 +564,6 @@ trait AdaptiveWorkStealingThreadPoolTasks extends ThreadPoolTasks with AdaptiveW
+
+
+
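For illustration, a minimal sketch of a concrete `Task` now that the trait is top-level. The class and the split threshold are hypothetical; it is placed inside `scala.collection.parallel` so that the `private[parallel]` members can be implemented.

    package scala.collection.parallel

    // hypothetical task that sums a slice of an integer array
    class SumTask(arr: Array[Int], offset: Int, howmany: Int) extends Task[Int, SumTask] {
      var result = 0
      def leaf(prev: Option[Int]) {
        var sum = 0
        var i = offset
        val until = offset + howmany
        while (i < until) { sum += arr(i); i += 1 }
        result = sum
      }
      private[parallel] def split = {
        val fp = howmany / 2
        Seq(new SumTask(arr, offset, fp), new SumTask(arr, offset + fp, howmany - fp))
      }
      def shouldSplitFurther = howmany > 1024
      private[parallel] override def merge(that: SumTask) { result += that.result }
    }

A task support can run it directly, e.g. `(new ForkJoinTaskSupport).executeAndWaitResult(new SumTask(arr, 0, arr.length))`.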
diff --git a/src/library/scala/collection/parallel/immutable/ParHashMap.scala b/src/library/scala/collection/parallel/immutable/ParHashMap.scala
index e785932933..266b179401 100644
--- a/src/library/scala/collection/parallel/immutable/ParHashMap.scala
+++ b/src/library/scala/collection/parallel/immutable/ParHashMap.scala
@@ -8,6 +8,8 @@
package scala.collection.parallel.immutable
+
+
import scala.collection.parallel.ParMapLike
import scala.collection.parallel.Combiner
import scala.collection.parallel.IterableSplitter
@@ -19,6 +21,9 @@ import scala.collection.generic.GenericParMapTemplate
import scala.collection.generic.GenericParMapCompanion
import scala.collection.immutable.{ HashMap, TrieIterator }
import annotation.unchecked.uncheckedVariance
+import collection.parallel.Task
+
+
/** Immutable parallel hash map, based on hash tries.
*
@@ -52,7 +57,7 @@ self =>
protected[this] override def newCombiner = HashMapCombiner[K, V]
- def splitter: IterableSplitter[(K, V)] = new ParHashMapIterator(trie.iterator, trie.size) with SCPI
+ def splitter: IterableSplitter[(K, V)] = new ParHashMapIterator(trie.iterator, trie.size)
override def seq = trie
@@ -69,11 +74,8 @@ self =>
case None => newc
}
- type SCPI = SignalContextPassingIterator[ParHashMapIterator]
-
class ParHashMapIterator(var triter: Iterator[(K, V @uncheckedVariance)], val sz: Int)
- extends super.ParIterator {
- self: SignalContextPassingIterator[ParHashMapIterator] =>
+ extends IterableSplitter[(K, V)] {
var i = 0
def dup = triter match {
case t: TrieIterator[_] =>
@@ -84,24 +86,24 @@ self =>
dupFromIterator(buff.iterator)
}
private def dupFromIterator(it: Iterator[(K, V @uncheckedVariance)]) = {
- val phit = new ParHashMapIterator(it, sz) with SCPI
+ val phit = new ParHashMapIterator(it, sz)
phit.i = i
phit
}
- def split: Seq[ParIterator] = if (remaining < 2) Seq(this) else triter match {
+ def split: Seq[IterableSplitter[(K, V)]] = if (remaining < 2) Seq(this) else triter match {
case t: TrieIterator[_] =>
val previousRemaining = remaining
val ((fst, fstlength), snd) = t.split
val sndlength = previousRemaining - fstlength
Seq(
- new ParHashMapIterator(fst, fstlength) with SCPI,
- new ParHashMapIterator(snd, sndlength) with SCPI
+ new ParHashMapIterator(fst, fstlength),
+ new ParHashMapIterator(snd, sndlength)
)
case _ =>
// iterator of the collision map case
val buff = triter.toBuffer
val (fp, sp) = buff.splitAt(buff.length / 2)
- Seq(fp, sp) map { b => new ParHashMapIterator(b.iterator, b.length) with SCPI }
+ Seq(fp, sp) map { b => new ParHashMapIterator(b.iterator, b.length) }
}
def next(): (K, V) = {
i += 1
@@ -156,7 +158,6 @@ private[parallel] abstract class HashMapCombiner[K, V]
extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], (K, V), HashMapCombiner[K, V]](HashMapCombiner.rootsize) {
//self: EnvironmentPassingCombiner[(K, V), ParHashMap[K, V]] =>
import HashMapCombiner._
- import collection.parallel.tasksupport._
val emptyTrie = HashMap.empty[K, V]
def +=(elem: (K, V)) = {
@@ -176,7 +177,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], (K, V), Has
val bucks = buckets.filter(_ != null).map(_.headPtr)
val root = new Array[HashMap[K, V]](bucks.length)
- executeAndWaitResult(new CreateTrie(bucks, root, 0, bucks.length))
+ combinerTaskSupport.executeAndWaitResult(new CreateTrie(bucks, root, 0, bucks.length))
var bitmap = 0
var i = 0
@@ -198,7 +199,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], (K, V), Has
val bucks = buckets.filter(_ != null).map(_.headPtr)
val root = new Array[HashMap[K, AnyRef]](bucks.length)
- executeAndWaitResult(new CreateGroupedTrie(cbf, bucks, root, 0, bucks.length))
+ combinerTaskSupport.executeAndWaitResult(new CreateGroupedTrie(cbf, bucks, root, 0, bucks.length))
var bitmap = 0
var i = 0
@@ -259,7 +260,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], (K, V), Has
val fp = howmany / 2
List(new CreateTrie(bucks, root, offset, fp), new CreateTrie(bucks, root, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, combinerTaskSupport.parallelismLevel)
}
class CreateGroupedTrie[Repr](cbf: () => Combiner[V, Repr], bucks: Array[Unrolled[(K, V)]], root: Array[HashMap[K, AnyRef]], offset: Int, howmany: Int)
@@ -324,7 +325,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], (K, V), Has
val fp = howmany / 2
List(new CreateGroupedTrie(cbf, bucks, root, offset, fp), new CreateGroupedTrie(cbf, bucks, root, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, combinerTaskSupport.parallelismLevel)
}
}
diff --git a/src/library/scala/collection/parallel/immutable/ParHashSet.scala b/src/library/scala/collection/parallel/immutable/ParHashSet.scala
index 8332167b90..0d7f04976e 100644
--- a/src/library/scala/collection/parallel/immutable/ParHashSet.scala
+++ b/src/library/scala/collection/parallel/immutable/ParHashSet.scala
@@ -8,6 +8,8 @@
package scala.collection.parallel.immutable
+
+
import scala.collection.parallel.ParSetLike
import scala.collection.parallel.Combiner
import scala.collection.parallel.IterableSplitter
@@ -19,6 +21,9 @@ import scala.collection.generic.GenericParTemplate
import scala.collection.generic.GenericParCompanion
import scala.collection.generic.GenericCompanion
import scala.collection.immutable.{ HashSet, TrieIterator }
+import collection.parallel.Task
+
+
/** Immutable parallel hash set, based on hash tries.
*
@@ -49,7 +54,7 @@ self =>
override def empty: ParHashSet[T] = new ParHashSet[T]
- def splitter: IterableSplitter[T] = new ParHashSetIterator(trie.iterator, trie.size) with SCPI
+ def splitter: IterableSplitter[T] = new ParHashSetIterator(trie.iterator, trie.size)
override def seq = trie
@@ -66,11 +71,8 @@ self =>
case None => newc
}
- type SCPI = SignalContextPassingIterator[ParHashSetIterator]
-
class ParHashSetIterator(var triter: Iterator[T], val sz: Int)
- extends super.ParIterator {
- self: SignalContextPassingIterator[ParHashSetIterator] =>
+ extends IterableSplitter[T] {
var i = 0
def dup = triter match {
case t: TrieIterator[_] =>
@@ -81,24 +83,24 @@ self =>
dupFromIterator(buff.iterator)
}
private def dupFromIterator(it: Iterator[T]) = {
- val phit = new ParHashSetIterator(it, sz) with SCPI
+ val phit = new ParHashSetIterator(it, sz)
phit.i = i
phit
}
- def split: Seq[ParIterator] = if (remaining < 2) Seq(this) else triter match {
+ def split: Seq[IterableSplitter[T]] = if (remaining < 2) Seq(this) else triter match {
case t: TrieIterator[_] =>
val previousRemaining = remaining
val ((fst, fstlength), snd) = t.split
val sndlength = previousRemaining - fstlength
Seq(
- new ParHashSetIterator(fst, fstlength) with SCPI,
- new ParHashSetIterator(snd, sndlength) with SCPI
+ new ParHashSetIterator(fst, fstlength),
+ new ParHashSetIterator(snd, sndlength)
)
case _ =>
// iterator of the collision map case
val buff = triter.toBuffer
val (fp, sp) = buff.splitAt(buff.length / 2)
- Seq(fp, sp) map { b => new ParHashSetIterator(b.iterator, b.length) with SCPI }
+ Seq(fp, sp) map { b => new ParHashSetIterator(b.iterator, b.length) }
}
def next(): T = {
i += 1
@@ -111,6 +113,7 @@ self =>
}
}
+
/** $factoryInfo
* @define Coll immutable.ParHashSet
* @define coll immutable parallel hash set
@@ -124,11 +127,11 @@ object ParHashSet extends ParSetFactory[ParHashSet] {
def fromTrie[T](t: HashSet[T]) = new ParHashSet(t)
}
+
private[immutable] abstract class HashSetCombiner[T]
extends collection.parallel.BucketCombiner[T, ParHashSet[T], Any, HashSetCombiner[T]](HashSetCombiner.rootsize) {
//self: EnvironmentPassingCombiner[T, ParHashSet[T]] =>
import HashSetCombiner._
- import collection.parallel.tasksupport._
val emptyTrie = HashSet.empty[T]
def +=(elem: T) = {
@@ -148,7 +151,7 @@ extends collection.parallel.BucketCombiner[T, ParHashSet[T], Any, HashSetCombine
val bucks = buckets.filter(_ != null).map(_.headPtr)
val root = new Array[HashSet[T]](bucks.length)
- executeAndWaitResult(new CreateTrie(bucks, root, 0, bucks.length))
+ combinerTaskSupport.executeAndWaitResult(new CreateTrie(bucks, root, 0, bucks.length))
var bitmap = 0
var i = 0
@@ -203,10 +206,11 @@ extends collection.parallel.BucketCombiner[T, ParHashSet[T], Any, HashSetCombine
val fp = howmany / 2
List(new CreateTrie(bucks, root, offset, fp), new CreateTrie(bucks, root, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(root.length, combinerTaskSupport.parallelismLevel)
}
}
+
object HashSetCombiner {
def apply[T] = new HashSetCombiner[T] {} // was: with EnvironmentPassingCombiner[T, ParHashSet[T]] {}
diff --git a/src/library/scala/collection/parallel/immutable/ParRange.scala b/src/library/scala/collection/parallel/immutable/ParRange.scala
index 350e64739f..64e07ce4ff 100644
--- a/src/library/scala/collection/parallel/immutable/ParRange.scala
+++ b/src/library/scala/collection/parallel/immutable/ParRange.scala
@@ -10,6 +10,7 @@ package scala.collection.parallel.immutable
import scala.collection.immutable.Range
import scala.collection.parallel.Combiner
+import scala.collection.parallel.SeqSplitter
import scala.collection.generic.CanCombineFrom
import scala.collection.parallel.IterableSplitter
import scala.collection.Iterator
@@ -41,13 +42,10 @@ self =>
@inline final def apply(idx: Int) = range.apply(idx);
- def splitter = new ParRangeIterator with SCPI
-
- type SCPI = SignalContextPassingIterator[ParRangeIterator]
+ def splitter = new ParRangeIterator
class ParRangeIterator(range: Range = self.range)
- extends ParIterator {
- me: SignalContextPassingIterator[ParRangeIterator] =>
+ extends SeqSplitter[Int] {
override def toString = "ParRangeIterator(over: " + range + ")"
private var ind = 0
private val len = range.length
@@ -64,15 +62,15 @@ self =>
private def rangeleft = range.drop(ind)
- def dup = new ParRangeIterator(rangeleft) with SCPI
+ def dup = new ParRangeIterator(rangeleft)
def split = {
val rleft = rangeleft
val elemleft = rleft.length
- if (elemleft < 2) Seq(new ParRangeIterator(rleft) with SCPI)
+ if (elemleft < 2) Seq(new ParRangeIterator(rleft))
else Seq(
- new ParRangeIterator(rleft.take(elemleft / 2)) with SCPI,
- new ParRangeIterator(rleft.drop(elemleft / 2)) with SCPI
+ new ParRangeIterator(rleft.take(elemleft / 2)),
+ new ParRangeIterator(rleft.drop(elemleft / 2))
)
}
@@ -81,7 +79,7 @@ self =>
for (sz <- sizes) yield {
val fronttaken = rleft.take(sz)
rleft = rleft.drop(sz)
- new ParRangeIterator(fronttaken) with SCPI
+ new ParRangeIterator(fronttaken)
}
}
diff --git a/src/library/scala/collection/parallel/immutable/ParVector.scala b/src/library/scala/collection/parallel/immutable/ParVector.scala
index fdeaefc3ff..5d9c431bc1 100644
--- a/src/library/scala/collection/parallel/immutable/ParVector.scala
+++ b/src/library/scala/collection/parallel/immutable/ParVector.scala
@@ -48,22 +48,19 @@ extends ParSeq[T]
def this() = this(Vector())
- type SCPI = SignalContextPassingIterator[ParVectorIterator]
-
def apply(idx: Int) = vector.apply(idx)
def length = vector.length
def splitter: SeqSplitter[T] = {
- val pit = new ParVectorIterator(vector.startIndex, vector.endIndex) with SCPI
+ val pit = new ParVectorIterator(vector.startIndex, vector.endIndex)
vector.initIterator(pit)
pit
}
override def seq: Vector[T] = vector
- class ParVectorIterator(_start: Int, _end: Int) extends VectorIterator[T](_start, _end) with ParIterator {
- self: SCPI =>
+ class ParVectorIterator(_start: Int, _end: Int) extends VectorIterator[T](_start, _end) with SeqSplitter[T] {
def remaining: Int = remainingElementCount
def dup: SeqSplitter[T] = (new ParVector(remainingVector)).splitter
def split: Seq[ParVectorIterator] = {
diff --git a/src/library/scala/collection/parallel/immutable/package.scala b/src/library/scala/collection/parallel/immutable/package.scala
index 7b1e39d092..63635537d7 100644
--- a/src/library/scala/collection/parallel/immutable/package.scala
+++ b/src/library/scala/collection/parallel/immutable/package.scala
@@ -22,23 +22,19 @@ package immutable {
override def seq = throw new UnsupportedOperationException
def update(idx: Int, elem: T) = throw new UnsupportedOperationException
- type SCPI = SignalContextPassingIterator[ParIterator]
-
- class ParIterator(var i: Int = 0, val until: Int = length, elem: T = self.elem) extends super.ParIterator {
- me: SignalContextPassingIterator[ParIterator] =>
-
+ class ParIterator(var i: Int = 0, val until: Int = length, elem: T = self.elem) extends SeqSplitter[T] {
def remaining = until - i
def hasNext = i < until
def next = { i += 1; elem }
- def dup = new ParIterator(i, until, elem) with SCPI
+ def dup = new ParIterator(i, until, elem)
def psplit(sizes: Int*) = {
val incr = sizes.scanLeft(0)(_ + _)
- for ((start, end) <- incr.init zip incr.tail) yield new ParIterator(i + start, (i + end) min until, elem) with SCPI
+ for ((start, end) <- incr.init zip incr.tail) yield new ParIterator(i + start, (i + end) min until, elem)
}
def split = psplit(remaining / 2, remaining - remaining / 2)
}
- def splitter = new ParIterator with SCPI
+ def splitter = new ParIterator
}
}
diff --git a/src/library/scala/collection/parallel/mutable/ParArray.scala b/src/library/scala/collection/parallel/mutable/ParArray.scala
index a1eb3beb0c..5c3da66be0 100644
--- a/src/library/scala/collection/parallel/mutable/ParArray.scala
+++ b/src/library/scala/collection/parallel/mutable/ParArray.scala
@@ -19,7 +19,9 @@ import scala.collection.generic.CanBuildFrom
import scala.collection.generic.ParFactory
import scala.collection.generic.Sizing
import scala.collection.parallel.Combiner
+import scala.collection.parallel.SeqSplitter
import scala.collection.parallel.ParSeqLike
+import scala.collection.parallel.Task
import scala.collection.parallel.CHECK_RATE
import scala.collection.mutable.ArraySeq
import scala.collection.mutable.Builder
@@ -55,7 +57,6 @@ extends ParSeq[T]
with Serializable
{
self =>
- import collection.parallel.tasksupport._
@transient private var array: Array[Any] = arrayseq.array.asInstanceOf[Array[Any]]
@@ -74,17 +75,13 @@ self =>
override def seq = arrayseq
- type SCPI = SignalContextPassingIterator[ParArrayIterator]
-
protected[parallel] def splitter: ParArrayIterator = {
- val pit = new ParArrayIterator with SCPI
+ val pit = new ParArrayIterator
pit
}
class ParArrayIterator(var i: Int = 0, val until: Int = length, val arr: Array[Any] = array)
- extends super.ParIterator {
- me: SignalContextPassingIterator[ParArrayIterator] =>
-
+ extends SeqSplitter[T] {
def hasNext = i < until
def next = {
@@ -95,9 +92,9 @@ self =>
def remaining = until - i
- def dup = new ParArrayIterator(i, until, arr) with SCPI
+ def dup = new ParArrayIterator(i, until, arr)
- def psplit(sizesIncomplete: Int*): Seq[ParIterator] = {
+ def psplit(sizesIncomplete: Int*): Seq[ParArrayIterator] = {
var traversed = i
val total = sizesIncomplete.reduceLeft(_ + _)
val left = remaining
@@ -106,19 +103,19 @@ self =>
val start = traversed
val end = (traversed + sz) min until
traversed = end
- new ParArrayIterator(start, end, arr) with SCPI
+ new ParArrayIterator(start, end, arr)
} else {
- new ParArrayIterator(traversed, traversed, arr) with SCPI
+ new ParArrayIterator(traversed, traversed, arr)
}
}
- override def split: Seq[ParIterator] = {
+ override def split: Seq[ParArrayIterator] = {
val left = remaining
if (left >= 2) {
val splitpoint = left / 2
val sq = Seq(
- new ParArrayIterator(i, i + splitpoint, arr) with SCPI,
- new ParArrayIterator(i + splitpoint, until, arr) with SCPI)
+ new ParArrayIterator(i, i + splitpoint, arr),
+ new ParArrayIterator(i + splitpoint, until, arr))
i = until
sq
} else {
@@ -587,22 +584,22 @@ self =>
val targetarr = targarrseq.array.asInstanceOf[Array[Any]]
// fill it in parallel
- executeAndWaitResult(new Map[S](f, targetarr, 0, length))
+ tasksupport.executeAndWaitResult(new Map[S](f, targetarr, 0, length))
// wrap it into a parallel array
(new ParArray[S](targarrseq)).asInstanceOf[That]
} else super.map(f)(bf)
override def scan[U >: T, That](z: U)(op: (U, U) => U)(implicit cbf: CanBuildFrom[ParArray[T], U, That]): That =
- if (parallelismLevel > 1 && buildsArray(cbf(repr))) {
+ if (tasksupport.parallelismLevel > 1 && buildsArray(cbf(repr))) {
// reserve an array
val targarrseq = new ArraySeq[U](length + 1)
val targetarr = targarrseq.array.asInstanceOf[Array[Any]]
targetarr(0) = z
// do a parallel prefix scan
- if (length > 0) executeAndWaitResult(new CreateScanTree[U](0, size, z, op, splitter) mapResult {
- tree => executeAndWaitResult(new ScanToArray(tree, z, op, targetarr))
+ if (length > 0) tasksupport.executeAndWaitResult(new CreateScanTree[U](0, size, z, op, splitter) mapResult {
+ tree => tasksupport.executeAndWaitResult(new ScanToArray(tree, z, op, targetarr))
})
// wrap the array into a parallel array
@@ -664,7 +661,7 @@ self =>
val fp = howmany / 2
List(new Map(f, targetarr, offset, fp), new Map(f, targetarr, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(length, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(length, tasksupport.parallelismLevel)
}
/* serialization */
diff --git a/src/library/scala/collection/parallel/mutable/ParCtrie.scala b/src/library/scala/collection/parallel/mutable/ParCtrie.scala
new file mode 100644
index 0000000000..470972adad
--- /dev/null
+++ b/src/library/scala/collection/parallel/mutable/ParCtrie.scala
@@ -0,0 +1,193 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2012, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.collection.parallel.mutable
+
+
+
+import scala.collection.generic._
+import scala.collection.parallel.Combiner
+import scala.collection.parallel.IterableSplitter
+import scala.collection.parallel.Task
+import scala.collection.mutable.BasicNode
+import scala.collection.mutable.TNode
+import scala.collection.mutable.LNode
+import scala.collection.mutable.CNode
+import scala.collection.mutable.SNode
+import scala.collection.mutable.INode
+import scala.collection.mutable.Ctrie
+import scala.collection.mutable.CtrieIterator
+
+
+
+/** Parallel Ctrie collection.
+ *
+ * It has its bulk operations parallelized, but uses the snapshot operation
+ * to create the splitter. This means that parallel bulk operations can be
+ * called concurrently with modifications to the underlying Ctrie.
+ *
+ * @author Aleksandar Prokopec
+ * @since 2.10
+ */
+final class ParCtrie[K, V] private[collection] (private val ctrie: Ctrie[K, V])
+extends ParMap[K, V]
+ with GenericParMapTemplate[K, V, ParCtrie]
+ with ParMapLike[K, V, ParCtrie[K, V], Ctrie[K, V]]
+ with ParCtrieCombiner[K, V]
+ with Serializable
+{
+ def this() = this(new Ctrie)
+
+ override def mapCompanion: GenericParMapCompanion[ParCtrie] = ParCtrie
+
+ override def empty: ParCtrie[K, V] = ParCtrie.empty
+
+ protected[this] override def newCombiner = ParCtrie.newCombiner
+
+ override def seq = ctrie
+
+ def splitter = new ParCtrieSplitter(0, ctrie.readOnlySnapshot().asInstanceOf[Ctrie[K, V]], true)
+
+ override def clear() = ctrie.clear()
+
+ def result = this
+
+ def get(key: K): Option[V] = ctrie.get(key)
+
+ def put(key: K, value: V): Option[V] = ctrie.put(key, value)
+
+ def update(key: K, value: V): Unit = ctrie.update(key, value)
+
+ def remove(key: K): Option[V] = ctrie.remove(key)
+
+ def +=(kv: (K, V)): this.type = {
+ ctrie.+=(kv)
+ this
+ }
+
+ def -=(key: K): this.type = {
+ ctrie.-=(key)
+ this
+ }
+
+ override def size = {
+ val in = ctrie.readRoot()
+ val r = in.gcasRead(ctrie)
+ r match {
+ case tn: TNode[_, _] => tn.cachedSize(ctrie)
+ case ln: LNode[_, _] => ln.cachedSize(ctrie)
+ case cn: CNode[_, _] =>
+ tasksupport.executeAndWaitResult(new Size(0, cn.array.length, cn.array))
+ cn.cachedSize(ctrie)
+ }
+ }
+
+ override def stringPrefix = "ParCtrie"
+
+ /* tasks */
+
+ /** Computes Ctrie size in parallel. */
+ class Size(offset: Int, howmany: Int, array: Array[BasicNode]) extends Task[Int, Size] {
+ var result = -1
+ def leaf(prev: Option[Int]) = {
+ var sz = 0
+ var i = offset
+ val until = offset + howmany
+ while (i < until) {
+ array(i) match {
+ case sn: SNode[_, _] => sz += 1
+ case in: INode[K, V] => sz += in.cachedSize(ctrie)
+ }
+ i += 1
+ }
+ result = sz
+ }
+ def split = {
+ val fp = howmany / 2
+ Seq(new Size(offset, fp, array), new Size(offset + fp, howmany - fp, array))
+ }
+ def shouldSplitFurther = howmany > 1
+ override def merge(that: Size) = result = result + that.result
+ }
+
+}
+
+
+private[collection] class ParCtrieSplitter[K, V](lev: Int, ct: Ctrie[K, V], mustInit: Boolean)
+extends CtrieIterator[K, V](lev, ct, mustInit)
+ with IterableSplitter[(K, V)]
+{
+ // only evaluated if `remaining` is invoked (which is not used by most tasks)
+ lazy val totalsize = ct.par.size
+ var iterated = 0
+
+ protected override def newIterator(_lev: Int, _ct: Ctrie[K, V], _mustInit: Boolean) = new ParCtrieSplitter[K, V](_lev, _ct, _mustInit)
+
+ override def shouldSplitFurther[S](coll: collection.parallel.ParIterable[S], parallelismLevel: Int) = {
+ val maxsplits = 3 + Integer.highestOneBit(parallelismLevel)
+ level < maxsplits
+ }
+
+ def dup = {
+ val it = newIterator(0, ct, false)
+ dupTo(it)
+ it.iterated = this.iterated
+ it
+ }
+
+ override def next() = {
+ iterated += 1
+ super.next()
+ }
+
+ def split: Seq[IterableSplitter[(K, V)]] = subdivide().asInstanceOf[Seq[IterableSplitter[(K, V)]]]
+
+ override def isRemainingCheap = false
+
+ def remaining: Int = totalsize - iterated
+}
+
+
+/** Only used within the `ParCtrie`. */
+private[mutable] trait ParCtrieCombiner[K, V] extends Combiner[(K, V), ParCtrie[K, V]] {
+
+ def combine[N <: (K, V), NewTo >: ParCtrie[K, V]](other: Combiner[N, NewTo]): Combiner[N, NewTo] = if (this eq other) this else {
+ throw new UnsupportedOperationException("This shouldn't have been called in the first place.")
+
+ val thiz = this.asInstanceOf[ParCtrie[K, V]]
+ val that = other.asInstanceOf[ParCtrie[K, V]]
+ val result = new ParCtrie[K, V]
+
+ result ++= thiz.iterator
+ result ++= that.iterator
+
+ result
+ }
+
+ override def canBeShared = true
+
+}
+
+
+object ParCtrie extends ParMapFactory[ParCtrie] {
+
+ def empty[K, V]: ParCtrie[K, V] = new ParCtrie[K, V]
+
+ def newCombiner[K, V]: Combiner[(K, V), ParCtrie[K, V]] = new ParCtrie[K, V]
+
+ implicit def canBuildFrom[K, V]: CanCombineFrom[Coll, (K, V), ParCtrie[K, V]] = new CanCombineFromMap[K, V]
+
+}
+
+
+
+
+
+
+
+
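For illustration, a minimal usage sketch of the new collection; the names are illustrative. Because the splitter is created from a read-only snapshot, bulk operations may safely overlap with concurrent updates.

    import scala.collection.parallel.mutable.ParCtrie

    val pct = new ParCtrie[Int, Int]
    for (i <- 0 until 1000) pct.put(i, i * i)
    // a parallel bulk operation; concurrent put/remove calls from other
    // threads cannot disturb it, since it traverses a snapshot
    val evens = pct filter { kv => kv._1 % 2 == 0 }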
diff --git a/src/library/scala/collection/parallel/mutable/ParHashMap.scala b/src/library/scala/collection/parallel/mutable/ParHashMap.scala
index 31750b0b0d..6ce6c45460 100644
--- a/src/library/scala/collection/parallel/mutable/ParHashMap.scala
+++ b/src/library/scala/collection/parallel/mutable/ParHashMap.scala
@@ -12,12 +12,12 @@ package mutable
-
import collection.generic._
import collection.mutable.DefaultEntry
import collection.mutable.HashEntry
import collection.mutable.HashTable
import collection.mutable.UnrolledBuffer
+import collection.parallel.Task
@@ -56,7 +56,7 @@ self =>
override def seq = new collection.mutable.HashMap[K, V](hashTableContents)
- def splitter = new ParHashMapIterator(1, table.length, size, table(0).asInstanceOf[DefaultEntry[K, V]]) with SCPI
+ def splitter = new ParHashMapIterator(1, table.length, size, table(0).asInstanceOf[DefaultEntry[K, V]])
override def size = tableSize
@@ -93,14 +93,11 @@ self =>
override def stringPrefix = "ParHashMap"
- type SCPI = SignalContextPassingIterator[ParHashMapIterator]
-
class ParHashMapIterator(start: Int, untilIdx: Int, totalSize: Int, e: DefaultEntry[K, V])
- extends EntryIterator[(K, V), ParHashMapIterator](start, untilIdx, totalSize, e) with ParIterator {
- me: SCPI =>
+ extends EntryIterator[(K, V), ParHashMapIterator](start, untilIdx, totalSize, e) {
def entry2item(entry: DefaultEntry[K, V]) = (entry.key, entry.value);
def newIterator(idxFrom: Int, idxUntil: Int, totalSz: Int, es: DefaultEntry[K, V]) =
- new ParHashMapIterator(idxFrom, idxUntil, totalSz, es) with SCPI
+ new ParHashMapIterator(idxFrom, idxUntil, totalSz, es)
}
private def writeObject(out: java.io.ObjectOutputStream) {
@@ -160,14 +157,13 @@ private[mutable] abstract class ParHashMapCombiner[K, V](private val tableLoadFa
extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntry[K, V], ParHashMapCombiner[K, V]](ParHashMapCombiner.numblocks)
with collection.mutable.HashTable.HashUtils[K]
{
-//self: EnvironmentPassingCombiner[(K, V), ParHashMap[K, V]] =>
- import collection.parallel.tasksupport._
private var mask = ParHashMapCombiner.discriminantmask
private var nonmasklen = ParHashMapCombiner.nonmasklength
+ private var seedvalue = 27
def +=(elem: (K, V)) = {
sz += 1
- val hc = improve(elemHashCode(elem._1))
+ val hc = improve(elemHashCode(elem._1), seedvalue)
val pos = (hc >>> nonmasklen)
if (buckets(pos) eq null) {
// initialize bucket
@@ -180,9 +176,9 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
def result: ParHashMap[K, V] = if (size >= (ParHashMapCombiner.numblocks * sizeMapBucketSize)) { // 1024
// construct table
- val table = new AddingHashTable(size, tableLoadFactor)
+ val table = new AddingHashTable(size, tableLoadFactor, seedvalue)
val bucks = buckets.map(b => if (b ne null) b.headPtr else null)
- val insertcount = executeAndWaitResult(new FillBlocks(bucks, table, 0, bucks.length))
+ val insertcount = combinerTaskSupport.executeAndWaitResult(new FillBlocks(bucks, table, 0, bucks.length))
table.setSize(insertcount)
// TODO compare insertcount and size to see if compression is needed
val c = table.hashTableContents
@@ -214,11 +210,12 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
* and true if the key was successfully inserted. It does not update the number of elements
* in the table.
*/
- private[ParHashMapCombiner] class AddingHashTable(numelems: Int, lf: Int) extends HashTable[K, DefaultEntry[K, V]] {
+ private[ParHashMapCombiner] class AddingHashTable(numelems: Int, lf: Int, _seedvalue: Int) extends HashTable[K, DefaultEntry[K, V]] {
import HashTable._
_loadFactor = lf
table = new Array[HashEntry[K, DefaultEntry[K, V]]](capacity(sizeForThreshold(_loadFactor, numelems)))
tableSize = 0
+ seedvalue = _seedvalue
threshold = newThreshold(_loadFactor, table.length)
sizeMapInit(table.length)
def setSize(sz: Int) = tableSize = sz
@@ -289,7 +286,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
insertcount
}
private def assertCorrectBlock(block: Int, k: K) {
- val hc = improve(elemHashCode(k))
+ val hc = improve(elemHashCode(k), seedvalue)
if ((hc >>> nonmasklen) != block) {
println(hc + " goes to " + (hc >>> nonmasklen) + ", while expected block is " + block)
assert((hc >>> nonmasklen) == block)
@@ -302,7 +299,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
override def merge(that: FillBlocks) {
this.result += that.result
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(ParHashMapCombiner.numblocks, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(ParHashMapCombiner.numblocks, combinerTaskSupport.parallelismLevel)
}
}
diff --git a/src/library/scala/collection/parallel/mutable/ParHashSet.scala b/src/library/scala/collection/parallel/mutable/ParHashSet.scala
index 7763cdf318..e0a2ab03df 100644
--- a/src/library/scala/collection/parallel/mutable/ParHashSet.scala
+++ b/src/library/scala/collection/parallel/mutable/ParHashSet.scala
@@ -8,10 +8,15 @@
package scala.collection.parallel.mutable
+
+
import collection.generic._
import collection.mutable.FlatHashTable
import collection.parallel.Combiner
import collection.mutable.UnrolledBuffer
+import collection.parallel.Task
+
+
/** A parallel hash set.
*
@@ -66,14 +71,11 @@ extends ParSet[T]
def contains(elem: T) = containsEntry(elem)
- def splitter = new ParHashSetIterator(0, table.length, size) with SCPI
-
- type SCPI = SignalContextPassingIterator[ParHashSetIterator]
+ def splitter = new ParHashSetIterator(0, table.length, size)
class ParHashSetIterator(start: Int, iteratesUntil: Int, totalElements: Int)
- extends ParFlatHashTableIterator(start, iteratesUntil, totalElements) with ParIterator {
- me: SCPI =>
- def newIterator(start: Int, until: Int, total: Int) = new ParHashSetIterator(start, until, total) with SCPI
+ extends ParFlatHashTableIterator(start, iteratesUntil, totalElements) {
+ def newIterator(start: Int, until: Int, total: Int) = new ParHashSetIterator(start, until, total)
}
private def writeObject(s: java.io.ObjectOutputStream) {
@@ -116,11 +118,10 @@ private[mutable] abstract class ParHashSetCombiner[T](private val tableLoadFacto
extends collection.parallel.BucketCombiner[T, ParHashSet[T], Any, ParHashSetCombiner[T]](ParHashSetCombiner.numblocks)
with collection.mutable.FlatHashTable.HashUtils[T] {
//self: EnvironmentPassingCombiner[T, ParHashSet[T]] =>
- import collection.parallel.tasksupport._
private var mask = ParHashSetCombiner.discriminantmask
private var nonmasklen = ParHashSetCombiner.nonmasklength
private var seedvalue = 27
-
+
def +=(elem: T) = {
sz += 1
val hc = improve(elemHashCode(elem), seedvalue)
@@ -142,7 +143,7 @@ with collection.mutable.FlatHashTable.HashUtils[T] {
private def parPopulate: FlatHashTable.Contents[T] = {
// construct it in parallel
val table = new AddingFlatHashTable(size, tableLoadFactor, seedvalue)
- val (inserted, leftovers) = executeAndWaitResult(new FillBlocks(buckets, table, 0, buckets.length))
+ val (inserted, leftovers) = combinerTaskSupport.executeAndWaitResult(new FillBlocks(buckets, table, 0, buckets.length))
var leftinserts = 0
for (elem <- leftovers) leftinserts += table.insertEntry(0, table.tableLength, elem.asInstanceOf[T])
table.setSize(leftinserts + inserted)
@@ -307,7 +308,7 @@ with collection.mutable.FlatHashTable.HashUtils[T] {
// the total number of successfully inserted elements is adjusted accordingly
result = (this.result._1 + that.result._1 + inserted, remainingLeftovers concat that.result._2)
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(ParHashMapCombiner.numblocks, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(ParHashMapCombiner.numblocks, combinerTaskSupport.parallelismLevel)
}
}
diff --git a/src/library/scala/collection/parallel/mutable/ParHashTable.scala b/src/library/scala/collection/parallel/mutable/ParHashTable.scala
index 9b8e233b95..8c93732427 100644
--- a/src/library/scala/collection/parallel/mutable/ParHashTable.scala
+++ b/src/library/scala/collection/parallel/mutable/ParHashTable.scala
@@ -29,7 +29,7 @@ trait ParHashTable[K, Entry >: Null <: HashEntry[K, Entry]] extends collection.m
/** A parallel iterator returning all the entries.
*/
abstract class EntryIterator[T, +IterRepr <: IterableSplitter[T]]
- (private var idx: Int, private val until: Int, private val totalsize: Int, private var es: Entry)
+ (private var idx: Int, private val until: Int, private val totalsize: Int, private var es: Entry)
extends IterableSplitter[T] with SizeMapUtils {
private val itertable = table
private var traversed = 0
diff --git a/src/library/scala/collection/parallel/mutable/ResizableParArrayCombiner.scala b/src/library/scala/collection/parallel/mutable/ResizableParArrayCombiner.scala
index eadc93d422..01eb17024e 100644
--- a/src/library/scala/collection/parallel/mutable/ResizableParArrayCombiner.scala
+++ b/src/library/scala/collection/parallel/mutable/ResizableParArrayCombiner.scala
@@ -8,18 +8,20 @@
package scala.collection.parallel.mutable
+
+
import scala.collection.generic.Sizing
import scala.collection.mutable.ArraySeq
import scala.collection.mutable.ArrayBuffer
import scala.collection.parallel.TaskSupport
-//import scala.collection.parallel.EnvironmentPassingCombiner
import scala.collection.parallel.unsupportedop
import scala.collection.parallel.Combiner
+import scala.collection.parallel.Task
+
+
/** An array combiner that uses a chain of arraybuffers to store elements. */
trait ResizableParArrayCombiner[T] extends LazyCombiner[T, ParArray[T], ExposedArrayBuffer[T]] {
-//self: EnvironmentPassingCombiner[T, ParArray[T]] =>
- import collection.parallel.tasksupport._
override def sizeHint(sz: Int) = if (chain.length == 1) chain(0).sizeHint(sz)
@@ -30,7 +32,7 @@ trait ResizableParArrayCombiner[T] extends LazyCombiner[T, ParArray[T], ExposedA
val arrayseq = new ArraySeq[T](size)
val array = arrayseq.array.asInstanceOf[Array[Any]]
- executeAndWaitResult(new CopyChainToArray(array, 0, size))
+ combinerTaskSupport.executeAndWaitResult(new CopyChainToArray(array, 0, size))
new ParArray(arrayseq)
} else { // optimisation if there is only 1 array
@@ -79,7 +81,7 @@ trait ResizableParArrayCombiner[T] extends LazyCombiner[T, ParArray[T], ExposedA
val fp = howmany / 2
List(new CopyChainToArray(array, offset, fp), new CopyChainToArray(array, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(size, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(size, combinerTaskSupport.parallelismLevel)
}
}
diff --git a/src/library/scala/collection/parallel/mutable/UnrolledParArrayCombiner.scala b/src/library/scala/collection/parallel/mutable/UnrolledParArrayCombiner.scala
index dc583fb4e7..410b542a68 100644
--- a/src/library/scala/collection/parallel/mutable/UnrolledParArrayCombiner.scala
+++ b/src/library/scala/collection/parallel/mutable/UnrolledParArrayCombiner.scala
@@ -18,9 +18,9 @@ import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.UnrolledBuffer
import scala.collection.mutable.UnrolledBuffer.Unrolled
import scala.collection.parallel.TaskSupport
-//import scala.collection.parallel.EnvironmentPassingCombiner
import scala.collection.parallel.unsupportedop
import scala.collection.parallel.Combiner
+import scala.collection.parallel.Task
@@ -40,8 +40,6 @@ extends Combiner[T, ParArray[T]] {
// because size is doubling, random access is O(logn)!
val buff = new DoublingUnrolledBuffer[Any]
- import collection.parallel.tasksupport._
-
def +=(elem: T) = {
buff += elem
this
@@ -51,7 +49,7 @@ extends Combiner[T, ParArray[T]] {
val arrayseq = new ArraySeq[T](size)
val array = arrayseq.array.asInstanceOf[Array[Any]]
- executeAndWaitResult(new CopyUnrolledToArray(array, 0, size))
+ combinerTaskSupport.executeAndWaitResult(new CopyUnrolledToArray(array, 0, size))
new ParArray(arrayseq)
}
@@ -109,7 +107,7 @@ extends Combiner[T, ParArray[T]] {
val fp = howmany / 2
List(new CopyUnrolledToArray(array, offset, fp), new CopyUnrolledToArray(array, offset + fp, howmany - fp))
}
- def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(size, parallelismLevel)
+ def shouldSplitFurther = howmany > collection.parallel.thresholdFromSize(size, combinerTaskSupport.parallelismLevel)
override def toString = "CopyUnrolledToArray(" + offset + ", " + howmany + ")"
}
}
diff --git a/src/library/scala/collection/parallel/package.scala b/src/library/scala/collection/parallel/package.scala
index f152629c50..943e0208c7 100644
--- a/src/library/scala/collection/parallel/package.scala
+++ b/src/library/scala/collection/parallel/package.scala
@@ -46,8 +46,16 @@ package object parallel {
else new ThreadPoolTaskSupport
} else new ThreadPoolTaskSupport
- val tasksupport = getTaskSupport
-
+ val defaultTaskSupport: TaskSupport = getTaskSupport
+
+ def setTaskSupport[Coll](c: Coll, t: TaskSupport): Coll = {
+ c match {
+ case pc: ParIterableLike[_, _, _] => pc.tasksupport = t
+ case _ => // do nothing
+ }
+ c
+ }
+
/* implicit conversions */
implicit def factory2ops[From, Elem, To](bf: CanBuildFrom[From, Elem, To]) = new FactoryOps[From, Elem, To] {
@@ -83,6 +91,7 @@ package object parallel {
}
}
+
package parallel {
trait FactoryOps[From, Elem, To] {
trait Otherwise[R] {
@@ -114,6 +123,18 @@ package parallel {
/* classes */
+ trait CombinerFactory[U, Repr] {
+ /** Provides a combiner used to construct a collection. */
+ def apply(): Combiner[U, Repr]
+ /** The `apply` method may create a new combiner on each call.
+ * If it does, this method returns `false`.
+ * Alternatively, it may return the same combiner on every call (typically,
+ * this is the case for concurrent collections, which are thread-safe).
+ * If so, this method returns `true`.
+ */
+ def doesShareCombiners: Boolean
+ }
+
/** Composite throwable - thrown when multiple exceptions are thrown at the same time. */
final case class CompositeThrowable(
val throwables: Set[Throwable]
@@ -127,8 +148,9 @@ package parallel {
* Automatically forwards the signal delegate when splitting.
*/
private[parallel] class BufferSplitter[T]
- (private val buffer: collection.mutable.ArrayBuffer[T], private var index: Int, private val until: Int, var signalDelegate: collection.generic.Signalling)
+ (private val buffer: collection.mutable.ArrayBuffer[T], private var index: Int, private val until: Int, _sigdel: collection.generic.Signalling)
extends IterableSplitter[T] {
+ signalDelegate = _sigdel
def hasNext = index < until
def next = {
val r = buffer(index)
@@ -182,7 +204,7 @@ package parallel {
* the receiver (which will be the return value).
*/
private[parallel] abstract class BucketCombiner[-Elem, +To, Buck, +CombinerType <: BucketCombiner[Elem, To, Buck, CombinerType]]
- (private val bucketnumber: Int)
+ (private val bucketnumber: Int)
extends Combiner[Elem, To] {
//self: EnvironmentPassingCombiner[Elem, To] =>
protected var buckets: Array[UnrolledBuffer[Buck]] @uncheckedVariance = new Array[UnrolledBuffer[Buck]](bucketnumber)
@@ -196,6 +218,7 @@ package parallel {
}
def beforeCombine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]) {}
+
def afterCombine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]) {}
def combine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]): Combiner[N, NewTo] = {
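For illustration, a minimal sketch of the renamed `defaultTaskSupport` and the new `setTaskSupport` helper added above; the collection is illustrative.

    import scala.collection.parallel._

    // attach a specific task support to a collection in passing; values
    // that are not a ParIterableLike are returned unchanged
    val pc = setTaskSupport((1 to 100).par, new ThreadPoolTaskSupport)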
diff --git a/src/library/scala/concurrent/Awaitable.scala b/src/library/scala/concurrent/Awaitable.scala
new file mode 100644
index 0000000000..c38e668f30
--- /dev/null
+++ b/src/library/scala/concurrent/Awaitable.scala
@@ -0,0 +1,24 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import scala.annotation.implicitNotFound
+import scala.util.Duration
+
+
+
+trait Awaitable[+T] {
+ @implicitNotFound(msg = "Waiting must be done by calling `blocking(timeout) b`, where `b` is the `Awaitable` object or a potentially blocking piece of code.")
+ def await(atMost: Duration)(implicit canawait: CanAwait): T
+}
+
+
+
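For illustration, a minimal hypothetical implementation of the new trait. It assumes `CanAwait` is defined elsewhere in `scala.concurrent`; the class name is illustrative.

    import scala.concurrent.{ Awaitable, CanAwait }
    import scala.util.Duration

    // an awaitable that is already complete and returns a precomputed value
    class Ready[T](value: T) extends Awaitable[T] {
      def await(atMost: Duration)(implicit canawait: CanAwait): T = value
    }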
diff --git a/src/library/scala/concurrent/Channel.scala b/src/library/scala/concurrent/Channel.scala
index 43d684641e..f6d6341151 100644
--- a/src/library/scala/concurrent/Channel.scala
+++ b/src/library/scala/concurrent/Channel.scala
@@ -46,4 +46,5 @@ class Channel[A] {
written = written.next
x
}
+
}
diff --git a/src/library/scala/concurrent/ConcurrentPackageObject.scala b/src/library/scala/concurrent/ConcurrentPackageObject.scala
new file mode 100644
index 0000000000..7d005838d3
--- /dev/null
+++ b/src/library/scala/concurrent/ConcurrentPackageObject.scala
@@ -0,0 +1,111 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import java.util.concurrent.{ Executors, ExecutorService }
+import scala.concurrent.forkjoin.ForkJoinPool
+import scala.util.{ Duration, Try, Success, Failure }
+import ConcurrentPackageObject._
+
+
+
+/** The implementation behind the `scala.concurrent` package object,
+ * containing primitives for concurrent and parallel programming.
+ */
+abstract class ConcurrentPackageObject {
+ /** A global execution environment for executing lightweight tasks.
+ */
+ lazy val executionContext =
+ new impl.ExecutionContextImpl(getExecutorService)
+
+ private[concurrent] def getExecutorService: AnyRef =
+ if (util.Properties.isJavaAtLeast("1.6")) {
+ val vendor = util.Properties.javaVmVendor
+ if ((vendor contains "Oracle") || (vendor contains "Sun") || (vendor contains "Apple")) new ForkJoinPool
+ else Executors.newCachedThreadPool()
+ } else Executors.newCachedThreadPool()
+
+ val handledFutureException: PartialFunction[Throwable, Throwable] = {
+ case t: Throwable if isFutureThrowable(t) => t
+ }
+
+ // TODO rename appropriately and make public
+ private[concurrent] def isFutureThrowable(t: Throwable) = t match {
+ case e: Error => false
+ case t: scala.util.control.ControlThrowable => false
+ case i: InterruptedException => false
+ case _ => true
+ }
+
+ private[concurrent] def resolve[T](source: Try[T]): Try[T] = source match {
+ case Failure(t: scala.runtime.NonLocalReturnControl[_]) => Success(t.value.asInstanceOf[T])
+ case Failure(t: scala.util.control.ControlThrowable) => Failure(new ExecutionException("Boxed ControlThrowable", t))
+ case Failure(t: InterruptedException) => Failure(new ExecutionException("Boxed InterruptedException", t))
+ case Failure(e: Error) => Failure(new ExecutionException("Boxed Error", e))
+ case _ => source
+ }
+
+ private[concurrent] def resolver[T] =
+ resolverFunction.asInstanceOf[PartialFunction[Throwable, Try[T]]]
+
+ /* concurrency constructs */
+
+ def future[T](body: =>T)(implicit execCtx: ExecutionContext = executionContext): Future[T] =
+ execCtx future body
+
+ def promise[T]()(implicit execCtx: ExecutionContext = executionContext): Promise[T] =
+ execCtx promise
+
+ /** Wraps a block of code into an awaitable object. */
+ def body2awaitable[T](body: =>T) = new Awaitable[T] {
+ def await(atMost: Duration)(implicit cb: CanAwait) = body
+ }
+
+ /** Used to block on a piece of code which potentially blocks.
+ *
+ * @param body A piece of code which contains potentially blocking or long running calls.
+ *
+ * Calling this method may throw the following exceptions:
+ * - CancellationException - if the computation was cancelled
+ * - InterruptedException - in the case that a wait within the blockable object was interrupted
+ * - TimeoutException - in the case that the blockable object timed out
+ */
+ def blocking[T](atMost: Duration)(body: =>T)(implicit execCtx: ExecutionContext): T =
+ executionContext.blocking(atMost)(body)
+
+ /** Blocks on an awaitable object.
+ *
+ * @param awaitable An object with an `await` method which runs potentially blocking or long running calls.
+ *
+ * Calling this method may throw the following exceptions:
+ * - CancellationException - if the computation was cancelled
+ * - InterruptedException - in the case that a wait within the blockable object was interrupted
+ * - TimeoutException - in the case that the blockable object timed out
+ */
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration)(implicit execCtx: ExecutionContext = executionContext): T =
+ executionContext.blocking(awaitable, atMost)
+
+ @inline implicit final def int2durationops(x: Int): DurationOps = new DurationOps(x)
+}
+
+private[concurrent] object ConcurrentPackageObject {
+ // TODO, docs, return type
+ // Note that having this in the package object led to failures when
+ // compiling a subset of sources; it seems that the wildcard is not
+ // properly handled, and you get messages like "type _$1 defined twice".
+ // This is consistent with other package object breakdowns.
+ private val resolverFunction: PartialFunction[Throwable, Try[_]] = {
+ case t: scala.runtime.NonLocalReturnControl[_] => Success(t.value)
+ case t: scala.util.control.ControlThrowable => Failure(new ExecutionException("Boxed ControlThrowable", t))
+ case t: InterruptedException => Failure(new ExecutionException("Boxed InterruptedException", t))
+ case e: Error => Failure(new ExecutionException("Boxed Error", e))
+ case t => Failure(t)
+ }
+}
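For orientation, a brief usage sketch of the primitives defined above. `future`, `promise` and `blocking` all default their implicit argument to the global `executionContext`, so no context needs to be passed explicitly; the one-second timeout below is illustrative:

    import scala.concurrent._
    import scala.util.Duration

    val f = future { 21 * 2 }  // runs on the default executionContext
    // Block the calling thread until the result is available.
    val answer = blocking(f, Duration.fromNanos(1000000000L))  // 42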
diff --git a/src/library/scala/concurrent/DelayedLazyVal.scala b/src/library/scala/concurrent/DelayedLazyVal.scala
index e308c3b5a6..a17153bad5 100644
--- a/src/library/scala/concurrent/DelayedLazyVal.scala
+++ b/src/library/scala/concurrent/DelayedLazyVal.scala
@@ -8,7 +8,6 @@
package scala.concurrent
-import ops.future
/** A `DelayedLazyVal` is a wrapper for lengthy computations which have a
* valid partially computed result.
@@ -40,8 +39,10 @@ class DelayedLazyVal[T](f: () => T, body: => Unit) {
*/
def apply(): T = if (isDone) complete else f()
- future {
+ // TODO replace with scala.concurrent.future { ... }
+ ops.future {
body
_isDone = true
}
+
}
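A small usage sketch of the wrapped API, with illustrative values; the first constructor argument produces the partial result, the second is the lengthy computation now dispatched through `ops.future`:

    import scala.collection.mutable.ArrayBuffer
    import scala.concurrent.DelayedLazyVal

    val buffer = new ArrayBuffer[Int]
    val partial = new DelayedLazyVal(() => buffer.toList, {
      for (i <- 1 to 1000) buffer += i  // the lengthy computation
    })
    partial() // a valid snapshot of the list while the loop still runs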
diff --git a/src/library/scala/concurrent/ExecutionContext.scala b/src/library/scala/concurrent/ExecutionContext.scala
new file mode 100644
index 0000000000..eb1b3355c0
--- /dev/null
+++ b/src/library/scala/concurrent/ExecutionContext.scala
@@ -0,0 +1,132 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import java.util.concurrent.atomic.{ AtomicInteger }
+import java.util.concurrent.{ Executors, Future => JFuture, Callable }
+import scala.util.Duration
+import scala.util.{ Try, Success, Failure }
+import scala.concurrent.forkjoin.{ ForkJoinPool, RecursiveTask => FJTask, RecursiveAction, ForkJoinWorkerThread }
+import scala.collection.generic.CanBuildFrom
+import collection._
+
+
+
+trait ExecutionContext {
+
+ protected implicit object CanAwaitEvidence extends CanAwait
+
+ def execute(runnable: Runnable): Unit
+
+ def execute[U](body: () => U): Unit
+
+ def promise[T]: Promise[T]
+
+ def future[T](body: Callable[T]): Future[T] = future(body.call())
+
+ def future[T](body: => T): Future[T]
+
+ def blocking[T](atMost: Duration)(body: =>T): T
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T
+
+ def reportFailure(t: Throwable): Unit
+
+ /* implementations follow */
+
+ private implicit val executionContext = this
+
+ def keptPromise[T](result: T): Promise[T] = {
+ val p = promise[T]
+ p success result
+ }
+
+ def brokenPromise[T](t: Throwable): Promise[T] = {
+ val p = promise[T]
+ p failure t
+ }
+
+ /** TODO some docs
+ *
+ */
+ def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]]): Future[Coll[T]] = {
+ import nondeterministic._
+ val buffer = new mutable.ArrayBuffer[T]
+ val counter = new AtomicInteger(1) // how else could we do this?
+ val p: Promise[Coll[T]] = promise[Coll[T]] // we need an implicit execctx in the signature
+ var idx = 0
+
+ def tryFinish() = if (counter.decrementAndGet() == 0) {
+ val builder = cbf(futures)
+ builder ++= buffer
+ p success builder.result
+ }
+
+ for (f <- futures) {
+ val currentIndex = idx
+ buffer += null.asInstanceOf[T]
+ counter.incrementAndGet()
+ f onComplete {
+ case Failure(t) =>
+ p tryFailure t
+ case Success(v) =>
+ buffer(currentIndex) = v
+ tryFinish()
+ }
+ idx += 1
+ }
+
+ tryFinish()
+
+ p.future
+ }
+
+ /** TODO some docs
+ *
+ */
+ def any[T](futures: Traversable[Future[T]]): Future[T] = {
+ val p = promise[T]
+ val completeFirst: Try[T] => Unit = elem => p tryComplete elem
+
+ futures foreach (_ onComplete completeFirst)
+
+ p.future
+ }
+
+ /** TODO some docs
+ *
+ */
+ def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean): Future[Option[T]] = {
+ if (futures.isEmpty) Promise.kept[Option[T]](None).future
+ else {
+ val result = promise[Option[T]]
+ val count = new AtomicInteger(futures.size)
+ val search: Try[T] => Unit = {
+ v => v match {
+ case Success(r) => if (predicate(r)) result trySuccess Some(r)
+ case _ =>
+ }
+ if (count.decrementAndGet() == 0) result trySuccess None
+ }
+
+ futures.foreach(_ onComplete search)
+
+ result.future
+ }
+ }
+
+}
+
+
+sealed trait CanAwait
+
+
+
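A sketch of the combinators declared on the context, assuming `ec` is any `ExecutionContext` instance such as the default `executionContext` from the package object:

    import scala.concurrent._

    val ec: ExecutionContext = executionContext
    val futures = List(ec.future(1), ec.future(2), ec.future(3))

    // `any` completes with whichever future finishes first (nondeterministic);
    // `find` completes with the first result satisfying the predicate, if any.
    val first: Future[Int]        = ec.any(futures)
    val even: Future[Option[Int]] = ec.find(futures)(_ % 2 == 0)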
diff --git a/src/library/scala/concurrent/Future.scala b/src/library/scala/concurrent/Future.scala
new file mode 100644
index 0000000000..eb54b61db0
--- /dev/null
+++ b/src/library/scala/concurrent/Future.scala
@@ -0,0 +1,492 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import java.util.concurrent.{ ConcurrentLinkedQueue, TimeUnit, Callable }
+import java.util.concurrent.TimeUnit.{ NANOSECONDS => NANOS, MILLISECONDS => MILLIS }
+import java.lang.{ Iterable => JIterable }
+import java.util.{ LinkedList => JLinkedList }
+import java.{ lang => jl }
+import java.util.concurrent.atomic.{ AtomicReferenceFieldUpdater, AtomicInteger, AtomicBoolean }
+
+import scala.util.{ Timeout, Duration, Try, Success, Failure }
+import scala.Option
+
+import scala.annotation.tailrec
+import scala.collection.mutable.Stack
+import scala.collection.mutable.Builder
+import scala.collection.generic.CanBuildFrom
+
+
+
+/** The trait that represents futures.
+ *
+ * Asynchronous computations that yield futures are created with the `future` call:
+ *
+ * {{{
+ * val s = "Hello"
+ * val f: Future[String] = future {
+ * s + " future!"
+ * }
+ * f onSuccess {
+ * case msg => println(msg)
+ * }
+ * }}}
+ *
+ * @author Philipp Haller, Heather Miller, Aleksandar Prokopec, Viktor Klang
+ *
+ * @define multipleCallbacks
+ * Multiple callbacks may be registered; there is no guarantee that they will be
+ * executed in a particular order.
+ *
+ * @define caughtThrowables
+ * The future may contain a throwable object and this means that the future failed.
+ * Futures obtained through combinators have the same exception as the future they were obtained from.
+ * The following throwable objects are not contained in the future:
+ * - `Error` - errors are not contained within futures
+ * - `InterruptedException` - not contained within futures
+ * - all `scala.util.control.ControlThrowable` except `NonLocalReturnControl` - not contained within futures
+ *
+ * Instead, the future is completed with an `ExecutionException` which has one of the exceptions above
+ * as its cause.
+ * If a future is failed with a `scala.runtime.NonLocalReturnControl`,
+ * it is completed with the value contained in that throwable instead.
+ *
+ * @define nonDeterministic
+ * Note: using this method yields nondeterministic dataflow programs.
+ *
+ * @define forComprehensionExamples
+ * Example:
+ *
+ * {{{
+ * val f = future { 5 }
+ * val g = future { 3 }
+ * val h = for {
+ * x: Int <- f // returns Future(5)
+ * y: Int <- g // returns Future(3)
+ * } yield x + y
+ * }}}
+ *
+ * is translated to:
+ *
+ * {{{
+ * f flatMap { (x: Int) => g map { (y: Int) => x + y } }
+ * }}}
+ */
+trait Future[+T] extends Awaitable[T] {
+self =>
+
+ /* Callbacks */
+
+ /** When this future is completed successfully (i.e. with a value),
+ * apply the provided partial function to the value if the partial function
+ * is defined at that value.
+ *
+ * If the future has already been completed with a value,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * $multipleCallbacks
+ */
+ def onSuccess[U](pf: PartialFunction[T, U]): this.type = onComplete {
+ case Failure(t) => // do nothing
+ case Success(v) => if (pf isDefinedAt v) pf(v) else { /*do nothing*/ }
+ }
+
+ /** When this future is completed with a failure (i.e. with a throwable),
+ * apply the provided callback to the throwable.
+ *
+ * $caughtThrowables
+ *
+ * If the future has already been completed with a failure,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * Will not be called if the future is completed with a value.
+ *
+ * $multipleCallbacks
+ */
+ def onFailure[U](callback: PartialFunction[Throwable, U]): this.type = onComplete {
+ case Failure(t) => if (isFutureThrowable(t) && callback.isDefinedAt(t)) callback(t) else { /*do nothing*/ }
+ case Success(v) => // do nothing
+ }
+
+ /** When this future is completed, either through an exception, a timeout, or a value,
+ * apply the provided function.
+ *
+ * If the future has already been completed,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * $multipleCallbacks
+ */
+ def onComplete[U](func: Try[T] => U): this.type
+
+
+ /* Miscellaneous */
+
+ /** Creates a new promise.
+ */
+ def newPromise[S]: Promise[S]
+
+
+ /* Projections */
+
+ /** Returns a failed projection of this future.
+ *
+ * The failed projection is a future holding a value of type `Throwable`.
+ *
+ * It is completed with a value which is the throwable of the original future
+ * in case the original future is failed.
+ *
+ * It is failed with a `NoSuchElementException` if the original future is completed successfully.
+ *
+ * Blocking on this future returns a value if the original future is completed with an exception
+ * and throws a `NoSuchElementException` if the original future is completed successfully.
+ */
+ def failed: Future[Throwable] = {
+ def noSuchElem(v: T) =
+ new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + v)
+
+ val p = newPromise[Throwable]
+
+ onComplete {
+ case Failure(t) => p success t
+ case Success(v) => p failure noSuchElem(v)
+ }
+
+ p.future
+ }
+
+
+ /* Monadic operations */
+
+ /** Asynchronously processes the value in the future once the value becomes available.
+ *
+ * Will not be called if the future fails.
+ */
+ def foreach[U](f: T => U): Unit = onComplete {
+ case Success(r) => f(r)
+ case Failure(_) => // do nothing
+ }
+
+ /** Creates a new future by applying a function to the successful result of
+ * this future. If this future is completed with an exception then the new
+ * future will also contain this exception.
+ *
+ * $forComprehensionExamples
+ */
+ def map[S](f: T => S): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try p success f(v)
+ catch {
+ case t => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by applying a function to the successful result of
+ * this future, and returns the result of the function as the new future.
+ * If this future is completed with an exception then the new future will
+ * also contain this exception.
+ *
+ * $forComprehensionExamples
+ */
+ def flatMap[S](f: T => Future[S]): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ f(v) onComplete {
+ case Failure(t) => p failure t
+ case Success(v) => p success v
+ }
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by filtering the value of the current future with a predicate.
+ *
+ * If the current future contains a value which satisfies the predicate, the new future will also hold that value.
+ * Otherwise, the resulting future will fail with a `NoSuchElementException`.
+ *
+ * If the current future fails or times out, the resulting future also fails or times out, respectively.
+ *
+ * Example:
+ * {{{
+ * val f = future { 5 }
+ * val g = f filter { _ % 2 == 1 }
+ * val h = f filter { _ % 2 == 0 }
+ * await(0) g // evaluates to 5
+ * await(0) h // throws a NoSuchElementException
+ * }}}
+ */
+ def filter(pred: T => Boolean): Future[T] = {
+ val p = newPromise[T]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ if (pred(v)) p success v
+ else p failure new NoSuchElementException("Future.filter predicate is not satisfied by: " + v)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by mapping the value of the current future if the given partial function is defined at that value.
+ *
+ * If the current future contains a value for which the partial function is defined, the new future will also hold that value.
+ * Otherwise, the resulting future will fail with a `NoSuchElementException`.
+ *
+ * If the current future fails or times out, the resulting future also fails or times out, respectively.
+ *
+ * Example:
+ * {{{
+ * val f = future { -5 }
+ * val g = f collect {
+ * case x if x < 0 => -x
+ * }
+ * val h = f collect {
+ * case x if x > 0 => x * 2
+ * }
+ * await(0) g // evaluates to 5
+ * await(0) h // throws a NoSuchElementException
+ * }}}
+ */
+ def collect[S](pf: PartialFunction[T, S]): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ if (pf.isDefinedAt(v)) p success pf(v)
+ else p failure new NoSuchElementException("Future.collect partial function is not defined at: " + v)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future that will handle any matching throwable that this
+ * future might contain. If there is no match, or if this future contains
+ * a valid result, then the new future will contain the same result.
+ *
+ * Example:
+ *
+ * {{{
+ * future (6 / 0) recover { case e: ArithmeticException => 0 } // result: 0
+ * future (6 / 0) recover { case e: NotFoundException => 0 } // result: exception
+ * future (6 / 2) recover { case e: ArithmeticException => 0 } // result: 3
+ * }}}
+ */
+ def recover[U >: T](pf: PartialFunction[Throwable, U]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) if pf isDefinedAt t =>
+ try { p success pf(t) }
+ catch { case t: Throwable => p complete resolver(t) }
+ case otherwise => p complete otherwise
+ }
+
+ p.future
+ }
+
+ /** Creates a new future that will handle any matching throwable that this
+ * future might contain, by assigning it the value of another future.
+ *
+ * If there is no match, or if this future contains
+ * a valid result then the new future will contain the same result.
+ *
+ * Example:
+ *
+ * {{{
+ * val f = future { Int.MaxValue }
+ * future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue
+ * }}}
+ */
+ def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) if pf isDefinedAt t =>
+ try {
+ p completeWith pf(t)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ case otherwise => p complete otherwise
+ }
+
+ p.future
+ }
+
+ /** Zips the values of `this` and `that` future, and creates
+ * a new future holding the tuple of their results.
+ *
+ * If `this` future fails, the resulting future is failed
+ * with the throwable stored in `this`.
+ * Otherwise, if `that` future fails, the resulting future is failed
+ * with the throwable stored in `that`.
+ */
+ def zip[U](that: Future[U]): Future[(T, U)] = {
+ val p = newPromise[(T, U)]
+
+ this onComplete {
+ case Failure(t) => p failure t
+ case Success(r) => that onSuccess {
+ case r2 => p success ((r, r2))
+ }
+ }
+
+ that onFailure {
+ case f => p failure f
+ }
+
+ p.future
+ }
+
+ /** Creates a new future which holds the result of this future if it was completed successfully, or, if not,
+ * the result of the `that` future if `that` is completed successfully.
+ * If both futures are failed, the resulting future holds the throwable object of the first future.
+ *
+ * Using this method will not cause concurrent programs to become nondeterministic.
+ *
+ * Example:
+ * {{{
+ * val f = future { sys.error("failed") }
+ * val g = future { 5 }
+ * val h = f fallbackTo g
+ * await(0) h // evaluates to 5
+ * }}}
+ */
+ def fallbackTo[U >: T](that: Future[U]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) => that onComplete {
+ case Failure(_) => p failure t
+ case Success(v) => p success v
+ }
+ case Success(v) => p success v
+ }
+
+ p.future
+ }
+
+ /** Applies the side-effecting function to the result of this future, and returns
+ * a new future with the result of this future.
+ *
+ * This method allows one to enforce that the callbacks are executed in a
+ * specified order.
+ *
+ * Note that if one of the chained `andThen` callbacks throws
+ * an exception, that exception is not propagated to the subsequent `andThen`
+ * callbacks. Instead, the subsequent `andThen` callbacks are given the original
+ * value of this future.
+ *
+ * The following example prints out `5`:
+ *
+ * {{{
+ * val f = future { 5 }
+ * f andThen {
+ * case r => sys.error("runtime exception")
+ * } andThen {
+ * case Failure(t) => println(t)
+ * case Success(v) => println(v)
+ * }
+ * }}}
+ */
+ def andThen[U](pf: PartialFunction[Try[T], U]): Future[T] = {
+ val p = newPromise[T]
+
+ onComplete {
+ case r =>
+ try if (pf isDefinedAt r) pf(r)
+ finally p complete r
+ }
+
+ p.future
+ }
+
+ /** Creates a new future which holds the result of either this future or `that` future, depending on
+ * which future was completed first.
+ *
+ * $nonDeterministic
+ *
+ * Example:
+ * {{{
+ * val f = future { sys.error("failed") }
+ * val g = future { 5 }
+ * val h = f either g
+ * await(0) h // evaluates to either 5 or throws a runtime exception
+ * }}}
+ */
+ def either[U >: T](that: Future[U]): Future[U] = {
+ val p = self.newPromise[U]
+
+ val completePromise: PartialFunction[Try[U], _] = {
+ case Failure(t) => p tryFailure t
+ case Success(v) => p trySuccess v
+ }
+
+ self onComplete completePromise
+ that onComplete completePromise
+
+ p.future
+ }
+
+}
+
+
+
+/** TODO some docs
+ *
+ * @define nonDeterministic
+ * Note: using this method yields nondeterministic dataflow programs.
+ */
+object Future {
+
+ // TODO make more modular by encoding all other helper methods within the execution context
+ /** TODO some docs
+ */
+ def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]], ec: ExecutionContext): Future[Coll[T]] =
+ ec.all[T, Coll](futures)
+
+ // move this to future companion object
+ @inline def apply[T](body: =>T)(implicit executor: ExecutionContext): Future[T] = executor.future(body)
+
+ def any[T](futures: Traversable[Future[T]])(implicit ec: ExecutionContext): Future[T] = ec.any(futures)
+
+ def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean)(implicit ec: ExecutionContext): Future[Option[T]] = ec.find(futures)(predicate)
+
+}
+
+
+
+
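Tying the combinators above together, a hedged end-to-end sketch using the default execution context from the package object:

    import scala.concurrent._

    val f = future { "21" }
    val g = f map (_.toInt) flatMap (n => future { n * 2 })
    // A parse failure in f would propagate into g; recover gives a fallback.
    val safe = g recover { case e: NumberFormatException => 0 }
    safe onSuccess { case n => println("result: " + n) } // prints: result: 42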
diff --git a/src/library/scala/concurrent/FutureTaskRunner.scala b/src/library/scala/concurrent/FutureTaskRunner.scala
index c5fcde2d19..75e6299ad9 100644
--- a/src/library/scala/concurrent/FutureTaskRunner.scala
+++ b/src/library/scala/concurrent/FutureTaskRunner.scala
@@ -13,6 +13,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait FutureTaskRunner extends TaskRunner {
/** The type of the futures that the underlying task runner supports.
diff --git a/src/library/scala/concurrent/JavaConversions.scala b/src/library/scala/concurrent/JavaConversions.scala
index db3c490882..127a0e0055 100644
--- a/src/library/scala/concurrent/JavaConversions.scala
+++ b/src/library/scala/concurrent/JavaConversions.scala
@@ -17,6 +17,7 @@ import java.util.concurrent.{ExecutorService, Executor}
*/
object JavaConversions {
+ @deprecated("Use `asExecutionContext` instead.", "2.10.0")
implicit def asTaskRunner(exec: ExecutorService): FutureTaskRunner =
new ThreadPoolRunner {
override protected def executor =
@@ -26,6 +27,7 @@ object JavaConversions {
exec.shutdown()
}
+ @deprecated("Use `asExecutionContext` instead.", "2.10.0")
implicit def asTaskRunner(exec: Executor): TaskRunner =
new TaskRunner {
type Task[T] = Runnable
@@ -46,4 +48,9 @@ object JavaConversions {
// do nothing
}
}
+
+ implicit def asExecutionContext(exec: ExecutorService): ExecutionContext = null // TODO
+
+ implicit def asExecutionContext(exec: Executor): ExecutionContext = null // TODO
+
}
diff --git a/src/library/scala/concurrent/ManagedBlocker.scala b/src/library/scala/concurrent/ManagedBlocker.scala
index 9c6f4d51d6..0b6d82e76f 100644
--- a/src/library/scala/concurrent/ManagedBlocker.scala
+++ b/src/library/scala/concurrent/ManagedBlocker.scala
@@ -12,6 +12,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Not used.", "2.10.0")
trait ManagedBlocker {
/**
diff --git a/src/library/scala/concurrent/Promise.scala b/src/library/scala/concurrent/Promise.scala
new file mode 100644
index 0000000000..4404e90971
--- /dev/null
+++ b/src/library/scala/concurrent/Promise.scala
@@ -0,0 +1,132 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+import scala.util.{ Try, Success, Failure }
+
+
+
+
+/** Promise is an object which can be completed with a value or failed
+ * with an exception.
+ *
+ * @define promiseCompletion
+ * If the promise has already been fulfilled, failed or has timed out,
+ * calling this method will throw an IllegalStateException.
+ *
+ * @define allowedThrowables
+ * If the throwable used to fail this promise is an error, a control exception
+ * or an interrupted exception, it will be wrapped as a cause within an
+ * `ExecutionException` which will fail the promise.
+ *
+ * @define nonDeterministic
+ * Note: Using this method may result in non-deterministic concurrent programs.
+ */
+trait Promise[T] {
+
+ import nondeterministic._
+
+ /** Future containing the value of this promise.
+ */
+ def future: Future[T]
+
+ /** Completes the promise with either an exception or a value.
+ *
+ * @param result Either the value or the exception to complete the promise with.
+ *
+ * $promiseCompletion
+ */
+ def complete(result: Try[T]): this.type = if (tryComplete(result)) this else throwCompleted
+
+ /** Tries to complete the promise with either a value or the exception.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def tryComplete(result: Try[T]): Boolean
+
+ /** Completes this promise with the specified future, once that future is completed.
+ *
+ * @return This promise
+ */
+ final def completeWith(other: Future[T]): this.type = {
+ other onComplete {
+ this complete _
+ }
+ this
+ }
+
+ /** Completes the promise with a value.
+ *
+ * @param value The value to complete the promise with.
+ *
+ * $promiseCompletion
+ */
+ def success(v: T): this.type = if (trySuccess(v)) this else throwCompleted
+
+ /** Tries to complete the promise with a value.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def trySuccess(value: T): Boolean = tryComplete(Success(value))
+
+ /** Completes the promise with an exception.
+ *
+ * @param t The throwable to complete the promise with.
+ *
+ * $allowedThrowables
+ *
+ * $promiseCompletion
+ */
+ def failure(t: Throwable): this.type = if (tryFailure(t)) this else throwCompleted
+
+ /** Tries to complete the promise with an exception.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def tryFailure(t: Throwable): Boolean = tryComplete(Failure(t))
+
+ /** Wraps a `Throwable` in an `ExecutionException` if necessary. TODO replace with `resolver` from scala.concurrent
+ *
+ * $allowedThrowables
+ */
+ protected def wrap(t: Throwable): Throwable = t match {
+ case t: Throwable if isFutureThrowable(t) => t
+ case _ => new ExecutionException(t)
+ }
+
+ private def throwCompleted = throw new IllegalStateException("Promise already completed.")
+
+}
+
+
+
+object Promise {
+
+ def kept[T](result: T)(implicit execctx: ExecutionContext): Promise[T] =
+ execctx keptPromise result
+
+ def broken[T](t: Throwable)(implicit execctx: ExecutionContext): Promise[T] =
+ execctx brokenPromise t
+
+}
+
+
+
+
+
+
+
+
+
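A producer/consumer sketch of the API above; note the first-completion-wins semantics of the `try*` variants versus the throwing behaviour of `success`/`failure`:

    import scala.concurrent._
    import scala.util.{ Success, Failure }

    val p = promise[Int]()  // from the concurrent package object
    p.future onComplete {
      case Success(v) => println("got " + v)
      case Failure(t) => println("failed: " + t)
    }

    p success 42       // completes the promise; callbacks fire
    // p success 43    // would throw IllegalStateException
    p trySuccess 43    // returns false instead of throwing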
diff --git a/src/library/scala/concurrent/Scheduler.scala b/src/library/scala/concurrent/Scheduler.scala
new file mode 100644
index 0000000000..39d798e6b4
--- /dev/null
+++ b/src/library/scala/concurrent/Scheduler.scala
@@ -0,0 +1,54 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+import scala.util.Duration
+
+/** A service for scheduling tasks and thunks for one-time, or periodic execution.
+ */
+trait Scheduler {
+
+ /** Schedules a thunk for repeated execution with an initial delay and a frequency.
+ *
+ * @param delay the initial delay after which the thunk should be executed
+ * the first time
+ * @param frequency the frequency with which the thunk should be executed,
+ * as a time period between subsequent executions
+ */
+ def schedule(delay: Duration, frequency: Duration)(thunk: => Unit): Cancellable
+
+ /** Schedules a task for execution after a given delay.
+ *
+ * @param delay the duration after which the task should be executed
+ * @param task the task that is scheduled for execution
+ * @return a `Cancellable` that may be used to cancel the execution
+ * of the task
+ */
+ def scheduleOnce(delay: Duration, task: Runnable): Cancellable
+
+ /** Schedules a thunk for execution after a given delay.
+ *
+ * @param delay the duration after which the thunk should be executed
+ * @param thunk the thunk that is scheduled for execution
+ * @return a `Cancellable` that may be used to cancel the execution
+ * of the thunk
+ */
+ def scheduleOnce(delay: Duration)(thunk: => Unit): Cancellable
+
+}
+
+
+
+trait Cancellable {
+
+ /** Cancels the underlying task.
+ */
+ def cancel(): Unit
+
+}
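A sketch of how the trait might be driven, assuming some `Scheduler` instance is available; no implementation ships enabled in this commit (`SchedulerImpl` below is still `.disabled`), and the `Duration(length, unit)` factory used here is an assumption:

    import scala.concurrent.{ Scheduler, Cancellable }
    import scala.util.Duration
    import java.util.concurrent.TimeUnit

    def pingEverySecond(scheduler: Scheduler): Cancellable =
      scheduler.schedule(Duration(1, TimeUnit.SECONDS),   // initial delay
                         Duration(1, TimeUnit.SECONDS)) { // period
        println("ping")
      }

The returned `Cancellable` stops the periodic task via `cancel()`.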
diff --git a/src/library/scala/concurrent/Task.scala b/src/library/scala/concurrent/Task.scala
new file mode 100644
index 0000000000..eb3efbb422
--- /dev/null
+++ b/src/library/scala/concurrent/Task.scala
@@ -0,0 +1,13 @@
+package scala.concurrent
+
+
+
+trait Task[+T] {
+
+ def start(): Unit
+
+ def future: Future[T]
+
+}
+
+
diff --git a/src/library/scala/concurrent/TaskRunner.scala b/src/library/scala/concurrent/TaskRunner.scala
index 64e62adfd3..500d79e07f 100644
--- a/src/library/scala/concurrent/TaskRunner.scala
+++ b/src/library/scala/concurrent/TaskRunner.scala
@@ -12,6 +12,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait TaskRunner {
type Task[T]
diff --git a/src/library/scala/concurrent/TaskRunners.scala b/src/library/scala/concurrent/TaskRunners.scala
index 588073dc5e..7994255b25 100644
--- a/src/library/scala/concurrent/TaskRunners.scala
+++ b/src/library/scala/concurrent/TaskRunners.scala
@@ -14,6 +14,7 @@ import java.util.concurrent.{ThreadPoolExecutor, LinkedBlockingQueue, TimeUnit}
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
object TaskRunners {
implicit val threadRunner: FutureTaskRunner =
diff --git a/src/library/scala/concurrent/ThreadPoolRunner.scala b/src/library/scala/concurrent/ThreadPoolRunner.scala
index 27d8f2cc32..a3e0253634 100644
--- a/src/library/scala/concurrent/ThreadPoolRunner.scala
+++ b/src/library/scala/concurrent/ThreadPoolRunner.scala
@@ -15,6 +15,7 @@ import java.util.concurrent.{ExecutorService, Callable, TimeUnit}
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait ThreadPoolRunner extends FutureTaskRunner {
type Task[T] = Callable[T] with Runnable
diff --git a/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled b/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled
new file mode 100644
index 0000000000..745d2d1a15
--- /dev/null
+++ b/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled
@@ -0,0 +1,44 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+package default
+
+import scala.util.Duration
+
+private[concurrent] final class SchedulerImpl extends Scheduler {
+ private val timer =
+ new java.util.Timer(true) // the associated thread runs as a daemon
+
+ def schedule(delay: Duration, frequency: Duration)(thunk: => Unit): Cancellable = ???
+
+ def scheduleOnce(delay: Duration, task: Runnable): Cancellable = {
+ val timerTask = new java.util.TimerTask {
+ def run(): Unit =
+ task.run()
+ }
+ timer.schedule(timerTask, delay.toMillis)
+ new Cancellable {
+ def cancel(): Unit =
+ timerTask.cancel()
+ }
+ }
+
+ def scheduleOnce(delay: Duration)(task: => Unit): Cancellable = {
+ val timerTask = new java.util.TimerTask {
+ def run(): Unit =
+ task
+ }
+ timer.schedule(timerTask, delay.toMillis)
+ new Cancellable {
+ def cancel(): Unit =
+ timerTask.cancel()
+ }
+ }
+
+}
diff --git a/src/library/scala/concurrent/default/TaskImpl.scala.disabled b/src/library/scala/concurrent/default/TaskImpl.scala.disabled
new file mode 100644
index 0000000000..94e54cb372
--- /dev/null
+++ b/src/library/scala/concurrent/default/TaskImpl.scala.disabled
@@ -0,0 +1,313 @@
+package scala.concurrent
+package default
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+import scala.concurrent.forkjoin.{ ForkJoinPool, RecursiveAction, ForkJoinWorkerThread }
+import scala.util.Try
+import scala.util
+import scala.util.Duration
+import scala.annotation.tailrec
+
+
+
+private[concurrent] trait Completable[T] {
+self: Future[T] =>
+
+ val executor: ExecutionContextImpl
+
+ def newPromise[S]: Promise[S] = executor promise
+
+ type Callback = Try[T] => Any
+
+ def getState: State[T]
+
+ def casState(oldv: State[T], newv: State[T]): Boolean
+
+ protected def dispatch[U](r: Runnable) = executor execute r
+
+ protected def processCallbacks(cbs: List[Callback], r: Try[T]) =
+ for (cb <- cbs) dispatch(new Runnable {
+ override def run() = cb(r)
+ })
+
+ def future: Future[T] = self
+
+ def onComplete[U](callback: Try[T] => U): this.type = {
+ @tailrec def tryAddCallback(): Try[T] = {
+ getState match {
+ case p @ Pending(lst) =>
+ val pt = p.asInstanceOf[Pending[T]]
+ if (casState(pt, Pending(callback :: pt.callbacks))) null
+ else tryAddCallback()
+ case Success(res) => util.Success(res)
+ case Failure(t) => util.Failure(t)
+ }
+ }
+
+ val res = tryAddCallback()
+ if (res != null) dispatch(new Runnable {
+ override def run() =
+ try callback(res)
+ catch handledFutureException andThen {
+ t => Console.err.println(t)
+ }
+ })
+
+ this
+ }
+
+ def isTimedout: Boolean = getState match {
+ case Failure(ft: FutureTimeoutException) => true
+ case _ => false
+ }
+
+}
+
+private[concurrent] class PromiseImpl[T](context: ExecutionContextImpl)
+extends Promise[T] with Future[T] with Completable[T] {
+
+ val executor: scala.concurrent.default.ExecutionContextImpl = context
+
+ @volatile private var state: State[T] = _
+
+ val updater = AtomicReferenceFieldUpdater.newUpdater(classOf[PromiseImpl[T]], classOf[State[T]], "state")
+
+ updater.set(this, Pending(List()))
+
+ def casState(oldv: State[T], newv: State[T]): Boolean = {
+ updater.compareAndSet(this, oldv, newv)
+ }
+
+ def getState: State[T] = {
+ updater.get(this)
+ }
+
+ @tailrec private def tryCompleteState(completed: State[T]): List[Callback] = (getState: @unchecked) match {
+ case p @ Pending(cbs) => if (!casState(p, completed)) tryCompleteState(completed) else cbs
+ case _ => null
+ }
+
+ def tryComplete(r: Try[T]) = r match {
+ case util.Failure(t) => tryFailure(t)
+ case util.Success(v) => trySuccess(v)
+ }
+
+ override def trySuccess(value: T): Boolean = {
+ val cbs = tryCompleteState(Success(value))
+ if (cbs == null)
+ false
+ else {
+ processCallbacks(cbs, util.Success(value))
+ this.synchronized {
+ this.notifyAll()
+ }
+ true
+ }
+ }
+
+ override def tryFailure(t: Throwable): Boolean = {
+ val wrapped = wrap(t)
+ val cbs = tryCompleteState(Failure(wrapped))
+ if (cbs == null)
+ false
+ else {
+ processCallbacks(cbs, util.Failure(wrapped))
+ this.synchronized {
+ this.notifyAll()
+ }
+ true
+ }
+ }
+
+ def await(atMost: Duration)(implicit canawait: scala.concurrent.CanAwait): T = getState match {
+ case Success(res) => res
+ case Failure(t) => throw t
+ case _ =>
+ this.synchronized {
+ while (true)
+ getState match {
+ case Pending(_) => this.wait()
+ case Success(res) => return res
+ case Failure(t) => throw t
+ }
+ }
+ sys.error("unreachable")
+ }
+
+}
+
+private[concurrent] class TaskImpl[T](context: ExecutionContextImpl, body: => T)
+extends RecursiveAction with Task[T] with Future[T] with Completable[T] {
+
+ val executor: ExecutionContextImpl = context
+
+ @volatile private var state: State[T] = _
+
+ val updater = AtomicReferenceFieldUpdater.newUpdater(classOf[TaskImpl[T]], classOf[State[T]], "state")
+
+ updater.set(this, Pending(List()))
+
+ def casState(oldv: State[T], newv: State[T]): Boolean = {
+ updater.compareAndSet(this, oldv, newv)
+ }
+
+ def getState: State[T] = {
+ updater.get(this)
+ }
+
+ @tailrec private def tryCompleteState(completed: State[T]): List[Callback] = (getState: @unchecked) match {
+ case p @ Pending(cbs) => if (!casState(p, completed)) tryCompleteState(completed) else cbs
+ }
+
+ def compute(): Unit = {
+ var cbs: List[Callback] = null
+ try {
+ val res = body
+ processCallbacks(tryCompleteState(Success(res)), util.Success(res))
+ } catch {
+ case t if isFutureThrowable(t) =>
+ processCallbacks(tryCompleteState(Failure(t)), util.Failure(t))
+ case t =>
+ val ee = new ExecutionException(t)
+ processCallbacks(tryCompleteState(Failure(ee)), util.Failure(ee))
+ throw t
+ }
+ }
+
+ def start(): Unit = {
+ Thread.currentThread match {
+ case fj: ForkJoinWorkerThread if fj.getPool eq executor.pool => fork()
+ case _ => executor.pool.execute(this)
+ }
+ }
+
+ // TODO FIXME: handle timeouts
+ def await(atMost: Duration): this.type =
+ await
+
+ def await: this.type = {
+ this.join()
+ this
+ }
+
+ def tryCancel(): Unit =
+ tryUnfork()
+
+ def await(atMost: Duration)(implicit canawait: CanAwait): T = {
+ join() // TODO handle timeout also
+ (updater.get(this): @unchecked) match {
+ case Success(r) => r
+ case Failure(t) => throw t
+ }
+ }
+
+}
+
+
+private[concurrent] sealed abstract class State[T]
+
+
+case class Pending[T](callbacks: List[Try[T] => Any]) extends State[T]
+
+
+case class Success[T](result: T) extends State[T]
+
+
+case class Failure[T](throwable: Throwable) extends State[T]
+
+
+private[concurrent] final class ExecutionContextImpl extends ExecutionContext {
+ import ExecutionContextImpl._
+
+ val pool = {
+ val p = new ForkJoinPool
+ p.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler {
+ def uncaughtException(t: Thread, throwable: Throwable) {
+ Console.err.println(throwable.getMessage)
+ throwable.printStackTrace(Console.err)
+ }
+ })
+ p
+ }
+
+ @inline
+ private def executeTask(task: RecursiveAction) {
+ if (Thread.currentThread.isInstanceOf[ForkJoinWorkerThread])
+ task.fork()
+ else
+ pool execute task
+ }
+
+ def execute(task: Runnable) {
+ val action = new RecursiveAction { def compute() { task.run() } }
+ executeTask(action)
+ }
+
+ def execute[U](body: () => U) {
+ val action = new RecursiveAction { def compute() { body() } }
+ executeTask(action)
+ }
+
+ def task[T](body: => T): Task[T] = {
+ new TaskImpl(this, body)
+ }
+
+ def future[T](body: => T): Future[T] = {
+ val t = task(body)
+ t.start()
+ t.future
+ }
+
+ def promise[T]: Promise[T] =
+ new PromiseImpl[T](this)
+
+ def blocking[T](atMost: Duration)(body: =>T): T = blocking(body2awaitable(body), atMost)
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T = {
+ currentExecutionContext.get match {
+ case null => awaitable.await(atMost)(null) // outside - TODO - fix timeout case
+ case x if x eq this => this.blockingCall(awaitable) // inside an execution context thread on this executor
+ case x => x.blocking(awaitable, atMost)
+ }
+ }
+
+ private def blockingCall[T](b: Awaitable[T]): T = b match {
+ case fj: TaskImpl[_] if fj.executor.pool eq pool =>
+ fj.await(Duration.fromNanos(0))
+ case _ =>
+ var res: T = null.asInstanceOf[T]
+ @volatile var blockingDone = false
+ // TODO add exception handling here!
+ val mb = new ForkJoinPool.ManagedBlocker {
+ def block() = {
+ res = b.await(Duration.fromNanos(0))(CanAwaitEvidence)
+ blockingDone = true
+ true
+ }
+ def isReleasable = blockingDone
+ }
+ ForkJoinPool.managedBlock(mb, true)
+ res
+ }
+
+ def reportFailure(t: Throwable): Unit = {}
+
+}
+
+
+object ExecutionContextImpl {
+
+ private[concurrent] val currentExecutionContext: ThreadLocal[ExecutionContext] = new ThreadLocal[ExecutionContext] {
+ override protected def initialValue = null
+ }
+
+}
+
+
+
+
+
+
+
diff --git a/src/library/scala/concurrent/impl/AbstractPromise.java b/src/library/scala/concurrent/impl/AbstractPromise.java
new file mode 100644
index 0000000000..5280d67854
--- /dev/null
+++ b/src/library/scala/concurrent/impl/AbstractPromise.java
@@ -0,0 +1,21 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl;
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+
+
+abstract class AbstractPromise {
+ private volatile Object _ref = null;
+ protected final static AtomicReferenceFieldUpdater<AbstractPromise, Object> updater =
+ AtomicReferenceFieldUpdater.newUpdater(AbstractPromise.class, Object.class, "_ref");
+}
diff --git a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
new file mode 100644
index 0000000000..7984aa02b7
--- /dev/null
+++ b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
@@ -0,0 +1,139 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+
+
+import java.util.concurrent.{Callable, ExecutorService}
+import scala.concurrent.forkjoin._
+import scala.concurrent.{ExecutionContext, resolver, Awaitable, body2awaitable}
+import scala.util.{ Duration, Try, Success, Failure }
+import scala.collection.mutable.Stack
+
+
+
+class ExecutionContextImpl(val executorService: AnyRef) extends ExecutionContext {
+ import ExecutionContextImpl._
+
+ def execute(runnable: Runnable): Unit = executorService match {
+ case fj: ForkJoinPool =>
+ if (Thread.currentThread.isInstanceOf[ForkJoinWorkerThread]) {
+ val fjtask = ForkJoinTask.adapt(runnable)
+ fjtask.fork
+ } else {
+ fj.execute(runnable)
+ }
+ case executorService: ExecutorService =>
+ executorService execute runnable
+ }
+
+ def execute[U](body: () => U): Unit = execute(new Runnable {
+ def run() = body()
+ })
+
+ def promise[T]: Promise[T] = new Promise.DefaultPromise[T]()(this)
+
+ def future[T](body: =>T): Future[T] = {
+ val p = promise[T]
+
+ dispatchFuture {
+ () =>
+ p complete {
+ try {
+ Success(body)
+ } catch {
+ case e => resolver(e)
+ }
+ }
+ }
+
+ p.future
+ }
+
+ def blocking[T](atMost: Duration)(body: =>T): T = blocking(body2awaitable(body), atMost)
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T = {
+ currentExecutionContext.get match {
+ case null => awaitable.await(atMost)(null) // outside - TODO - fix timeout case
+ case x => x.blockingCall(awaitable) // inside an execution context thread
+ }
+ }
+
+ def reportFailure(t: Throwable) = t match {
+ case e: Error => throw e // rethrow serious errors
+ case t => t.printStackTrace()
+ }
+
+ /** Only callable from the tasks running on the same execution context. */
+ private def blockingCall[T](body: Awaitable[T]): T = {
+ releaseStack()
+
+ // TODO see what to do with timeout
+ body.await(Duration.fromNanos(0))(CanAwaitEvidence)
+ }
+
+ // an optimization for batching futures
+ // TODO we should replace this with a public queue,
+ // so that it can be stolen from
+ // OR: a push to the local task queue should be so cheap that this is
+ // not even needed, but stealing is still possible
+ private val _taskStack = new ThreadLocal[Stack[() => Unit]]()
+
+ private def releaseStack(): Unit =
+ _taskStack.get match {
+ case stack if (stack ne null) && stack.nonEmpty =>
+ val tasks = stack.elems
+ stack.clear()
+ _taskStack.remove()
+ dispatchFuture(() => _taskStack.get.elems = tasks, true)
+ case null =>
+ // do nothing - there is no local batching stack anymore
+ case _ =>
+ _taskStack.remove()
+ }
+
+ private[impl] def dispatchFuture(task: () => Unit, force: Boolean = false): Unit =
+ _taskStack.get match {
+ case stack if (stack ne null) && !force => stack push task
+ case _ => this.execute(
+ new Runnable {
+ def run() {
+ try {
+ val taskStack = Stack[() => Unit](task)
+ _taskStack set taskStack
+ while (taskStack.nonEmpty) {
+ val next = taskStack.pop()
+ try {
+ next.apply()
+ } catch {
+ case e =>
+ // TODO catching all and continue isn't good for OOME
+ reportFailure(e)
+ }
+ }
+ } finally {
+ _taskStack.remove()
+ }
+ }
+ }
+ )
+ }
+
+}
+
+
+object ExecutionContextImpl {
+
+ private[concurrent] val currentExecutionContext: ThreadLocal[ExecutionContextImpl] = new ThreadLocal[ExecutionContextImpl] {
+ override protected def initialValue = null
+ }
+
+}
+
+
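The `_taskStack` machinery above is a trampoline: callbacks dispatched while another callback is running are pushed onto a thread-local stack and drained in a loop on the same thread, rather than each being submitted to the pool. A stripped-down sketch of the same idea (omitting the `force` flag and the stack handover done by `releaseStack`):

    import scala.collection.mutable.Stack

    object Trampoline {
      private val local = new ThreadLocal[Stack[() => Unit]]

      def dispatch(task: () => Unit, executeOnPool: Runnable => Unit): Unit =
        local.get match {
          case stack if stack ne null =>
            stack push task  // batched: the draining loop below runs it
          case _ => executeOnPool(new Runnable {
            def run() {
              val stack = Stack[() => Unit](task)
              local set stack
              try while (stack.nonEmpty) stack.pop().apply()
              finally local.remove()
            }
          })
        }
    }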
diff --git a/src/library/scala/concurrent/impl/Future.scala b/src/library/scala/concurrent/impl/Future.scala
new file mode 100644
index 0000000000..9466761d4d
--- /dev/null
+++ b/src/library/scala/concurrent/impl/Future.scala
@@ -0,0 +1,89 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+import scala.concurrent.{Awaitable, ExecutionContext}
+import scala.util.{ Try, Success, Failure }
+//import scala.util.continuations._
+
+trait Future[+T] extends scala.concurrent.Future[T] with Awaitable[T] {
+
+ implicit def executor: ExecutionContextImpl
+
+ /** For use only within a Future.flow block or another compatible Delimited Continuations reset block.
+ *
+ * Returns the result of this Future without blocking, by suspending execution and storing it as a
+ * continuation until the result is available.
+ */
+ //def apply(): T @cps[Future[Any]] = shift(this flatMap (_: T => Future[Any]))
+
+ /** Tests whether this Future has been completed.
+ */
+ final def isCompleted: Boolean = value.isDefined
+
+ /** The contained value of this Future. Before this Future is completed
+ * the value will be None. After completion the value will be Some(Right(t))
+ * if it contains a valid result, or Some(Left(error)) if it contains
+ * an exception.
+ */
+ def value: Option[Try[T]]
+
+ def onComplete[U](func: Try[T] => U): this.type
+
+ /** Creates a new Future[A] which is completed with this Future's result if
+ * that conforms to A's erased type or a ClassCastException otherwise.
+ */
+ final def mapTo[T](implicit m: Manifest[T]) = {
+ val p = executor.promise[T]
+
+ onComplete {
+ case f @ Failure(t) => p complete f.asInstanceOf[Try[T]]
+ case Success(v) =>
+ p complete (try {
+ Success(Future.boxedType(m.erasure).cast(v).asInstanceOf[T])
+ } catch {
+ case e: ClassCastException => Failure(e)
+ })
+ }
+
+ p.future
+ }
+
+ /** Used by for-comprehensions.
+ */
+ final def withFilter(p: T => Boolean) = new FutureWithFilter[T](this, p)
+
+ final class FutureWithFilter[+A](self: Future[A], p: A => Boolean) {
+ def foreach(f: A => Unit): Unit = self filter p foreach f
+ def map[B](f: A => B) = self filter p map f
+ def flatMap[B](f: A => Future[B]) = self filter p flatMap f
+ def withFilter(q: A => Boolean): FutureWithFilter[A] = new FutureWithFilter[A](self, x => p(x) && q(x))
+ }
+
+}
+
+object Future {
+ import java.{ lang => jl }
+
+ private val toBoxed = Map[Class[_], Class[_]](
+ classOf[Boolean] -> classOf[jl.Boolean],
+ classOf[Byte] -> classOf[jl.Byte],
+ classOf[Char] -> classOf[jl.Character],
+ classOf[Short] -> classOf[jl.Short],
+ classOf[Int] -> classOf[jl.Integer],
+ classOf[Long] -> classOf[jl.Long],
+ classOf[Float] -> classOf[jl.Float],
+ classOf[Double] -> classOf[jl.Double],
+ classOf[Unit] -> classOf[scala.runtime.BoxedUnit]
+ )
+
+ def boxedType(c: Class[_]): Class[_] = {
+ if (c.isPrimitive) toBoxed(c) else c
+ }
+}
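The `boxedType` helper exists because the erased class of a primitive differs from the runtime class of its boxed value, so a naked `Class.cast` would fail for `Int`, `Long`, and friends. A quick sketch, with return values shown in comments:

    import scala.concurrent.impl.Future

    Future.boxedType(classOf[Int])    // classOf[java.lang.Integer]
    Future.boxedType(classOf[String]) // classOf[String], unchanged
    val n: Any = 5
    Future.boxedType(classOf[Int]).cast(n) // succeeds: n is boxed at runtime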
diff --git a/src/library/scala/concurrent/impl/Promise.scala b/src/library/scala/concurrent/impl/Promise.scala
new file mode 100644
index 0000000000..0087b71ea8
--- /dev/null
+++ b/src/library/scala/concurrent/impl/Promise.scala
@@ -0,0 +1,258 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+
+
+import java.util.concurrent.TimeUnit.{ NANOSECONDS, MILLISECONDS }
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+import scala.concurrent.{Awaitable, ExecutionContext, resolve, resolver, blocking, CanAwait, TimeoutException}
+//import scala.util.continuations._
+import scala.util.Duration
+import scala.util.Try
+import scala.util
+import scala.annotation.tailrec
+//import scala.concurrent.NonDeterministic
+
+
+
+trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
+
+ def future = this
+
+ def newPromise[S]: Promise[S] = executor promise
+
+ // TODO refine answer and return types here from Any to type parameters
+ // then move this up in the hierarchy
+ /*
+ final def <<(value: T): Future[T] @cps[Future[Any]] = shift {
+ cont: (Future[T] => Future[Any]) =>
+ cont(complete(Right(value)))
+ }
+
+ final def <<(other: Future[T]): Future[T] @cps[Future[Any]] = shift {
+ cont: (Future[T] => Future[Any]) =>
+ val p = executor.promise[Any]
+ val thisPromise = this
+
+ thisPromise completeWith other
+ thisPromise onComplete { v =>
+ try {
+ p completeWith cont(thisPromise)
+ } catch {
+ case e => p complete resolver(e)
+ }
+ }
+
+ p.future
+ }
+ */
+ // TODO finish this once we introduce something like dataflow streams
+
+ /*
+ final def <<(stream: PromiseStreamOut[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] => Future[Any]) =>
+ val fr = executor.promise[Any]
+ val f = stream.dequeue(this)
+ f.onComplete { _ =>
+ try {
+ fr completeWith cont(f)
+ } catch {
+ case e =>
+ fr failure e
+ }
+ }
+ fr
+ }
+ */
+
+}
+
+
+object Promise {
+ def dur2long(dur: Duration): Long = if (dur.isFinite) dur.toNanos else Long.MaxValue
+
+ def EmptyPending[T](): FState[T] = emptyPendingValue.asInstanceOf[FState[T]]
+
+ /** Represents the internal state.
+ *
+ * [adriaan] it's unsound to make FState covariant (tryComplete won't type check)
+ */
+ sealed trait FState[T] { def value: Option[Try[T]] }
+
+ case class Pending[T](listeners: List[Try[T] => Any] = Nil) extends FState[T] {
+ def value: Option[Try[T]] = None
+ }
+
+ case class Success[T](value: Option[util.Success[T]] = None) extends FState[T] {
+ def result: T = value.get.get
+ }
+
+ case class Failure[T](value: Option[util.Failure[T]] = None) extends FState[T] {
+ def exception: Throwable = value.get.exception
+ }
+
+ private val emptyPendingValue = Pending[Nothing](Nil)
+
+ /** Default promise implementation.
+ */
+ class DefaultPromise[T](implicit val executor: ExecutionContextImpl) extends AbstractPromise with Promise[T] {
+ self =>
+
+ updater.set(this, Promise.EmptyPending())
+
+ protected final def tryAwait(atMost: Duration): Boolean = {
+ @tailrec
+ def awaitUnsafe(waitTimeNanos: Long): Boolean = {
+ if (value.isEmpty && waitTimeNanos > 0) {
+ val ms = NANOSECONDS.toMillis(waitTimeNanos)
+ val ns = (waitTimeNanos % 1000000L).toInt // as per object.wait spec
+ val start = System.nanoTime()
+ try {
+ synchronized {
+ while (value.isEmpty) wait(ms, ns)
+ }
+ } catch {
+ case e: InterruptedException =>
+ }
+
+ awaitUnsafe(waitTimeNanos - (System.nanoTime() - start))
+ } else
+ value.isDefined
+ }
+
+ executor.blocking(concurrent.body2awaitable(awaitUnsafe(dur2long(atMost))), Duration.fromNanos(0))
+ }
+
+ private def ready(atMost: Duration)(implicit permit: CanAwait): this.type =
+ if (value.isDefined || tryAwait(atMost)) this
+ else throw new TimeoutException("Futures timed out after [" + atMost.toMillis + "] milliseconds")
+
+ def await(atMost: Duration)(implicit permit: CanAwait): T =
+ ready(atMost).value.get match {
+ case util.Failure(e) => throw e
+ case util.Success(r) => r
+ }
+
+ def value: Option[Try[T]] = getState.value
+
+ @inline
+ private[this] final def updater = AbstractPromise.updater.asInstanceOf[AtomicReferenceFieldUpdater[AbstractPromise, FState[T]]]
+
+ @inline
+ protected final def updateState(oldState: FState[T], newState: FState[T]): Boolean = updater.compareAndSet(this, oldState, newState)
+
+ @inline
+ protected final def getState: FState[T] = updater.get(this)
+
+ def tryComplete(value: Try[T]): Boolean = {
+ val callbacks: List[Try[T] => Any] = {
+ try {
+ @tailrec
+ def tryComplete(v: Try[T]): List[Try[T] => Any] = {
+ getState match {
+ case cur @ Pending(listeners) =>
+ val newState =
+ if (v.isFailure) Failure(Some(v.asInstanceOf[util.Failure[T]]))
+ else Success(Some(v.asInstanceOf[util.Success[T]]))
+
+ if (updateState(cur, newState)) listeners
+ else tryComplete(v)
+ case _ => null
+ }
+ }
+ tryComplete(resolve(value))
+ } finally {
+ synchronized { notifyAll() } // notify any blockers from `tryAwait`
+ }
+ }
+
+ callbacks match {
+ case null => false
+ case cs if cs.isEmpty => true
+ case cs =>
+ executor dispatchFuture {
+ () => cs.foreach(f => notifyCompleted(f, value))
+ }
+ true
+ }
+ }
+
+ def onComplete[U](func: Try[T] => U): this.type = {
+ @tailrec // Returns whether the future has already been completed or not
+ def tryAddCallback(): Boolean = {
+ val cur = getState
+ cur match {
+ case _: Success[_] | _: Failure[_] => true
+ case p: Pending[_] =>
+ val pt = p.asInstanceOf[Pending[T]]
+ if (updateState(pt, pt.copy(listeners = func :: pt.listeners))) false else tryAddCallback()
+ }
+ }
+
+ if (tryAddCallback()) {
+ val result = value.get
+ executor dispatchFuture {
+ () => notifyCompleted(func, result)
+ }
+ }
+
+ this
+ }
+
+ private final def notifyCompleted(func: Try[T] => Any, result: Try[T]) {
+ try {
+ func(result)
+ } catch {
+ case e => executor.reportFailure(e)
+ }
+ }
+ }
+
+ /** An already completed Future is given its result at creation.
+ *
+ * Useful in Future-composition when a value to contribute is already available.
+ */
+ final class KeptPromise[T](suppliedValue: Try[T])(implicit val executor: ExecutionContextImpl) extends Promise[T] {
+ val value = Some(resolve(suppliedValue))
+
+ def tryComplete(value: Try[T]): Boolean = false
+
+ def onComplete[U](func: Try[T] => U): this.type = {
+ val completedAs = value.get
+ executor dispatchFuture {
+ () => func(completedAs)
+ }
+ this
+ }
+
+ private def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this
+
+ def await(atMost: Duration)(implicit permit: CanAwait): T = value.get match {
+ case util.Failure(e) => throw e
+ case util.Success(r) => r
+ }
+ }
+
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
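To make the completion semantics concrete: `tryComplete` performs the `Pending -> Success/Failure` transition through a single CAS, so of two racing completers exactly one observes `true` (first-writer-wins). A small sketch against any `scala.concurrent.Promise[Int]`:

    import scala.util.Success

    def race(p: scala.concurrent.Promise[Int]): Unit = {
      val a = p tryComplete Success(1) // true: won the CAS on Pending
      val b = p tryComplete Success(2) // false: state already completed
      println((a, b))                  // (true, false)
    }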
diff --git a/src/library/scala/concurrent/ops.scala b/src/library/scala/concurrent/ops.scala
index 92220a8313..2cea29aefe 100644
--- a/src/library/scala/concurrent/ops.scala
+++ b/src/library/scala/concurrent/ops.scala
@@ -15,6 +15,7 @@ import scala.util.control.Exception.allCatch
*
* @author Martin Odersky, Stepan Koltsov, Philipp Haller
*/
+@deprecated("Use `future` instead.", "2.10.0")
object ops
{
val defaultRunner: FutureTaskRunner = TaskRunners.threadRunner
diff --git a/src/library/scala/concurrent/package.scala b/src/library/scala/concurrent/package.scala
new file mode 100644
index 0000000000..7cc48c09b2
--- /dev/null
+++ b/src/library/scala/concurrent/package.scala
@@ -0,0 +1,57 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala
+
+import scala.util.{ Duration, Try, Success, Failure }
+
+/** This package object contains primitives for concurrent and parallel programming.
+ */
+package object concurrent extends scala.concurrent.ConcurrentPackageObject {
+ type ExecutionException = java.util.concurrent.ExecutionException
+ type CancellationException = java.util.concurrent.CancellationException
+ type TimeoutException = java.util.concurrent.TimeoutException
+}
+
+package concurrent {
+ object await {
+ def ready[T](atMost: Duration)(awaitable: Awaitable[T])(implicit execCtx: ExecutionContext = executionContext): Awaitable[T] = {
+ try blocking(awaitable, atMost)
+ catch { case _ => }
+ awaitable
+ }
+
+ def result[T](atMost: Duration)(awaitable: Awaitable[T])(implicit execCtx: ExecutionContext = executionContext): T = {
+ blocking(awaitable, atMost)
+ }
+ }
+
+ /** Importing this object allows using some concurrency primitives
+ * on futures and promises that can yield nondeterministic programs.
+ *
+ * While program determinism is broken when using these primitives,
+ * some programs cannot be written without them (e.g. multiple client threads
+ * cannot send requests to a server thread through regular promises and futures).
+ */
+ object nondeterministic { }
+
+ /** A timeout exception.
+ *
+ * Futures are failed with a timeout exception when their timeout expires.
+ *
+ * Each timeout exception contains an origin future which originally timed out.
+ */
+ class FutureTimeoutException(origin: Future[_], message: String) extends TimeoutException(message) {
+ def this(origin: Future[_]) = this(origin, "Future timed out.")
+ }
+
+ final class DurationOps private[concurrent] (x: Int) {
+ // TODO ADD OTHERS
+ def ns = util.Duration.fromNanos(x)
+ }
+}
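A sketch of the intended call pattern for `await` (assuming `future` from the package object and `Duration`'s string-unit factory in `scala.util`):

    import scala.concurrent._
    import scala.util.Duration

    val f = future { 21 * 2 }                        // any Awaitable will do
    await.ready(Duration(5, "seconds"))(f)           // block until completed or timed out
    val n = await.result(Duration(5, "seconds"))(f)  // block and return the value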
diff --git a/src/library/scala/concurrent/package.scala.disabled b/src/library/scala/concurrent/package.scala.disabled
deleted file mode 100644
index 42b4bf954c..0000000000
--- a/src/library/scala/concurrent/package.scala.disabled
+++ /dev/null
@@ -1,108 +0,0 @@
-/* __ *\
-** ________ ___ / / ___ Scala API **
-** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
-** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
-** /____/\___/_/ |_/____/_/ | | **
-** |/ **
-\* */
-
-
-
-package scala
-
-
-
-
-/** This package object contains primitives for parallel programming.
- */
-package object concurrent {
-
- /** Performs a call which can potentially block execution.
- *
- * Example:
- * {{{
- * val lock = new ReentrantLock
- *
- * // ... do something ...
- *
- * blocking {
- * if (!lock.hasLock) lock.lock()
- * }
- * }}}
- *
- * '''Note:''' calling methods that wait arbitrary amounts of time
- * (e.g. for I/O operations or locks) may severely decrease performance
- * or even result in deadlocks. This does not include waiting for
- * results of futures.
- *
- * @tparam T the result type of the blocking operation
- * @param body the blocking operation
- * @param runner the runner used for parallel computations
- * @return the result of the potentially blocking operation
- */
- def blocking[T](body: =>T)(implicit runner: TaskRunner): T = {
- null.asInstanceOf[T]
- }
-
- /** Invokes a computation asynchronously. Does not wait for the computation
- * to finish.
- *
- * @tparam U the result type of the operation
- * @param p the computation to be invoked asynchronously
- * @param runner the runner used for parallel computations
- */
- def spawn[U](p: =>U)(implicit runner: TaskRunner): Unit = {
- }
-
- /** Starts 2 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2](b1: =>T1)(b2: =>T2)(implicit runner: TaskRunner): (T1, T2) = {
- null
- }
-
- /** Starts 3 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @tparam T3 the type of the result of 3rd the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param b3 the 3rd computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2, T3](b1: =>T1)(b2: =>T2)(b3: =>T3)(implicit runner: TaskRunner): (T1, T2, T3) = {
- null
- }
-
- /** Starts 4 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @tparam T3 the type of the result of 3rd the parallel computation
- * @tparam T4 the type of the result of 4th the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param b3 the 3rd computation to be invoked in parallel
- * @param b4 the 4th computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2, T3, T4](b1: =>T1)(b2: =>T2)(b3: =>T3)(b4: =>T4)(implicit runner: TaskRunner): (T1, T2, T3, T4) = {
- null
- }
-
-}
diff --git a/src/library/scala/package.scala b/src/library/scala/package.scala
index 0c5d10b15e..366af34ee9 100644
--- a/src/library/scala/package.scala
+++ b/src/library/scala/package.scala
@@ -27,6 +27,12 @@ package object scala {
type NoSuchElementException = java.util.NoSuchElementException
type NumberFormatException = java.lang.NumberFormatException
type AbstractMethodError = java.lang.AbstractMethodError
+ type InterruptedException = java.lang.InterruptedException
+
+ // A dummy used by the specialization annotation.
+ val AnyRef = new Specializable {
+ override def toString = "object AnyRef"
+ }
@deprecated("instead of `@serializable class C`, use `class C extends Serializable`", "2.9.0")
type serializable = annotation.serializable
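With `AnyRef` now a `Specializable` dummy it can appear directly in `@specialized` lists, as the `AbstractFunction1` change later in this patch does; a hypothetical example:

    // T may now be specialized for references as well as for Int.
    class Box[@specialized(Int, AnyRef) T](val value: T)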
diff --git a/src/library/scala/reflect/ClassManifest.scala b/src/library/scala/reflect/ClassManifest.scala
index 466b57dea7..d393ac47fa 100644
--- a/src/library/scala/reflect/ClassManifest.scala
+++ b/src/library/scala/reflect/ClassManifest.scala
@@ -46,7 +46,9 @@ trait ClassManifest[T] extends OptManifest[T] with Equals with Serializable {
/** The Scala type described by this manifest.
*/
- lazy val tpe: mirror.Type = reflect.mirror.classToType(erasure)
+ lazy val tpe: mirror.Type = mirror.classToType(erasure)
+
+ def symbol: mirror.Symbol = mirror.classToSymbol(erasure)
private def subtype(sub: jClass[_], sup: jClass[_]): Boolean = {
def loop(left: Set[jClass[_]], seen: Set[jClass[_]]): Boolean = {
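A sketch of the new surface: a `ClassManifest` now exposes the mirror `Symbol` alongside the type (using the companion's `fromClass` factory):

    import scala.reflect.ClassManifest

    val m = ClassManifest.fromClass(classOf[String])
    m.tpe     // the mirror Type for java.lang.String
    m.symbol  // the corresponding mirror Symbol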
diff --git a/src/library/scala/reflect/Code.scala b/src/library/scala/reflect/Code.scala
deleted file mode 100644
index 52705d302c..0000000000
--- a/src/library/scala/reflect/Code.scala
+++ /dev/null
@@ -1,23 +0,0 @@
-/* __ *\
-** ________ ___ / / ___ Scala API **
-** / __/ __// _ | / / / _ | (c) 2002-2011, LAMP/EPFL **
-** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
-** /____/\___/_/ |_/____/_/ | | **
-** |/ **
-\* */
-
-
-
-package scala.reflect
-
-/** This type is required by the compiler and <b>should not be used in client code</b>. */
-class Code[T: Manifest](val tree: scala.reflect.mirror.Tree) {
- val manifest = implicitly[Manifest[T]]
- override def toString = "Code(tree = "+tree+", manifest = "+manifest+")"
-}
-
-/** This type is required by the compiler and <b>should not be used in client code</b>. */
-object Code {
- def lift[A](tree: A): Code[A] =
- throw new Error("Code was not lifted by compiler")
-}
diff --git a/src/library/scala/reflect/Manifest.scala b/src/library/scala/reflect/Manifest.scala
index 8bd45c0e33..e5df487be9 100644
--- a/src/library/scala/reflect/Manifest.scala
+++ b/src/library/scala/reflect/Manifest.scala
@@ -76,6 +76,8 @@ abstract class AnyValManifest[T <: AnyVal](override val toString: String) extend
* in client code.
*/
object Manifest {
+ import mirror.{ definitions => mdefs }
+
def valueManifests: List[AnyValManifest[_]] =
List(Byte, Short, Char, Int, Long, Float, Double, Boolean, Unit)
@@ -152,34 +154,40 @@ object Manifest {
}
val Any: Manifest[scala.Any] = new PhantomManifest[scala.Any]("Any") {
+ override def symbol = mdefs.AnyClass
override def <:<(that: ClassManifest[_]): Boolean = (that eq this)
private def readResolve(): Any = Manifest.Any
}
val Object: Manifest[java.lang.Object] = new PhantomManifest[java.lang.Object]("Object") {
+ override def symbol = mdefs.ObjectClass
override def <:<(that: ClassManifest[_]): Boolean = (that eq this) || (that eq Any)
private def readResolve(): Any = Manifest.Object
}
val AnyVal: Manifest[scala.AnyVal] = new PhantomManifest[scala.AnyVal]("AnyVal") {
+ override def symbol = mdefs.AnyValClass
override def <:<(that: ClassManifest[_]): Boolean = (that eq this) || (that eq Any)
private def readResolve(): Any = Manifest.AnyVal
}
val Null: Manifest[scala.Null] = new PhantomManifest[scala.Null]("Null") {
+ override def symbol = mdefs.NullClass
override def <:<(that: ClassManifest[_]): Boolean =
(that ne null) && (that ne Nothing) && !(that <:< AnyVal)
private def readResolve(): Any = Manifest.Null
}
val Nothing: Manifest[scala.Nothing] = new PhantomManifest[scala.Nothing]("Nothing") {
+ override def symbol = mdefs.NothingClass
override def <:<(that: ClassManifest[_]): Boolean = (that ne null)
private def readResolve(): Any = Manifest.Nothing
}
private class SingletonTypeManifest[T <: AnyRef](value: AnyRef) extends Manifest[T] {
lazy val erasure = value.getClass
- override lazy val tpe = mirror.SingleType(mirror.NoPrefix, InstanceRefSymbol(value)) // todo: change to freevar
+ override lazy val symbol = InstanceRefSymbol(value) // todo: change to freevar
+ override lazy val tpe = mirror.SingleType(mirror.NoPrefix, symbol)
override lazy val toString = value.toString + ".type"
}
@@ -208,8 +216,12 @@ object Manifest {
def classType[T](prefix: Manifest[_], clazz: Predef.Class[_], args: Manifest[_]*): Manifest[T] =
new ClassTypeManifest[T](Some(prefix), clazz, args.toList)
+ /** Phantom types have no runtime representation; they all erase to Object,
+ * but the Symbol preserves their identity.
+ */
private abstract class PhantomManifest[T](override val toString: String) extends ClassTypeManifest[T](None, classOf[java.lang.Object], Nil) {
- override def equals(that: Any): Boolean = this eq that.asInstanceOf[AnyRef]
+ override lazy val tpe = namedType(mirror.NoPrefix, symbol, Nil)
+ override def equals(that: Any) = this eq that.asInstanceOf[AnyRef]
override val hashCode = System.identityHashCode(this)
}
@@ -218,13 +230,13 @@ object Manifest {
private class ClassTypeManifest[T](prefix: Option[Manifest[_]],
val erasure: Predef.Class[_],
override val typeArguments: List[Manifest[_]]) extends Manifest[T] {
+
override lazy val tpe = {
- val clazz = classToSymbol(erasure)
val pre = prefix match {
case Some(pm) => pm.tpe
- case None => clazz.owner.thisType
+ case None => symbol.owner.thisPrefix
}
- namedType(pre, clazz, typeArguments map (_.tpe))
+ namedType(pre, symbol, typeArguments map (_.tpe))
}
override def toString =
@@ -282,8 +294,9 @@ object Manifest {
* instead of in scala-compiler.jar.
*/
def apply[T](_tpe: mirror.Type): Manifest[T] = new Manifest[T] {
+ override def symbol = _tpe.typeSymbol
override lazy val tpe = _tpe
- override def erasure = mirror.typeToClass(_tpe.erasedType)
+ override def erasure = mirror.typeToClass(_tpe.erasedType)
override def toString = _tpe.toString
}
}
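The phantom manifests all share the same erasure but remain distinguishable through their symbols and identity-based equality:

    Manifest.Any.erasure == Manifest.AnyVal.erasure  // true: both erase to Object
    Manifest.Any == Manifest.AnyVal                  // false: equals is identity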
diff --git a/src/library/scala/reflect/ReflectionUtils.scala b/src/library/scala/reflect/ReflectionUtils.scala
index dfadfb4976..510f0819c6 100644
--- a/src/library/scala/reflect/ReflectionUtils.scala
+++ b/src/library/scala/reflect/ReflectionUtils.scala
@@ -29,13 +29,13 @@ object ReflectionUtils {
def singletonInstance(className: String, cl: ClassLoader = getClass.getClassLoader): AnyRef = {
val name = if (className endsWith "$") className else className + "$"
- val clazz = java.lang.Class.forName(name, true, cl)
+ val clazz = java.lang.Class.forName(name, true, cl)
val singleton = clazz getField "MODULE$" get null
singleton
}
// Retrieves the MODULE$ field for the given class name.
- def singletonInstanceOpt(className: String, cl: ClassLoader = getClass.getClassLoader): Option[AnyRef] =
+ def singletonInstanceOpt(className: String, cl: ClassLoader = getClass.getClassLoader): Option[AnyRef] =
try Some(singletonInstance(className, cl))
catch { case _: ClassNotFoundException => None }
}
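Usage sketch for the two helpers; only the `Opt` variant swallows a missing class:

    import scala.reflect.ReflectionUtils

    val predef  = ReflectionUtils.singletonInstance("scala.Predef")      // throws if absent
    val missing = ReflectionUtils.singletonInstanceOpt("no.such.Object") // None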
diff --git a/src/library/scala/reflect/api/Mirror.scala b/src/library/scala/reflect/api/Mirror.scala
index 136f52b05f..cea9e1a37d 100644
--- a/src/library/scala/reflect/api/Mirror.scala
+++ b/src/library/scala/reflect/api/Mirror.scala
@@ -3,57 +3,59 @@ package api
/** A mirror establishes connections of
* runtime entities such as class names and object instances
- * with a refexive universe.
+ * with a reflexive universe.
*/
trait Mirror extends Universe with RuntimeTypes with TreeBuildUtil {
/** The Scala class symbol that has given fully qualified name
* @param name The fully qualified name of the class to be returned
- * @throws java.lang.ClassNotFoundException if no class wiht that name exists
+ * @throws java.lang.ClassNotFoundException if no class with that name exists
* to do: throws anything else?
*/
- def classWithName(name: String): Symbol
-
- /** Return a reference to the companion object of this class symbol
+ def symbolForName(name: String): Symbol
+
+ /** Return a reference to the companion object of the given class symbol.
*/
- def getCompanionObject(clazz: Symbol): AnyRef
-
- /** The Scala class symbol corresponding to the runtime class of given object
- * @param The object from which the class is returned
+ def companionInstance(clazz: Symbol): AnyRef
+
+ /** The Scala class symbol corresponding to the runtime class of the given instance.
+ * @param instance The instance
+ * @return The class Symbol for the instance
* @throws ?
*/
- def getClass(obj: AnyRef): Symbol
+ def symbolOfInstance(instance: Any): Symbol
- /** The Scala type corresponding to the runtime type of given object.
+ /** The Scala type corresponding to the runtime type of given instance.
* If the underlying class is parameterized, this will be an existential type,
* with unknown type arguments.
*
- * @param The object from which the type is returned
+ * @param instance The instance.
+ * @return The Type of the given instance.
* @throws ?
*/
- def getType(obj: AnyRef): Type
+ def typeOfInstance(instance: Any): Type
/** The value of a field on a receiver instance.
* @param receiver The receiver instance
* @param field The field
* @return The value contained in `receiver.field`.
*/
- def getValue(receiver: AnyRef, field: Symbol): Any
+ def getValueOfField(receiver: AnyRef, field: Symbol): Any
/** Sets the value of a field on a receiver instance.
* @param receiver The receiver instance
* @param field The field
* @param value The new value to be stored in the field.
*/
- def setValue(receiver: AnyRef, field: Symbol, value: Any): Unit
+ def setValueOfField(receiver: AnyRef, field: Symbol, value: Any): Unit
- /** Invokes a method on a reciver instance with some arguments
+ /** Invokes a method on a receiver instance with some arguments
* @param receiver The receiver instance
* @param meth The method
* @param args The method call's arguments
* @return The result of invoking `receiver.meth(args)`
*/
- def invoke(receiver: AnyRef, meth: Symbol, args: Any*): Any
+ def invoke(receiver: AnyRef, meth: Symbol)(args: Any*): Any
/** Maps a Java class to a Scala type reference
* @param clazz The Java class object
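A hedged sketch of the renamed operations, with the signatures declared above (assuming the runtime `mirror` value from the `scala.reflect` package object):

    import scala.reflect.mirror

    val listSym = mirror.symbolForName("scala.collection.immutable.List")
    val listObj = mirror.companionInstance(listSym)  // the List companion object
    val strSym  = mirror.symbolOfInstance("abc")     // class symbol of a String
    val strTpe  = mirror.typeOfInstance("abc")       // its (possibly existential) type
    // invoke now takes the arguments in a second parameter list:
    //   mirror.invoke(receiver, method)(args: _*)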
diff --git a/src/library/scala/reflect/api/Modifier.scala b/src/library/scala/reflect/api/Modifier.scala
index 8569b103cf..cbfe91e59b 100644
--- a/src/library/scala/reflect/api/Modifier.scala
+++ b/src/library/scala/reflect/api/Modifier.scala
@@ -1,11 +1,82 @@
package scala.reflect.api
-object Modifier extends Enumeration {
+import collection.{ immutable, mutable }
- val `protected`, `private`, `override`, `abstract`, `final`,
- `sealed`, `implicit`, `lazy`, `macro`, `case`, `trait`,
- deferred, interface, mutable, parameter, covariant, contravariant,
- preSuper, abstractOverride, local, java, static, caseAccessor,
- defaultParameter, defaultInit, paramAccessor, bynameParameter = Value
+sealed abstract class Modifier {
+ def name: String
+ def isKeyword: Boolean
+ def sourceString: String = if (isKeyword) "`" + name + "`" else name
+ override def equals(that: Any) = this eq that.asInstanceOf[AnyRef]
+ override def hashCode = name.hashCode
+ override def toString = name
+}
+final class SymbolModifier private (val name: String, val isKeyword: Boolean) extends Modifier {
+ def this(name: String) = this(name, false)
+}
+final class SourceModifier private (val name: String) extends Modifier {
+ def isKeyword = true
+}
+
+object SymbolModifier {
+ private val seen = mutable.ListBuffer[SymbolModifier]()
+ private[api] def apply(name: String): SymbolModifier = {
+ val mod = name match {
+ case "case" | "trait" => new SymbolModifier(name, isKeyword = true)
+ case _ => new SymbolModifier(name)
+ }
+ seen += mod
+ mod
+ }
+ private[api] def all = seen.toList
+}
+object SourceModifier {
+ private val seen = mutable.ListBuffer[SourceModifier]()
+ private[api] def apply(name: String): SourceModifier = {
+ val mod = new SourceModifier(name)
+ seen += mod
+ mod
+ }
+ private[api] def all = seen.toList
+}
+
+object Modifier extends immutable.Set[Modifier] {
+ val `abstract` = SourceModifier("abstract")
+ val `final` = SourceModifier("final")
+ val `implicit` = SourceModifier("implicit")
+ val `lazy` = SourceModifier("lazy")
+ val `macro` = SourceModifier("macro")
+ val `override` = SourceModifier("override")
+ val `private` = SourceModifier("private")
+ val `protected` = SourceModifier("protected")
+ val `sealed` = SourceModifier("sealed")
+
+ val `case` = SymbolModifier("case")
+ val `trait` = SymbolModifier("trait")
+ val abstractOverride = SymbolModifier("abstractOverride")
+ val bynameParameter = SymbolModifier("bynameParameter")
+ val caseAccessor = SymbolModifier("caseAccessor")
+ val contravariant = SymbolModifier("contravariant")
+ val covariant = SymbolModifier("covariant")
+ val defaultInit = SymbolModifier("defaultInit")
+ val defaultParameter = SymbolModifier("defaultParameter")
+ val deferred = SymbolModifier("deferred")
+ val interface = SymbolModifier("interface")
+ val java = SymbolModifier("java")
+ val local = SymbolModifier("local")
+ val mutable = SymbolModifier("mutable")
+ val paramAccessor = SymbolModifier("paramAccessor")
+ val parameter = SymbolModifier("parameter")
+ val preSuper = SymbolModifier("preSuper")
+ val static = SymbolModifier("static")
+
+ val sourceModifiers: Set[SourceModifier] = SourceModifier.all.toSet
+ val symbolModifiers: Set[SymbolModifier] = SymbolModifier.all.toSet
+ val allModifiers: Set[Modifier] = sourceModifiers ++ symbolModifiers
+ def values = allModifiers
+
+ def contains(key: Modifier) = allModifiers(key)
+ def iterator = allModifiers.iterator
+ def -(elem: Modifier) = allModifiers - elem
+ def +(elem: Modifier) = allModifiers + elem
}
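Since `Modifier` itself is now an `immutable.Set[Modifier]`, it can be queried and combined like any other set:

    import scala.reflect.api.Modifier

    Modifier contains Modifier.`lazy`            // true
    Modifier.sourceModifiers(Modifier.`sealed`)  // true: a keyword-backed modifier
    val withoutFinal = Modifier - Modifier.`final`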
diff --git a/src/library/scala/reflect/api/Names.scala b/src/library/scala/reflect/api/Names.scala
index 9498f0af36..c72774dfc7 100755
--- a/src/library/scala/reflect/api/Names.scala
+++ b/src/library/scala/reflect/api/Names.scala
@@ -6,12 +6,11 @@ package api
* The same string can be a name in both universes.
* Two names are equal if they represent the same string and they are
* members of the same universe.
- *
+ *
 * Names are interned. That is, for two names `name1` and `name2`,
* `name1 == name2` implies `name1 eq name2`.
*/
trait Names {
-
/** The abstract type of names */
type Name >: Null <: AbsName
@@ -37,12 +36,20 @@ trait Names {
/** Replaces all occurrences of $op_names in this name by corresponding operator symbols.
* Example: `foo_+=` becomes `foo_$plus$eq`.
*/
- def decode: String
+ def decoded: String
/** Replaces all occurrences of operator symbols in this name by corresponding $op_names.
* Example: `foo_$plus$eq` becomes `foo_+=`
*/
- def encode: Name
+ def encoded: String
+
+ /** The decoded name, still represented as a name.
+ */
+ def decodedName: Name
+
+ /** The encoded name, still represented as a name.
+ */
+ def encodedName: Name
}
/** Create a new term name.
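The split between `String`- and `Name`-returning variants, sketched inside some `Universe` (per the declarations above):

    trait NameDemo { self: scala.reflect.api.Universe =>
      val n = newTermName("foo_$plus$eq")
      n.decoded      // "foo_+=" as a String (was `decode`)
      n.encodedName  // the encoded spelling, still a Name
    }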
diff --git a/src/library/scala/reflect/api/StandardDefinitions.scala b/src/library/scala/reflect/api/StandardDefinitions.scala
index 3526cf259d..e737b0ea4f 100755
--- a/src/library/scala/reflect/api/StandardDefinitions.scala
+++ b/src/library/scala/reflect/api/StandardDefinitions.scala
@@ -11,14 +11,11 @@ trait StandardDefinitions { self: Universe =>
val definitions: AbsDefinitions
abstract class AbsDefinitions {
- // outer packages and their classes
- def RootPackage: Symbol // under consideration
+ // packages
+ def RootPackage: Symbol
def RootClass: Symbol
def EmptyPackage: Symbol
- def EmptyPackageClass: Symbol
-
def ScalaPackage: Symbol
- def ScalaPackageClass: Symbol
// top types
def AnyClass : Symbol
@@ -54,17 +51,19 @@ trait StandardDefinitions { self: Universe =>
// fundamental modules
def PredefModule: Symbol
- // fundamental type constructions
- def ClassType(arg: Type): Type
+ /** Given a type T, returns the type corresponding to the VM's
+ * representation: ClassClass's type constructor applied to `arg`.
+ */
+ def vmClassType(arg: Type): Type // !!! better name?
/** The string representation used by the given type in the VM.
*/
- def signature(tp: Type): String
+ def vmSignature(sym: Symbol, info: Type): String
/** Is symbol one of the value classes? */
- def isValueClass(sym: Symbol): Boolean
+ def isValueClass(sym: Symbol): Boolean // !!! better name?
/** Is symbol one of the numeric value classes? */
- def isNumericValueClass(sym: Symbol): Boolean
+ def isNumericValueClass(sym: Symbol): Boolean // !!! better name?
}
}
diff --git a/src/library/scala/reflect/api/StandardNames.scala b/src/library/scala/reflect/api/StandardNames.scala
new file mode 100644
index 0000000000..81517d2a6b
--- /dev/null
+++ b/src/library/scala/reflect/api/StandardNames.scala
@@ -0,0 +1,21 @@
+/* NSC -- new Scala compiler
+ * Copyright 2005-2011 LAMP/EPFL
+ * @author Martin Odersky
+ */
+
+package scala.reflect
+package api
+
+trait StandardNames { self: Universe =>
+
+ val nme: AbsTermNames
+
+ abstract class AbsTermNames {
+ val CONSTRUCTOR: TermName
+ }
+
+ val tpnme: AbsTypeNames
+
+ abstract class AbsTypeNames {
+ }
+}
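`nme.CONSTRUCTOR` is exactly what the new `New(tpt, argss)` factory in `Trees` selects; a sketch inside some `Universe`:

    trait CtorDemo { self: scala.reflect.api.Universe =>
      // The `new tpt.<init>` reference built when expanding `new tpt(args)`.
      def ctorRef(tpt: Tree): Tree = Select(New(tpt), nme.CONSTRUCTOR)
    }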
diff --git a/src/library/scala/reflect/api/Symbols.scala b/src/library/scala/reflect/api/Symbols.scala
index 17d9b06324..44dc2ce1c2 100755
--- a/src/library/scala/reflect/api/Symbols.scala
+++ b/src/library/scala/reflect/api/Symbols.scala
@@ -9,11 +9,20 @@ trait Symbols { self: Universe =>
/** The modifiers of this symbol
*/
- def allModifiers: Set[Modifier.Value]
+ def modifiers: Set[Modifier]
/** Does this symbol have given modifier?
*/
- def hasModifier(mod: Modifier.Value): Boolean
+ def hasModifier(mod: Modifier): Boolean
+
+ /** A list of annotations attached to this Symbol.
+ */
+ def annotations: List[self.AnnotationInfo]
+
+ /** Whether this symbol carries an annotation for which the given
+ * symbol is its typeSymbol.
+ */
+ def hasAnnotation(sym: Symbol): Boolean
/** The owner of this symbol. This is the symbol
* that directly contains the current symbol's definition.
@@ -30,14 +39,6 @@ trait Symbols { self: Universe =>
*/
def name: Name
- /** The name of the symbol before decoding, e.g. `\$eq\$eq` instead of `==`.
- */
- def encodedName: String
-
- /** The decoded name of the symbol, e.g. `==` instead of `\$eq\$eq`.
- */
- def decodedName: String
-
/** The encoded full path name of this symbol, where outer names and inner names
* are separated by periods.
*/
@@ -66,49 +67,43 @@ trait Symbols { self: Universe =>
*
* The java access levels translate as follows:
*
- * java private: hasFlag(PRIVATE) && !hasAccessBoundary
- * java package: !hasFlag(PRIVATE | PROTECTED) && (privateWithin == enclosing package)
- * java protected: hasFlag(PROTECTED) && (privateWithin == enclosing package)
- * java public: !hasFlag(PRIVATE | PROTECTED) && !hasAccessBoundary
+ * java private: hasFlag(PRIVATE) && (privateWithin == NoSymbol)
+ * java package: !hasFlag(PRIVATE | PROTECTED) && (privateWithin == enclosingPackage)
+ * java protected: hasFlag(PROTECTED) && (privateWithin == enclosingPackage)
+ * java public: !hasFlag(PRIVATE | PROTECTED) && (privateWithin == NoSymbol)
*/
def privateWithin: Symbol
- /** Whether this symbol has a "privateWithin" visibility barrier attached.
- */
- def hasAccessBoundary: Boolean
-
- /** A list of annotations attached to this Symbol.
- */
- def getAnnotations: List[self.AnnotationInfo]
-
/** For a class: the module or case class factory with the same name in the same package.
+ * For a module: the class with the same name in the same package.
* For all others: NoSymbol
*/
- def companionModule: Symbol
-
- /** For a module: the class with the same name in the same package.
- * For all others: NoSymbol
- */
- def companionClass: Symbol
-
- /** The module corresponding to this module class (note that this
- * is not updated when a module is cloned), or NoSymbol if this is not a ModuleClass
- */
- def sourceModule: Symbol
+ def companionSymbol: Symbol
/** If symbol is an object definition, its implied associated class,
* otherwise NoSymbol
*/
def moduleClass: Symbol // needed for LiftCode
- /** The top-level class containing this symbol. */
- def toplevelClass: Symbol
+ /** If this symbol is a top-level class, this symbol; otherwise the next enclosing
+ * top-level class, or `NoSymbol` if none exists.
+ */
+ def enclosingTopLevelClass: Symbol
- /** The next enclosing class, or `NoSymbol` if none exists */
- def enclClass : Symbol
+ /** If this symbol is a class, this symbol; otherwise the next enclosing
+ * class, or `NoSymbol` if none exists.
+ */
+ def enclosingClass: Symbol
- /** The next enclosing method, or `NoSymbol` if none exists */
- def enclMethod : Symbol
+ /** If this symbol is a method, this symbol; otherwise the next enclosing
+ * method, or `NoSymbol` if none exists.
+ */
+ def enclosingMethod: Symbol
+
+ /** If this symbol is a package class, this symbol; otherwise the next enclosing
+ * package class, or `NoSymbol` if none exists.
+ */
+ def enclosingPackageClass: Symbol
/** Does this symbol represent the definition of term?
* Note that every symbol is either a term or a type.
@@ -141,13 +136,13 @@ trait Symbols { self: Universe =>
/** The type signature of this symbol.
* Note if the symbol is a member of a class, one almost always is interested
- * in `typeSigIn` with a site type instead.
+ * in `typeSignatureIn` with a site type instead.
*/
- def typeSig: Type
+ def typeSignature: Type // !!! Since one should almost never use this, let's give it a different name.
/** The type signature of this symbol seen as a member of given type `site`.
*/
- def typeSigIn(site: Type): Type
+ def typeSignatureIn(site: Type): Type
/** A type reference that refers to this type symbol
* Note if symbol is a member of a class, one almost always is interested
@@ -156,11 +151,11 @@ trait Symbols { self: Universe =>
* Example: Given a class declaration `class C[T] { ... } `, that generates a symbol
* `C`. Then `C.asType` is the type `C[T]`.
*
- * By contrast, `C.typeSig` would be a type signature of form
+ * By contrast, `C.typeSignature` would be a type signature of form
* `PolyType(ClassInfoType(...))` that describes type parameters, value
* parameters, parent types, and members of `C`.
*/
- def asType: Type
+ def asType: Type // !!! Same as typeSignature.
/** A type reference that refers to this type symbol seen
* as a member of given type `site`.
@@ -172,37 +167,37 @@ trait Symbols { self: Universe =>
* are part of results of `asType`, but not of `asTypeConstructor`.
*
* Example: Given a class declaration `class C[T] { ... } `, that generates a symbol
- * `C`. Then `C.asType` is the type `C[T]`, but `C.asTypeCponstructor` is `C`.
+ * `C`. Then `C.asType` is the type `C[T]`, but `C.asTypeConstructor` is `C`.
*/
def asTypeConstructor: Type // needed by LiftCode
- /** If this symbol is a class or trait, its self type, otherwise the type
- * of the symbol itself.
+ /** If this symbol is a class, the type `C.this`, otherwise `NoPrefix`.
*/
- def typeOfThis: Type
+ def thisPrefix: Type
- /** If this symbol is a class, the type `C.this`, otherwise `NoPrefix`.
+ /** If this symbol is a class or trait, its self type, otherwise the type
+ * of the symbol itself.
*/
- def thisType: Type
+ def selfType: Type
/** A fresh symbol with given name `name`, position `pos` and flags `flags` that has
- * the current symbol as its owner.
+ * the current symbol as its owner.
*/
def newNestedSymbol(name: Name, pos: Position, flags: Long): Symbol // needed by LiftCode
/** Low-level operation to set the symbol's flags
* @return the symbol itself
*/
- def setInternalFlags(flags: Long): this.type // needed by LiftCode
+ def setInternalFlags(flags: Long): this.type // needed by LiftCode !!! not enough reason to have in the api
/** Set symbol's type signature to given type
* @return the symbol itself
*/
- def setTypeSig(tpe: Type): this.type // needed by LiftCode
+ def setTypeSignature(tpe: Type): this.type // needed by LiftCode !!! not enough reason to have in the api
/** Set symbol's annotations to given annotations `annots`.
*/
- def setAnnotations(annots: AnnotationInfo*): this.type // needed by LiftCode
+ def setAnnotations(annots: AnnotationInfo*): this.type // needed by LiftCode !!! not enough reason to have in the api
}
val NoSymbol: Symbol
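A sketch of the renamed navigation and signature methods (names per this patch), again assuming the runtime `mirror`:

    import scala.reflect.mirror

    val sym = mirror.symbolForName("scala.collection.immutable.List")
    sym.companionSymbol         // companion object symbol (was companionModule)
    sym.enclosingTopLevelClass  // List itself: it is already top-level
    sym.enclosingPackageClass   // the `immutable` package class
    sym.typeSignature           // was typeSig; prefer typeSignatureIn(site) for members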
diff --git a/src/library/scala/reflect/api/TreeBuildUtil.scala b/src/library/scala/reflect/api/TreeBuildUtil.scala
index b437824925..f28008bc21 100644
--- a/src/library/scala/reflect/api/TreeBuildUtil.scala
+++ b/src/library/scala/reflect/api/TreeBuildUtil.scala
@@ -3,19 +3,19 @@ package scala.reflect.api
trait TreeBuildUtil extends Universe {
/** The symbol corresponding to the globally accessible class with the
- * given fully qualified name `fullname`.
+ * given fully qualified name `fullName`.
*/
- def staticClass(fullname: String): Symbol
+ def staticClass(fullName: String): Symbol
/** The symbol corresponding to the globally accessible object with the
- * given fully qualified name `fullname`.
+ * given fully qualified name `fullName`.
*/
- def staticModule(fullname: String): Symbol
+ def staticModule(fullName: String): Symbol
 /** The this-type of the globally accessible object with the
- * given fully qualified name `fullname`.
+ * given fully qualified name `fullName`.
*/
- def thisModuleType(fullname: String): Type
+ def thisModuleType(fullName: String): Type
/** Selects type symbol with given simple name `name` from the defined members of `owner`.
*/
@@ -38,7 +38,7 @@ trait TreeBuildUtil extends Universe {
* @param tsig the type signature of the free variable
* @param value the value of the free variable at runtime
*/
- def freeVar(name: String, tsig: Type, value: Any): Symbol
+ def newFreeVar(name: String, info: Type, value: Any): Symbol
 /** Create a Modifiers structure given internal flags, qualifier, annotations */
def modifiersFromInternalFlags(flags: Long, privateWithin: Name, annotations: List[Tree]): Modifiers
diff --git a/src/library/scala/reflect/api/TreePrinters.scala b/src/library/scala/reflect/api/TreePrinters.scala
index 88ef450ed9..21b55e9c0e 100644
--- a/src/library/scala/reflect/api/TreePrinters.scala
+++ b/src/library/scala/reflect/api/TreePrinters.scala
@@ -31,18 +31,6 @@ trait TreePrinters { self: Universe =>
// emits more or less verbatim representation of the provided tree
// todo. when LiftCode becomes a macro, throw this code away and use that macro
class RawTreePrinter(out: PrintWriter) extends TreePrinter {
- import scala.reflect.api.Modifier
- import scala.reflect.api.Modifier._
-
- def copypasteModifier(mod: Modifier.Value): String = mod match {
- case mod @ (
- `protected` | `private` | `override` |
- `abstract` | `final` | `sealed` |
- `implicit` | `lazy` | `macro` |
- `case` | `trait`) => "`" + mod.toString + "`"
- case mod => mod.toString
- }
-
def print(args: Any*): Unit = args foreach {
case EmptyTree =>
print("EmptyTree")
@@ -67,7 +55,7 @@ trait TreePrinters { self: Universe =>
print(")")
if (typesPrinted)
print(".setType(", tree.tpe, ")")
- case list: List[_] =>
+ case list: List[_] =>
print("List(")
val it = list.iterator
while (it.hasNext) {
@@ -76,16 +64,16 @@ trait TreePrinters { self: Universe =>
}
print(")")
case mods: Modifiers =>
- val parts = collection.mutable.ListBuffer[String]()
- parts += "Set(" + mods.allModifiers.map{copypasteModifier}.mkString(", ") + ")"
+ val parts = collection.mutable.ListBuffer[String]()
+ parts += "Set(" + mods.modifiers.map(_.sourceString).mkString(", ") + ")"
parts += "newTypeName(\"" + mods.privateWithin.toString + "\")"
parts += "List(" + mods.annotations.map{showRaw}.mkString(", ") + ")"
-
+
var keep = 3
if (keep == 3 && mods.annotations.isEmpty) keep -= 1
if (keep == 2 && mods.privateWithin == EmptyTypeName) keep -= 1
- if (keep == 1 && mods.allModifiers.isEmpty) keep -= 1
-
+ if (keep == 1 && mods.modifiers.isEmpty) keep -= 1
+
print("Modifiers(", parts.take(keep).mkString(", "), ")")
case name: Name =>
if (name.isTermName) print("newTermName(\"") else print("newTypeName(\"")
diff --git a/src/library/scala/reflect/api/Trees.scala b/src/library/scala/reflect/api/Trees.scala
index 0a38fb45bf..181ce85dac 100644
--- a/src/library/scala/reflect/api/Trees.scala
+++ b/src/library/scala/reflect/api/Trees.scala
@@ -13,17 +13,17 @@ trait Trees { self: Universe =>
private[scala] var nodeCount = 0
- type Modifiers <: AbsModifiers
+ type Modifiers >: Null <: AbsModifiers
abstract class AbsModifiers {
- def hasModifier(mod: Modifier.Value): Boolean
- def allModifiers: Set[Modifier.Value]
+ def modifiers: Set[Modifier]
+ def hasModifier(mod: Modifier): Boolean
def privateWithin: Name // default: EmptyTypeName
def annotations: List[Tree] // default: List()
def mapAnnotations(f: List[Tree] => List[Tree]): Modifiers
}
- def Modifiers(mods: Set[Modifier.Value] = Set(),
+ def Modifiers(mods: Set[Modifier] = Set(),
privateWithin: Name = EmptyTypeName,
annotations: List[Tree] = List()): Modifiers
@@ -439,6 +439,12 @@ trait Trees { self: Universe =>
case class Assign(lhs: Tree, rhs: Tree)
extends TermTree
+ /** Either an assignment or a named argument. Only appears in argument lists,
+ * and is eliminated by typecheck (doTypedApply).
+ */
+ case class AssignOrNamedArg(lhs: Tree, rhs: Tree)
+ extends TermTree
+
/** Conditional expression */
case class If(cond: Tree, thenp: Tree, elsep: Tree)
extends TermTree
@@ -476,6 +482,20 @@ trait Trees { self: Universe =>
*/
case class New(tpt: Tree) extends TermTree
+ /** Factory method for object creation `new tpt(args_1)...(args_n)`
+ * A `New(t, as)` is expanded to: `(new t).<init>(as)`
+ */
+ def New(tpt: Tree, argss: List[List[Tree]]): Tree = {
+ // todo. we need to expose names in scala.reflect.api
+ val superRef: Tree = Select(New(tpt), nme.CONSTRUCTOR)
+ if (argss.isEmpty) Apply(superRef, Nil)
+ else (superRef /: argss) (Apply)
+ }
+ /** 0-1 argument list new, based on a type.
+ */
+ def New(tpe: Type, args: Tree*): Tree =
+ New(TypeTree(tpe), List(args.toList))
+
/** Type annotation, eliminated by explicit outer */
case class Typed(expr: Tree, tpt: Tree)
extends TermTree
@@ -632,10 +652,10 @@ trait Trees { self: Universe =>
}
def TypeTree(tp: Type): TypeTree = TypeTree() setType tp
-
+
/** An empty deferred value definition corresponding to:
* val _: _
- * This is used as a placeholder in the `self` parameter Template if there is
+ * This is used as a placeholder in the `self` parameter Template if there is
* no definition of a self value of self type.
*/
def emptyValDef: ValDef
@@ -644,6 +664,96 @@ trait Trees { self: Universe =>
val treeCopy = newLazyTreeCopier
+ def copyDefDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tparams: List[TypeDef] = null,
+ vparamss: List[List[ValDef]] = null,
+ tpt: Tree = null,
+ rhs: Tree = null
+ ): DefDef = tree match {
+ case DefDef(mods0, name0, tparams0, vparamss0, tpt0, rhs0) =>
+ treeCopy.DefDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tparams eq null) tparams0 else tparams,
+ if (vparamss eq null) vparamss0 else vparamss,
+ if (tpt eq null) tpt0 else tpt,
+ if (rhs eq null) rhs0 else rhs
+ )
+ case t =>
+ sys.error("Not a DefDef: " + t + "/" + t.getClass)
+ }
+ def copyValDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tpt: Tree = null,
+ rhs: Tree = null
+ ): ValDef = tree match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tpt eq null) tpt0 else tpt,
+ if (rhs eq null) rhs0 else rhs
+ )
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def copyClassDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tparams: List[TypeDef] = null,
+ impl: Template = null
+ ): ClassDef = tree match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tparams eq null) tparams0 else tparams,
+ if (impl eq null) impl0 else impl
+ )
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+
+ def deriveDefDef(ddef: Tree)(applyToRhs: Tree => Tree): DefDef = ddef match {
+ case DefDef(mods0, name0, tparams0, vparamss0, tpt0, rhs0) =>
+ treeCopy.DefDef(ddef, mods0, name0, tparams0, vparamss0, tpt0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a DefDef: " + t + "/" + t.getClass)
+ }
+ def deriveValDef(vdef: Tree)(applyToRhs: Tree => Tree): ValDef = vdef match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(vdef, mods0, name0, tpt0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def deriveTemplate(templ: Tree)(applyToBody: List[Tree] => List[Tree]): Template = templ match {
+ case Template(parents0, self0, body0) =>
+ treeCopy.Template(templ, parents0, self0, applyToBody(body0))
+ case t =>
+ sys.error("Not a Template: " + t + "/" + t.getClass)
+ }
+ def deriveClassDef(cdef: Tree)(applyToImpl: Template => Template): ClassDef = cdef match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(cdef, mods0, name0, tparams0, applyToImpl(impl0))
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+ def deriveCaseDef(cdef: Tree)(applyToBody: Tree => Tree): CaseDef = cdef match {
+ case CaseDef(pat0, guard0, body0) =>
+ treeCopy.CaseDef(cdef, pat0, guard0, applyToBody(body0))
+ case t =>
+ sys.error("Not a CaseDef: " + t + "/" + t.getClass)
+ }
+ def deriveLabelDef(ldef: Tree)(applyToRhs: Tree => Tree): LabelDef = ldef match {
+ case LabelDef(name0, params0, rhs0) =>
+ treeCopy.LabelDef(ldef, name0, params0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a LabelDef: " + t + "/" + t.getClass)
+ }
+
class Traverser {
protected var currentOwner: Symbol = definitions.RootClass
@@ -705,6 +815,8 @@ trait Trees { self: Universe =>
}
case Assign(lhs, rhs) =>
traverse(lhs); traverse(rhs)
+ case AssignOrNamedArg(lhs, rhs) =>
+ traverse(lhs); traverse(rhs)
case If(cond, thenp, elsep) =>
traverse(cond); traverse(thenp); traverse(elsep)
case Match(selector, cases) =>
@@ -803,6 +915,7 @@ trait Trees { self: Universe =>
def ArrayValue(tree: Tree, elemtpt: Tree, trees: List[Tree]): ArrayValue
def Function(tree: Tree, vparams: List[ValDef], body: Tree): Function
def Assign(tree: Tree, lhs: Tree, rhs: Tree): Assign
+ def AssignOrNamedArg(tree: Tree, lhs: Tree, rhs: Tree): AssignOrNamedArg
def If(tree: Tree, cond: Tree, thenp: Tree, elsep: Tree): If
def Match(tree: Tree, selector: Tree, cases: List[CaseDef]): Match
def Return(tree: Tree, expr: Tree): Return
@@ -865,6 +978,8 @@ trait Trees { self: Universe =>
new Function(vparams, body).copyAttrs(tree)
def Assign(tree: Tree, lhs: Tree, rhs: Tree) =
new Assign(lhs, rhs).copyAttrs(tree)
+ def AssignOrNamedArg(tree: Tree, lhs: Tree, rhs: Tree) =
+ new AssignOrNamedArg(lhs, rhs).copyAttrs(tree)
def If(tree: Tree, cond: Tree, thenp: Tree, elsep: Tree) =
new If(cond, thenp, elsep).copyAttrs(tree)
def Match(tree: Tree, selector: Tree, cases: List[CaseDef]) =
@@ -1010,6 +1125,11 @@ trait Trees { self: Universe =>
if (lhs0 == lhs) && (rhs0 == rhs) => t
case _ => treeCopy.Assign(tree, lhs, rhs)
}
+ def AssignOrNamedArg(tree: Tree, lhs: Tree, rhs: Tree) = tree match {
+ case t @ AssignOrNamedArg(lhs0, rhs0)
+ if (lhs0 == lhs) && (rhs0 == rhs) => t
+ case _ => treeCopy.AssignOrNamedArg(tree, lhs, rhs)
+ }
def If(tree: Tree, cond: Tree, thenp: Tree, elsep: Tree) = tree match {
case t @ If(cond0, thenp0, elsep0)
if (cond0 == cond) && (thenp0 == thenp) && (elsep0 == elsep) => t
@@ -1129,9 +1249,9 @@ trait Trees { self: Universe =>
abstract class Transformer {
val treeCopy: TreeCopier = newLazyTreeCopier
protected var currentOwner: Symbol = definitions.RootClass
- protected def currentMethod = currentOwner.enclMethod
- protected def currentClass = currentOwner.enclClass
- protected def currentPackage = currentOwner.toplevelClass.owner
+ protected def currentMethod = currentOwner.enclosingMethod
+ protected def currentClass = currentOwner.enclosingClass
+ protected def currentPackage = currentOwner.enclosingTopLevelClass.owner
def transform(tree: Tree): Tree = tree match {
case EmptyTree =>
tree
@@ -1194,6 +1314,8 @@ trait Trees { self: Universe =>
}
case Assign(lhs, rhs) =>
treeCopy.Assign(tree, transform(lhs), transform(rhs))
+ case AssignOrNamedArg(lhs, rhs) =>
+ treeCopy.AssignOrNamedArg(tree, transform(lhs), transform(rhs))
case If(cond, thenp, elsep) =>
treeCopy.If(tree, transform(cond), transform(thenp), transform(elsep))
case Match(selector, cases) =>
@@ -1361,6 +1483,8 @@ trait Trees { self: Universe =>
// vparams => body where vparams:List[ValDef]
case Assign(lhs, rhs) =>
// lhs = rhs
+ case AssignOrNamedArg(lhs, rhs) => // (eliminated by typer, resurrected by reifier)
+ // @annotation(lhs = rhs)
case If(cond, thenp, elsep) =>
// if (cond) thenp else elsep
case Match(selector, cases) =>
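How the new helpers divide the work: `copyXxx` replaces only the fields named at the call site, `deriveXxx` rewrites a single component. A sketch inside some `Universe`:

    trait TreeRewrites { self: scala.reflect.api.Universe =>
      // Keep a method's full signature but drop its body.
      def stripRhs(ddef: Tree): DefDef = deriveDefDef(ddef)(_ => EmptyTree)
      // Replace just the name of a val definition; mods, tpt and rhs survive.
      def renameVal(vdef: Tree, to: Name): ValDef = copyValDef(vdef)(name = to)
    }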
diff --git a/src/library/scala/reflect/api/Types.scala b/src/library/scala/reflect/api/Types.scala
index 6185a788ae..cc8e85b9c8 100755
--- a/src/library/scala/reflect/api/Types.scala
+++ b/src/library/scala/reflect/api/Types.scala
@@ -6,7 +6,6 @@ trait Types { self: Universe =>
/** This class declares operations that are visible in a Type.
*/
abstract class AbsType {
-
/** The type symbol associated with the type, or `NoSymbol` for types
* that do not refer to a type symbol.
*/
@@ -47,7 +46,7 @@ trait Types { self: Universe =>
/** Substitute types in `to` for corresponding occurrences of references to
* symbols `from` in this type.
*/
- def subst(from: List[Symbol], to: List[Type]): Type
+ def substituteTypes(from: List[Symbol], to: List[Type]): Type // !!! Too many things with names like "subst"
/** If this is a parameterized types, the type arguments.
* Otherwise the empty list
@@ -56,7 +55,7 @@ trait Types { self: Universe =>
/** Is this type a type constructor that is missing its type arguments?
*/
- def isHigherKinded: Boolean
+ def isHigherKinded: Boolean // !!! This should be called "isTypeConstructor", no?
/**
* Expands type aliases and converts higher-kinded TypeRefs to PolyTypes.
@@ -66,7 +65,7 @@ trait Types { self: Universe =>
* TypeRef(pre, <List>, List()) is replaced by
* PolyType(X, TypeRef(pre, <List>, List(X)))
*/
- def normalize: Type
+ def normalize: Type // !!! Alternative name? "normalize" is used to mean too many things.
/** Does this type conform to given type argument `that`? */
def <:< (that: Type): Boolean
@@ -74,11 +73,11 @@ trait Types { self: Universe =>
/** Is this type equivalent to given type argument `that`? */
def =:= (that: Type): Boolean
- /** The list of all baseclasses of this type (including its own typeSymbol)
+ /** The list of all base classes of this type (including its own typeSymbol)
* in reverse linearization order, starting with the class itself and ending
* in class Any.
*/
- def baseClasses: List[Symbol]
+ def baseClasses: List[Symbol] // !!! Alternative name, perhaps linearization?
/** The least type instance of given class which is a supertype
* of this type. Example:
@@ -104,9 +103,9 @@ trait Types { self: Universe =>
def asSeenFrom(pre: Type, clazz: Symbol): Type
/** The erased type corresponding to this type after
- * all transcformations from Scala to Java have been performed.
+ * all transformations from Scala to Java have been performed.
*/
- def erasedType: Type
+ def erasedType: Type // !!! "erasedType", compare with "widen" (so "erase") or "underlying" (so "erased")
/** Apply `f` to each part of this type, returning
* a new type. children get mapped before their parents */
@@ -138,10 +137,10 @@ trait Types { self: Universe =>
/** If this is a singleton type, widen it to its nearest underlying non-singleton
* base type by applying one or more `underlying` dereferences.
- * If this is not a singlecon type, returns this type itself.
+ * If this is not a singleton type, returns this type itself.
*
* Example:
- *
+ *
* class Outer { class C ; val x: C }
* val o: Outer
* <o.x.type>.widen = o.C
@@ -400,11 +399,6 @@ trait Types { self: Universe =>
def unapply(tpe: ClassInfoType): Option[(List[Type], Scope, Symbol)]
}
-
-
-
-
-
abstract class NullaryMethodTypeExtractor {
def apply(resultType: Type): NullaryMethodType
def unapply(tpe: NullaryMethodType): Option[(Type)]
diff --git a/src/library/scala/reflect/api/Universe.scala b/src/library/scala/reflect/api/Universe.scala
index 03acbdda2c..a3cec3271b 100755
--- a/src/library/scala/reflect/api/Universe.scala
+++ b/src/library/scala/reflect/api/Universe.scala
@@ -10,7 +10,8 @@ abstract class Universe extends Symbols
with Positions
with TreePrinters
with AnnotationInfos
- with StandardDefinitions {
+ with StandardDefinitions
+ with StandardNames {
type Position
val NoPosition: Position
diff --git a/src/library/scala/reflect/macro/Context.scala b/src/library/scala/reflect/macro/Context.scala
index d0a2787fdf..2fd9bb6484 100644
--- a/src/library/scala/reflect/macro/Context.scala
+++ b/src/library/scala/reflect/macro/Context.scala
@@ -2,14 +2,35 @@ package scala.reflect
package macro
trait Context extends api.Universe {
-
+
/** Mark a variable as captured; i.e. force boxing in a *Ref type.
*/
def captureVariable(vble: Symbol): Unit
-
+
/** Mark given identifier as a reference to a captured variable itself
* suppressing dereferencing with the `elem` field.
*/
def referenceCapturedVariable(id: Ident): Tree
+ /** Given a tree or type, generate a tree that when executed at runtime produces the original tree or type.
+ * For instance, given the abstract syntax tree representation of the `x + 1` expression:
+ *
+ * Apply(Select(Ident("x"), "+"), List(Literal(Constant(1))))
+ *
+ * The reifier transforms it to the following tree:
+ *
+ * $mr.Apply($mr.Select($mr.Ident($mr.newFreeVar("x", <Int>, x)), "+"), List($mr.Literal($mr.Constant(1))))
+ *
+ * The transformation looks mostly straightforward, but it has its tricky parts:
+ * * Reifier retains symbols and types defined outside the reified tree, however
+ * locally defined entities get erased and replaced with their original trees
+ * * Free variables are detected and wrapped in symbols of the type FreeVar
+ * * Mutable variables that are accessed from a local function are wrapped in refs
+ * * Since reified trees can be compiled outside of the scope they've been created in,
+ * special measures are taken to ensure that all freeVars remain visible
+ *
+ * Typical usage of this function is to retain some of the trees received or created by a macro
+ * in a form that can be inspected (via pattern matching) or compiled/run (by a reflective ToolBox) at runtime.
+ */
+ def reify(tree: Tree): Tree
}
diff --git a/src/library/scala/runtime/AbstractFunction1.scala b/src/library/scala/runtime/AbstractFunction1.scala
index a9e5e90e20..b2f336fe52 100644
--- a/src/library/scala/runtime/AbstractFunction1.scala
+++ b/src/library/scala/runtime/AbstractFunction1.scala
@@ -9,6 +9,6 @@
package scala.runtime
-abstract class AbstractFunction1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double) -T1, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double) +R] extends Function1[T1, R] {
+abstract class AbstractFunction1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double, scala.AnyRef) -T1, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double, scala.AnyRef) +R] extends Function1[T1, R] {
}
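Function literals compile to subclasses of `AbstractFunction1`, so widening the `@specialized` lists to include `AnyRef` lets more of them avoid boxing:

    val inc = (x: Int) => x + 1  // compiles to an AbstractFunction1[Int, Int] subclass
    inc(41)                      // dispatches to the Int-specialized apply, no boxing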
diff --git a/src/library/scala/runtime/NonLocalReturnControl.scala b/src/library/scala/runtime/NonLocalReturnControl.scala
index 8be2745086..216e3e664b 100644
--- a/src/library/scala/runtime/NonLocalReturnControl.scala
+++ b/src/library/scala/runtime/NonLocalReturnControl.scala
@@ -6,12 +6,10 @@
** |/ **
\* */
-
-
package scala.runtime
import scala.util.control.ControlThrowable
-class NonLocalReturnControl[T](val key: AnyRef, val value: T) extends ControlThrowable {
+class NonLocalReturnControl[@specialized T](val key: AnyRef, val value: T) extends ControlThrowable {
final override def fillInStackTrace(): Throwable = this
}
diff --git a/src/library/scala/runtime/ScalaRunTime.scala b/src/library/scala/runtime/ScalaRunTime.scala
index 951bdd888e..8bc63ae3a0 100644
--- a/src/library/scala/runtime/ScalaRunTime.scala
+++ b/src/library/scala/runtime/ScalaRunTime.scala
@@ -36,7 +36,16 @@ object ScalaRunTime {
case _: Byte | _: Short | _: Char | _: Int | _: Long | _: Float | _: Double | _: Boolean | _: Unit => true
case _ => false
}
- private val tupleNames = 1 to 22 map ("scala.Tuple" + _) toSet
+ // Avoiding boxing which messes up the specialized tests. Don't ask.
+ private val tupleNames = {
+ var i = 22
+ var names: List[String] = Nil
+ while (i >= 1) {
+ names ::= ("scala.Tuple" + String.valueOf(i))
+ i -= 1
+ }
+ names.toSet
+ }
/** Return the class object representing an unboxed value type,
* e.g. classOf[int], not classOf[java.lang.Integer]. The compiler
diff --git a/src/library/scala/specialized.scala b/src/library/scala/specialized.scala
index 902faa166e..b876869afb 100644
--- a/src/library/scala/specialized.scala
+++ b/src/library/scala/specialized.scala
@@ -6,10 +6,10 @@
** |/ **
\* */
-
-
package scala
+import Specializable._
+
/** Annotate type parameters on which code should be automatically
* specialized. For example:
* {{{
@@ -24,8 +24,9 @@ package scala
*
* @since 2.8
*/
-class specialized(types: SpecializableCompanion*) extends annotation.StaticAnnotation {
- def this() {
- this(Unit, Boolean, Byte, Short, Char, Int, Long, Float, Double)
- }
+// class tspecialized[T](group: Group[T]) extends annotation.StaticAnnotation {
+
+class specialized(group: SpecializedGroup) extends annotation.StaticAnnotation {
+ def this(types: Specializable*) = this(new Group(types.toList))
+ def this() = this(Everything)
}
diff --git a/src/library/scala/sys/process/BasicIO.scala b/src/library/scala/sys/process/BasicIO.scala
index 44e573896f..77e36f6196 100644
--- a/src/library/scala/sys/process/BasicIO.scala
+++ b/src/library/scala/sys/process/BasicIO.scala
@@ -13,15 +13,25 @@ import processInternal._
import java.io.{ BufferedReader, InputStreamReader, FilterInputStream, FilterOutputStream }
import java.util.concurrent.LinkedBlockingQueue
import scala.collection.immutable.Stream
+import scala.annotation.tailrec
/**
* This object contains factories for [[scala.sys.process.ProcessIO]],
* which can be used to control the I/O of a [[scala.sys.process.Process]]
* when a [[scala.sys.process.ProcessBuilder]] is started with the `run`
* command.
+ *
+ * It also contains some helper methods that can be used in the creation of
+ * `ProcessIO`.
+ *
+ * It is used by other classes in the package in the implementation of various
+ * features, but can also be used by client code.
*/
object BasicIO {
+ /** Size of the buffer used in all the functions that copy data */
final val BufferSize = 8192
+
+ /** Used to separate lines in the `processFully` function that takes `Appendable`. */
final val Newline = props("line.separator")
private[process] final class Streamed[T](
@@ -52,15 +62,70 @@ object BasicIO {
def protect(out: OutputStream): OutputStream = if ((out eq stdout) || (out eq stderr)) Uncloseable(out) else out
}
+ /** Creates a `ProcessIO` from a function `String => Unit`. It can attach the
+ * process input to stdin, and it will either send the error stream to
+ * stderr, or to a `ProcessLogger`.
+ *
+ * For example, the `ProcessIO` created below will print all normal output
+ * while ignoring all error output. No input will be provided.
+ * {{{
+ * import scala.sys.process.BasicIO
+ * val errToDevNull = BasicIO(false, println(_), None)
+ * }}}
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param output A function that will be called with the process output.
+ * @param log An optional `ProcessLogger` to which the output should be
+ * sent. If `None`, output will be sent to stderr.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, output: String => Unit, log: Option[ProcessLogger]) =
new ProcessIO(input(withIn), processFully(output), getErr(log))
+ /** Creates a `ProcessIO` that appends its output to a `StringBuffer`. It can
+ * attach the process input to stdin, and it will either send the error
+ * stream to stderr, or to a `ProcessLogger`.
+ *
+ * For example, the `ProcessIO` created by the function below will store the
+ * normal output in the buffer provided, and print all error output to stderr. The
+ * input will be read from stdin.
+ * {{{
+ * import scala.sys.process.{BasicIO, ProcessLogger}
+ * val printer = ProcessLogger(println(_))
+ * def appendToBuffer(b: StringBuffer) = BasicIO(true, b, Some(printer))
+ * }}}
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param buffer A `StringBuffer` which will receive the process normal
+ * output.
+ * @param log An optional `ProcessLogger` to which the output should be
+ * sent. If `None`, output will be sent to stderr.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, buffer: StringBuffer, log: Option[ProcessLogger]) =
new ProcessIO(input(withIn), processFully(buffer), getErr(log))
+ /** Creates a `ProcessIO` from a `ProcessLogger` . It can attach the
+ * process input to stdin.
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param log A `ProcessLogger` to receive all output, normal and error.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, log: ProcessLogger) =
new ProcessIO(input(withIn), processOutFully(log), processErrFully(log))
+ /** Returns a function `InputStream => Unit` given an optional
+ * `ProcessLogger`. If no logger is passed, the function will send the output
+ * to stderr. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]].
+ *
+ * @param log An optional `ProcessLogger` to which the contents of
+ * the `InputStream` will be sent.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will send the data to
+ * either the provided `ProcessLogger` or, if `None`, to stderr.
+ */
def getErr(log: Option[ProcessLogger]) = log match {
case Some(lg) => processErrFully(lg)
case None => toStdErr
@@ -69,13 +134,40 @@ object BasicIO {
private def processErrFully(log: ProcessLogger) = processFully(log err _)
private def processOutFully(log: ProcessLogger) = processFully(log out _)
+ /** Closes a `Closeable` without throwing an exception */
def close(c: Closeable) = try c.close() catch { case _: IOException => () }
+
+ /** Returns a function `InputStream => Unit` that appends all data read to the
+ * provided `Appendable`. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]]. Data is appended to the buffer line by line.
+ *
+ * @param buffer An `Appendable` such as `StringBuilder` or `StringBuffer`.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will append all data read
+ * from the stream to the buffer.
+ */
def processFully(buffer: Appendable): InputStream => Unit = processFully(appendLine(buffer))
+
+ /** Returns a function `InputStream => Unit` that will call the passed
+ * function with all data read. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]]. The `processLine` function will be called
+ * with each line read, and `Newline` will be appended after each line.
+ *
+ * @param processLine A function that will be called with all data read from
+ * the stream.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will call `processLine`
+ * with all data read from the stream.
+ */
def processFully(processLine: String => Unit): InputStream => Unit = in => {
val reader = new BufferedReader(new InputStreamReader(in))
processLinesFully(processLine)(reader.readLine)
+ reader.close()
}
+ /** Calls `processLine` with the result of `readLine` until the latter returns
+ * `null`.
+ */
def processLinesFully(processLine: String => Unit)(readLine: () => String) {
def readFully() {
val line = readLine()
@@ -86,14 +178,38 @@ object BasicIO {
}
readFully()
}
- def connectToIn(o: OutputStream): Unit = transferFully(stdin, o)
- def input(connect: Boolean): OutputStream => Unit = if (connect) connectToIn else _ => ()
+
+ /** Copy contents of stdin to the `OutputStream`. */
+ def connectToIn(o: OutputStream): Unit = transferFully(Uncloseable protect stdin, o)
+
+ /** Returns a function `OutputStream => Unit` that either reads the content
+ * from stdin or does nothing. This function can be used by
+ * [[scala.sys.process.ProcessIO]].
+ */
+ def input(connect: Boolean): OutputStream => Unit = { outputToProcess =>
+ if (connect) connectToIn(outputToProcess)
+ outputToProcess.close()
+ }
+
+ /** Returns a `ProcessIO` connected to stdout and stderr, and, optionally, stdin. */
def standard(connectInput: Boolean): ProcessIO = standard(input(connectInput))
+
+ /** Returns a `ProcessIO` connected to stdout, stderr and the provided `in`. */
def standard(in: OutputStream => Unit): ProcessIO = new ProcessIO(in, toStdOut, toStdErr)
+ /** Sends all input from the stream to stderr, and closes the input stream
+ * afterwards.
+ */
def toStdErr = (in: InputStream) => transferFully(in, stderr)
+
+ /** Sends all input from the stream to stdout, and closes the input stream
+ * afterwards.
+ */
def toStdOut = (in: InputStream) => transferFully(in, stdout)
+ /** Copies all input from the input stream to the output stream, closing the
+ * input stream once all of it has been read.
+ */
def transferFully(in: InputStream, out: OutputStream): Unit =
try transferFullyImpl(in, out)
catch onInterrupt(())
@@ -105,14 +221,16 @@ object BasicIO {
private[this] def transferFullyImpl(in: InputStream, out: OutputStream) {
val buffer = new Array[Byte](BufferSize)
- def loop() {
+ @tailrec def loop() {
val byteCount = in.read(buffer)
if (byteCount > 0) {
out.write(buffer, 0, byteCount)
- out.flush()
- loop()
+ // flush() will throw an exception once the process has terminated
+ val available = try { out.flush(); true } catch { case _: IOException => false }
+ if (available) loop()
}
}
loop()
+ in.close()
}
}
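Taken together, the `BasicIO` factories above are enough to assemble a `ProcessIO` without writing any stream-handling code. A minimal sketch, assuming a Unix-like system with `ls` on the PATH:
{{{
import scala.sys.process._

object BasicIOExample extends App {
  val buffer = new StringBuffer()
  // withIn = false: stdin is not connected; log = None: error output
  // falls through to this process's stderr.
  val io = BasicIO(false, buffer, None)
  // exitValue() blocks until the external command terminates.
  val exit = Process("ls").run(io).exitValue()
  println("exit code: " + exit)
  println(buffer)
}
}}}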
diff --git a/src/library/scala/sys/process/Process.scala b/src/library/scala/sys/process/Process.scala
index b8765aa615..c2a61af936 100644
--- a/src/library/scala/sys/process/Process.scala
+++ b/src/library/scala/sys/process/Process.scala
@@ -13,7 +13,7 @@ import processInternal._
import ProcessBuilder._
/** Represents a process that is running or has finished running.
- * It may be a compound process with several underlying native processes (such as 'a #&& b`).
+ * It may be a compound process with several underlying native processes (such as `a #&& b`).
*
* This trait is often not used directly, though its companion object contains
* factories for [[scala.sys.process.ProcessBuilder]], the main component of this
@@ -42,28 +42,28 @@ object Process extends ProcessImpl with ProcessCreation { }
* found on and used through [[scala.sys.process.Process]]'s companion object.
*/
trait ProcessCreation {
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `String`, including the
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `String`, including the
* parameters.
*
* @example {{{ apply("cat file.txt") }}}
*/
def apply(command: String): ProcessBuilder = apply(command, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a sequence of `String`,
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a sequence of `String`,
* where the head is the command and each element of the tail is a parameter.
*
* @example {{{ apply("cat" :: files) }}}
*/
def apply(command: Seq[String]): ProcessBuilder = apply(command, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a command represented by a `String`,
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a command represented by a `String`,
* and a sequence of `String` representing the arguments.
*
* @example {{{ apply("cat", files) }}}
*/
def apply(command: String, arguments: Seq[String]): ProcessBuilder = apply(command +: arguments, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
* environment variables.
*
* @example {{{ apply("java", new java.ioFile("/opt/app"), "CLASSPATH" -> "library.jar") }}}
@@ -71,7 +71,7 @@ trait ProcessCreation {
def apply(command: String, cwd: File, extraEnv: (String, String)*): ProcessBuilder =
apply(command, Some(cwd), extraEnv: _*)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
* environment variables.
*
* @example {{{ apply("java" :: javaArgs, new java.ioFile("/opt/app"), "CLASSPATH" -> "library.jar") }}}
@@ -79,7 +79,7 @@ trait ProcessCreation {
def apply(command: Seq[String], cwd: File, extraEnv: (String, String)*): ProcessBuilder =
apply(command, Some(cwd), extraEnv: _*)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
* `File` and extra environment variables.
*
* @example {{{ apply("java", params.get("cwd"), "CLASSPATH" -> "library.jar") }}}
@@ -93,7 +93,7 @@ trait ProcessCreation {
}*/
}
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
* `File` and extra environment variables.
*
* @example {{{ apply("java" :: javaArgs, params.get("cwd"), "CLASSPATH" -> "library.jar") }}}
@@ -105,7 +105,7 @@ trait ProcessCreation {
apply(jpb)
}
- /** create a [[scala.sys.process.ProcessBuilder]] from a `java.lang.ProcessBuilder`.
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.lang.ProcessBuilder`.
*
* @example {{{
* apply((new java.lang.ProcessBuilder("ls", "-l")) directory new java.io.File(System.getProperty("user.home")))
@@ -113,19 +113,19 @@ trait ProcessCreation {
*/
def apply(builder: JProcessBuilder): ProcessBuilder = new Simple(builder)
- /** create a [[scala.sys.process.ProcessBuilder]] from a `java.io.File`. This
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.io.File`. This
* `ProcessBuilder` can then be used as a `Source` or a `Sink`, so one can
* pipe things from and to it.
*/
def apply(file: File): FileBuilder = new FileImpl(file)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `java.net.URL`. This
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.net.URL`. This
* `ProcessBuilder` can then be used as a `Source`, so that one can pipe things
* from it.
*/
def apply(url: URL): URLBuilder = new URLImpl(url)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a Scala XML Element.
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a Scala XML Element.
* This can be used as a way to template strings.
*
* @example {{{
@@ -134,23 +134,23 @@ trait ProcessCreation {
*/
def apply(command: scala.xml.Elem): ProcessBuilder = apply(command.text.trim)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `Boolean`. This can be
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `Boolean`. This can be
* used to force an exit value.
*/
def apply(value: Boolean): ProcessBuilder = apply(value.toString, if (value) 0 else 1)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `String` name and a
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `String` name and a
* `Boolean`. This can be used to force an exit value, with the name being
* used for `toString`.
*/
def apply(name: String, exitValue: => Int): ProcessBuilder = new Dummy(name, exitValue)
- /** Create a sequence of [[scala.sys.process.ProcessBuilder.Source]] from a sequence of
+ /** Creates a sequence of [[scala.sys.process.ProcessBuilder.Source]] from a sequence of
* something else for which there's an implicit conversion to `Source`.
*/
def applySeq[T](builders: Seq[T])(implicit convert: T => Source): Seq[Source] = builders.map(convert)
- /** Create a [[scala.sys.process.ProcessBuilder]] from one or more
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from one or more
* [[scala.sys.process.ProcessBuilder.Source]], which can then be
* piped to something else.
*
@@ -170,7 +170,7 @@ trait ProcessCreation {
*/
def cat(file: Source, files: Source*): ProcessBuilder = cat(file +: files)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a non-empty sequence
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a non-empty sequence
* of [[scala.sys.process.ProcessBuilder.Source]], which can then be
* piped to something else.
*
@@ -198,18 +198,41 @@ trait ProcessImplicits {
/** Implicitly convert a `java.lang.ProcessBuilder` into a Scala one. */
implicit def builderToProcess(builder: JProcessBuilder): ProcessBuilder = apply(builder)
- /** Implicitly convert a `java.io.File` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `java.io.File` into a
+ * [[scala.sys.process.ProcessBuilder.FileBuilder]], which can be used as
+ * either input or output of a process. For example:
+ * {{{
+ * import scala.sys.process._
+ * "ls" #> new java.io.File("dirContents.txt") !
+ * }}}
+ */
implicit def fileToProcess(file: File): FileBuilder = apply(file)
- /** Implicitly convert a `java.net.URL` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `java.net.URL` into a
+ * [[scala.sys.process.ProcessBuilder.URLBuilder]] , which can be used as
+ * input to a process. For example:
+ * {{{
+ * import scala.sys.process._
+ * Seq("xmllint", "--html", "-") #< new java.net.URL("http://www.scala-lang.org") #> new java.io.File("fixed.html") !
+ * }}}
+ */
implicit def urlToProcess(url: URL): URLBuilder = apply(url)
- /** Implicitly convert a [[scala.xml.Elem]] into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a [[scala.xml.Elem]] into a
+ * [[scala.sys.process.ProcessBuilder]]. This is done by obtaining the text
+ * elements of the element, trimming spaces, and then converting the result
+ * from string to a process. Importantly, tags are completely ignored, so
+ * they cannot be used to separate parameters.
+ */
implicit def xmlToProcess(command: scala.xml.Elem): ProcessBuilder = apply(command)
- /** Implicitly convert a `String` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `String` into a [[scala.sys.process.ProcessBuilder]]. */
implicit def stringToProcess(command: String): ProcessBuilder = apply(command)
- /** Implicitly convert a sequence of `String` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a sequence of `String` into a
+ * [[scala.sys.process.ProcessBuilder]]. The first argument will be taken to
+ * be the command to be executed, and the remaining will be its arguments.
+ * When using this, arguments may contain spaces.
+ */
implicit def stringSeqToProcess(command: Seq[String]): ProcessBuilder = apply(command)
}
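To illustrate the factories and implicit conversions documented above, a short sketch (the `/tmp` directory and the `ls` command are assumptions):
{{{
import java.io.File
import scala.sys.process._

object CreationExample extends App {
  // stringToProcess: arguments are split on spaces, no escaping possible.
  val simple: ProcessBuilder = "ls -l"
  // stringSeqToProcess: the safe form when arguments may contain spaces.
  val safe: ProcessBuilder = Seq("ls", "-l", "My Documents")
  // The Process factory can also set the working directory and environment.
  val inTmp = Process("ls", new File("/tmp"), "FOO" -> "bar")
  println(inTmp.!!)
}
}}}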
diff --git a/src/library/scala/sys/process/ProcessBuilder.scala b/src/library/scala/sys/process/ProcessBuilder.scala
index 214d908012..20270d423f 100644
--- a/src/library/scala/sys/process/ProcessBuilder.scala
+++ b/src/library/scala/sys/process/ProcessBuilder.scala
@@ -12,133 +12,265 @@ package process
import processInternal._
import ProcessBuilder._
-/** Represents a runnable process.
+/** Represents a sequence of one or more external processes that can be
+ * executed. A `ProcessBuilder` can be a single external process, or a
+ * combination of other `ProcessBuilder`. One can control where the
+ * output of an external process will go and where its input will come
+ * from, or leave that decision to whoever starts it.
*
- * This is the main component of this package. A `ProcessBuilder` may be composed with
- * others, either concatenating their outputs or piping them from one to the next, and
- * possibly with conditional execution depending on the last process exit value.
+ * One creates a `ProcessBuilder` through factories provided in
+ * [[scala.sys.process.Process]]'s companion object, or implicit conversions
+ * based on these factories made available in the package object
+ * [[scala.sys.process]]. Here are some examples:
+ * {{{
+ * import scala.sys.process._
*
- * Once executed, one can retrieve the output or redirect it to a
- * [[scala.sys.process.ProcessLogger]], or one can get the exit value, discarding or
- * redirecting the output.
+ * // Executes "ls" and sends output to stdout
+ * "ls".!
*
- * One creates a `ProcessBuilder` through factories provided in [[scala.sys.process.Process]]'s
- * companion object, or implicit conversions based on these factories made available in the
- * package object [[scala.sys.process]].
+ * // Execute "ls" and assign a `Stream[String]` of its output to "contents".
+ * // Because [[scala.Predef]] already defines a `lines` method for `String`,
+ * // we use [[scala.sys.process.Process]]'s object companion to create it.
+ * val contents = Process("ls").lines
*
- * Let's examine in detail one example of usage:
+ * // Here we use a `Seq` to make the parameter whitespace-safe
+ * def contentsOf(dir: String): String = Seq("ls", dir).!!
+ * }}}
+ *
+ * The methods of `ProcessBuilder` are divided in three categories: the ones that
+ * combine two `ProcessBuilder` to create a third, the ones that redirect input
+ * or output of a `ProcessBuilder`, and the ones that execute
+ * the external processes associated with it.
+ *
+ * ==Combining `ProcessBuilder`==
+ *
+ * Two existing `ProcessBuilder` can be combined in the following ways:
+ *
+ * * They can be executed in parallel, with the output of the first being fed
+ * as input to the second, like Unix pipes. This is achieved with the `#|`
+ * method.
+ * * They can be executed in sequence, with the second starting as soon as
+ * the first ends. This is done by the `###` method.
+ * * The execution of the second one can be conditioned by the return code
+ * (exit status) of the first, either only when it's zero, or only when it's
+ * not zero. The methods `#&&` and `#||` accomplish these tasks.
+ *
+ * ==Redirecting Input/Output==
+ *
+ * Though control of input and output can be done when executing the process,
+ * there are a few methods that create a new `ProcessBuilder` with a
+ * pre-configured input or output. They are `#<`, `#>` and `#>>`, and may take
+ * as input either another `ProcessBuilder` (like the pipe described above), or
+ * something else such as a `java.io.File` or a `java.lang.InputStream`.
+ * For example:
+ * {{{
+ * new URL("http://databinder.net/dispatch/About") #> "grep JSON" #>> new File("About_JSON") !
+ * }}}
+ *
+ * ==Starting Processes==
+ *
+ * To execute all external commands associated with a `ProcessBuilder`, one
+ * may use one of four groups of methods. Each of these methods has various
+ * overloads and variations to enable further control over the I/O. These
+ * methods are:
+ *
+ * * `run`: the most general method, it returns a
+ * [[scala.sys.process.Process]] immediately, and the external command
+ * executes concurrently.
+ * * `!`: blocks until all external commands exit, and returns the exit code
+ * of the last one in the chain of execution.
+ * * `!!`: blocks until all external commands exit, and returns a `String`
+ * with the output generated.
+ * * `lines`: returns immediately like `run`, and the output being generated
+ * is provided through a `Stream[String]`. Getting the next element of that
+ * `Stream` may block until it becomes available. This method will throw an
+ * exception if the return code is non-zero -- if this is not
+ * desired, use the `lines_!` method.
+ *
+ * ==Handling Input and Output==
+ *
+ * If not specified, the input of the external commands executed with `run` or
+ * `!` will not be tied to anything, and the output will be redirected to the
+ * stdout and stderr of the Scala process. For the methods `!!` and `lines`, no
+ * input will be provided, and the output will be directed according to the
+ * semantics of these methods.
*
+ * Some methods will cause stdin to be used as input. Output can be controlled
+ * with a [[scala.sys.process.ProcessLogger]] -- `!!` and `lines` will only
+ * redirect error output when passed a `ProcessLogger`. If one desires full
+ * control over input and output, then a [[scala.sys.process.ProcessIO]] can be
+ * used with `run`.
+ *
+ * For example, we could silence the error output from `lines_!` like this:
+ * {{{
+ * val etcFiles = "find /etc" lines_! ProcessLogger(line => ())
+ * }}}
+ *
+ * ==Extended Example==
+ *
+ * Let's examine in detail one example of usage:
* {{{
* import scala.sys.process._
* "find src -name *.scala -exec grep null {} ;" #| "xargs test -z" #&& "echo null-free" #|| "echo null detected" !
* }}}
- *
* Note that every `String` is implicitly converted into a `ProcessBuilder`
* through the implicits imported from [[scala.sys.process]]. These `ProcessBuilder` are then
* combined in three different ways.
*
* 1. `#|` pipes the output of the first command into the input of the second command. It
- * mirrors a shell pipe (`|`).
- * 2. `#&&` conditionally executes the second command if the previous one finished with
- * exit value 0. It mirrors shell's `&&`.
- * 3. `#||` conditionally executes the third command if the exit value of the previous
- * command is different than zero. It mirrors shell's `&&`.
- *
- * Not shown here, the equivalent of a shell's `;` would be `###`. The reason for this name is
- * that `;` is a reserved token in Scala.
- *
- * Finally, `!` at the end executes the commands, and returns the exit value. If the output
- * was desired instead, one could run that with `!!` instead.
- *
- * If one wishes to execute the commands in background, one can either call `run`, which
- * returns a [[scala.sys.process.Process]] from which the exit value can be obtained, or
- * `lines`, which returns a [scala.collection.immutable.Stream] of output lines. This throws
- * an exception at the end of the `Stream` is the exit value is non-zero. To avoid exceptions,
- * one can use `lines_!` instead.
- *
- * One can also start the commands in specific ways to further control their I/O. Using `!<` to
- * start the commands will use the stdin from the current process for them. All methods can
- * be used passing a [[scala.sys.process.ProcessLogger]] to capture the output, both stderr and
- * stdout. And, when using `run`, one can pass a [[scala.sys.process.ProcessIO]] to control
- * stdin, stdout and stderr.
- *
- * The stdin of a command can be redirected from a `java.io.InputStream`, a `java.io.File`, a
- * `java.net.URL` or another `ProcessBuilder` through the method `#<`. Likewise, the stdout
- * can be sent to a `java.io.OutputStream`, a `java.io.File` or another `ProcessBuilder` with
- * the method `#>`. The method `#>>` can be used to append the output to a `java.io.File`.
- * For example:
+ * mirrors a shell pipe (`|`).
+ * 1. `#&&` conditionally executes the second command if the previous one finished with
+ * exit value 0. It mirrors shell's `&&`.
+ * 1. `#||` conditionally executes the third command if the exit value of the previous
+ * command is different than zero. It mirrors shell's `&&`.
*
- * {{{
- * new URL("http://databinder.net/dispatch/About") #> "grep JSON" #>> new File("About_JSON") !
- * }}}
+ * Finally, `!` at the end executes the commands, and returns the exit value.
+ * Whatever is printed will be sent to the Scala process standard output. If
+ * we wanted to capture it, we could run that with `!!` instead.
+ *
+ * Note: though it is not shown above, the equivalent of a shell's `;` would be
+ * `###`. The reason for this name is that `;` is a reserved token in Scala.
*/
trait ProcessBuilder extends Source with Sink {
- /** Starts the process represented by this builder, blocks until it exits, and returns the output as a String. Standard error is
- * sent to the console. If the exit code is non-zero, an exception is thrown.*/
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the console. If
+ * the exit code is non-zero, an exception is thrown.
+ */
def !! : String
- /** Starts the process represented by this builder, blocks until it exits, and returns the output as a String. Standard error is
- * sent to the provided ProcessLogger. If the exit code is non-zero, an exception is thrown.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the provided
+ * ProcessLogger. If the exit code is non-zero, an exception is thrown.
+ */
def !!(log: ProcessLogger): String
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the console. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination and then throw an exception. */
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the console. If
+ * the exit code is non-zero, an exception is thrown. The newly started
+ * process reads from standard input of the current process.
+ */
+ def !!< : String
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the provided
+ * ProcessLogger. If the exit code is non-zero, an exception is thrown. The
+ * newly started process reads from standard input of the current process.
+ */
+ def !!<(log: ProcessLogger): String
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the console. If the process exits
+ * with a non-zero value, the Stream will provide all lines up to termination
+ * and then throw an exception.
+ */
def lines: Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the provided ProcessLogger. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the provided ProcessLogger. If the
+ * process exits with a non-zero value, the Stream will provide all lines up
+ * to termination but will not throw an exception.
+ */
def lines(log: ProcessLogger): Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the console. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the console. If the process exits
+ * with a non-zero value, the Stream will provide all lines up to termination
+ * but will not throw an exception.
+ */
def lines_! : Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the provided ProcessLogger. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the provided ProcessLogger. If the
+ * process exits with a non-zero value, the Stream will provide all lines up
+ * to termination but will not throw an exception.
+ */
def lines_!(log: ProcessLogger): Stream[String]
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the console.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the console.
+ */
def ! : Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger.
+ */
def !(log: ProcessLogger): Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the console. The newly started process reads from standard input of the current process.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the console.
+ * The newly started process reads from standard input of the current process.
+ */
def !< : Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger. The newly started process reads from standard input of the current process.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger. The newly started process reads from standard input of the
+ * current process.
+ */
def !<(log: ProcessLogger): Int
- /** Starts the process represented by this builder. Standard output and error are sent to the console.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the console.*/
def run(): Process
- /** Starts the process represented by this builder. Standard output and error are sent to the given ProcessLogger.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the given ProcessLogger.
+ */
def run(log: ProcessLogger): Process
- /** Starts the process represented by this builder. I/O is handled by the given ProcessIO instance.*/
+
+ /** Starts the process represented by this builder. I/O is handled by the
+ * given ProcessIO instance.
+ */
def run(io: ProcessIO): Process
- /** Starts the process represented by this builder. Standard output and error are sent to the console.
- * The newly started process reads from standard input of the current process if `connectInput` is true.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the console. The newly started process reads from standard
+ * input of the current process if `connectInput` is true.
+ */
def run(connectInput: Boolean): Process
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger.
- * The newly started process reads from standard input of the current process if `connectInput` is true.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger. The newly started process reads from standard input of the
+ * current process if `connectInput` is true.
+ */
def run(log: ProcessLogger, connectInput: Boolean): Process
- /** Constructs a command that runs this command first and then `other` if this command succeeds.*/
+ /** Constructs a command that runs this command first and then `other` if this
+ * command succeeds.
+ */
def #&& (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that runs this command first and then `other` if this command does not succeed.*/
+
+ /** Constructs a command that runs this command first and then `other` if this
+ * command does not succeed.
+ */
def #|| (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that will run this command and pipes the output to `other`. `other` must be a simple command.*/
+
+ /** Constructs a command that will run this command and pipes the output to
+ * `other`. `other` must be a simple command.
+ */
def #| (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that will run this command and then `other`. The exit code will be the exit code of `other`.*/
+
+ /** Constructs a command that will run this command and then `other`. The
+ * exit code will be the exit code of `other`.
+ */
def ### (other: ProcessBuilder): ProcessBuilder
- /** True if this command can be the target of a pipe.
- */
+
+ /** True if this command can be the target of a pipe. */
def canPipeTo: Boolean
- /** True if this command has an exit code which should be propagated to the user.
- * Given a pipe between A and B, if B.hasExitValue is true then the exit code will
- * be the one from B; if it is false, the one from A. This exists to prevent output
- * redirections (implemented as pipes) from masking useful process error codes.
- */
+ /** True if this command has an exit code which should be propagated to the
+ * user. Given a pipe between A and B, if B.hasExitValue is true then the
+ * exit code will be the one from B; if it is false, the one from A. This
+ * exists to prevent output redirections (implemented as pipes) from masking
+ * useful process error codes.
+ */
def hasExitValue: Boolean
}
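A short sketch exercising the combinators and execution methods described above (the commands and the `src` directory are assumptions):
{{{
import scala.sys.process._

object BuilderExample extends App {
  // #| pipes like a shell; !! returns the final output as a String.
  val count = ("find src -name *.scala" #| "wc -l").!!.trim

  // #&& / #|| condition execution on the previous exit status.
  ("test -d src" #&& "echo src exists" #|| "echo no src").!

  // ! with a ProcessLogger captures output while returning the exit code.
  val out = new StringBuilder
  val err = new StringBuilder
  val code = "ls" ! ProcessLogger(out append _, err append _)
  println("exit " + code + "; captured " + out.length + " chars; " + count + " scala files")
}
}}}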
diff --git a/src/library/scala/sys/process/ProcessIO.scala b/src/library/scala/sys/process/ProcessIO.scala
index 261e837a4d..fa0674670f 100644
--- a/src/library/scala/sys/process/ProcessIO.scala
+++ b/src/library/scala/sys/process/ProcessIO.scala
@@ -11,14 +11,40 @@ package process
import processInternal._
-/** This class is used to control the I/O of every [[scala.sys.process.ProcessBuilder]].
- * Most of the time, there is no need to interact with `ProcessIO` directly. However, if
- * fine control over the I/O of a `ProcessBuilder` is desired, one can use the factories
- * on [[scala.sys.process.BasicIO]] stand-alone object to create one.
- *
- * Each method will be called in a separate thread.
- * If daemonizeThreads is true, they will all be marked daemon threads.
- */
+/** This class is used to control the I/O of every
+ * [[scala.sys.process.Process]]. The functions used to create it will be
+ * called with the process streams once it has been started. It might not be
+ * necessary to use `ProcessIO` directly --
+ * [[scala.sys.process.ProcessBuilder]] can return the process output to the
+ * caller, or use a [[scala.sys.process.ProcessLogger]] which avoids direct
+ * interaction with a stream. One can even use the factories at `BasicIO` to
+ * create a `ProcessIO`, or use its helper methods when creating one's own
+ * `ProcessIO`.
+ *
+ * When creating a `ProcessIO`, it is important to ''close all streams'' when
+ * finished, since the JVM might use system resources to capture the process
+ * input and output, and will not release them unless the streams are
+ * explicitly closed.
+ *
+ * `ProcessBuilder` will call `writeInput`, `processOutput` and `processError`
+ * in separate threads, and if daemonizeThreads is true, they will all be
+ * marked as daemon threads.
+ *
+ * @param writeInput Function that will be called with the `OutputStream` to
+ * which all input to the process must be written. This will
+ * be called in a newly spawned thread.
+ * @param processOutput Function that will be called with the `InputStream`
+ * from which all normal output of the process must be
+ * read from. This will be called in a newly spawned
+ * thread.
+ * @param processError Function that will be called with the `InputStream` from
+ * which all error output of the process must be read from.
+ * This will be called in a newly spawned thread.
+ * @param daemonizeThreads Indicates whether the newly spawned threads that
+ * will run `processOutput`, `processError` and
+ * `writeInput` should be marked as daemon threads.
+ * @note Failure to close the passed streams may result in resource leakage.
+ */
final class ProcessIO(
val writeInput: OutputStream => Unit,
val processOutput: InputStream => Unit,
@@ -27,8 +53,15 @@ final class ProcessIO(
) {
def this(in: OutputStream => Unit, out: InputStream => Unit, err: InputStream => Unit) = this(in, out, err, false)
+ /** Creates a new `ProcessIO` with a different handler for the process input. */
def withInput(write: OutputStream => Unit): ProcessIO = new ProcessIO(write, processOutput, processError, daemonizeThreads)
+
+ /** Creates a new `ProcessIO` with a different handler for the normal output. */
def withOutput(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, process, processError, daemonizeThreads)
+
+ /** Creates a new `ProcessIO` with a different handler for the error output. */
def withError(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, processOutput, process, daemonizeThreads)
+
+ /** Creates a new `ProcessIO`, with `daemonizeThreads` true. */
def daemonized(): ProcessIO = new ProcessIO(writeInput, processOutput, processError, true)
}
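As a concrete (hypothetical) example of the constructor parameters described above, the sketch below feeds a fixed string to `cat` and counts the bytes it echoes back, closing every stream as the scaladoc requires:
{{{
import java.io.{ InputStream, OutputStream }
import scala.sys.process._

object ProcessIOExample extends App {
  var bytes = 0
  def writeInput(out: OutputStream) {
    out.write("hello world\n".getBytes)
    out.close()                      // signals end-of-input to the process
  }
  def countOutput(in: InputStream) {
    while (in.read() != -1) bytes += 1
    in.close()                       // always close the streams
  }
  val io = new ProcessIO(writeInput, countOutput, BasicIO.toStdErr)
  "cat".run(io).exitValue()
  println(bytes + " bytes echoed")
}
}}}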
diff --git a/src/library/scala/sys/process/ProcessLogger.scala b/src/library/scala/sys/process/ProcessLogger.scala
index 67146dd70e..a8241db53c 100644
--- a/src/library/scala/sys/process/ProcessLogger.scala
+++ b/src/library/scala/sys/process/ProcessLogger.scala
@@ -11,12 +11,26 @@ package process
import java.io._
-/** Encapsulates the output and error streams of a running process.
- * Many of the methods of `ProcessBuilder` accept a `ProcessLogger` as
- * an argument.
- *
- * @see [[scala.sys.process.ProcessBuilder]]
- */
+/** Encapsulates the output and error streams of a running process. This is used
+ * by [[scala.sys.process.ProcessBuilder]] when starting a process, as an
+ * alternative to [[scala.sys.process.ProcessIO]], which can be more difficult
+ * to use. Note that a `ProcessLogger` will be used to create a `ProcessIO`
+ * anyway. The object `BasicIO` has some functions to do that.
+ *
+ * Here is an example that counts the number of lines in the normal and error
+ * output of a process:
+ * {{{
+ * import scala.sys.process._
+ *
+ * var normalLines = 0
+ * var errorLines = 0
+ * val countLogger = ProcessLogger(line => normalLines += 1,
+ * line => errorLines += 1)
+ * "find /etc" ! countLogger
+ * }}}
+ *
+ * @see [[scala.sys.process.ProcessBuilder]]
+ */
trait ProcessLogger {
/** Will be called with each line read from the process output stream.
*/
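Besides counting, a `ProcessLogger` can keep the two streams apart, for example accumulating them in separate buffers while still returning the exit code (the command is an assumption):
{{{
import scala.collection.mutable.ListBuffer
import scala.sys.process._

object LoggerExample extends App {
  val out = ListBuffer[String]()
  val err = ListBuffer[String]()
  val exit = "ls -l /no/such/dir" ! ProcessLogger(out += _, err += _)
  println("exit=" + exit + " stdout=" + out.size + " stderr=" + err.size)
}
}}}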
diff --git a/src/library/scala/sys/process/package.scala b/src/library/scala/sys/process/package.scala
index 3eb0e5bb89..c1bf470831 100644
--- a/src/library/scala/sys/process/package.scala
+++ b/src/library/scala/sys/process/package.scala
@@ -11,40 +11,175 @@
// for process debugging output.
//
package scala.sys {
- /**
- * This package is used to create process pipelines, similar to Unix command pipelines.
+ /** This package handles the execution of external processes. The contents of
+ * this package can be divided in three groups, according to their
+ * responsibilities:
*
- * The key concept is that one builds a [[scala.sys.process.Process]] that will run and return an exit
- * value. This `Process` is usually composed of one or more [[scala.sys.process.ProcessBuilder]], fed by a
- * [[scala.sys.process.ProcessBuilder.Source]] and feeding a [[scala.sys.process.ProcessBuilder.Sink]]. A
- * `ProcessBuilder` itself is both a `Source` and a `Sink`.
+ * - Indicating what to run and how to run it.
+ * - Handling a process input and output.
+ * - Running the process.
*
- * As `ProcessBuilder`, `Sink` and `Source` are abstract, one usually creates them with `apply` methods on
- * the companion object of [[scala.sys.process.Process]], or through implicit conversions available in this
- * package object from `String` and other types. The pipe is composed through unix-like pipeline and I/O
- * redirection operators available on [[scala.sys.process.ProcessBuilder]].
+ * For simple uses, the only group that matters is the first one. Running an
+ * external command can be as simple as `"ls".!`, or as complex as building a
+ * pipeline of commands such as this:
*
- * The example below shows how to build and combine such commands. It searches for `null` uses in the `src`
- * directory, printing a message indicating whether they were found or not. The first command pipes its
- * output to the second command, whose exit value is then used to choose between the third or fourth
- * commands. This same example is explained in greater detail on [[scala.sys.process.ProcessBuilder]].
+ * {{{
+ * import scala.sys.process._
+ * "ls" #| "grep .scala" #&& "scalac *.scala" #|| "echo nothing found" lines
+ * }}}
+ *
+ * We describe below the general concepts and architecture of the package,
+ * and then take a closer look at each of the categories mentioned above.
+ *
+ * ==Concepts and Architecture==
+ *
+ * The underlying basis for the whole package is Java's `Process` and
+ * `ProcessBuilder` classes. While there's no need to use these Java classes,
+ * they impose boundaries on what is possible. One cannot, for instance,
+ * retrieve a ''process id'' for whatever is executing.
+ *
+ * When executing an external process, one can provide a command's name,
+ * arguments to it, the directory in which it will be executed and what
+ * environment variables will be set. For each executing process, one can
+ * feed its standard input through a `java.io.OutputStream`, and read from
+ * its standard output and standard error through a pair of
+ * `java.io.InputStream`. One can wait until a process finishes execution and
+ * then retrieve its return value, or one can kill an executing process.
+ * Everything else must be built on those features.
+ *
+ * This package provides a DSL for running and chaining such processes,
+ * mimicking the Unix shell's ability to pipe output from one process to the input
+ * of another, or control the execution of further processes based on the
+ * return status of the previous one.
+ *
+ * In addition to this DSL, this package also provides a few ways of
+ * controlling input and output of these processes, going from simple and
+ * easy to use to complex and flexible.
*
+ * When processes are composed, a new `ProcessBuilder` is created which, when
+ * run, will execute the `ProcessBuilder` instances it is composed of
+ * according to the manner of the composition. If piping one process to
+ * another, they'll be executed simultaneously, and each will be passed a
+ * `ProcessIO` that will copy the output of one to the input of the other.
+ *
+ * ==What to Run and How==
+ *
+ * The central component of the process execution DSL is the
+ * [[scala.sys.process.ProcessBuilder]] trait. It is `ProcessBuilder` that
+ * implements the process execution DSL, that creates the
+ * [[scala.sys.process.Process]] that will handle the execution, and return
+ * the results of such execution to the caller. We can see that DSL in the
+ * introductory example: `#|`, `#&&` and `#!!` are methods on
+ * `ProcessBuilder` used to create a new `ProcessBuilder` through
+ * composition.
+ *
+ * One creates a `ProcessBuilder` either through factories on the
+ * [[scala.sys.process.Process]]'s companion object, or through implicit
+ * conversions available in this package object itself. Implicitly, each
+ * process is created either out of a `String`, with arguments separated by
+ * spaces -- no escaping of spaces is possible -- or out of a
+ * [[scala.collection.Seq]], where the first element represents the command
+ * name, and the remaining elements are arguments to it. In this latter case,
+ * arguments may contain spaces. One can also implicitly convert
+ * [[scala.xml.Elem]] and `java.lang.ProcessBuilder` into a `ProcessBuilder`.
+ * In the introductory example, the strings were converted into
+ * `ProcessBuilder` implicitly.
+ *
+ * To further control how the process will be run, such as specifying
+ * the directory in which it will be run, see the factories on
+ * [[scala.sys.process.Process]]'s object companion.
+ *
+ * Once the desired `ProcessBuilder` is available, it can be executed in
+ * different ways, depending on how one desires to control its I/O, and what
+ * kind of result one wishes for:
+ *
+ * - Return status of the process (`!` methods)
+ * - Output of the process as a `String` (`!!` methods)
+ * - Continuous output of the process as a `Stream[String]` (`lines` methods)
+ * - The `Process` representing it (`run` methods)
+ *
+ * Some simple examples of these methods:
* {{{
* import scala.sys.process._
- * (
- * "find src -name *.scala -exec grep null {} ;"
- * #| "xargs test -z"
- * #&& "echo null-free" #|| "echo null detected"
- * ) !
+ *
+ * // This uses ! to get the exit code
+ * def fileExists(name: String) = Seq("test", "-f", name).! == 0
+ *
+ * // This uses !! to get the whole result as a string
+ * val dirContents = "ls".!!
+ *
+ * // This "fire-and-forgets" the method, which can be lazily read through
+ * // a Stream[String]
+ * def sourceFilesAt(baseDir: String): Stream[String] = {
+ * val cmd = Seq("find", baseDir, "-name", "*.scala", "-type", "f")
+ * cmd.lines
+ * }
* }}}
*
- * Other implicits available here are for [[scala.sys.process.ProcessBuilder.FileBuilder]], which extends
- * both `Sink` and `Source`, and for [[scala.sys.process.ProcessBuilder.URLBuilder]], which extends
- * `Source` alone.
+ * We'll see more details about controlling I/O of the process in the next
+ * section.
+ *
+ * ==Handling Input and Output==
+ *
+ * In the underlying Java model, once a `Process` has been started, one can
+ * get `java.io.InputStream` and `java.io.OutputStream` representing its
+ * output and input respectively. That is, what one writes to an
+ * `OutputStream` is turned into input to the process, and the output of a
+ * process can be read from an `InputStream` -- of which there are two, one
+ * representing normal output, and the other representing error output.
+ *
+ * This model creates a difficulty, which is that the code responsible for
+ * actually running the external processes is the one that has to take
+ * decisions about how to handle its I/O.
+ *
+ * This package presents an alternative model: the I/O of a running process
+ * is controlled by a [[scala.sys.process.ProcessIO]] object, which can be
+ * passed ''to'' the code that runs the external process. A `ProcessIO` will
+ * have direct access to the java streams associated with the process I/O. It
+ * must, however, close these streams afterwards.
+ *
+ * Simpler abstractions are available, however. The components of this
+ * package that handle I/O are:
+ *
+ * - [[scala.sys.process.ProcessIO]]: provides the low level abstraction.
+ * - [[scala.sys.process.ProcessLogger]]: provides a higher level abstraction
+ * for output, and can be created through its object companion.
+ * - [[scala.sys.process.BasicIO]]: a library of helper methods for the
+ * creation of `ProcessIO`.
+ * - This package object itself, with a few implicit conversions.
*
- * One can even create a `Process` solely out of these, without running any command. For example, this will
- * download from a URL to a file:
+ * Some examples of I/O handling:
+ * {{{
+ * import scala.sys.process._
+ *
+ * // An overly complex way of computing size of a compressed file
+ * def gzFileSize(name: String) = {
+ * val cat = Seq("zcat", "name")
+ * var count = 0
+ * def byteCounter(input: java.io.InputStream) = {
+ * while(input.read() != -1) count += 1
+ * input.close()
+ * }
+ * val p = cat run new ProcessIO(_.close(), byteCounter, _.close())
+ * p.exitValue()
+ * count
+ * }
+ *
+ * // This "fire-and-forgets" the method, which can be lazily read through
+ * // a Stream[String], and accumulates all errors on a StringBuffer
+ * def sourceFilesAt(baseDir: String): (Stream[String], StringBuffer) = {
+ * val buffer = new StringBuffer()
+ * val cmd = Seq("find", baseDir, "-name", "*.scala", "-type", "f")
+ * val lines = cmd lines_! ProcessLogger(buffer append _)
+ * (lines, buffer)
+ * }
+ * }}}
*
+ * Instances of the java classes `java.io.File` and `java.net.URL` can both
+ * be used directly as input to other processes, and `java.io.File` can be
+ * used as output as well. One can even pipe one to the other directly
+ * without any intervening process, though that's not a design goal or
+ * recommended usage. For example, the following code will copy a web page to
+ * a file:
* {{{
* import java.io.File
* import java.net.URL
@@ -52,26 +187,33 @@ package scala.sys {
* new URL("http://www.scala-lang.org/") #> new File("scala-lang.html") !
* }}}
*
- * One may use a `Process` directly through `ProcessBuilder`'s `run` method, which starts the process in
- * the background, and returns a `Process`. If background execution is not desired, one can get a
- * `ProcessBuilder` to execute through a method such as `!`, `lines`, `run` or variations thereof. That
- * will create the `Process` to execute the commands, and return either the exit value or the output, maybe
- * throwing an exception.
- *
- * Finally, when executing a `ProcessBuilder`, one may pass a [[scala.sys.process.ProcessLogger]] to
- * capture stdout and stderr of the executing processes. A `ProcessLogger` may be created through its
- * companion object from functions of type `(String) => Unit`, or one might redirect it to a file, using
- * [[scala.sys.process.FileProcessLogger]], which can also be created through `ProcessLogger`'s object
- * companion.
+ * More information about the other ways of controlling I/O can be looked at
+ * in the scaladoc for the associated objects, traits and classes.
+ *
+ * ==Running the Process==
+ *
+ * Paradoxically, this is the simplest component of all, and the one least
+ * likely to be interacted with. It consists solely of
+ * [[scala.sys.process.Process]], and it provides only two methods:
+ *
+ * - `exitValue()`: blocks until the process exits, and then returns the exit
+ * value. This is what happens when one uses the `!` method of
+ * `ProcessBuilder`.
+ * - `destroy()`: this will kill the external process and close the streams
+ * associated with it.
*/
package object process extends ProcessImplicits {
+ /** The arguments passed to `java` when creating this process */
def javaVmArguments: List[String] = {
import collection.JavaConversions._
java.lang.management.ManagementFactory.getRuntimeMXBean().getInputArguments().toList
}
+ /** The input stream of this process */
def stdin = java.lang.System.in
+ /** The output stream of this process */
def stdout = java.lang.System.out
+ /** The error stream of this process */
def stderr = java.lang.System.err
}
// private val shell: String => Array[String] =
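For reference, a small sketch using the package-level members just defined (piping this process's own stdin into `wc`, which is assumed to be present):
{{{
import scala.sys.process._

object PackageExample extends App {
  javaVmArguments foreach println
  // stdin is java.lang.System.in; #< feeds it to the external command.
  val exit = ("wc -l" #< stdin).!
  println("wc exited with " + exit)
}
}}}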
diff --git a/src/library/scala/util/Duration.scala b/src/library/scala/util/Duration.scala
new file mode 100644
index 0000000000..4c118f8b3b
--- /dev/null
+++ b/src/library/scala/util/Duration.scala
@@ -0,0 +1,485 @@
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.util
+
+import java.util.concurrent.TimeUnit
+import TimeUnit._
+import java.lang.{ Long ⇒ JLong, Double ⇒ JDouble }
+//import akka.actor.ActorSystem (commented methods)
+
+class TimerException(message: String) extends RuntimeException(message)
+
+/**
+ * Simple timer class.
+ * Usage:
+ * <pre>
+ * import akka.util.duration._
+ * import akka.util.Timer
+ *
+ * val timer = Timer(30.seconds)
+ * while (timer.isTicking) { ... }
+ * </pre>
+ */
+case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) {
+ val startTimeInMillis = System.currentTimeMillis
+ val timeoutInMillis = duration.toMillis
+
+ /**
+ * Returns true while the timer is ticking. After that it either throws an exception or
+ * returns false, depending on whether the 'throwExceptionOnTimeout' argument is true.
+ */
+ def isTicking: Boolean = {
+ if (!(timeoutInMillis > (System.currentTimeMillis - startTimeInMillis))) {
+ if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration)
+ else false
+ } else true
+ }
+}
+
+object Duration {
+ def apply(length: Long, unit: TimeUnit): Duration = new FiniteDuration(length, unit)
+ def apply(length: Double, unit: TimeUnit): Duration = fromNanos(unit.toNanos(1) * length)
+ def apply(length: Long, unit: String): Duration = new FiniteDuration(length, timeUnit(unit))
+
+ def fromNanos(nanos: Long): Duration = {
+ if (nanos % 86400000000000L == 0) {
+ Duration(nanos / 86400000000000L, DAYS)
+ } else if (nanos % 3600000000000L == 0) {
+ Duration(nanos / 3600000000000L, HOURS)
+ } else if (nanos % 60000000000L == 0) {
+ Duration(nanos / 60000000000L, MINUTES)
+ } else if (nanos % 1000000000L == 0) {
+ Duration(nanos / 1000000000L, SECONDS)
+ } else if (nanos % 1000000L == 0) {
+ Duration(nanos / 1000000L, MILLISECONDS)
+ } else if (nanos % 1000L == 0) {
+ Duration(nanos / 1000L, MICROSECONDS)
+ } else {
+ Duration(nanos, NANOSECONDS)
+ }
+ }
+
+ def fromNanos(nanos: Double): Duration = fromNanos((nanos + 0.5).asInstanceOf[Long])
+
+ /**
+ * Construct a Duration by parsing a String. In case of a format error, a
+ * RuntimeException is thrown. See `unapply(String)` for more information.
+ */
+ def apply(s: String): Duration = unapply(s) getOrElse sys.error("format error")
+
+ /**
+ * Deconstruct a Duration into length and unit if it is finite.
+ */
+ def unapply(d: Duration): Option[(Long, TimeUnit)] = {
+ if (d.finite_?) {
+ Some((d.length, d.unit))
+ } else {
+ None
+ }
+ }
+
+ private val RE = ("""^\s*(\d+(?:\.\d+)?)\s*""" + // length part
+ "(?:" + // units are distinguished in separate match groups
+ "(d|day|days)|" +
+ "(h|hour|hours)|" +
+ "(min|minute|minutes)|" +
+ "(s|sec|second|seconds)|" +
+ "(ms|milli|millis|millisecond|milliseconds)|" +
+ "(µs|micro|micros|microsecond|microseconds)|" +
+ "(ns|nano|nanos|nanosecond|nanoseconds)" +
+ """)\s*$""").r // close the non-capturing group
+ private val REinf = """^\s*Inf\s*$""".r
+ private val REminf = """^\s*(?:-\s*|Minus)Inf\s*""".r
+
+ /**
+ * Parse String, return None if no match. Format is `"<length><unit>"`, where
+ * whitespace is allowed before, between and after the parts. Infinities are
+ * designated by `"Inf"` and `"-Inf"` or `"MinusInf"`.
+ */
+ def unapply(s: String): Option[Duration] = s match {
+ case RE(length, d, h, m, s, ms, mus, ns) ⇒
+      if (d ne null) Some(Duration(JDouble.parseDouble(length), DAYS))
+      else if (h ne null) Some(Duration(JDouble.parseDouble(length), HOURS))
+      else if (m ne null) Some(Duration(JDouble.parseDouble(length), MINUTES))
+      else if (s ne null) Some(Duration(JDouble.parseDouble(length), SECONDS))
+      else if (ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS))
+      else if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS))
+      else if (ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS))
+      else sys.error("made some error in regex (should not be possible)")
+ case REinf() ⇒ Some(Inf)
+ case REminf() ⇒ Some(MinusInf)
+ case _ ⇒ None
+ }
+
+ /**
+ * Parse TimeUnit from string representation.
+ */
+ def timeUnit(unit: String) = unit.toLowerCase match {
+ case "d" | "day" | "days" ⇒ DAYS
+ case "h" | "hour" | "hours" ⇒ HOURS
+ case "min" | "minute" | "minutes" ⇒ MINUTES
+ case "s" | "sec" | "second" | "seconds" ⇒ SECONDS
+ case "ms" | "milli" | "millis" | "millisecond" | "milliseconds" ⇒ MILLISECONDS
+ case "µs" | "micro" | "micros" | "microsecond" | "microseconds" ⇒ MICROSECONDS
+ case "ns" | "nano" | "nanos" | "nanosecond" | "nanoseconds" ⇒ NANOSECONDS
+ }
+
+ val Zero: Duration = new FiniteDuration(0, NANOSECONDS)
+ val Undefined: Duration = new Duration with Infinite {
+ override def toString = "Duration.Undefined"
+ override def equals(other: Any) = other.asInstanceOf[AnyRef] eq this
+ override def +(other: Duration): Duration = throw new IllegalArgumentException("cannot add Undefined duration")
+ override def -(other: Duration): Duration = throw new IllegalArgumentException("cannot subtract Undefined duration")
+ override def *(factor: Double): Duration = throw new IllegalArgumentException("cannot multiply Undefined duration")
+ override def /(factor: Double): Duration = throw new IllegalArgumentException("cannot divide Undefined duration")
+ override def /(other: Duration): Double = throw new IllegalArgumentException("cannot divide Undefined duration")
+ def >(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def >=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def <(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def <=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def unary_- : Duration = throw new IllegalArgumentException("cannot negate Undefined duration")
+ }
+
+ trait Infinite {
+ this: Duration ⇒
+
+ override def equals(other: Any) = false
+
+ def +(other: Duration): Duration =
+ other match {
+ case _: this.type ⇒ this
+ case _: Infinite ⇒ throw new IllegalArgumentException("illegal addition of infinities")
+ case _ ⇒ this
+ }
+ def -(other: Duration): Duration =
+ other match {
+ case _: this.type ⇒ throw new IllegalArgumentException("illegal subtraction of infinities")
+ case _ ⇒ this
+ }
+ def *(factor: Double): Duration = this
+ def /(factor: Double): Duration = this
+ def /(other: Duration): Double =
+ other match {
+ case _: Infinite ⇒ throw new IllegalArgumentException("illegal division of infinities")
+ // maybe questionable but pragmatic: Inf / 0 => Inf
+ case x ⇒ Double.PositiveInfinity * (if ((this > Zero) ^ (other >= Zero)) -1 else 1)
+ }
+
+ def finite_? = false
+
+ def length: Long = throw new IllegalArgumentException("length not allowed on infinite Durations")
+ def unit: TimeUnit = throw new IllegalArgumentException("unit not allowed on infinite Durations")
+ def toNanos: Long = throw new IllegalArgumentException("toNanos not allowed on infinite Durations")
+ def toMicros: Long = throw new IllegalArgumentException("toMicros not allowed on infinite Durations")
+ def toMillis: Long = throw new IllegalArgumentException("toMillis not allowed on infinite Durations")
+ def toSeconds: Long = throw new IllegalArgumentException("toSeconds not allowed on infinite Durations")
+ def toMinutes: Long = throw new IllegalArgumentException("toMinutes not allowed on infinite Durations")
+ def toHours: Long = throw new IllegalArgumentException("toHours not allowed on infinite Durations")
+ def toDays: Long = throw new IllegalArgumentException("toDays not allowed on infinite Durations")
+ def toUnit(unit: TimeUnit): Double = throw new IllegalArgumentException("toUnit not allowed on infinite Durations")
+
+ def printHMS = toString
+ }
+
+ /**
+ * Infinite duration: greater than any other and not equal to any other,
+ * including itself.
+ */
+ val Inf: Duration = new Duration with Infinite {
+ override def toString = "Duration.Inf"
+ def >(other: Duration) = true
+ def >=(other: Duration) = true
+ def <(other: Duration) = false
+ def <=(other: Duration) = false
+ def unary_- : Duration = MinusInf
+ }
+
+ /**
+ * Infinite negative duration: lesser than any other and not equal to any other,
+ * including itself.
+ */
+ val MinusInf: Duration = new Duration with Infinite {
+ override def toString = "Duration.MinusInf"
+ def >(other: Duration) = false
+ def >=(other: Duration) = false
+ def <(other: Duration) = true
+ def <=(other: Duration) = true
+ def unary_- : Duration = Inf
+ }
+
+ // Java Factories
+ def create(length: Long, unit: TimeUnit): Duration = apply(length, unit)
+ def create(length: Double, unit: TimeUnit): Duration = apply(length, unit)
+ def create(length: Long, unit: String): Duration = apply(length, unit)
+ def parse(s: String): Duration = unapply(s).get
+}
+
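A brief sketch of the factories and extractors defined in the companion object above (values chosen arbitrarily):
{{{
import java.util.concurrent.TimeUnit._
import scala.util.Duration

object DurationFactories extends App {
  val d1 = Duration(100, MILLISECONDS)   // Long length + TimeUnit
  val d2 = Duration(1.5, SECONDS)        // fractional lengths are normalized
  val d3 = Duration("1.2 s")             // parsed; throws on a format error
  val Duration(len, unit) = d3           // extractor for finite durations
  println(d1 + " | " + d2 + " | " + len + " " + unit)
}
}}}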
+/**
+ * Utility for working with java.util.concurrent.TimeUnit durations.
+ *
+ * <p/>
+ * Examples of usage from Java:
+ * <pre>
+ * import akka.util.FiniteDuration;
+ * import java.util.concurrent.TimeUnit;
+ *
+ * Duration duration = new FiniteDuration(100, MILLISECONDS);
+ * Duration duration = new FiniteDuration(5, "seconds");
+ *
+ * duration.toNanos();
+ * </pre>
+ *
+ * <p/>
+ * Examples of usage from Scala:
+ * <pre>
+ * import akka.util.Duration
+ * import java.util.concurrent.TimeUnit
+ *
+ * val duration = Duration(100, MILLISECONDS)
+ * val duration = Duration(100, "millis")
+ *
+ * duration.toNanos
+ * duration < 1.second
+ * duration <= Duration.Inf
+ * </pre>
+ *
+ * <p/>
+ * Implicits are also provided for Int, Long and Double. Example usage:
+ * <pre>
+ * import scala.util.duration._
+ *
+ * val duration = 100 millis
+ * </pre>
+ *
+ * Extractors, parsing and arithmetic are also included:
+ * <pre>
+ * val d = Duration("1.2 µs")
+ * val Duration(length, unit) = 5 millis
+ * val d2 = d * 2.5
+ * val d3 = d2 + 1.millisecond
+ * </pre>
+ */
+abstract class Duration extends Serializable {
+ def length: Long
+ def unit: TimeUnit
+ def toNanos: Long
+ def toMicros: Long
+ def toMillis: Long
+ def toSeconds: Long
+ def toMinutes: Long
+ def toHours: Long
+ def toDays: Long
+ def toUnit(unit: TimeUnit): Double
+ def printHMS: String
+ def <(other: Duration): Boolean
+ def <=(other: Duration): Boolean
+ def >(other: Duration): Boolean
+ def >=(other: Duration): Boolean
+ def +(other: Duration): Duration
+ def -(other: Duration): Duration
+ def *(factor: Double): Duration
+ def /(factor: Double): Duration
+ def /(other: Duration): Double
+ def unary_- : Duration
+ def finite_? : Boolean
+// def dilated(implicit system: ActorSystem): Duration = this * system.settings.TestTimeFactor
+ def min(other: Duration): Duration = if (this < other) this else other
+ def max(other: Duration): Duration = if (this > other) this else other
+ def sleep(): Unit = Thread.sleep(toMillis)
+
+ // Java API
+ def lt(other: Duration) = this < other
+ def lteq(other: Duration) = this <= other
+ def gt(other: Duration) = this > other
+ def gteq(other: Duration) = this >= other
+ def plus(other: Duration) = this + other
+ def minus(other: Duration) = this - other
+ def mul(factor: Double) = this * factor
+ def div(factor: Double) = this / factor
+ def div(other: Duration) = this / other
+ def neg() = -this
+ def isFinite() = finite_?
+}
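+
+// Sketch of how the symbolic operators above line up with their Java API
+// aliases, for two finite durations d1 and d2 (illustrative names only):
+//   d1 < d2        // same as d1.lt(d2)
+//   d1 + d2        // same as d1.plus(d2)
+//   d1 min d2      // the smaller of the two
+//   d1.isFinite()  // alias for finite_? ; false for Inf and MinusInf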
+
+class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration {
+ import Duration._
+
+ def this(length: Long, unit: String) = this(length, Duration.timeUnit(unit))
+
+ def toNanos = unit.toNanos(length)
+ def toMicros = unit.toMicros(length)
+ def toMillis = unit.toMillis(length)
+ def toSeconds = unit.toSeconds(length)
+ def toMinutes = unit.toMinutes(length)
+ def toHours = unit.toHours(length)
+ def toDays = unit.toDays(length)
+ def toUnit(u: TimeUnit) = long2double(toNanos) / NANOSECONDS.convert(1, u)
+
+ override def toString = this match {
+ case Duration(1, DAYS) ⇒ "1 day"
+ case Duration(x, DAYS) ⇒ x + " days"
+ case Duration(1, HOURS) ⇒ "1 hour"
+ case Duration(x, HOURS) ⇒ x + " hours"
+ case Duration(1, MINUTES) ⇒ "1 minute"
+ case Duration(x, MINUTES) ⇒ x + " minutes"
+ case Duration(1, SECONDS) ⇒ "1 second"
+ case Duration(x, SECONDS) ⇒ x + " seconds"
+ case Duration(1, MILLISECONDS) ⇒ "1 millisecond"
+ case Duration(x, MILLISECONDS) ⇒ x + " milliseconds"
+ case Duration(1, MICROSECONDS) ⇒ "1 microsecond"
+ case Duration(x, MICROSECONDS) ⇒ x + " microseconds"
+ case Duration(1, NANOSECONDS) ⇒ "1 nanosecond"
+ case Duration(x, NANOSECONDS) ⇒ x + " nanoseconds"
+ }
+
+ def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000.0 % 60)
+
+ def <(other: Duration) = {
+ if (other.finite_?) {
+ toNanos < other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other > this
+ }
+ }
+
+ def <=(other: Duration) = {
+ if (other.finite_?) {
+ toNanos <= other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other >= this
+ }
+ }
+
+ def >(other: Duration) = {
+ if (other.finite_?) {
+ toNanos > other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other < this
+ }
+ }
+
+ def >=(other: Duration) = {
+ if (other.finite_?) {
+ toNanos >= other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other <= this
+ }
+ }
+
+ def +(other: Duration) = {
+ if (!other.finite_?) {
+ other
+ } else {
+ val nanos = toNanos + other.asInstanceOf[FiniteDuration].toNanos
+ fromNanos(nanos)
+ }
+ }
+
+ def -(other: Duration) = {
+ if (!other.finite_?) {
+ // the sign flips when subtracting an infinity: d - Inf == MinusInf
+ -other
+ } else {
+ val nanos = toNanos - other.asInstanceOf[FiniteDuration].toNanos
+ fromNanos(nanos)
+ }
+ }
+
+ def *(factor: Double) = fromNanos(long2double(toNanos) * factor)
+
+ def /(factor: Double) = fromNanos(long2double(toNanos) / factor)
+
+ def /(other: Duration) = if (other.finite_?) long2double(toNanos) / other.toNanos else 0
+
+ def unary_- = Duration(-length, unit)
+
+ def finite_? = true
+
+ override def equals(other: Any) =
+ other.isInstanceOf[FiniteDuration] &&
+ toNanos == other.asInstanceOf[FiniteDuration].toNanos
+
+ override def hashCode = toNanos.toInt
+}
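+
+// Sketch of FiniteDuration behavior implied by the code above; the
+// normalization done by fromNanos is defined earlier in this file and is
+// assumed here:
+//   new FiniteDuration(90, MINUTES).printHMS            // "01:30:00.000"
+//   Duration(1, SECONDS) + Duration(500, MILLISECONDS)  // 1500 milliseconds
+//   Duration(2, SECONDS) / Duration(500, MILLISECONDS)  // 4.0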
+
+class DurationInt(n: Int) {
+ def nanoseconds = Duration(n, NANOSECONDS)
+ def nanos = Duration(n, NANOSECONDS)
+ def nanosecond = Duration(n, NANOSECONDS)
+ def nano = Duration(n, NANOSECONDS)
+
+ def microseconds = Duration(n, MICROSECONDS)
+ def micros = Duration(n, MICROSECONDS)
+ def microsecond = Duration(n, MICROSECONDS)
+ def micro = Duration(n, MICROSECONDS)
+
+ def milliseconds = Duration(n, MILLISECONDS)
+ def millis = Duration(n, MILLISECONDS)
+ def millisecond = Duration(n, MILLISECONDS)
+ def milli = Duration(n, MILLISECONDS)
+
+ def seconds = Duration(n, SECONDS)
+ def second = Duration(n, SECONDS)
+
+ def minutes = Duration(n, MINUTES)
+ def minute = Duration(n, MINUTES)
+
+ def hours = Duration(n, HOURS)
+ def hour = Duration(n, HOURS)
+
+ def days = Duration(n, DAYS)
+ def day = Duration(n, DAYS)
+}
+
+class DurationLong(n: Long) {
+ def nanoseconds = Duration(n, NANOSECONDS)
+ def nanos = Duration(n, NANOSECONDS)
+ def nanosecond = Duration(n, NANOSECONDS)
+ def nano = Duration(n, NANOSECONDS)
+
+ def microseconds = Duration(n, MICROSECONDS)
+ def micros = Duration(n, MICROSECONDS)
+ def microsecond = Duration(n, MICROSECONDS)
+ def micro = Duration(n, MICROSECONDS)
+
+ def milliseconds = Duration(n, MILLISECONDS)
+ def millis = Duration(n, MILLISECONDS)
+ def millisecond = Duration(n, MILLISECONDS)
+ def milli = Duration(n, MILLISECONDS)
+
+ def seconds = Duration(n, SECONDS)
+ def second = Duration(n, SECONDS)
+
+ def minutes = Duration(n, MINUTES)
+ def minute = Duration(n, MINUTES)
+
+ def hours = Duration(n, HOURS)
+ def hour = Duration(n, HOURS)
+
+ def days = Duration(n, DAYS)
+ def day = Duration(n, DAYS)
+}
+
+class DurationDouble(d: Double) {
+ def nanoseconds = Duration(d, NANOSECONDS)
+ def nanos = Duration(d, NANOSECONDS)
+ def nanosecond = Duration(d, NANOSECONDS)
+ def nano = Duration(d, NANOSECONDS)
+
+ def microseconds = Duration(d, MICROSECONDS)
+ def micros = Duration(d, MICROSECONDS)
+ def microsecond = Duration(d, MICROSECONDS)
+ def micro = Duration(d, MICROSECONDS)
+
+ def milliseconds = Duration(d, MILLISECONDS)
+ def millis = Duration(d, MILLISECONDS)
+ def millisecond = Duration(d, MILLISECONDS)
+ def milli = Duration(d, MILLISECONDS)
+
+ def seconds = Duration(d, SECONDS)
+ def second = Duration(d, SECONDS)
+
+ def minutes = Duration(d, MINUTES)
+ def minute = Duration(d, MINUTES)
+
+ def hours = Duration(d, HOURS)
+ def hour = Duration(d, HOURS)
+
+ def days = Duration(d, DAYS)
+ def day = Duration(d, DAYS)
+}
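+
+// Sketch of the intended use of the three wrappers above, assuming the
+// implicit conversions from Int, Long and Double are provided by a
+// `duration` package object as the Duration scaladoc example suggests:
+//   new DurationInt(100).millis  // explicit form
+//   100.millis                   // with the implicit conversion in scope
+//   1.5.seconds                  // fractional lengths via DurationDouble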
diff --git a/src/library/scala/util/Properties.scala b/src/library/scala/util/Properties.scala
index df1c68ced4..0c7772cd07 100644
--- a/src/library/scala/util/Properties.scala
+++ b/src/library/scala/util/Properties.scala
@@ -142,7 +142,7 @@ private[scala] trait PropertiesTrait {
*/
def isWin = osName startsWith "Windows"
def isMac = javaVendor startsWith "Apple"
-
+
// This is looking for javac, tools.jar, etc.
// Tries JDK_HOME first, then the more common but likely jre JAVA_HOME,
// and finally the system property based javaHome.
diff --git a/src/library/scala/util/Timeout.scala b/src/library/scala/util/Timeout.scala
new file mode 100644
index 0000000000..0190675344
--- /dev/null
+++ b/src/library/scala/util/Timeout.scala
@@ -0,0 +1,33 @@
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
+ */
+package scala.util
+
+import java.util.concurrent.TimeUnit
+
+case class Timeout(duration: Duration) {
+ def this(timeout: Long) = this(Duration(timeout, TimeUnit.MILLISECONDS))
+ def this(length: Long, unit: TimeUnit) = this(Duration(length, unit))
+}
+
+object Timeout {
+ /**
+ * A timeout with zero duration, which will cause most requests to time out immediately.
+ */
+ val zero = new Timeout(Duration.Zero)
+
+ /**
+ * A Timeout with infinite duration that will never time out. Use extreme caution
+ * with this: it may cause memory leaks or blocked threads, and it may not even be
+ * supported by the receiver, which would result in an exception.
+ */
+ val never = new Timeout(Duration.Inf)
+
+ def apply(timeout: Long) = new Timeout(timeout)
+ def apply(length: Long, unit: TimeUnit) = new Timeout(length, unit)
+
+ implicit def durationToTimeout(duration: Duration) = new Timeout(duration)
+ implicit def intToTimeout(timeout: Int) = new Timeout(timeout)
+ implicit def longToTimeout(timeout: Long) = new Timeout(timeout)
+ //implicit def defaultTimeout(implicit system: ActorSystem) = system.settings.ActorTimeout (have to introduce this in ActorSystem)
+}
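+
+// Sketch of the construction paths defined above:
+//   Timeout(5000)                    // Long overload, interpreted as milliseconds
+//   Timeout(5, TimeUnit.SECONDS)
+//   val t: Timeout = 100L                            // via longToTimeout
+//   val t2: Timeout = Duration(5, TimeUnit.SECONDS)  // via durationToTimeout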
diff --git a/src/library/scala/util/Try.scala b/src/library/scala/util/Try.scala
new file mode 100644
index 0000000000..c9bde81317
--- /dev/null
+++ b/src/library/scala/util/Try.scala
@@ -0,0 +1,165 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2008-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.util
+
+import collection.Seq
+
+/**
+ * The `Try` type represents a computation that may either result in an exception
+ * or return a successfully computed value. It's analogous to the `Either` type.
+ */
+sealed abstract class Try[+T] {
+ /**
+ * Returns true if the `Try` is a `Failure`, false otherwise.
+ */
+ def isFailure: Boolean
+
+ /**
+ * Returns true if the `Try` is a `Success`, false otherwise.
+ */
+ def isSuccess: Boolean
+
+ /**
+ * Returns the value from this `Success` or the given argument if this is a `Failure`.
+ */
+ def getOrElse[U >: T](default: => U) = if (isSuccess) get else default
+
+ /**
+ * Returns the value from this `Success` or throws the exception if this is a `Failure`.
+ */
+ def get: T
+
+ /**
+ * Applies the given function `f` if this is a `Success`.
+ */
+ def foreach[U](f: T => U): Unit
+
+ /**
+ * Returns the given function applied to the value from this `Success` or returns this if this is a `Failure`.
+ */
+ def flatMap[U](f: T => Try[U]): Try[U]
+
+ /**
+ * Maps the given function to the value from this `Success` or returns this if this is a `Failure`.
+ */
+ def map[U](f: T => U): Try[U]
+
+ def collect[U](pf: PartialFunction[T, U]): Try[U]
+
+ def exists(p: T => Boolean): Boolean
+
+ /**
+ * Converts this to a `Failure` if the predicate is not satisfied.
+ */
+ def filter(p: T => Boolean): Try[T]
+
+ /**
+ * Converts this to a `Failure` if the predicate is satisfied.
+ */
+ def filterNot(p: T => Boolean): Try[T] = filter(x => !p(x))
+
+ /**
+ * Applies the given partial function to the exception if this is a `Failure`. This is like `flatMap` for the exception.
+ */
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U]
+
+ /**
+ * Applies the given partial function to the exception if this is a `Failure`. This is like `map` for the exception.
+ */
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U]
+
+ /**
+ * Returns `None` if this is a `Failure` or a `Some` containing the value if this is a `Success`.
+ */
+ def toOption = if (isSuccess) Some(get) else None
+
+ def toSeq = if (isSuccess) Seq(get) else Seq()
+
+ /**
+ * Returns the given function applied to the value from this Success or returns this if this is a `Failure`.
+ * Alias for `flatMap`.
+ */
+ def andThen[U](f: T => Try[U]): Try[U] = flatMap(f)
+
+ /**
+ * Transforms a nested `Try`, i.e., a `Try` of type `Try[Try[T]]`,
+ * into an un-nested `Try`, i.e., a `Try` of type `Try[T]`.
+ */
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U]
+
+ def failed: Try[Throwable]
+}
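+
+// Sketch of the duality encoded above: map/flatMap/filter act on the value
+// and pass a Failure through untouched, while recover/rescue act on the
+// exception and pass a Success through untouched (e is an illustrative
+// RuntimeException):
+//   Failure[Int](e) map (_ + 1)              // Failure(e)
+//   Failure[Int](e) recover { case _ => 0 }  // Success(0)
+//   Success(1) recover { case _ => 0 }       // Success(1)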
+
+
+final case class Failure[+T](exception: Throwable) extends Try[T] {
+ def isFailure = true
+ def isSuccess = false
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U] = {
+ try {
+ if (rescueException.isDefinedAt(exception)) rescueException(exception) else this
+ } catch {
+ case e2 => Failure(e2)
+ }
+ }
+ def get: T = throw exception
+ def flatMap[U](f: T => Try[U]): Try[U] = Failure[U](exception)
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U] = Failure[U](exception)
+ def foreach[U](f: T => U): Unit = {}
+ def map[U](f: T => U): Try[U] = Failure[U](exception)
+ def collect[U](pf: PartialFunction[T, U]): Try[U] = Failure[U](exception)
+ def filter(p: T => Boolean): Try[T] = this
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] =
+ if (rescueException.isDefinedAt(exception)) {
+ Try(rescueException(exception))
+ } else {
+ this
+ }
+ def exists(p: T => Boolean): Boolean = false
+ def failed: Try[Throwable] = Success(exception)
+}
+
+
+final case class Success[+T](r: T) extends Try[T] {
+ def isFailure = false
+ def isSuccess = true
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U] = Success(r)
+ def get = r
+ def flatMap[U](f: T => Try[U]): Try[U] =
+ try f(r)
+ catch {
+ case e => Failure(e)
+ }
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U] = r
+ def foreach[U](f: T => U): Unit = f(r)
+ def map[U](f: T => U): Try[U] = Try[U](f(r))
+ def collect[U](pf: PartialFunction[T, U]): Try[U] =
+ if (pf isDefinedAt r) Success(pf(r))
+ else Failure[U](new NoSuchElementException("Partial function not defined at " + r))
+ def filter(p: T => Boolean): Try[T] =
+ if (p(r)) this
+ else Failure(new NoSuchElementException("Predicate does not hold for " + r))
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] = this
+ def exists(p: T => Boolean): Boolean = p(r)
+ def failed: Try[Throwable] = Failure(new UnsupportedOperationException("Success.failed"))
+}
+
+
+object Try {
+
+ def apply[T](r: => T): Try[T] = {
+ try { Success(r) } catch {
+ case e => Failure(e)
+ }
+ }
+
+}
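+
+// Sketch of typical use of the Try API above (expected values inferred from
+// the definitions in this file):
+//   Try("42".toInt) map (_ * 2)                                     // Success(84)
+//   Try("no".toInt) recover { case _: NumberFormatException => 0 }  // Success(0)
+//   Try(1 / 0).failed                           // Success(java.lang.ArithmeticException)
+//   for (a <- Try(2); b <- Try(3)) yield a * b  // Success(6), via flatMap/map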
diff --git a/src/library/scala/util/parsing/combinator/Parsers.scala b/src/library/scala/util/parsing/combinator/Parsers.scala
index 4004a01ad9..9aaf0aeb54 100644
--- a/src/library/scala/util/parsing/combinator/Parsers.scala
+++ b/src/library/scala/util/parsing/combinator/Parsers.scala
@@ -487,7 +487,7 @@ trait Parsers {
}
/** Changes the error message produced by a parser.
- *
+ *
* This doesn't change the behavior of a parser on either
* success or failure, just on error. The semantics are
* slightly different from those obtained by doing `| error(msg)`,
@@ -794,7 +794,7 @@ trait Parsers {
*/
def chainl1[T, U](first: => Parser[T], p: => Parser[U], q: => Parser[(T, U) => T]): Parser[T]
= first ~ rep(q ~ p) ^^ {
- case x ~ xs => xs.foldLeft(x){(_, _) match {case (a, f ~ b) => f(a, b)}}
+ case x ~ xs => xs.foldLeft(x: T){case (a, f ~ b) => f(a, b)} // x's type annotation is needed to deal with changed type inference due to SI-5189
}
/** A parser generator that generalises the `rep1sep` generator so that `q`,
@@ -812,8 +812,7 @@ trait Parsers {
*/
def chainr1[T, U](p: => Parser[T], q: => Parser[(T, U) => U], combine: (T, U) => U, first: U): Parser[U]
= p ~ rep(q ~ p) ^^ {
- case x ~ xs => (new ~(combine, x) :: xs).
- foldRight(first){(_, _) match {case (f ~ a, b) => f(a, b)}}
+ case x ~ xs => (new ~(combine, x) :: xs).foldRight(first){case (f ~ a, b) => f(a, b)}
}
/** A parser generator for optional sub-phrases.