author    Aleksandar Prokopec <axel22@gmail.com>  2012-03-02 10:27:13 +0100
committer Aleksandar Prokopec <axel22@gmail.com>  2012-03-02 10:27:13 +0100
commit    66271b123807340632c24d3dc83bb833f411cf30 (patch)
tree      0bae36bb6af7cb86f85ecf343a2810f4a7deb264 /src
parent    1852a7ddf7f8c5fb4a85e64b73123d333e698932 (diff)
parent    54b541b103f79bdfff96227eeeac1d92d68165d8 (diff)
Merge branch 'master' into feature/pc-execution-contexts
Conflicts:
	src/library/scala/collection/parallel/Combiner.scala
	src/library/scala/collection/parallel/ParIterableLike.scala
	src/library/scala/collection/parallel/mutable/ParCtrie.scala
Diffstat (limited to 'src')
-rw-r--r--  src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala | 4
-rw-r--r--  src/actors/scala/actors/scheduler/ForkJoinScheduler.scala | 9
-rw-r--r--  src/build/maven/maven-deploy.xml | 2
-rw-r--r--  src/compiler/scala/reflect/internal/BaseTypeSeqs.scala | 12
-rw-r--r--  src/compiler/scala/reflect/internal/ClassfileConstants.scala | 6
-rw-r--r--  src/compiler/scala/reflect/internal/Definitions.scala | 45
-rw-r--r--  src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala | 2
-rw-r--r--  src/compiler/scala/reflect/internal/Flags.scala | 5
-rw-r--r--  src/compiler/scala/reflect/internal/HasFlags.scala | 2
-rw-r--r--  src/compiler/scala/reflect/internal/Kinds.scala | 2
-rw-r--r--  src/compiler/scala/reflect/internal/NameManglers.scala | 13
-rw-r--r--  src/compiler/scala/reflect/internal/Names.scala | 20
-rw-r--r--  src/compiler/scala/reflect/internal/Phase.scala | 4
-rw-r--r--  src/compiler/scala/reflect/internal/Scopes.scala | 8
-rw-r--r--  src/compiler/scala/reflect/internal/StdNames.scala | 13
-rw-r--r--  src/compiler/scala/reflect/internal/SymbolTable.scala | 40
-rw-r--r--  src/compiler/scala/reflect/internal/Symbols.scala | 86
-rw-r--r--  src/compiler/scala/reflect/internal/TreeInfo.scala | 4
-rw-r--r--  src/compiler/scala/reflect/internal/Trees.scala | 28
-rw-r--r--  src/compiler/scala/reflect/internal/Types.scala | 283
-rw-r--r--  src/compiler/scala/reflect/internal/pickling/UnPickler.scala | 20
-rw-r--r--  src/compiler/scala/reflect/internal/util/Collections.scala | 22
-rw-r--r--  src/compiler/scala/reflect/runtime/ConversionUtil.scala | 4
-rw-r--r--  src/compiler/scala/reflect/runtime/Mirror.scala | 10
-rw-r--r--  src/compiler/scala/reflect/runtime/SynchronizedOps.scala | 20
-rw-r--r--  src/compiler/scala/reflect/runtime/SynchronizedSymbols.scala | 38
-rw-r--r--  src/compiler/scala/reflect/runtime/SynchronizedTypes.scala | 66
-rw-r--r--  src/compiler/scala/tools/ant/Scaladoc.scala | 8
-rw-r--r--  src/compiler/scala/tools/ant/templates/tool-unix.tmpl | 4
-rw-r--r--  src/compiler/scala/tools/nsc/CompilationUnits.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/Global.scala | 216
-rw-r--r--  src/compiler/scala/tools/nsc/MacroContext.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/SubComponent.scala | 3
-rw-r--r--  src/compiler/scala/tools/nsc/ast/TreeDSL.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/ast/TreeGen.scala | 21
-rw-r--r--  src/compiler/scala/tools/nsc/ast/Trees.scala | 28
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/Parsers.scala | 25
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/Scanners.scala | 63
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/Tokens.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/BasicBlocks.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/Members.scala | 17
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/TypeStacks.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala | 14
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala | 49
-rw-r--r--  src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/Inliners.scala | 9
-rw-r--r--  src/compiler/scala/tools/nsc/dependencies/Changes.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala | 21
-rw-r--r--  src/compiler/scala/tools/nsc/doc/model/ModelFactory.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala | 14
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/ExprTyper.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/ILoop.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/IMain.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/Imports.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/Power.scala | 41
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/ReplVals.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/javac/JavaParsers.scala | 9
-rw-r--r--  src/compiler/scala/tools/nsc/reporters/Reporter.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/settings/MutableSettings.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/SymbolTable.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala | 24
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/Pickler.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/AddInterfaces.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/transform/CleanUp.scala | 24
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Constructors.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Erasure.scala | 67
-rw-r--r--  src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala | 20
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Flatten.scala | 32
-rw-r--r--  src/compiler/scala/tools/nsc/transform/LambdaLift.scala | 42
-rw-r--r--  src/compiler/scala/tools/nsc/transform/LazyVals.scala | 32
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Mixin.scala | 64
-rw-r--r--  src/compiler/scala/tools/nsc/transform/OverridingPairs.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala | 128
-rw-r--r--  src/compiler/scala/tools/nsc/transform/TailCalls.scala | 10
-rw-r--r--  src/compiler/scala/tools/nsc/transform/UnCurry.scala | 110
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala | 74
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Contexts.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Duplicators.scala | 32
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Implicits.scala | 14
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Infer.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala | 16
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Namers.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala | 22
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala | 15
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/RefChecks.scala | 35
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Typers.scala | 53
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Unapplies.scala | 2
-rwxr-xr-x  src/compiler/scala/tools/nsc/util/DocStrings.scala | 16
-rw-r--r--  src/compiler/scala/tools/nsc/util/ProxyReport.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/util/Statistics.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/util/WeakHashSet.scala | 60
-rw-r--r--  src/compiler/scala/tools/util/EditDistance.scala | 2
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala | 16
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/CPSUtils.scala | 2
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala | 20
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala | 38
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java | 3829
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java | 1749
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java | 756
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java | 1590
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java | 113
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java | 31
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java | 81
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java | 85
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/package-info.java | 5
-rw-r--r--  src/library/scala/Enumeration.scala | 6
-rw-r--r--  src/library/scala/Function0.scala | 8
-rw-r--r--  src/library/scala/Function1.scala | 8
-rw-r--r--  src/library/scala/Function2.scala | 6
-rw-r--r--  src/library/scala/PartialFunction.scala | 10
-rw-r--r--  src/library/scala/Product1.scala | 4
-rw-r--r--  src/library/scala/Product10.scala | 4
-rw-r--r--  src/library/scala/Product11.scala | 4
-rw-r--r--  src/library/scala/Product12.scala | 4
-rw-r--r--  src/library/scala/Product13.scala | 4
-rw-r--r--  src/library/scala/Product14.scala | 4
-rw-r--r--  src/library/scala/Product15.scala | 4
-rw-r--r--  src/library/scala/Product16.scala | 4
-rw-r--r--  src/library/scala/Product17.scala | 4
-rw-r--r--  src/library/scala/Product18.scala | 4
-rw-r--r--  src/library/scala/Product19.scala | 4
-rw-r--r--  src/library/scala/Product2.scala | 4
-rw-r--r--  src/library/scala/Product20.scala | 4
-rw-r--r--  src/library/scala/Product21.scala | 4
-rw-r--r--  src/library/scala/Product22.scala | 4
-rw-r--r--  src/library/scala/Product3.scala | 4
-rw-r--r--  src/library/scala/Product4.scala | 4
-rw-r--r--  src/library/scala/Product5.scala | 4
-rw-r--r--  src/library/scala/Product6.scala | 4
-rw-r--r--  src/library/scala/Product7.scala | 4
-rw-r--r--  src/library/scala/Product8.scala | 4
-rw-r--r--  src/library/scala/Product9.scala | 4
-rw-r--r--  src/library/scala/Specializable.scala | 2
-rw-r--r--  src/library/scala/StringContext.scala | 16
-rw-r--r--  src/library/scala/Tuple1.scala | 2
-rw-r--r--  src/library/scala/Tuple10.scala | 2
-rw-r--r--  src/library/scala/Tuple11.scala | 2
-rw-r--r--  src/library/scala/Tuple12.scala | 2
-rw-r--r--  src/library/scala/Tuple13.scala | 2
-rw-r--r--  src/library/scala/Tuple14.scala | 2
-rw-r--r--  src/library/scala/Tuple15.scala | 2
-rw-r--r--  src/library/scala/Tuple16.scala | 2
-rw-r--r--  src/library/scala/Tuple17.scala | 2
-rw-r--r--  src/library/scala/Tuple18.scala | 2
-rw-r--r--  src/library/scala/Tuple19.scala | 2
-rw-r--r--  src/library/scala/Tuple2.scala | 2
-rw-r--r--  src/library/scala/Tuple20.scala | 2
-rw-r--r--  src/library/scala/Tuple21.scala | 2
-rw-r--r--  src/library/scala/Tuple22.scala | 2
-rw-r--r--  src/library/scala/Tuple3.scala | 2
-rw-r--r--  src/library/scala/Tuple4.scala | 2
-rw-r--r--  src/library/scala/Tuple5.scala | 2
-rw-r--r--  src/library/scala/Tuple6.scala | 2
-rw-r--r--  src/library/scala/Tuple7.scala | 2
-rw-r--r--  src/library/scala/Tuple8.scala | 2
-rw-r--r--  src/library/scala/Tuple9.scala | 2
-rw-r--r--  src/library/scala/annotation/elidable.scala | 55
-rw-r--r--  src/library/scala/collection/GenTraversableLike.scala | 2
-rw-r--r--  src/library/scala/collection/SeqLike.scala | 2
-rw-r--r--  src/library/scala/collection/generic/MutableSortedSetFactory.scala | 6
-rw-r--r--  src/library/scala/collection/immutable/BitSet.scala | 2
-rw-r--r--  src/library/scala/collection/immutable/List.scala | 20
-rw-r--r--  src/library/scala/collection/immutable/Range.scala | 13
-rw-r--r--  src/library/scala/collection/mutable/AVLTree.scala | 26
-rw-r--r--  src/library/scala/collection/mutable/Ctrie.scala | 248
-rw-r--r--  src/library/scala/collection/mutable/FlatHashTable.scala | 40
-rw-r--r--  src/library/scala/collection/mutable/HashTable.scala | 10
-rw-r--r--  src/library/scala/collection/mutable/ListBuffer.scala | 16
-rw-r--r--  src/library/scala/collection/mutable/SortedSet.scala | 10
-rw-r--r--  src/library/scala/collection/mutable/TreeSet.scala | 14
-rw-r--r--  src/library/scala/collection/parallel/Combiner.scala | 4
-rw-r--r--  src/library/scala/collection/parallel/ParIterableLike.scala | 46
-rw-r--r--  src/library/scala/collection/parallel/ParSeqLike.scala | 4
-rw-r--r--  src/library/scala/collection/parallel/RemainsIterator.scala | 16
-rw-r--r--  src/library/scala/collection/parallel/Tasks.scala | 10
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParCtrie.scala | 77
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParHashSet.scala | 2
-rw-r--r--  src/library/scala/collection/parallel/package.scala | 14
-rw-r--r--  src/library/scala/concurrent/Channel.scala | 6
-rw-r--r--  src/library/scala/concurrent/ConcurrentPackageObject.scala | 30
-rw-r--r--  src/library/scala/concurrent/DelayedLazyVal.scala | 8
-rw-r--r--  src/library/scala/concurrent/ExecutionContext.scala | 56
-rw-r--r--  src/library/scala/concurrent/Future.scala | 194
-rw-r--r--  src/library/scala/concurrent/JavaConversions.scala | 6
-rw-r--r--  src/library/scala/concurrent/Promise.scala | 64
-rw-r--r--  src/library/scala/concurrent/Task.scala | 6
-rw-r--r--  src/library/scala/concurrent/impl/ExecutionContextImpl.scala | 34
-rw-r--r--  src/library/scala/concurrent/impl/Future.scala | 20
-rw-r--r--  src/library/scala/concurrent/impl/Promise.scala | 74
-rw-r--r--  src/library/scala/concurrent/package.scala | 14
-rw-r--r--  src/library/scala/reflect/ReflectionUtils.scala | 4
-rw-r--r--  src/library/scala/reflect/api/Mirror.scala | 4
-rw-r--r--  src/library/scala/reflect/api/Modifier.scala | 2
-rwxr-xr-x  src/library/scala/reflect/api/Names.scala | 4
-rwxr-xr-x  src/library/scala/reflect/api/Symbols.scala | 10
-rw-r--r--  src/library/scala/reflect/api/TreePrinters.scala | 8
-rw-r--r--  src/library/scala/reflect/api/Trees.scala | 67
-rwxr-xr-x  src/library/scala/reflect/api/Types.scala | 2
-rw-r--r--  src/library/scala/reflect/macro/Context.scala | 4
-rw-r--r--  src/library/scala/runtime/NonLocalReturnControl.scala | 4
-rw-r--r--  src/library/scala/specialized.scala | 2
-rw-r--r--  src/library/scala/sys/process/BasicIO.scala | 2
-rw-r--r--  src/library/scala/util/Properties.scala | 2
-rw-r--r--  src/library/scala/util/Try.scala | 32
-rw-r--r--  src/library/scala/util/parsing/combinator/Parsers.scala | 2
-rw-r--r--  src/manual/scala/tools/docutil/EmitManPage.scala | 2
-rw-r--r--  src/partest/scala/tools/partest/CompilerTest.scala | 2
-rw-r--r--  src/partest/scala/tools/partest/DirectTest.scala | 2
-rw-r--r--  src/partest/scala/tools/partest/nest/PathSettings.scala | 8
-rw-r--r--  src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala | 2
220 files changed, 6969 insertions(+), 5253 deletions(-)
diff --git a/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala b/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
index 257fe92a91..15ce60566a 100644
--- a/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
+++ b/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
@@ -4,9 +4,9 @@ package scheduler
import java.util.Collection
import scala.concurrent.forkjoin.{ForkJoinPool, ForkJoinTask}
-private class DrainableForkJoinPool extends ForkJoinPool {
+private class DrainableForkJoinPool(parallelism: Int, maxPoolSize: Int) extends ForkJoinPool(parallelism, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true) {
- override def drainTasksTo(c: Collection[ForkJoinTask[_]]): Int =
+ override def drainTasksTo(c: Collection[ _ >: ForkJoinTask[_]]): Int =
super.drainTasksTo(c)
}
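A note on the signature change above: the parameter becomes a lower-bounded wildcard, Collection[_ >: ForkJoinTask[_]], matching the updated drainTasksTo in the bundled ForkJoinPool, so callers may drain into a collection of any supertype of the task type. A minimal stand-alone sketch of why the lower bound helps (Task and PrintTask are hypothetical names, not from this commit):

    import java.util.{ArrayList, Collection}

    abstract class Task
    class PrintTask extends Task

    // The lower bound accepts any collection able to *hold* a Task.
    def drainTo(c: Collection[_ >: Task]): Int = { c.add(new PrintTask); 1 }

    drainTo(new ArrayList[Task]())    // exact element type works
    drainTo(new ArrayList[AnyRef]())  // a supertype collection works too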
diff --git a/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala b/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
index ba0f88c668..ce67ffd037 100644
--- a/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
+++ b/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
@@ -38,13 +38,8 @@ class ForkJoinScheduler(val initCoreSize: Int, val maxSize: Int, daemon: Boolean
}
private def makeNewPool(): DrainableForkJoinPool = {
- val p = new DrainableForkJoinPool()
- // enable locally FIFO scheduling mode
- p.setAsyncMode(true)
- p.setParallelism(initCoreSize)
- p.setMaximumPoolSize(maxSize)
+ val p = new DrainableForkJoinPool(initCoreSize, maxSize)
Debug.info(this+": parallelism "+p.getParallelism())
- Debug.info(this+": max pool size "+p.getMaximumPoolSize())
p
}
@@ -144,7 +139,7 @@ class ForkJoinScheduler(val initCoreSize: Int, val maxSize: Int, daemon: Boolean
ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker {
def block = blocker.block()
def isReleasable() = blocker.isReleasable
- }, true)
+ })
}
/** Suspends the scheduler. All threads that were in use by the
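The two hunks above track the refreshed JSR-166y sources under src/forkjoin: parallelism, thread factory, exception handler and async (FIFO) mode are now constructor parameters rather than setters (setAsyncMode, setParallelism and setMaximumPoolSize are gone), and ManagedBlocker.managedBlock lost its boolean maintainParallelism argument. A sketch of configuring such a pool directly, using only the constructor already visible in the DrainableForkJoinPool hunk:

    import scala.concurrent.forkjoin.ForkJoinPool

    // All configuration happens at construction; the final `true` selects
    // locally-FIFO (async) scheduling, which the old code set via setAsyncMode(true).
    val pool = new ForkJoinPool(
      4,                                                // parallelism
      ForkJoinPool.defaultForkJoinWorkerThreadFactory,  // worker thread factory
      null,                                             // no UncaughtExceptionHandler
      true)                                             // asyncMode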
diff --git a/src/build/maven/maven-deploy.xml b/src/build/maven/maven-deploy.xml
index fe99e956d9..2e490163e0 100644
--- a/src/build/maven/maven-deploy.xml
+++ b/src/build/maven/maven-deploy.xml
@@ -129,7 +129,7 @@
<artifact:pom refid="@{name}.pom" />
<artifact:remoteRepository url="@{repository}" id="${repository.credentials.id}" />
<artifact:attach type="jar" file="@{name}/@{name}-src.jar" classifier="sources" />
- <artifact:attach type="jar" file="@{name}/@{name}-doc.jar" classifier="javadoc" />
+ <artifact:attach type="jar" file="@{name}/@{name}-docs.jar" classifier="javadoc" />
<extra-attachments />
</artifact:deploy>
</sequential>
diff --git a/src/compiler/scala/reflect/internal/BaseTypeSeqs.scala b/src/compiler/scala/reflect/internal/BaseTypeSeqs.scala
index 9e5c93753f..3753a45133 100644
--- a/src/compiler/scala/reflect/internal/BaseTypeSeqs.scala
+++ b/src/compiler/scala/reflect/internal/BaseTypeSeqs.scala
@@ -29,12 +29,12 @@ trait BaseTypeSeqs {
this: SymbolTable =>
import definitions._
- protected def newBaseTypeSeq(parents: List[Type], elems: Array[Type]) =
+ protected def newBaseTypeSeq(parents: List[Type], elems: Array[Type]) =
new BaseTypeSeq(parents, elems)
/** Note: constructor is protected to force everyone to use the factory method newBaseTypeSeq instead.
- * This is necessary because when run from reflection every base type sequence needs to have a
- * SynchronizedBaseTypeSeq as mixin.
+ * This is necessary because when run from reflection every base type sequence needs to have a
+ * SynchronizedBaseTypeSeq as mixin.
*/
class BaseTypeSeq protected[BaseTypeSeqs] (private[BaseTypeSeqs] val parents: List[Type], private[BaseTypeSeqs] val elems: Array[Type]) {
self =>
@@ -242,7 +242,7 @@ trait BaseTypeSeqs {
// Console.println("computed baseTypeSeq of " + tsym.tpe + " " + parents + ": "+elems.toString)//DEBUG
newBaseTypeSeq(parents, elems)
}
-
+
class MappedBaseTypeSeq(orig: BaseTypeSeq, f: Type => Type) extends BaseTypeSeq(orig.parents map f, orig.elems) {
override def apply(i: Int) = f(orig.apply(i))
override def rawElem(i: Int) = f(orig.rawElem(i))
@@ -254,7 +254,7 @@ trait BaseTypeSeqs {
override def exists(p: Type => Boolean) = elems exists (x => p(f(x)))
override protected def maxDepthOfElems: Int = elems map (x => maxDpth(f(x))) max
override def toString = elems.mkString("MBTS(", ",", ")")
- }
-
+ }
+
val CyclicInheritance = new Throwable
}
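MappedBaseTypeSeq, touched above, applies f on every access rather than materializing a mapped copy; each override simply re-maps through f. The same view-like idea in a stand-alone sketch (MappedSeq is a hypothetical name):

    class MappedSeq[A, B](orig: IndexedSeq[A], f: A => B) {
      def apply(i: Int): B = f(orig(i))   // mapped on access, not on construction
      def exists(p: B => Boolean): Boolean = orig exists (x => p(f(x)))
      override def toString = orig.map(f).mkString("MBTS(", ",", ")")
    }

    val squares = new MappedSeq(Vector(1, 2, 3), (x: Int) => x * x)
    assert(squares(2) == 9)   // f applied lazily, per element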
diff --git a/src/compiler/scala/reflect/internal/ClassfileConstants.scala b/src/compiler/scala/reflect/internal/ClassfileConstants.scala
index 1c4c007de0..eec72d082d 100644
--- a/src/compiler/scala/reflect/internal/ClassfileConstants.scala
+++ b/src/compiler/scala/reflect/internal/ClassfileConstants.scala
@@ -360,7 +360,7 @@ object ClassfileConstants {
res |= translateFlag(jflags & JAVA_ACC_INTERFACE)
res
}
-
+
def classFlags(jflags: Int): Long = {
initFields(jflags)
isClass = true
@@ -376,11 +376,11 @@ object ClassfileConstants {
}
}
object FlagTranslation extends FlagTranslation { }
-
+
def toScalaMethodFlags(flags: Int): Long = FlagTranslation methodFlags flags
def toScalaClassFlags(flags: Int): Long = FlagTranslation classFlags flags
def toScalaFieldFlags(flags: Int): Long = FlagTranslation fieldFlags flags
-
+
@deprecated("Use another method in this object", "2.10.0")
def toScalaFlags(flags: Int, isClass: Boolean = false, isField: Boolean = false): Long = (
if (isClass) toScalaClassFlags(flags)
diff --git a/src/compiler/scala/reflect/internal/Definitions.scala b/src/compiler/scala/reflect/internal/Definitions.scala
index ce5bb3d1c4..ec171c5f2c 100644
--- a/src/compiler/scala/reflect/internal/Definitions.scala
+++ b/src/compiler/scala/reflect/internal/Definitions.scala
@@ -21,7 +21,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
* methods.
*/
private type PolyMethodCreator = List[Symbol] => (Option[List[Type]], Type)
-
+
private def newClass(owner: Symbol, name: TypeName, parents: List[Type], flags: Long = 0L): Symbol = {
val clazz = owner.newClassSymbol(name, NoPosition, flags)
clazz setInfoAndEnter ClassInfoType(parents, newScope, clazz)
@@ -164,7 +164,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
lazy val RuntimePackage = getRequiredModule("scala.runtime")
lazy val RuntimePackageClass = RuntimePackage.moduleClass
-
+
lazy val JavaLangEnumClass = getRequiredClass("java.lang.Enum")
// convenient one-argument parameter lists
@@ -176,10 +176,10 @@ trait Definitions extends reflect.api.StandardDefinitions {
private def booltype = BooleanClass.typeConstructor
private def inttype = IntClass.typeConstructor
private def stringtype = StringClass.typeConstructor
-
+
// Java types
def javaTypeName(jclazz: Class[_]): TypeName = newTypeName(jclazz.getName)
-
+
def javaTypeToValueClass(jtype: Class[_]): Symbol = jtype match {
case java.lang.Void.TYPE => UnitClass
case java.lang.Byte.TYPE => ByteClass
@@ -213,7 +213,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
// Note: this is not the type alias AnyRef, it's a companion-like
// object used by the @specialize annotation.
lazy val AnyRefModule = getMember(ScalaPackageClass, nme.AnyRef)
- @deprecated("Use AnyRefModule", "2.10.0")
+ @deprecated("Use AnyRefModule", "2.10.0")
def Predef_AnyRef = AnyRefModule
// bottom types
@@ -269,13 +269,13 @@ trait Definitions extends reflect.api.StandardDefinitions {
lazy val PredefModule: Symbol = getRequiredModule("scala.Predef")
lazy val PredefModuleClass = PredefModule.moduleClass
-
+
def Predef_classOf = getMember(PredefModule, nme.classOf)
def Predef_identity = getMember(PredefModule, nme.identity)
def Predef_conforms = getMember(PredefModule, nme.conforms)
def Predef_wrapRefArray = getMember(PredefModule, nme.wrapRefArray)
def Predef_??? = getMember(PredefModule, nme.???)
-
+
/** Is `sym` a member of Predef with the given name?
* Note: DON't replace this by sym == Predef_conforms/etc, as Predef_conforms is a `def`
* which does a member lookup (it can't be a lazy val because we might reload Predef
@@ -284,7 +284,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
def isPredefMemberNamed(sym: Symbol, name: Name) = (
(sym.name == name) && (sym.owner == PredefModule.moduleClass)
)
-
+
/** Specialization.
*/
lazy val SpecializableModule = getRequiredModule("scala.Specializable")
@@ -429,7 +429,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
case m: ClassManifest[_] =>
val sym = manifestToSymbol(m)
val args = m.typeArguments
-
+
if ((sym eq NoSymbol) || args.isEmpty) sym.tpe
else appliedType(sym.typeConstructor, args map manifestToType)
case _ =>
@@ -439,7 +439,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
def manifestToSymbol(m: ClassManifest[_]): Symbol = m match {
case x: scala.reflect.AnyValManifest[_] =>
getMember(ScalaPackageClass, newTypeName("" + x))
- case _ =>
+ case _ =>
val name = m.erasure.getName
if (name endsWith nme.MODULE_SUFFIX_STRING)
getModuleIfDefined(name stripSuffix nme.MODULE_SUFFIX_STRING)
@@ -499,7 +499,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
case DoubleClass => nme.wrapDoubleArray
case BooleanClass => nme.wrapBooleanArray
case UnitClass => nme.wrapUnitArray
- case _ =>
+ case _ =>
if ((elemtp <:< AnyRefClass.tpe) && !isPhantomClass(elemtp.typeSymbol)) nme.wrapRefArray
else nme.genericWrapArray
}
@@ -605,7 +605,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
def ClassType(arg: Type) =
if (phase.erasedTypes || forMSIL) ClassClass.tpe
else appliedType(ClassClass.typeConstructor, List(arg))
-
+
def vmClassType(arg: Type): Type = ClassType(arg)
def vmSignature(sym: Symbol, info: Type): String = signature(info) // !!!
@@ -658,7 +658,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
case _ => false
})
}
-
+
// members of class scala.Any
lazy val Any_== = newMethod(AnyClass, nme.EQ, anyparam, booltype, FINAL)
lazy val Any_!= = newMethod(AnyClass, nme.NE, anyparam, booltype, FINAL)
@@ -688,7 +688,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
lazy val Object_ne = newMethod(ObjectClass, nme.ne, anyrefparam, booltype, FINAL)
lazy val Object_isInstanceOf = newT1NoParamsMethod(ObjectClass, nme.isInstanceOf_Ob, FINAL | SYNTHETIC)(_ => booltype)
lazy val Object_asInstanceOf = newT1NoParamsMethod(ObjectClass, nme.asInstanceOf_Ob, FINAL | SYNTHETIC)(_.typeConstructor)
- lazy val Object_synchronized = newPolyMethod(1, ObjectClass, nme.synchronized_, FINAL)(tps =>
+ lazy val Object_synchronized = newPolyMethod(1, ObjectClass, nme.synchronized_, FINAL)(tps =>
(Some(List(tps.head.typeConstructor)), tps.head.typeConstructor)
)
lazy val String_+ = newMethod(StringClass, nme.raw.PLUS, anyparam, stringtype, FINAL)
@@ -796,10 +796,10 @@ trait Definitions extends reflect.api.StandardDefinitions {
while (result.isAliasType) result = result.info.typeSymbol
result
}
-
+
def getRequiredModule(fullname: String): Symbol =
getModule(newTermNameCached(fullname))
- def getRequiredClass(fullname: String): Symbol =
+ def getRequiredClass(fullname: String): Symbol =
getClass(newTypeNameCached(fullname))
def getClassIfDefined(fullname: String): Symbol =
@@ -814,6 +814,9 @@ trait Definitions extends reflect.api.StandardDefinitions {
try getModule(fullname.toTermName)
catch { case _: MissingRequirementError => NoSymbol }
+ def termMember(owner: Symbol, name: String): Symbol = owner.info.member(newTermName(name))
+ def typeMember(owner: Symbol, name: String): Symbol = owner.info.member(newTypeName(name))
+
def getMember(owner: Symbol, name: Name): Symbol = {
if (owner == NoSymbol) NoSymbol
else owner.info.nonPrivateMember(name) match {
@@ -848,15 +851,15 @@ trait Definitions extends reflect.api.StandardDefinitions {
private def newAlias(owner: Symbol, name: TypeName, alias: Type): Symbol =
owner.newAliasType(name) setInfoAndEnter alias
-
+
private def specialPolyClass(name: TypeName, flags: Long)(parentFn: Symbol => Type): Symbol = {
val clazz = newClass(ScalaPackageClass, name, Nil)
val tparam = clazz.newSyntheticTypeParam("T0", flags)
val parents = List(AnyRefClass.tpe, parentFn(tparam))
-
+
clazz setInfo polyType(List(tparam), ClassInfoType(parents, newScope, clazz))
}
-
+
def newPolyMethod(typeParamCount: Int, owner: Symbol, name: TermName, flags: Long)(createFn: PolyMethodCreator): Symbol = {
val msym = owner.newMethod(name.encode, NoPosition, flags)
val tparams = msym.newSyntheticTypeParams(typeParamCount)
@@ -867,7 +870,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
msym setInfoAndEnter polyType(tparams, mtpe)
}
-
+
/** T1 means one type parameter.
*/
def newT1NullaryMethod(owner: Symbol, name: TermName, flags: Long)(createFn: Symbol => Type): Symbol = {
@@ -974,7 +977,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
RootClass.info.decls enter EmptyPackage
RootClass.info.decls enter RootPackage
-
+
val forced = List( // force initialization of every symbol that is entered as a side effect
AnnotationDefaultAttr, // #2264
RepeatedParamClass,
diff --git a/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala b/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala
index 47f794681c..f1fe4fc118 100644
--- a/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala
+++ b/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala
@@ -14,7 +14,7 @@ import util._
*/
trait ExistentialsAndSkolems {
self: SymbolTable =>
-
+
/** Map a list of type parameter symbols to skolemized symbols, which
* can be deskolemized to the original type parameter. (A skolem is a
* representation of a bound variable when viewed inside its scope.)
diff --git a/src/compiler/scala/reflect/internal/Flags.scala b/src/compiler/scala/reflect/internal/Flags.scala
index aa696bc6e8..270491d078 100644
--- a/src/compiler/scala/reflect/internal/Flags.scala
+++ b/src/compiler/scala/reflect/internal/Flags.scala
@@ -165,6 +165,7 @@ class Flags extends ModifierFlags {
final val TRIEDCOOKING = 0x100000000000L // ``Cooking'' has been tried on this symbol
// A Java method's type is ``cooked'' by transforming raw types to existentials
+ final val SYNCHRONIZED = 0x200000000000L // symbol is a method which should be marked ACC_SYNCHRONIZED
// ------- shift definitions -------------------------------------------------------
final val InitialFlags = 0x0001FFFFFFFFFFFFL // flags that are enabled from phase 1.
@@ -222,7 +223,7 @@ class Flags extends ModifierFlags {
/** These modifiers appear in TreePrinter output. */
final val PrintableFlags: Long =
ExplicitFlags | LOCAL | SYNTHETIC | STABLE | CASEACCESSOR | MACRO |
- ACCESSOR | SUPERACCESSOR | PARAMACCESSOR | BRIDGE | STATIC | VBRIDGE | SPECIALIZED
+ ACCESSOR | SUPERACCESSOR | PARAMACCESSOR | BRIDGE | STATIC | VBRIDGE | SPECIALIZED | SYNCHRONIZED
/** The two bridge flags */
final val BridgeFlags = BRIDGE | VBRIDGE
@@ -384,7 +385,7 @@ class Flags extends ModifierFlags {
case VBRIDGE => "<vbridge>" // (1L << 42)
case VARARGS => "<varargs>" // (1L << 43)
case TRIEDCOOKING => "<triedcooking>" // (1L << 44)
- case 0x200000000000L => "" // (1L << 45)
+ case SYNCHRONIZED => "<synchronized>" // (1L << 45)
case 0x400000000000L => "" // (1L << 46)
case 0x800000000000L => "" // (1L << 47)
case 0x1000000000000L => "" // (1L << 48)
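For the flag introduced above: symbol flags live in a 64-bit mask, and 0x200000000000L is exactly bit 45, the next free slot after TRIEDCOOKING (bit 44); the second hunk just teaches the flag-name table about it. The bit arithmetic, spelled out:

    val TRIEDCOOKING = 0x100000000000L   // 1L << 44
    val SYNCHRONIZED = 0x200000000000L   // 1L << 45

    assert(SYNCHRONIZED == (1L << 45))
    def hasFlag(flags: Long, mask: Long): Boolean = (flags & mask) != 0L
    assert(hasFlag(TRIEDCOOKING | SYNCHRONIZED, SYNCHRONIZED))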
diff --git a/src/compiler/scala/reflect/internal/HasFlags.scala b/src/compiler/scala/reflect/internal/HasFlags.scala
index ec4e919bdc..8affd66cd5 100644
--- a/src/compiler/scala/reflect/internal/HasFlags.scala
+++ b/src/compiler/scala/reflect/internal/HasFlags.scala
@@ -136,7 +136,7 @@ trait HasFlags {
/** Whether this entity has NONE of the flags in the given mask.
*/
def hasNoFlags(mask: Long): Boolean = !hasFlag(mask)
-
+
protected def isSetting(f: Long, mask: Long) = !hasFlag(f) && ((mask & f) != 0L)
protected def isClearing(f: Long, mask: Long) = hasFlag(f) && ((mask & f) != 0L)
diff --git a/src/compiler/scala/reflect/internal/Kinds.scala b/src/compiler/scala/reflect/internal/Kinds.scala
index e675be43dc..23bff950b8 100644
--- a/src/compiler/scala/reflect/internal/Kinds.scala
+++ b/src/compiler/scala/reflect/internal/Kinds.scala
@@ -128,7 +128,7 @@ trait Kinds {
// @M sometimes hkargs != arg.typeParams, the symbol and the type may
// have very different type parameters
val hkparams = param.typeParams
-
+
def kindCheck(cond: Boolean, f: KindErrors => KindErrors) {
if (!cond)
kindErrors = f(kindErrors)
diff --git a/src/compiler/scala/reflect/internal/NameManglers.scala b/src/compiler/scala/reflect/internal/NameManglers.scala
index c4ee7254dc..12f56976c9 100644
--- a/src/compiler/scala/reflect/internal/NameManglers.scala
+++ b/src/compiler/scala/reflect/internal/NameManglers.scala
@@ -22,10 +22,10 @@ trait NameManglers {
val MODULE_SUFFIX_STRING = NameTransformer.MODULE_SUFFIX_STRING
val NAME_JOIN_STRING = NameTransformer.NAME_JOIN_STRING
-
+
val MODULE_SUFFIX_NAME: TermName = newTermName(MODULE_SUFFIX_STRING)
val NAME_JOIN_NAME: TermName = newTermName(NAME_JOIN_STRING)
-
+
def flattenedName(segments: Name*): NameType = compactedString(segments mkString NAME_JOIN_STRING)
/**
@@ -79,7 +79,7 @@ trait NameManglers {
val SUPER_PREFIX_STRING = "super$"
val TRAIT_SETTER_SEPARATOR_STRING = "$_setter_$"
val SETTER_SUFFIX: TermName = encode("_=")
-
+
@deprecated("2.10.0", "Use SPECIALIZED_SUFFIX")
def SPECIALIZED_SUFFIX_STRING = SPECIALIZED_SUFFIX.toString
@deprecated("2.10.0", "Use SPECIALIZED_SUFFIX")
@@ -92,6 +92,7 @@ trait NameManglers {
def isLocalName(name: Name) = name endsWith LOCAL_SUFFIX_STRING
def isLoopHeaderLabel(name: Name) = (name startsWith WHILE_PREFIX) || (name startsWith DO_WHILE_PREFIX)
def isProtectedAccessorName(name: Name) = name startsWith PROTECTED_PREFIX
+ def isSuperAccessorName(name: Name) = name startsWith SUPER_PREFIX_STRING
def isReplWrapperName(name: Name) = name containsName INTERPRETER_IMPORT_WRAPPER
def isSetterName(name: Name) = name endsWith SETTER_SUFFIX
def isTraitSetterName(name: Name) = isSetterName(name) && (name containsName TRAIT_SETTER_SEPARATOR_STRING)
@@ -120,13 +121,13 @@ trait NameManglers {
name.subName(i, name.length)
} else name
}
-
+
def unspecializedName(name: Name): Name = (
if (name endsWith SPECIALIZED_SUFFIX)
name.subName(0, name.lastIndexOf('m') - 1)
else name
)
-
+
def macroMethodName(name: Name) = {
val base = if (name.isTypeName) nme.TYPEkw else nme.DEFkw
base append nme.MACRO append name
@@ -157,7 +158,7 @@ trait NameManglers {
def getterToLocal(name: TermName): TermName = name append LOCAL_SUFFIX_STRING
def getterToSetter(name: TermName): TermName = name append SETTER_SUFFIX
def localToGetter(name: TermName): TermName = name dropRight LOCAL_SUFFIX_STRING.length
-
+
def dropLocalSuffix(name: Name): Name = if (name endsWith ' ') name dropRight 1 else name
def setterToGetter(name: TermName): TermName = {
diff --git a/src/compiler/scala/reflect/internal/Names.scala b/src/compiler/scala/reflect/internal/Names.scala
index e6ca4c49ba..5f38374f20 100644
--- a/src/compiler/scala/reflect/internal/Names.scala
+++ b/src/compiler/scala/reflect/internal/Names.scala
@@ -73,7 +73,7 @@ trait Names extends api.Names {
/** Create a term name from the characters in cs[offset..offset+len-1]. */
def newTermName(cs: Array[Char], offset: Int, len: Int): TermName =
newTermName(cs, offset, len, cachedString = null)
-
+
def newTermName(cs: Array[Char]): TermName = newTermName(cs, 0, cs.length)
def newTypeName(cs: Array[Char]): TypeName = newTypeName(cs, 0, cs.length)
@@ -87,7 +87,7 @@ trait Names extends api.Names {
var n = termHashtable(h)
while ((n ne null) && (n.length != len || !equals(n.start, cs, offset, len)))
n = n.next
-
+
if (n ne null) n
else {
// The logic order here is future-proofing against the possibility
@@ -135,7 +135,7 @@ trait Names extends api.Names {
/** The name class.
* TODO - resolve schizophrenia regarding whether to treat Names as Strings
- * or Strings as Names. Give names the key functions the absence of which
+ * or Strings as Names. Give names the key functions the absence of which
* make people want Strings all the time.
*/
sealed abstract class Name(protected val index: Int, protected val len: Int) extends AbsName with Function1[Int, Char] {
@@ -166,7 +166,7 @@ trait Names extends api.Names {
/** Return a new name of the same variety. */
def newName(str: String): ThisNameType
-
+
/** Return a new name based on string transformation. */
def mapName(f: String => String): ThisNameType = newName(f(toString))
@@ -357,7 +357,7 @@ trait Names extends api.Names {
def dropRight(n: Int) = subName(0, len - n)
def drop(n: Int) = subName(n, len)
-
+
def indexOf(ch: Char) = {
val idx = pos(ch)
if (idx == length) -1 else idx
@@ -382,7 +382,7 @@ trait Names extends api.Names {
}
newTermName(cs, 0, len)
}
-
+
/** TODO - reconcile/fix that encode returns a Name but
* decode returns a String.
*/
@@ -393,7 +393,7 @@ trait Names extends api.Names {
def encoded: String = "" + encode
// def decodedName: ThisNameType = newName(decoded)
def encodedName: ThisNameType = encode
-
+
/** Replace operator symbols by corresponding $op_name. */
def encode: ThisNameType = {
val str = toString
@@ -425,7 +425,7 @@ trait Names extends api.Names {
def longString: String = nameKind + " " + decode
def debugString = { val s = decode ; if (isTypeName) s + "!" else s }
}
-
+
/** A name that contains no operator chars nor dollar signs.
* TODO - see if it's any faster to do something along these lines.
*/
@@ -461,7 +461,7 @@ trait Names extends api.Names {
sealed abstract class TermName(index0: Int, len0: Int, hash: Int) extends Name(index0, len0) {
type ThisNameType = TermName
protected[this] def thisName: TermName = this
-
+
var next: TermName = termHashtable(hash)
termHashtable(hash) = this
def isTermName: Boolean = true
@@ -488,7 +488,7 @@ trait Names extends api.Names {
sealed abstract class TypeName(index0: Int, len0: Int, hash: Int) extends Name(index0, len0) {
type ThisNameType = TypeName
protected[this] def thisName: TypeName = this
-
+
var next: TypeName = typeHashtable(hash)
typeHashtable(hash) = this
def isTermName: Boolean = false
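Context for the lookup loop visible in the hunks above: names are interned in a hash table where each TermName/TypeName carries a next field linking entries with the same hash, and lookup walks that chain comparing contents before allocating. A simplified sketch of the pattern with stand-alone types (Entry and intern are hypothetical, not the compiler's):

    class Entry(val chars: String, var next: Entry)

    val table = new Array[Entry](0x8000)
    def bucket(s: String): Int = s.## & (table.length - 1)

    def intern(s: String): Entry = {
      val h = bucket(s)
      var n = table(h)
      while ((n ne null) && n.chars != s) n = n.next   // walk the collision chain
      if (n ne null) n                                 // already interned
      else { val e = new Entry(s, table(h)); table(h) = e; e }  // cons onto bucket head
    }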
diff --git a/src/compiler/scala/reflect/internal/Phase.scala b/src/compiler/scala/reflect/internal/Phase.scala
index acd3360c4f..89d643aacf 100644
--- a/src/compiler/scala/reflect/internal/Phase.scala
+++ b/src/compiler/scala/reflect/internal/Phase.scala
@@ -26,6 +26,8 @@ abstract class Phase(val prev: Phase) {
if ((prev ne null) && (prev ne NoPhase)) prev.nx = this
def next: Phase = nx
+ def hasNext = next != this
+ def iterator = Iterator.iterate(this)(_.next) takeWhile (p => p.next != p)
def name: String
def description: String = name
@@ -37,7 +39,7 @@ abstract class Phase(val prev: Phase) {
def refChecked: Boolean = false
/** This is used only in unsafeTypeParams, and at this writing is
- * overridden to false in namer, typer, and erasure. (And NoPhase.)
+ * overridden to false in parser, namer, typer, and erasure. (And NoPhase.)
*/
def keepsTypeParams = true
def run(): Unit
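The hasNext/iterator members added above lean on an invariant of the phase chain: phases form a singly linked list whose terminal phase is its own next, so Iterator.iterate(this)(_.next) takeWhile (p => p.next != p) walks the chain and stops without yielding the self-linked terminus. The same pattern reduced to a sketch:

    class Node(val name: String) { var next: Node = this }  // terminal node points to itself

    val a = new Node("parser"); val b = new Node("namer"); val c = new Node("typer")
    a.next = b; b.next = c                                   // c stays self-linked

    val walked = Iterator.iterate(a)(_.next).takeWhile(p => p.next != p).map(_.name).toList
    assert(walked == List("parser", "namer"))                // self-linked terminus excluded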
diff --git a/src/compiler/scala/reflect/internal/Scopes.scala b/src/compiler/scala/reflect/internal/Scopes.scala
index 37464ebf29..ef48d6102f 100644
--- a/src/compiler/scala/reflect/internal/Scopes.scala
+++ b/src/compiler/scala/reflect/internal/Scopes.scala
@@ -38,11 +38,11 @@ trait Scopes extends api.Scopes { self: SymbolTable =>
}
/** Note: constructor is protected to force everyone to use the factory methods newScope or newNestedScope instead.
- * This is necessary because when run from reflection every scope needs to have a
- * SynchronizedScope as mixin.
+ * This is necessary because when run from reflection every scope needs to have a
+ * SynchronizedScope as mixin.
*/
class Scope protected[Scopes] (initElems: ScopeEntry = null) extends Iterable[Symbol] {
-
+
protected[Scopes] def this(base: Scope) = {
this(base.elems)
nestinglevel = base.nestinglevel + 1
@@ -319,7 +319,7 @@ trait Scopes extends api.Scopes { self: SymbolTable =>
/** Create a new scope */
def newScope: Scope = new Scope()
-
+
/** Create a new scope nested in another one with which it shares its elements */
def newNestedScope(outer: Scope): Scope = new Scope(outer)
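The protected constructor plus newScope/newNestedScope factories above exist so the reflective mirror can swap in thread-safe scopes by overriding only the factories, as the doc comment says. A hedged sketch of that pattern with simplified members (not the actual SynchronizedOps code):

    trait Scopes {
      class Scope protected[Scopes] () {
        def enter(sym: String): Unit = println(s"enter $sym")
      }
      def newScope: Scope = new Scope()   // every creation funnels through the factory
    }

    trait SynchronizedScopes extends Scopes {
      trait SynchronizedScope extends Scope {
        override def enter(sym: String): Unit = synchronized { super.enter(sym) }
      }
      // the thread-safe universe only overrides the factory, call sites are untouched
      override def newScope: Scope = new Scope with SynchronizedScope
    }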
diff --git a/src/compiler/scala/reflect/internal/StdNames.scala b/src/compiler/scala/reflect/internal/StdNames.scala
index a78e0cc939..ef2114b608 100644
--- a/src/compiler/scala/reflect/internal/StdNames.scala
+++ b/src/compiler/scala/reflect/internal/StdNames.scala
@@ -94,6 +94,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val EMPTY: NameType = ""
val ANON_FUN_NAME: NameType = "$anonfun"
+ val ANON_CLASS_NAME: NameType = "$anon"
val EMPTY_PACKAGE_NAME: NameType = "<empty>"
val IMPORT: NameType = "<import>"
val MODULE_VAR_SUFFIX: NameType = "$module"
@@ -245,7 +246,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val x_7 : NameType = "x$7"
val x_8 : NameType = "x$8"
val x_9 : NameType = "x$9"
-
+
@switch def syntheticParamName(i: Int): TermName = i match {
case 0 => nme.x_0
case 1 => nme.x_1
@@ -259,9 +260,9 @@ trait StdNames extends NameManglers { self: SymbolTable =>
case 9 => nme.x_9
case _ => newTermName("x$" + i)
}
-
+
val ??? = encode("???")
-
+
val wrapRefArray: NameType = "wrapRefArray"
val wrapByteArray: NameType = "wrapByteArray"
val wrapShortArray: NameType = "wrapShortArray"
@@ -346,6 +347,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val isInstanceOf_ : NameType = "isInstanceOf"
val isInstanceOf_Ob : NameType = "$isInstanceOf"
val java: NameType = "java"
+ val key: NameType = "key"
val lang: NameType = "lang"
val length: NameType = "length"
val lengthCompare: NameType = "lengthCompare"
@@ -442,7 +444,6 @@ trait StdNames extends NameManglers { self: SymbolTable =>
protected implicit def createNameType(name: String): TypeName = newTypeNameCached(name)
val REFINE_CLASS_NAME: NameType = "<refinement>"
- val ANON_CLASS_NAME: NameType = "$anon"
}
/** For fully qualified type names.
@@ -617,7 +618,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val testLessOrEqualThan: NameType = "testLessOrEqualThan"
val testLessThan: NameType = "testLessThan"
val testNotEqual: NameType = "testNotEqual"
-
+
val isBoxedNumberOrBoolean: NameType = "isBoxedNumberOrBoolean"
val isBoxedNumber: NameType = "isBoxedNumber"
@@ -669,7 +670,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
case `toDouble` => toDouble
case _ => NO_NAME
}
-
+
val reflPolyCacheName: NameType = "reflPoly$Cache"
val reflClassCacheName: NameType = "reflClass$Cache"
val reflParamsCacheName: NameType = "reflParams$Cache"
diff --git a/src/compiler/scala/reflect/internal/SymbolTable.scala b/src/compiler/scala/reflect/internal/SymbolTable.scala
index b3c62bffbf..ce54c32273 100644
--- a/src/compiler/scala/reflect/internal/SymbolTable.scala
+++ b/src/compiler/scala/reflect/internal/SymbolTable.scala
@@ -8,6 +8,7 @@ package internal
import scala.collection.{ mutable, immutable }
import util._
+import scala.tools.nsc.util.WeakHashSet
abstract class SymbolTable extends api.Universe
with Collections
@@ -42,7 +43,7 @@ abstract class SymbolTable extends api.Universe
/** Override with final implementation for inlining. */
def debuglog(msg: => String): Unit = if (settings.debug.value) log(msg)
def debugwarn(msg: => String): Unit = if (settings.debug.value) Console.err.println(msg)
-
+
/** Overridden when we know more about what was happening during a failure. */
def supplementErrorMessage(msg: String): String = msg
@@ -78,16 +79,29 @@ abstract class SymbolTable extends api.Universe
type RunId = Int
final val NoRunId = 0
+ // sigh, this has to be public or atPhase doesn't inline.
+ var phStack: List[Phase] = Nil
private var ph: Phase = NoPhase
private var per = NoPeriod
+ final def atPhaseStack: List[Phase] = phStack
final def phase: Phase = ph
final def phase_=(p: Phase) {
//System.out.println("setting phase to " + p)
- assert((p ne null) && p != NoPhase)
+ assert((p ne null) && p != NoPhase, p)
ph = p
- per = (currentRunId << 8) + p.id
+ per = period(currentRunId, p.id)
+ }
+ final def pushPhase(ph: Phase): Phase = {
+ val current = phase
+ phase = ph
+ phStack ::= ph
+ current
+ }
+ final def popPhase(ph: Phase) {
+ phStack = phStack.tail
+ phase = ph
}
/** The current compiler run identifier. */
@@ -112,20 +126,23 @@ abstract class SymbolTable extends api.Universe
final def phaseOf(period: Period): Phase = phaseWithId(phaseId(period))
final def period(rid: RunId, pid: Phase#Id): Period =
- (currentRunId << 8) + pid
+ (rid << 8) + pid
/** Perform given operation at given phase. */
@inline final def atPhase[T](ph: Phase)(op: => T): T = {
- val current = phase
- phase = ph
+ val saved = pushPhase(ph)
try op
- finally phase = current
+ finally popPhase(saved)
}
+
+
/** Since when it is to be "at" a phase is inherently ambiguous,
* a couple unambiguously named methods.
*/
@inline final def beforePhase[T](ph: Phase)(op: => T): T = atPhase(ph)(op)
@inline final def afterPhase[T](ph: Phase)(op: => T): T = atPhase(ph.next)(op)
+ @inline final def afterCurrentPhase[T](op: => T): T = atPhase(phase.next)(op)
+ @inline final def beforePrevPhase[T](op: => T): T = atPhase(phase.prev)(op)
@inline final def atPhaseNotLaterThan[T](target: Phase)(op: => T): T =
if (target != NoPhase && phase.id > target.id) atPhase(target)(op) else op
@@ -260,9 +277,10 @@ abstract class SymbolTable extends api.Universe
}
}
- def newWeakMap[K, V]() = recordCache(mutable.WeakHashMap[K, V]())
- def newMap[K, V]() = recordCache(mutable.HashMap[K, V]())
- def newSet[K]() = recordCache(mutable.HashSet[K]())
+ def newWeakMap[K, V]() = recordCache(mutable.WeakHashMap[K, V]())
+ def newMap[K, V]() = recordCache(mutable.HashMap[K, V]())
+ def newSet[K]() = recordCache(mutable.HashSet[K]())
+ def newWeakSet[K <: AnyRef]() = recordCache(new WeakHashSet[K]())
}
/** Break into repl debugger if assertion is true. */
@@ -279,7 +297,7 @@ abstract class SymbolTable extends api.Universe
/** The phase which has given index as identifier. */
val phaseWithId: Array[Phase]
-
+
/** Is this symbol table part of reflexive mirror? In this case
* operations need to be made thread safe.
*/
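Two details from the hunks above are worth spelling out. A Period packs a run id and a phase id into one Int; the fix makes period(rid, pid) honor its rid parameter ((rid << 8) + pid) instead of always using currentRunId. And atPhase now keeps an explicit phStack via pushPhase/popPhase, so the try/finally still restores the previous phase while diagnostics can inspect the stack. A minimal sketch of the packing, assuming phase ids occupy the low 8 bits:

    type Period = Int
    def period(rid: Int, pid: Int): Period = (rid << 8) + pid
    def runId(p: Period): Int   = p >>> 8
    def phaseId(p: Period): Int = p & 0xFF   // at most 256 phases per run

    val p = period(3, 17)
    assert(runId(p) == 3 && phaseId(p) == 17)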
diff --git a/src/compiler/scala/reflect/internal/Symbols.scala b/src/compiler/scala/reflect/internal/Symbols.scala
index ce85d65050..62b0206c28 100644
--- a/src/compiler/scala/reflect/internal/Symbols.scala
+++ b/src/compiler/scala/reflect/internal/Symbols.scala
@@ -17,7 +17,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
import definitions._
protected var ids = 0
-
+
val emptySymbolArray = new Array[Symbol](0)
def symbolCount = ids // statistics
@@ -38,14 +38,14 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
nextexid += 1
newTypeName("_" + nextexid + suffix)
}
-
+
// Set the fields which point companions at one another. Returns the module.
def connectModuleToClass(m: ModuleSymbol, moduleClass: ClassSymbol): ModuleSymbol = {
moduleClass.sourceModule = m
m setModuleClass moduleClass
m
}
-
+
/** Create a new free variable. Its owner is NoSymbol.
*/
def newFreeVar(name: TermName, tpe: Type, value: Any, newFlags: Long = 0L): FreeVar =
@@ -67,7 +67,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def selfType: Type = typeOfThis
def typeSignature: Type = info
def typeSignatureIn(site: Type): Type = site memberInfo this
-
+
def asType: Type = tpe
def asTypeIn(site: Type): Type = site.memberType(this)
def asTypeConstructor: Type = typeConstructor
@@ -89,19 +89,19 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
private[this] var _rawowner = initOwner // Syncnote: need not be protected, as only assignment happens in owner_=, which is not exposed to api
private[this] var _rawname = initName
private[this] var _rawflags = 0L
-
+
def rawowner = _rawowner
def rawname = _rawname
def rawflags = _rawflags
-
+
protected def rawflags_=(x: FlagsType) { _rawflags = x }
-
+
private var rawpos = initPos
-
+
val id = nextId() // identity displayed when -uniqid
private[this] var _validTo: Period = NoPeriod
-
+
def validTo = _validTo
def validTo_=(x: Period) { _validTo = x}
@@ -179,10 +179,10 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
*/
def newTermSymbol(name: TermName, pos: Position = NoPosition, newFlags: Long = 0L): TermSymbol =
new TermSymbol(this, pos, name) initFlags newFlags
-
+
def newAbstractTypeSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): AbstractTypeSymbol =
new AbstractTypeSymbol(this, pos, name) initFlags newFlags
-
+
def newAliasTypeSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): AliasTypeSymbol =
new AliasTypeSymbol(this, pos, name) initFlags newFlags
@@ -194,10 +194,10 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def newClassSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): ClassSymbol =
new ClassSymbol(this, pos, name) initFlags newFlags
-
+
def newModuleClassSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): ModuleClassSymbol =
new ModuleClassSymbol(this, pos, name) initFlags newFlags
-
+
/** Derive whether it is an abstract type from the flags; after creation
* the DEFERRED flag will be ignored.
*/
@@ -206,7 +206,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
newAliasTypeSymbol(name, pos, newFlags)
else
newAbstractTypeSymbol(name, pos, newFlags)
-
+
def newTypeSkolemSymbol(name: TypeName, origin: AnyRef, pos: Position = NoPosition, newFlags: Long = 0L): TypeSkolem =
if ((newFlags & DEFERRED) == 0L)
new TypeSkolem(this, pos, name, origin) initFlags newFlags
@@ -243,7 +243,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
*/
final def newAliasType(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): Symbol =
newAliasTypeSymbol(name, pos, newFlags)
-
+
/** Symbol of an abstract type type T >: ... <: ...
*/
final def newAbstractType(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): Symbol =
@@ -261,7 +261,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def freshName() = { cnt += 1; nme.syntheticParamName(cnt) }
mmap(argtypess)(tp => newValueParameter(freshName(), focusPos(owner.pos), SYNTHETIC) setInfo tp)
}
-
+
def newSyntheticTypeParam(): Symbol = newSyntheticTypeParam("T0", 0L)
def newSyntheticTypeParam(name: String, newFlags: Long): Symbol = newTypeParameter(newTypeName(name), NoPosition, newFlags) setInfo TypeBounds.empty
def newSyntheticTypeParams(num: Int): List[Symbol] = (0 until num).toList map (n => newSyntheticTypeParam("T" + n, 0L))
@@ -302,7 +302,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
final def newClass(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L) =
newClassSymbol(name, pos, newFlags)
-
+
/** A new class with its info set to a ClassInfoType with given scope and parents. */
def newClassWithInfo(name: TypeName, parents: List[Type], scope: Scope, pos: Position = NoPosition, newFlags: Long = 0L) = {
val clazz = newClass(name, pos, newFlags)
@@ -354,9 +354,9 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def newAliasType(pos: Position, name: TypeName): Symbol = newAliasType(name, pos)
@deprecated("Use the other signature", "2.10.0")
def newAbstractType(pos: Position, name: TypeName): Symbol = newAbstractType(name, pos)
- @deprecated("Use the other signature", "2.10.0")
+ @deprecated("Use the other signature", "2.10.0")
def newExistential(pos: Position, name: TypeName): Symbol = newExistential(name, pos)
- @deprecated("Use the other signature", "2.10.0")
+ @deprecated("Use the other signature", "2.10.0")
def newMethod(pos: Position, name: TermName): MethodSymbol = newMethod(name, pos)
// ----- locking and unlocking ------------------------------------------------------
@@ -838,7 +838,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
private def addModuleSuffix(n: Name): Name =
if (needsModuleSuffix) n append nme.MODULE_SUFFIX_STRING else n
-
+
def moduleSuffix: String = (
if (needsModuleSuffix) nme.MODULE_SUFFIX_STRING
else ""
@@ -846,7 +846,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
/** Whether this symbol needs nme.MODULE_SUFFIX_STRING (aka $) appended on the java platform.
*/
def needsModuleSuffix = (
- hasModuleFlag
+ hasModuleFlag
&& !isMethod
&& !isImplClass
&& !isJavaDefined
@@ -873,7 +873,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
else if (owner.isEffectiveRoot) name
else effectiveOwner.enclClass.fullNameAsName(separator) append separator append name
)
-
+
def fullNameAsName(separator: Char): Name = nme.dropLocalSuffix(fullNameInternal(separator))
/** The encoded full path name of this symbol, where outer names and inner names
@@ -1028,7 +1028,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
else modifyInfo(_.substSym(syms0, syms1))
def setInfoOwnerAdjusted(info: Type): this.type = setInfo(info atOwner this)
-
+
/** Set the info and enter this symbol into the owner's scope. */
def setInfoAndEnter(info: Type): this.type = {
setInfo(info)
@@ -1166,7 +1166,13 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
abort("typeConstructor inapplicable for " + this)
/** The logic approximately boils down to finding the most recent phase
- * which immediately follows any of namer, typer, or erasure.
+ * which immediately follows any of parser, namer, typer, or erasure.
+ * In effect that means this will return one of:
+ *
+ * - packageobjects (follows namer)
+ * - superaccessors (follows typer)
+ * - lazyvals (follows erasure)
+ * - null
*/
private def unsafeTypeParamPhase = {
var ph = phase
@@ -1337,7 +1343,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
*/
final def isNestedIn(that: Symbol): Boolean =
owner == that || owner != NoSymbol && (owner isNestedIn that)
-
+
/** Is this class symbol a subclass of that symbol,
* and is this class symbol also different from Null or Nothing? */
def isNonBottomSubClass(that: Symbol): Boolean = false
@@ -1398,10 +1404,10 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
clone.typeOfThis = (clone.typeOfThis cloneInfo clone)
if (newName != nme.NO_NAME)
clone.name = newName
-
+
clone
}
-
+
/** Internal method to clone a symbol's implementation with the given flags and no info. */
def cloneSymbolImpl(owner: Symbol, newFlags: Long): Symbol
def cloneSymbolImpl(owner: Symbol): Symbol = cloneSymbolImpl(owner, 0L)
@@ -1594,7 +1600,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
/** Kept for source compatibility with 2.9. Scala IDE for Eclipse relies on this. */
@deprecated("Use enclosingTopLevelClass")
def toplevelClass: Symbol = enclosingTopLevelClass
-
+
/** The top-level class containing this symbol. */
def enclosingTopLevelClass: Symbol =
if (owner.isPackageClass) {
@@ -1870,7 +1876,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
/** Remove any access boundary and clear flags PROTECTED | PRIVATE.
*/
def makePublic = this setPrivateWithin NoSymbol resetFlag AccessFlags
-
+
/** The first parameter to the first argument list of this method,
* or NoSymbol if inapplicable.
*/
@@ -2154,7 +2160,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def referenced: Symbol = _referenced
def referenced_=(x: Symbol) { _referenced = x }
-
+
def existentialBound = singletonBounds(this.tpe)
def cloneSymbolImpl(owner: Symbol, newFlags: Long): Symbol =
@@ -2248,7 +2254,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
if (!isMethod && needsFlatClasses) {
if (flatname eq null)
flatname = nme.flattenedName(rawowner.name, rawname)
-
+
flatname
}
else rawname.toTermName
@@ -2284,7 +2290,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
res
}
}
-
+
class AliasTypeSymbol protected[Symbols] (initOwner: Symbol, initPos: Position, initName: TypeName)
extends TypeSymbol(initOwner, initPos, initName) {
// Temporary programmatic help tracking down who might do such a thing
@@ -2299,13 +2305,13 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
override def cloneSymbolImpl(owner: Symbol, newFlags: Long): AliasTypeSymbol =
owner.newAliasTypeSymbol(name, pos, newFlags)
}
-
+
class AbstractTypeSymbol(initOwner: Symbol, initPos: Position, initName: TypeName)
extends TypeSymbol(initOwner, initPos, initName) with AbstractTypeMixin {
override def cloneSymbolImpl(owner: Symbol, newFlags: Long): AbstractTypeSymbol =
owner.newAbstractTypeSymbol(name, pos, newFlags)
}
-
+
/** Might be mixed into TypeSymbol or TypeSkolem.
*/
trait AbstractTypeMixin extends TypeSymbol {
@@ -2503,7 +2509,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
final override def isNonClassType = false
final override def isAbstractType = false
final override def isAliasType = false
-
+
override def existentialBound = polyType(this.typeParams, TypeBounds.upper(this.classBound))
override def sourceFile =
@@ -2531,19 +2537,19 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
}
thisTypeCache
}
-
+
override def owner: Symbol =
if (needsFlatClasses) rawowner.owner else rawowner
override def name: TypeName = (
if (needsFlatClasses) {
if (flatname eq null)
flatname = nme.flattenedName(rawowner.name, rawname).toTypeName
-
+
flatname
}
else rawname.toTypeName
)
-
+
/** A symbol carrying the self type of the class as its type */
override def thisSym: Symbol = thissym
@@ -2728,7 +2734,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
val syms1 = cloneSymbolsAtOwner(syms, owner)
creator(syms1, tpe.substSym(syms, syms1))
}
-
+
/** A deep map on a symbol's paramss.
*/
def mapParamss[T](sym: Symbol)(f: Symbol => T): List[List[T]] = mmap(sym.info.paramss)(f)
@@ -2752,5 +2758,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
assert(validFrom != NoPeriod)
override def toString() =
"TypeHistory(" + phaseOf(validFrom)+":"+runId(validFrom) + "," + info + "," + prev + ")"
+
+ def toList: List[TypeHistory] = this :: ( if (prev eq null) Nil else prev.toList )
}
}
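The toList just added to TypeHistory converts the prev-linked chain of infos into a List by consing the head onto the recursively converted tail. Its shape, stand-alone (History is a hypothetical stand-in for TypeHistory):

    final case class History(info: String, prev: History) {
      def toList: List[History] = this :: (if (prev eq null) Nil else prev.toList)
    }

    val h = History("erasure", History("typer", History("namer", null)))
    assert(h.toList.map(_.info) == List("erasure", "typer", "namer"))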
diff --git a/src/compiler/scala/reflect/internal/TreeInfo.scala b/src/compiler/scala/reflect/internal/TreeInfo.scala
index e3ee39d2a0..3252b970d1 100644
--- a/src/compiler/scala/reflect/internal/TreeInfo.scala
+++ b/src/compiler/scala/reflect/internal/TreeInfo.scala
@@ -146,7 +146,7 @@ abstract class TreeInfo {
true
}
-
+
/**
* Selects the correct parameter list when there are nested applications.
* Given Apply(fn, args), args might correspond to any of fn.symbol's parameter
@@ -175,7 +175,7 @@ abstract class TreeInfo {
}
def foreachMethodParamAndArg(t: Tree)(f: (Symbol, Tree) => Unit): Unit = t match {
case Apply(fn, args) => foreachMethodParamAndArg(applyMethodParameters(fn), args)(f)
- case _ =>
+ case _ =>
}
/** Is symbol potentially a getter of a variable?
diff --git a/src/compiler/scala/reflect/internal/Trees.scala b/src/compiler/scala/reflect/internal/Trees.scala
index b6b7e3cbda..54cc53aaac 100644
--- a/src/compiler/scala/reflect/internal/Trees.scala
+++ b/src/compiler/scala/reflect/internal/Trees.scala
@@ -121,7 +121,7 @@ trait Trees extends api.Trees { self: SymbolTable =>
new ChangeOwnerTraverser(oldOwner, newOwner) apply t
}
}
-
+
def substTreeSyms(pairs: (Symbol, Symbol)*): Tree = {
val list = pairs.toList
val subst = new TreeSymSubstituter(list map (_._1), list map (_._2))
@@ -230,17 +230,18 @@ trait Trees extends api.Trees { self: SymbolTable =>
def Bind(sym: Symbol, body: Tree): Bind =
Bind(sym.name, body) setSymbol sym
- /** 0-1 argument list new, based on a symbol or type.
- */
- def New(sym: Symbol, args: Tree*): Tree =
- New(sym.tpe, args: _*)
+ def Try(body: Tree, cases: (Tree, Tree)*): Try =
+ Try(body, cases.toList map { case (pat, rhs) => CaseDef(pat, EmptyTree, rhs) }, EmptyTree)
- def New(tpe: Type, args: Tree*): Tree =
- New(TypeTree(tpe), List(args.toList))
+ def Throw(tpe: Type, args: Tree*): Throw =
+ Throw(New(tpe, args: _*))
def Apply(sym: Symbol, args: Tree*): Tree =
Apply(Ident(sym), args.toList)
+ def New(sym: Symbol, args: Tree*): Tree =
+ New(sym.tpe, args: _*)
+
def Super(sym: Symbol, mix: TypeName): Tree = Super(This(sym), mix)
/** Block factory that flattens directly nested blocks.
@@ -272,7 +273,18 @@ trait Trees extends api.Trees { self: SymbolTable =>
override def traverse(t: Tree) {
if (t != EmptyTree && t.pos == NoPosition) {
t.setPos(pos)
- super.traverse(t) // TODO: bug? shouldn't the traverse be outside of the if?
+ super.traverse(t) // TODO: bug? shouldn't the traverse be outside of the if?
+ // @PP: it's pruning whenever it encounters a node with a
+ // position, which I interpret to mean that (in the author's
+ // mind at least) either the children of a positioned node will
+ // already be positioned, or the children of a positioned node
+ // do not merit positioning.
+ //
+ // Whatever the author's rationale, it does seem like a bad idea
+ // to press on through a positioned node to find unpositioned
+ // children beneath it and then to assign whatever happens to
+ // be in `pos` to such nodes. There are supposed to be some
+ // position invariants which I can't imagine surviving that.
}
}
}
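
The factory shuffle above adds a pair-based Try and a Throw built from New. A standalone model of the Try expansion; the case classes below only mirror the tree shapes for illustration and are not the compiler's types:

object TreeModel {
  sealed trait Tree
  case object EmptyTree extends Tree
  case class Ident(name: String) extends Tree
  case class CaseDef(pat: Tree, guard: Tree, body: Tree) extends Tree
  case class Try(block: Tree, catches: List[CaseDef], finalizer: Tree) extends Tree

  // Mirrors the new factory: each (pat, rhs) pair becomes an unguarded CaseDef.
  def TryFromPairs(body: Tree, cases: (Tree, Tree)*): Try =
    Try(body, cases.toList map { case (pat, rhs) => CaseDef(pat, EmptyTree, rhs) }, EmptyTree)

  // TryFromPairs(Ident("expr"), Ident("pat") -> Ident("handler")) ==
  //   Try(Ident("expr"), List(CaseDef(Ident("pat"), EmptyTree, Ident("handler"))), EmptyTree)
}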
diff --git a/src/compiler/scala/reflect/internal/Types.scala b/src/compiler/scala/reflect/internal/Types.scala
index 04efe04636..ed1631fae5 100644
--- a/src/compiler/scala/reflect/internal/Types.scala
+++ b/src/compiler/scala/reflect/internal/Types.scala
@@ -110,13 +110,13 @@ trait Types extends api.Types { self: SymbolTable =>
* to undo constraints in the case of isSubType/isSameType failure.
*/
lazy val undoLog = newUndoLog
-
+
protected def newUndoLog = new UndoLog
-
+
class UndoLog {
private type UndoPairs = List[(TypeVar, TypeConstraint)]
private var log: UndoPairs = List()
-
+
// register with the auto-clearing cache manager
perRunCaches.recordCache(this)
@@ -136,7 +136,7 @@ trait Types extends api.Types { self: SymbolTable =>
private[reflect] def record(tv: TypeVar) = {
log ::= ((tv, tv.constr.cloneInternal))
}
-
+
private[scala] def clear() {
if (settings.debug.value)
self.log("Clearing " + log.size + " entries from the undoLog.")
@@ -428,7 +428,7 @@ trait Types extends api.Types { self: SymbolTable =>
/** For a typeref, its arguments. The empty list for all other types */
def typeArgs: List[Type] = List()
-
+
/** A list of placeholder types derived from the type parameters.
* Used by RefinedType and TypeRef.
*/
@@ -525,7 +525,7 @@ trait Types extends api.Types { self: SymbolTable =>
* Alternatives of overloaded symbol appear in the order they are declared.
*/
def decl(name: Name): Symbol = findDecl(name, 0)
-
+
/** A list of all non-private members defined or declared in this type. */
def nonPrivateDecls: List[Symbol] = decls filter (x => !x.isPrivate) toList
@@ -566,7 +566,7 @@ trait Types extends api.Types { self: SymbolTable =>
*/
def nonPrivateMember(name: Name): Symbol =
memberBasedOnName(name, BridgeAndPrivateFlags)
-
+
/** All members with the given flags, excluding bridges.
*/
def membersWithFlags(requiredFlags: Long): List[Symbol] =
@@ -591,7 +591,7 @@ trait Types extends api.Types { self: SymbolTable =>
* an OverloadedSymbol if several exist, NoSymbol if none exist */
def nonLocalMember(name: Name): Symbol =
memberBasedOnName(name, BridgeFlags | LOCAL)
-
+
/** Members excluding and requiring the given flags.
* Note: unfortunately it doesn't work to exclude DEFERRED this way.
*/
@@ -893,7 +893,7 @@ trait Types extends api.Types { self: SymbolTable =>
def toLongString = {
val str = toString
if (str == "type") widen.toString
- else if (str endsWith ".type") str + " (with underlying type " + widen + ")"
+ else if ((str endsWith ".type") && !typeSymbol.isModuleClass) str + " (with underlying type " + widen + ")"
else str
}
@@ -1237,7 +1237,7 @@ trait Types extends api.Types { self: SymbolTable =>
private[reflect] var underlyingPeriod = NoPeriod
override def underlying: Type = {
val cache = underlyingCache
- if (underlyingPeriod == currentPeriod && cache != null) cache
+ if (underlyingPeriod == currentPeriod && cache != null) cache
else {
defineUnderlyingOfSingleType(this)
underlyingCache
@@ -1280,7 +1280,7 @@ trait Types extends api.Types { self: SymbolTable =>
unique(new UniqueSingleType(pre, sym))
}
}
-
+
protected def defineUnderlyingOfSingleType(tpe: SingleType) = {
val period = tpe.underlyingPeriod
if (period != currentPeriod) {
@@ -1350,13 +1350,13 @@ trait Types extends api.Types { self: SymbolTable =>
override def baseTypeSeq: BaseTypeSeq = {
val cached = baseTypeSeqCache
- if (baseTypeSeqPeriod == currentPeriod && cached != null && cached != undetBaseTypeSeq)
+ if (baseTypeSeqPeriod == currentPeriod && cached != null && cached != undetBaseTypeSeq)
cached
else {
defineBaseTypeSeqOfCompoundType(this)
if (baseTypeSeqCache eq undetBaseTypeSeq)
throw new RecoverableCyclicReference(typeSymbol)
-
+
baseTypeSeqCache
}
}
@@ -1370,7 +1370,7 @@ trait Types extends api.Types { self: SymbolTable =>
defineBaseClassesOfCompoundType(this)
if (baseClassesCache eq null)
throw new RecoverableCyclicReference(typeSymbol)
-
+
baseClassesCache
}
}
@@ -1410,13 +1410,13 @@ trait Types extends api.Types { self: SymbolTable =>
// override def isNullable: Boolean =
// parents forall (p => p.isNullable && !p.typeSymbol.isAbstractType);
-
+
override def safeToString: String =
parents.mkString(" with ") +
(if (settings.debug.value || parents.isEmpty || (decls.elems ne null))
decls.mkString("{", "; ", "}") else "")
}
-
+
protected def defineBaseTypeSeqOfCompoundType(tpe: CompoundType) = {
val period = tpe.baseTypeSeqPeriod;
if (period != currentPeriod) {
@@ -1469,7 +1469,7 @@ trait Types extends api.Types { self: SymbolTable =>
if (tpe.baseTypeSeqCache eq undetBaseTypeSeq)
throw new TypeError("illegal cyclic inheritance involving " + tpe.typeSymbol)
}
-
+
protected def defineBaseClassesOfCompoundType(tpe: CompoundType) = {
def computeBaseClasses: List[Symbol] =
if (tpe.parents.isEmpty) List(tpe.typeSymbol)
@@ -1751,7 +1751,7 @@ trait Types extends api.Types { self: SymbolTable =>
// override def isNonNull: Boolean = symbol == NonNullClass || super.isNonNull;
override def kind = "ClassInfoType"
-
+
override def safeToString =
if (settings.debug.value || decls.size > 1)
formattedToString
@@ -1801,13 +1801,13 @@ trait Types extends api.Types { self: SymbolTable =>
}
}
- /* Syncnote: The `volatile` var and `pendingVolatiles` mutable set need not be protected
+ /* Syncnote: The `volatile` var and `pendingVolatiles` mutable set need not be protected
* with synchronized, because they are accessed only from isVolatile, which is called only from
* Typer.
*/
private var volatileRecursions: Int = 0
private val pendingVolatiles = new mutable.HashSet[Symbol]
-
+
class ArgsTypeRef(pre0: Type, sym0: Symbol, args0: List[Type]) extends TypeRef(pre0, sym0, args0) with UniqueType {
require(args0.nonEmpty, this)
@@ -1825,7 +1825,7 @@ trait Types extends api.Types { self: SymbolTable =>
asSeenFromOwner(tp).instantiateTypeParams(sym.typeParams, args)
}
-
+
// note: does not go through typeRef. There's no need to because
// neither `pre` nor `sym` changes. And there's a performance
// advantage to call TypeRef directly.
@@ -1840,7 +1840,7 @@ trait Types extends api.Types { self: SymbolTable =>
override def isHigherKinded = typeParams.nonEmpty
override def typeParams = if (isDefinitionsInitialized) sym.typeParams else sym.unsafeTypeParams
private def isRaw = !phase.erasedTypes && isRawIfWithoutArgs(sym)
-
+
override def instantiateTypeParams(formals: List[Symbol], actuals: List[Type]): Type =
if (isHigherKinded) {
if (sameLength(formals intersect typeParams, typeParams))
@@ -1860,9 +1860,9 @@ trait Types extends api.Types { self: SymbolTable =>
res
}
- override def transformInfo(tp: Type): Type =
+ override def transformInfo(tp: Type): Type =
appliedType(asSeenFromOwner(tp), dummyArgs)
-
+
override def narrow =
if (sym.isModuleClass) singleType(pre, sym.sourceModule)
else super.narrow
@@ -1870,14 +1870,14 @@ trait Types extends api.Types { self: SymbolTable =>
override def typeConstructor = this
// eta-expand, subtyping relies on eta-expansion of higher-kinded types
- override protected def normalizeImpl: Type =
+ override protected def normalizeImpl: Type =
if (isHigherKinded) etaExpand else super.normalizeImpl
}
-
+
trait ClassTypeRef extends TypeRef {
// !!! There are scaladoc-created symbols arriving which violate this require.
// require(sym.isClass, sym)
-
+
override protected def normalizeImpl: Type =
if (sym.isRefinementClass) sym.info.normalize // I think this is okay, but see #1241 (r12414), #2208, and typedTypeConstructor in Typers
else super.normalizeImpl
@@ -1886,7 +1886,7 @@ trait Types extends api.Types { self: SymbolTable =>
if (sym == clazz) this
else transform(sym.info.baseType(clazz))
}
-
+
trait NonClassTypeRef extends TypeRef {
require(sym.isNonClassType, sym)
@@ -1905,11 +1905,11 @@ trait Types extends api.Types { self: SymbolTable =>
}
relativeInfoCache
}
-
+
override def baseType(clazz: Symbol): Type =
if (sym == clazz) this else baseTypeOfNonClassTypeRef(this, clazz)
}
-
+
protected def baseTypeOfNonClassTypeRef(tpe: NonClassTypeRef, clazz: Symbol) = try {
basetypeRecursions += 1
if (basetypeRecursions < LogPendingBaseTypesThreshold)
@@ -1926,7 +1926,7 @@ trait Types extends api.Types { self: SymbolTable =>
} finally {
basetypeRecursions -= 1
}
-
+
trait AliasTypeRef extends NonClassTypeRef {
require(sym.isAliasType, sym)
@@ -1944,7 +1944,7 @@ trait Types extends api.Types { self: SymbolTable =>
if (typeParamsMatchArgs) betaReduce.normalize
else if (isHigherKinded) super.normalizeImpl
else ErrorType
-
+
// isHKSubType0 introduces synthetic type params so that
// betaReduce can first apply sym.info to typeArgs before calling
// asSeenFrom. asSeenFrom then skips synthetic type params, which
@@ -1954,7 +1954,7 @@ trait Types extends api.Types { self: SymbolTable =>
// this crashes pos/depmet_implicit_tpbetareduce.scala
// appliedType(sym.info, typeArgs).asSeenFrom(pre, sym.owner)
def betaReduce = transform(sym.info.resultType)
-
+
// #3731: return sym1 for which holds: pre bound sym.name to sym and
// pre1 now binds sym.name to sym1, conceptually exactly the same
// symbol as sym. The selection of sym on pre must be updated to the
@@ -1968,12 +1968,12 @@ trait Types extends api.Types { self: SymbolTable =>
// TODO: is there another way a typeref's symbol can refer to a symbol defined in its pre?
case _ => sym
}
-
+
}
trait AbstractTypeRef extends NonClassTypeRef {
require(sym.isAbstractType, sym)
-
+
/** Syncnote: Pure performance caches; no need to synchronize in multi-threaded environment
*/
private var symInfoCache: Type = _
@@ -2002,7 +2002,7 @@ trait Types extends api.Types { self: SymbolTable =>
volatileRecursions -= 1
}
}
-
+
override def thisInfo = {
val symInfo = sym.info
if (thisInfoCache == null || (symInfo ne symInfoCache)) {
@@ -2035,7 +2035,7 @@ trait Types extends api.Types { self: SymbolTable =>
private[reflect] var parentsPeriod = NoPeriod
private[reflect] var baseTypeSeqCache: BaseTypeSeq = _
private[reflect] var baseTypeSeqPeriod = NoPeriod
- private var normalized: Type = _
+ private var normalized: Type = _
// @M: propagate actual type params (args) to `tp`, by replacing
// formal type parameters with actual ones. If tp is higher kinded,
@@ -2057,7 +2057,7 @@ trait Types extends api.Types { self: SymbolTable =>
normalized
}
}
-
+
def etaExpand: Type = {
// must initialise symbol, see test/files/pos/ticket0137.scala
val tpars = initializedTypeParams
@@ -2111,12 +2111,12 @@ trait Types extends api.Types { self: SymbolTable =>
}
thisInfo.decls
}
-
+
protected[Types] def baseTypeSeqImpl: BaseTypeSeq = sym.info.baseTypeSeq map transform
override def baseTypeSeq: BaseTypeSeq = {
val cache = baseTypeSeqCache
- if (baseTypeSeqPeriod == currentPeriod && cache != null && cache != undetBaseTypeSeq)
+ if (baseTypeSeqPeriod == currentPeriod && cache != null && cache != undetBaseTypeSeq)
cache
else {
defineBaseTypeSeqOfTypeRef(this)
@@ -2210,7 +2210,7 @@ trait Types extends api.Types { self: SymbolTable =>
}
})
}
-
+
protected def defineParentsOfTypeRef(tpe: TypeRef) = {
val period = tpe.parentsPeriod
if (period != currentPeriod) {
@@ -2222,7 +2222,7 @@ trait Types extends api.Types { self: SymbolTable =>
}
}
}
-
+
protected def defineBaseTypeSeqOfTypeRef(tpe: TypeRef) = {
val period = tpe.baseTypeSeqPeriod
if (period != currentPeriod) {
@@ -2382,7 +2382,7 @@ trait Types extends api.Types { self: SymbolTable =>
}
object PolyType extends PolyTypeExtractor
-
+
/** A creator for existential types which flattens nested existentials.
*/
def newExistentialType(quantified: List[Symbol], underlying: Type): Type =
@@ -2436,7 +2436,7 @@ trait Types extends api.Types { self: SymbolTable =>
/** An existential can only be printed with wildcards if:
* - the underlying type is a typeref
* - where there is a 1-to-1 correspondence between underlying's typeargs and quantified
- * - and none of the existential parameters is referenced from anywhere else in the type
+ * - and none of the existential parameters is referenced from anywhere else in the type
* - and none of the existential parameters are singleton types
*/
private def isRepresentableWithWildcards = !settings.debug.value && {
@@ -2597,7 +2597,7 @@ trait Types extends api.Types { self: SymbolTable =>
else if (args.isEmpty) new HKTypeVar(origin, constr, params)
else throw new Error("Invalid TypeVar construction: " + ((origin, constr, args, params)))
)
-
+
trace("create", "In " + tv.originLocation)(tv)
}
}
@@ -2638,7 +2638,7 @@ trait Types extends api.Types { self: SymbolTable =>
override def isHigherKinded = true
override protected def typeVarString = params.map(_.name).mkString("[", ", ", "]=>" + originName)
}
-
+
/** Precondition: zipped params/args nonEmpty. (Size equivalence enforced structurally.)
*/
class AppliedTypeVar(
@@ -2646,17 +2646,17 @@ trait Types extends api.Types { self: SymbolTable =>
_constr: TypeConstraint,
zippedArgs: List[(Symbol, Type)]
) extends TypeVar(_origin, _constr) {
-
+
require(zippedArgs.nonEmpty, this)
override def params: List[Symbol] = zippedArgs map (_._1)
override def typeArgs: List[Type] = zippedArgs map (_._2)
-
+
override protected def typeVarString = (
zippedArgs map { case (p, a) => p.name + "=" + a } mkString (origin + "[", ", ", "]")
)
}
-
+
/** A class representing a type variable: not used after phase `typer`.
*
* A higher-kinded TypeVar has params (Symbols) and typeArgs (Types).
@@ -2674,7 +2674,7 @@ trait Types extends api.Types { self: SymbolTable =>
override def typeArgs: List[Type] = Nil
override def isHigherKinded = false
- /** The constraint associated with the variable
+ /** The constraint associated with the variable
* Syncnote: Type variables are assumed to be used from only one
* thread. They are not exposed in api.Types and are used only locally
* in operations that are exposed from types. Hence, no syncing of `constr`
@@ -2685,7 +2685,7 @@ trait Types extends api.Types { self: SymbolTable =>
/** The variable's skolemization level */
val level = skolemizationLevel
-
+
/** Two occurrences of a higher-kinded typevar, e.g. `?CC[Int]` and `?CC[String]`, correspond to
* ''two instances'' of `TypeVar` that share the ''same'' `TypeConstraint`.
*
@@ -2716,7 +2716,7 @@ trait Types extends api.Types { self: SymbolTable =>
// inference may generate several TypeVar's for a single type parameter that must be inferred,
// only one of them is in the set of tvars that need to be solved, but
// they share the same TypeConstraint instance
-
+
// When comparing to types containing skolems, remember the highest level
// of skolemization. If that highest level is higher than our initial
// skolemizationLevel, we can't re-use those skolems as the solution of this
@@ -2940,7 +2940,7 @@ trait Types extends api.Types { self: SymbolTable =>
def originLocation = {
val sym = origin.typeSymbolDirect
val encl = sym.owner.logicallyEnclosingMember
-
+
// This should display somewhere between one and three
      // things which enclose the origin: at most, a class,
      // a method, and a term. At least, a class.
@@ -3272,7 +3272,7 @@ trait Types extends api.Types { self: SymbolTable =>
case WildcardType => tycon // needed for neg/t0226
case _ => abort(debugString(tycon))
}
-
+
/** A creator for existential types where the type arguments,
* rather than being applied directly, are interpreted as the
* upper bounds of unknown types. For instance if the type argument
@@ -3283,7 +3283,7 @@ trait Types extends api.Types { self: SymbolTable =>
tycon match {
case TypeRef(pre, sym, _) if sameLength(sym.typeParams, args) =>
val eparams = typeParamsToExistentials(sym)
- val bounds = args map (TypeBounds upper _)
+ val bounds = args map (TypeBounds upper _)
(eparams, bounds).zipped foreach (_ setInfo _)
newExistentialType(eparams, typeRef(pre, sym, eparams map (_.tpe)))
@@ -3387,7 +3387,7 @@ trait Types extends api.Types { self: SymbolTable =>
mapOver(tp)
}
}
-
+
/** Type with all top-level occurrences of abstract types replaced by their bounds */
def abstractTypesToBounds(tp: Type): Type = tp match { // @M don't normalize here (compiler loops on pos/bug1090.scala )
case TypeRef(_, sym, _) if sym.isAbstractType =>
@@ -3497,7 +3497,7 @@ trait Types extends api.Types { self: SymbolTable =>
def this(lo0: List[Type], hi0: List[Type]) = this(lo0, hi0, NoType, NoType)
def this(bounds: TypeBounds) = this(List(bounds.lo), List(bounds.hi))
def this() = this(List(), List())
-
+
/* Syncnote: Type constraints are assumed to be used from only one
* thread. They are not exposed in api.Types and are used only locally
* in operations that are exposed from types. Hence, no syncing of any
@@ -3571,7 +3571,7 @@ trait Types extends api.Types { self: SymbolTable =>
val hi = hiBounds filterNot (_.typeSymbolDirect eq AnyClass)
val lostr = if (lo.isEmpty) Nil else List(lo.mkString(" >: (", ", ", ")"))
val histr = if (hi.isEmpty) Nil else List(hi.mkString(" <: (", ", ", ")"))
-
+
lostr ++ histr mkString ("[", " | ", "]")
}
if (inst eq NoType) boundsStr
@@ -3597,7 +3597,7 @@ trait Types extends api.Types { self: SymbolTable =>
override def variance = _variance
def variance_=(x: Int) = _variance = x
-
+
override protected def noChangeToSymbols(origSyms: List[Symbol]) = {
origSyms forall { sym =>
val v = variance
@@ -3763,7 +3763,7 @@ trait Types extends api.Types { self: SymbolTable =>
protected def mapOverArgs(args: List[Type], tparams: List[Symbol]): List[Type] =
args mapConserve this
-
+
/** Called by mapOver to determine whether the original symbols can
* be returned, or whether they must be cloned. Overridden in VariantTypeMap.
*/
@@ -3777,7 +3777,7 @@ trait Types extends api.Types { self: SymbolTable =>
if (elems1 eq elems) scope
else newScopeWith(elems1: _*)
}
-
+
/** Map this function over given list of symbols */
def mapOver(origSyms: List[Symbol]): List[Symbol] = {
// fast path in case nothing changes due to map
@@ -3840,7 +3840,7 @@ trait Types extends api.Types { self: SymbolTable =>
def traverse(tp: Type): Unit
def apply(tp: Type): Type = { traverse(tp); tp }
}
-
+
abstract class TypeTraverserWithResult[T] extends TypeTraverser {
def result: T
def clear(): Unit
@@ -3860,13 +3860,13 @@ trait Types extends api.Types { self: SymbolTable =>
*/
// class ContainsVariantExistentialCollector(v: Int) extends TypeCollector(false) with VariantTypeMap {
// variance = v
- //
+ //
// def traverse(tp: Type) = tp match {
// case ExistentialType(_, _) if (variance == v) => result = true
// case _ => mapOver(tp)
// }
// }
- //
+ //
// val containsCovariantExistentialCollector = new ContainsVariantExistentialCollector(1)
// val containsContravariantExistentialCollector = new ContainsVariantExistentialCollector(-1)
@@ -3903,7 +3903,6 @@ trait Types extends api.Types { self: SymbolTable =>
*/
def rawToExistential = new TypeMap {
private var expanded = immutable.Set[Symbol]()
- private var generated = immutable.Set[Type]()
def apply(tp: Type): Type = tp match {
case TypeRef(pre, sym, List()) if isRawIfWithoutArgs(sym) =>
if (expanded contains sym) AnyRefClass.tpe
@@ -3914,15 +3913,11 @@ trait Types extends api.Types { self: SymbolTable =>
} finally {
expanded -= sym
}
- case ExistentialType(_, _) if !(generated contains tp) => // to avoid infinite expansions. todo: not sure whether this is needed
- val result = mapOver(tp)
- generated += result
- result
case _ =>
mapOver(tp)
}
}
-
+
/** Used by existentialAbstraction.
*/
class ExistentialExtrapolation(tparams: List[Symbol]) extends VariantTypeMap {
@@ -3940,10 +3935,10 @@ trait Types extends api.Types { self: SymbolTable =>
countOccs(tpe)
for (tparam <- tparams)
countOccs(tparam.info)
-
+
apply(tpe)
}
-
+
def apply(tp: Type): Type = {
val tp1 = mapOver(tp)
if (variance == 0) tp1
@@ -4319,83 +4314,83 @@ trait Types extends api.Types { self: SymbolTable =>
else mapOver(tp)
}
- class InstantiateDependentMap(params: List[Symbol], actuals: List[Type]) extends TypeMap with KeepOnlyTypeConstraints {
- private val actualsIndexed = actuals.toIndexedSeq
+ class InstantiateDependentMap(params: List[Symbol], actuals0: List[Type]) extends TypeMap with KeepOnlyTypeConstraints {
+ private val actuals = actuals0.toIndexedSeq
+ private val existentials = new Array[Symbol](actuals.size)
+ def existentialsNeeded: List[Symbol] = existentials.filter(_ ne null).toList
- object ParamWithActual {
- def unapply(sym: Symbol): Option[Type] = {
- val pid = params indexOf sym
- if(pid != -1) Some(actualsIndexed(pid)) else None
- }
+ private object StableArg {
+ def unapply(param: Symbol) = Arg unapply param map actuals filter (tp =>
+ tp.isStable && (tp.typeSymbol != NothingClass)
+ )
+ }
+ private object Arg {
+ def unapply(param: Symbol) = Some(params indexOf param) filter (_ >= 0)
}
- def apply(tp: Type): Type =
- mapOver(tp) match {
- case SingleType(NoPrefix, ParamWithActual(arg)) if arg.isStable => arg // unsound to replace args by unstable actual #3873
- // (soundly) expand type alias selections on implicit arguments, see depmet_implicit_oopsla* test cases -- typically, `param.isImplicit`
- case tp1@TypeRef(SingleType(NoPrefix, ParamWithActual(arg)), sym, targs) =>
- val res = typeRef(arg, sym, targs)
- if(res.typeSymbolDirect isAliasType) res.dealias
- else tp1
- case tp1 => tp1 // don't return the original `tp`, which may be different from `tp1`, due to dropping annotations
- }
-
- def existentialsNeeded: List[Symbol] = existSyms.filter(_ ne null).toList
-
- private val existSyms: Array[Symbol] = new Array(actualsIndexed.size)
- private def haveExistential(i: Int) = {assert((i >= 0) && (i <= actualsIndexed.size)); existSyms(i) ne null}
+ def apply(tp: Type): Type = mapOver(tp) match {
+ // unsound to replace args by unstable actual #3873
+ case SingleType(NoPrefix, StableArg(arg)) => arg
+ // (soundly) expand type alias selections on implicit arguments,
+ // see depmet_implicit_oopsla* test cases -- typically, `param.isImplicit`
+ case tp1 @ TypeRef(SingleType(NoPrefix, Arg(pid)), sym, targs) =>
+ val arg = actuals(pid)
+ val res = typeRef(arg, sym, targs)
+ if (res.typeSymbolDirect.isAliasType) res.dealias else tp1
+ // don't return the original `tp`, which may be different from `tp1`,
+ // due to dropping annotations
+ case tp1 => tp1
+ }
/* Return the type symbol for referencing a parameter inside the existential quantifier.
* (Only needed if the actual is unstable.)
*/
- def existSymFor(actualIdx: Int) =
- if (haveExistential(actualIdx)) existSyms(actualIdx)
- else {
- val oldSym = params(actualIdx)
- val symowner = oldSym.owner
- val bound = singletonBounds(actualsIndexed(actualIdx))
-
- val sym = symowner.newExistential(newTypeName(oldSym.name + ".type"), oldSym.pos)
- sym.setInfo(bound)
- sym.setFlag(oldSym.flags)
-
- existSyms(actualIdx) = sym
- sym
+ private def existentialFor(pid: Int) = {
+ if (existentials(pid) eq null) {
+ val param = params(pid)
+ existentials(pid) = (
+ param.owner.newExistential(newTypeName(param.name + ".type"), param.pos, param.flags)
+ setInfo singletonBounds(actuals(pid))
+ )
}
+ existentials(pid)
+ }
//AM propagate more info to annotations -- this seems a bit ad-hoc... (based on code by spoon)
override def mapOver(arg: Tree, giveup: ()=>Nothing): Tree = {
+ // TODO: this should be simplified; in the stable case, one can
+ // probably just use an Ident to the tree.symbol.
+ //
+ // @PP: That leads to failure here, where stuff no longer has type
+ // 'String @Annot("stuff")' but 'String @Annot(x)'.
+ //
+ // def m(x: String): String @Annot(x) = x
+ // val stuff = m("stuff")
+ //
+ // (TODO cont.) Why an existential in the non-stable case?
+ //
+ // @PP: In the following:
+ //
+ // def m = { val x = "three" ; val y: String @Annot(x) = x; y }
+ //
+ // m is typed as 'String @Annot(x) forSome { val x: String }'.
+ //
+ // Both examples are from run/constrained-types.scala.
object treeTrans extends Transformer {
- override def transform(tree: Tree): Tree = {
- tree match {
- case RefParamAt(pid) =>
- // TODO: this should be simplified; in the stable case, one can probably
- // just use an Ident to the tree.symbol. Why an existential in the non-stable case?
- val actual = actualsIndexed(pid)
- if (actual.isStable && actual.typeSymbol != NothingClass) {
- gen.mkAttributedQualifier(actualsIndexed(pid), tree.symbol)
- } else {
- val sym = existSymFor(pid)
- (Ident(sym.name)
- copyAttrs tree
- setType typeRef(NoPrefix, sym, Nil))
- }
- case _ => super.transform(tree)
- }
- }
- object RefParamAt {
- def unapply(tree: Tree): Option[Int] = tree match {
- case Ident(_) => Some(params indexOf tree.symbol) filterNot (_ == -1)
- case _ => None
- }
+ override def transform(tree: Tree): Tree = tree.symbol match {
+ case StableArg(actual) =>
+ gen.mkAttributedQualifier(actual, tree.symbol)
+ case Arg(pid) =>
+ val sym = existentialFor(pid)
+ Ident(sym) copyAttrs tree setType typeRef(NoPrefix, sym, Nil)
+ case _ =>
+ super.transform(tree)
}
}
-
- treeTrans.transform(arg)
+ treeTrans transform arg
}
}
-
object StripAnnotationsMap extends TypeMap {
def apply(tp: Type): Type = tp match {
case AnnotatedType(_, atp, _) =>
@@ -4522,12 +4517,12 @@ trait Types extends api.Types { self: SymbolTable =>
result
}
}
-
+
protected def commonOwnerMap: CommonOwnerMap = commonOwnerMapObj
-
+
protected class CommonOwnerMap extends TypeTraverserWithResult[Symbol] {
var result: Symbol = _
-
+
def clear() { result = null }
private def register(sym: Symbol) {
@@ -4545,7 +4540,7 @@ trait Types extends api.Types { self: SymbolTable =>
case _ => mapOver(tp)
}
}
-
+
private lazy val commonOwnerMapObj = new CommonOwnerMap
class MissingAliasControl extends ControlThrowable
@@ -4553,7 +4548,7 @@ trait Types extends api.Types { self: SymbolTable =>
class MissingTypeControl extends ControlThrowable
object adaptToNewRunMap extends TypeMap {
-
+
private def adaptToNewRun(pre: Type, sym: Symbol): Symbol = {
if (phase.flatClasses) {
sym
@@ -4720,7 +4715,7 @@ trait Types extends api.Types { self: SymbolTable =>
case (TypeRef(pre1, sym1, args1), TypeRef(pre2, sym2, args2)) =>
assert(sym1 == sym2)
pre1 =:= pre2 &&
- forall3(args1, args2, sym1.typeParams) { (arg1, arg2, tparam) =>
+ forall3(args1, args2, sym1.typeParams) { (arg1, arg2, tparam) =>
//if (tparam.variance == 0 && !(arg1 =:= arg2)) Console.println("inconsistent: "+arg1+"!="+arg2)//DEBUG
if (tparam.variance == 0) arg1 =:= arg2
else if (arg1.isInstanceOf[TypeVar])
@@ -5743,8 +5738,8 @@ trait Types extends api.Types { self: SymbolTable =>
val formatted = tableDef.table(transposed)
println("** Depth is " + depth + "\n" + formatted)
}
-
- /** From a list of types, find any which take type parameters
+
+ /** From a list of types, find any which take type parameters
    * where the type parameter bounds contain references to any
    * other types in the list (including itself).
*
@@ -6263,13 +6258,13 @@ trait Types extends api.Types { self: SymbolTable =>
if (ts exists (_.isNotNull)) res.notNull else res
}
-
+
/** A list of the typevars in a type. */
def typeVarsInType(tp: Type): List[TypeVar] = {
var tvs: List[TypeVar] = Nil
tp foreach {
case t: TypeVar => tvs ::= t
- case _ =>
+ case _ =>
}
tvs.reverse
}
@@ -6281,7 +6276,7 @@ trait Types extends api.Types { self: SymbolTable =>
// !!! Is it somehow guaranteed that this will not break under nesting?
// In general one has to save and restore the contents of the field...
tvs foreach (_.suspended = true)
- tvs
+ tvs
}
/** Compute lub (if `variance == 1`) or glb (if `variance == -1`) of given list
@@ -6509,5 +6504,5 @@ trait Types extends api.Types { self: SymbolTable =>
} finally {
tostringRecursions -= 1
}
-
+
}
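
The InstantiateDependentMap rewrite above trades index bookkeeping for two extractors: Arg resolves a parameter symbol to its position in params, and StableArg maps that position through the actuals, succeeding only for stable types. A standalone sketch of the same Option pipeline, with strings standing in for symbols and types and a toy stability predicate:

object DependentMapSketch extends App {
  val params  = List("a", "b", "c")
  val actuals = Vector("x.type", "Nothing", "y.type")

  object Arg {
    def unapply(param: String): Option[Int] =
      Some(params indexOf param) filter (_ >= 0)
  }
  object StableArg {  // same shape as the patch: index, map to actual, filter
    def unapply(param: String): Option[String] =
      Arg unapply param map actuals filter (tp => tp endsWith ".type")
  }

  def describe(p: String) = p match {
    case StableArg(tp) => p + " -> stable " + tp
    case Arg(i)        => p + " -> unstable actual at index " + i
    case _             => p + " -> not a param"
  }
  List("a", "b", "z") map describe foreach println
}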
diff --git a/src/compiler/scala/reflect/internal/pickling/UnPickler.scala b/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
index b21b33e138..34163d54f8 100644
--- a/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
+++ b/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
@@ -846,10 +846,11 @@ abstract class UnPickler /*extends reflect.generic.UnPickler*/ {
private val p = phase
override def complete(sym: Symbol) : Unit = try {
val tp = at(i, () => readType(sym.isTerm)) // after NMT_TRANSITION, revert `() => readType(sym.isTerm)` to `readType`
- if (p != phase) atPhase(p) (sym setInfo tp)
- else sym setInfo tp
- if (currentRunId != definedAtRunId) sym.setInfo(adaptToNewRunMap(tp))
- } catch {
+ atPhase(p) (sym setInfo tp)
+ if (currentRunId != definedAtRunId)
+ sym.setInfo(adaptToNewRunMap(tp))
+ }
+ catch {
case e: MissingRequirementError => throw toTypeError(e)
}
override def load(sym: Symbol) { complete(sym) }
@@ -862,13 +863,12 @@ abstract class UnPickler /*extends reflect.generic.UnPickler*/ {
override def complete(sym: Symbol) = try {
super.complete(sym)
var alias = at(j, readSymbol)
- if (alias.isOverloaded) {
- atPhase(picklerPhase) {
- alias = alias suchThat (alt => sym.tpe =:= sym.owner.thisType.memberType(alt))
- }
- }
+ if (alias.isOverloaded)
+ alias = atPhase(picklerPhase)((alias suchThat (alt => sym.tpe =:= sym.owner.thisType.memberType(alt))))
+
sym.asInstanceOf[TermSymbol].setAlias(alias)
- } catch {
+ }
+ catch {
case e: MissingRequirementError => throw toTypeError(e)
}
}
diff --git a/src/compiler/scala/reflect/internal/util/Collections.scala b/src/compiler/scala/reflect/internal/util/Collections.scala
index 94672097c4..d26a1abadb 100644
--- a/src/compiler/scala/reflect/internal/util/Collections.scala
+++ b/src/compiler/scala/reflect/internal/util/Collections.scala
@@ -64,7 +64,21 @@ trait Collections {
}
lb.toList
}
+
+ final def foreachWithIndex[A, B](xs: List[A])(f: (A, Int) => Unit) {
+ var index = 0
+ var ys = xs
+ while (!ys.isEmpty) {
+ f(ys.head, index)
+ ys = ys.tail
+ index += 1
+ }
+ }
+ @inline final def findOrElse[A](xs: TraversableOnce[A])(p: A => Boolean)(orElse: => A): A = {
+ xs find p getOrElse orElse
+ }
+
final def mapWithIndex[A, B](xs: List[A])(f: (A, Int) => B): List[B] = {
val lb = new ListBuffer[B]
var index = 0
@@ -88,7 +102,7 @@ trait Collections {
val x2 = ys2.head
if (p(x1, x2))
buf += ((x1, x2))
-
+
ys1 = ys1.tail
ys2 = ys2.tail
}
@@ -120,7 +134,7 @@ trait Collections {
while (!ys1.isEmpty && !ys2.isEmpty) {
if (f(ys1.head, ys2.head))
return true
-
+
ys1 = ys1.tail
ys2 = ys2.tail
}
@@ -132,7 +146,7 @@ trait Collections {
while (!ys1.isEmpty && !ys2.isEmpty) {
if (!f(ys1.head, ys2.head))
return false
-
+
ys1 = ys1.tail
ys2 = ys2.tail
}
@@ -145,7 +159,7 @@ trait Collections {
while (!ys1.isEmpty && !ys2.isEmpty && !ys3.isEmpty) {
if (!f(ys1.head, ys2.head, ys3.head))
return false
-
+
ys1 = ys1.tail
ys2 = ys2.tail
ys3 = ys3.tail
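
The two additions in the hunk above are allocation-light helpers: foreachWithIndex walks a List with a manual counter (note the unused B type parameter in the patch), and findOrElse is find with a by-name default, which Global.phaseNamed uses later in this commit. A small usage sketch:

object CollectionsDemo extends App {
  final def foreachWithIndex[A](xs: List[A])(f: (A, Int) => Unit) {
    var index = 0
    var ys = xs
    while (!ys.isEmpty) {
      f(ys.head, index)
      ys = ys.tail
      index += 1
    }
  }
  @inline final def findOrElse[A](xs: TraversableOnce[A])(p: A => Boolean)(orElse: => A): A =
    xs find p getOrElse orElse

  foreachWithIndex(List("parser", "namer", "typer"))((ph, i) => println(i + ": " + ph))
  println(findOrElse(Iterator("parser", "namer"))(_ == "jvm")("NoPhase"))  // NoPhase
}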
diff --git a/src/compiler/scala/reflect/runtime/ConversionUtil.scala b/src/compiler/scala/reflect/runtime/ConversionUtil.scala
index e75fd78590..8c32026e37 100644
--- a/src/compiler/scala/reflect/runtime/ConversionUtil.scala
+++ b/src/compiler/scala/reflect/runtime/ConversionUtil.scala
@@ -23,7 +23,7 @@ trait ConversionUtil { self: SymbolTable =>
toJavaMap(s) = j
}
- def toScala(key: J)(body: => S): S = synchronized {
+ def toScala(key: J)(body: => S): S = synchronized {
toScalaMap get key match {
case Some(v) =>
v
@@ -34,7 +34,7 @@ trait ConversionUtil { self: SymbolTable =>
}
}
- def toJava(key: S)(body: => J): J = synchronized {
+ def toJava(key: S)(body: => J): J = synchronized {
toJavaMap get key match {
case Some(v) =>
v
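
toScala and toJava above are synchronized get-or-compute accessors over a pair of maps that enter keeps in sync. A standalone sketch of that two-way memo table; the class name and method bodies here are illustrative, not the compiler's:

import scala.collection.mutable

class TwoWayCache[J, S] {
  private val toScalaMap = mutable.HashMap[J, S]()
  private val toJavaMap  = mutable.HashMap[S, J]()

  def enter(j: J, s: S): Unit = synchronized {
    toScalaMap(j) = s
    toJavaMap(s)  = j
  }
  // Compute-and-record on a miss, under the same lock as enter.
  def toScala(key: J)(body: => S): S = synchronized {
    toScalaMap.getOrElse(key, { val v = body; enter(key, v); v })
  }
  def toJava(key: S)(body: => J): J = synchronized {
    toJavaMap.getOrElse(key, { val v = body; enter(v, key); v })
  }
}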
diff --git a/src/compiler/scala/reflect/runtime/Mirror.scala b/src/compiler/scala/reflect/runtime/Mirror.scala
index 028a660a35..d3e4dd7619 100644
--- a/src/compiler/scala/reflect/runtime/Mirror.scala
+++ b/src/compiler/scala/reflect/runtime/Mirror.scala
@@ -16,12 +16,12 @@ class Mirror extends Universe with RuntimeTypes with TreeBuildUtil with ToolBoxe
val clazz = javaClass(name, defaultReflectiveClassLoader())
classToScala(clazz)
}
-
+
def companionInstance(clazz: Symbol): AnyRef = {
val singleton = ReflectionUtils.singletonInstance(clazz.fullName, defaultReflectiveClassLoader())
singleton
}
-
+
def symbolOfInstance(obj: Any): Symbol = classToScala(obj.getClass)
def typeOfInstance(obj: Any): Type = typeToScala(obj.getClass)
// to do add getClass/getType for instances of primitive types, probably like this:
@@ -41,8 +41,8 @@ class Mirror extends Universe with RuntimeTypes with TreeBuildUtil with ToolBoxe
case nme.update => return Array.set(receiver, args(0).asInstanceOf[Int], args(1))
}
}
-
- val jmeth = methodToJava(meth)
+
+ val jmeth = methodToJava(meth)
jmeth.invoke(receiver, args.asInstanceOf[Seq[AnyRef]]: _*)
}
@@ -51,7 +51,7 @@ class Mirror extends Universe with RuntimeTypes with TreeBuildUtil with ToolBoxe
override def typeToClass(tpe: Type): java.lang.Class[_] = typeToJavaClass(tpe)
override def symbolToClass(sym: Symbol): java.lang.Class[_] = classToJava(sym)
-
+
override def inReflexiveMirror = true
}
diff --git a/src/compiler/scala/reflect/runtime/SynchronizedOps.scala b/src/compiler/scala/reflect/runtime/SynchronizedOps.scala
index 72adbd4004..dd806beb2a 100644
--- a/src/compiler/scala/reflect/runtime/SynchronizedOps.scala
+++ b/src/compiler/scala/reflect/runtime/SynchronizedOps.scala
@@ -1,22 +1,22 @@
package scala.reflect
package runtime
-trait SynchronizedOps extends internal.SymbolTable
+trait SynchronizedOps extends internal.SymbolTable
with SynchronizedSymbols
with SynchronizedTypes { self: SymbolTable =>
-
+
// Names
-
+
private lazy val nameLock = new Object
-
+
override def newTermName(s: String): TermName = nameLock.synchronized { super.newTermName(s) }
override def newTypeName(s: String): TypeName = nameLock.synchronized { super.newTypeName(s) }
-
+
// BaseTypeSeqs
-
- override protected def newBaseTypeSeq(parents: List[Type], elems: Array[Type]) =
+
+ override protected def newBaseTypeSeq(parents: List[Type], elems: Array[Type]) =
new BaseTypeSeq(parents, elems) with SynchronizedBaseTypeSeq
-
+
trait SynchronizedBaseTypeSeq extends BaseTypeSeq {
override def apply(i: Int): Type = synchronized { super.apply(i) }
override def rawElem(i: Int) = synchronized { super.rawElem(i) }
@@ -30,9 +30,9 @@ trait SynchronizedOps extends internal.SymbolTable
override def lateMap(f: Type => Type): BaseTypeSeq = new MappedBaseTypeSeq(this, f) with SynchronizedBaseTypeSeq
}
-
+
// Scopes
-
+
override def newScope = new Scope() with SynchronizedScope
override def newNestedScope(outer: Scope): Scope = new Scope(outer) with SynchronizedScope
diff --git a/src/compiler/scala/reflect/runtime/SynchronizedSymbols.scala b/src/compiler/scala/reflect/runtime/SynchronizedSymbols.scala
index 9baf94f71d..3f2fa30be2 100644
--- a/src/compiler/scala/reflect/runtime/SynchronizedSymbols.scala
+++ b/src/compiler/scala/reflect/runtime/SynchronizedSymbols.scala
@@ -6,61 +6,61 @@ import internal.Flags.DEFERRED
trait SynchronizedSymbols extends internal.Symbols { self: SymbolTable =>
override protected def nextId() = synchronized { super.nextId() }
-
- override protected def freshExistentialName(suffix: String) =
+
+ override protected def freshExistentialName(suffix: String) =
synchronized { super.freshExistentialName(suffix) }
// Set the fields which point companions at one another. Returns the module.
override def connectModuleToClass(m: ModuleSymbol, moduleClass: ClassSymbol): ModuleSymbol =
synchronized { super.connectModuleToClass(m, moduleClass) }
-
+
override def newFreeVar(name: TermName, tpe: Type, value: Any, newFlags: Long = 0L): FreeVar =
new FreeVar(name, value) with SynchronizedTermSymbol initFlags newFlags setInfo tpe
override protected def makeNoSymbol = new NoSymbol with SynchronizedSymbol
-
+
trait SynchronizedSymbol extends Symbol {
-
+
override def rawowner = synchronized { super.rawowner }
override def rawname = synchronized { super.rawname }
override def rawflags = synchronized { super.rawflags }
-
+
override def rawflags_=(x: FlagsType) = synchronized { super.rawflags_=(x) }
override def name_=(x: Name) = synchronized { super.name_=(x) }
override def owner_=(owner: Symbol) = synchronized { super.owner_=(owner) }
-
+
override def validTo = synchronized { super.validTo }
override def validTo_=(x: Period) = synchronized { super.validTo_=(x) }
-
+
override def pos = synchronized { super.pos }
override def setPos(pos: Position): this.type = { synchronized { super.setPos(pos) }; this }
override def privateWithin = synchronized { super.privateWithin }
- override def privateWithin_=(sym: Symbol) = synchronized { super.privateWithin_=(sym) }
+ override def privateWithin_=(sym: Symbol) = synchronized { super.privateWithin_=(sym) }
- override def info = synchronized { super.info }
+ override def info = synchronized { super.info }
override def info_=(info: Type) = synchronized { super.info_=(info) }
- override def updateInfo(info: Type): Symbol = synchronized { super.updateInfo(info) }
+ override def updateInfo(info: Type): Symbol = synchronized { super.updateInfo(info) }
override def rawInfo: Type = synchronized { super.rawInfo }
override def typeParams: List[Symbol] = synchronized { super.typeParams }
- override def reset(completer: Type) = synchronized { super.reset(completer) }
+ override def reset(completer: Type) = synchronized { super.reset(completer) }
- override def infosString: String = synchronized { super.infosString }
+ override def infosString: String = synchronized { super.infosString }
override def annotations: List[AnnotationInfo] = synchronized { super.annotations }
- override def setAnnotations(annots: List[AnnotationInfo]): this.type = { synchronized { super.setAnnotations(annots) }; this }
+ override def setAnnotations(annots: List[AnnotationInfo]): this.type = { synchronized { super.setAnnotations(annots) }; this }
// ------ creators -------------------------------------------------------------------
override def newTermSymbol(name: TermName, pos: Position = NoPosition, newFlags: Long = 0L): TermSymbol =
new TermSymbol(this, pos, name) with SynchronizedTermSymbol initFlags newFlags
-
+
override def newAbstractTypeSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): AbstractTypeSymbol =
new AbstractTypeSymbol(this, pos, name) with SynchronizedTypeSymbol initFlags newFlags
-
+
override def newAliasTypeSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): AliasTypeSymbol =
new AliasTypeSymbol(this, pos, name) with SynchronizedTypeSymbol initFlags newFlags
@@ -72,10 +72,10 @@ trait SynchronizedSymbols extends internal.Symbols { self: SymbolTable =>
override def newClassSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): ClassSymbol =
new ClassSymbol(this, pos, name) with SynchronizedClassSymbol initFlags newFlags
-
+
override def newModuleClassSymbol(name: TypeName, pos: Position = NoPosition, newFlags: Long = 0L): ModuleClassSymbol =
new ModuleClassSymbol(this, pos, name) with SynchronizedModuleClassSymbol initFlags newFlags
-
+
override def newTypeSkolemSymbol(name: TypeName, origin: AnyRef, pos: Position = NoPosition, newFlags: Long = 0L): TypeSkolem =
if ((newFlags & DEFERRED) == 0L)
new TypeSkolem(this, pos, name, origin) with SynchronizedTypeSymbol initFlags newFlags
@@ -116,4 +116,4 @@ trait SynchronizedSymbols extends internal.Symbols { self: SymbolTable =>
override def implicitMembers: List[Symbol] = synchronized { super.implicitMembers }
}
}
-
+
diff --git a/src/compiler/scala/reflect/runtime/SynchronizedTypes.scala b/src/compiler/scala/reflect/runtime/SynchronizedTypes.scala
index c842d3dd01..e5a508f802 100644
--- a/src/compiler/scala/reflect/runtime/SynchronizedTypes.scala
+++ b/src/compiler/scala/reflect/runtime/SynchronizedTypes.scala
@@ -2,86 +2,86 @@ package scala.reflect
package runtime
/** This trait overrides methods in reflect.internal, bracketing
- * them in synchronized { ... } to make them thread-safe
+ * them in synchronized { ... } to make them thread-safe
*/
trait SynchronizedTypes extends internal.Types { self: SymbolTable =>
-
+
// No sharing of map objects:
override protected def commonOwnerMap = new CommonOwnerMap
-
+
private val uniqueLock = new Object
override def unique[T <: Type](tp: T): T = uniqueLock.synchronized { super.unique(tp) }
-
+
class SynchronizedUndoLog extends UndoLog {
-
- override def clear() =
+
+ override def clear() =
synchronized { super.clear() }
-
+
override def undo[T](block: => T): T =
synchronized { super.undo(block) }
-
+
override def undoUnless(block: => Boolean): Boolean =
synchronized { super.undoUnless(block) }
}
-
+
override protected def newUndoLog = new SynchronizedUndoLog
-
- override protected def baseTypeOfNonClassTypeRef(tpe: NonClassTypeRef, clazz: Symbol) =
+
+ override protected def baseTypeOfNonClassTypeRef(tpe: NonClassTypeRef, clazz: Symbol) =
synchronized { super.baseTypeOfNonClassTypeRef(tpe, clazz) }
-
- private val subsametypeLock = new Object
-
+
+ private val subsametypeLock = new Object
+
override def isSameType(tp1: Type, tp2: Type): Boolean =
subsametypeLock.synchronized { super.isSameType(tp1, tp2) }
-
+
override def isDifferentType(tp1: Type, tp2: Type): Boolean =
subsametypeLock.synchronized { super.isDifferentType(tp1, tp2) }
-
+
override def isSubType(tp1: Type, tp2: Type, depth: Int): Boolean =
subsametypeLock.synchronized { super.isSubType(tp1, tp2, depth) }
-
+
private val lubglbLock = new Object
-
+
override def glb(ts: List[Type]): Type =
lubglbLock.synchronized { super.glb(ts) }
-
+
override def lub(ts: List[Type]): Type =
lubglbLock.synchronized { super.lub(ts) }
-
+
private val indentLock = new Object
-
+
override protected def explain[T](op: String, p: (Type, T) => Boolean, tp1: Type, arg2: T): Boolean = {
indentLock.synchronized { super.explain(op, p, tp1, arg2) }
}
-
+
private val toStringLock = new Object
override protected def typeToString(tpe: Type): String =
toStringLock.synchronized(super.typeToString(tpe))
-
- /* The idea of caches is as follows.
+
+ /* The idea of caches is as follows.
   * When in reflexive mode, a cache is either null, or one sentinel
   * value representing undefined or the final defined
   * value. Hence, we can ask in non-synchronized mode whether the cache field
- * is non null and different from the sentinel (if a sentinel exists).
+ * is non null and different from the sentinel (if a sentinel exists).
* If that's true, the cache value is current.
* Otherwise we arrive in one of the defined... methods listed below
* which go through all steps in synchronized mode.
*/
-
+
override protected def defineUnderlyingOfSingleType(tpe: SingleType) =
tpe.synchronized { super.defineUnderlyingOfSingleType(tpe) }
-
- override protected def defineBaseTypeSeqOfCompoundType(tpe: CompoundType) =
+
+ override protected def defineBaseTypeSeqOfCompoundType(tpe: CompoundType) =
tpe.synchronized { super.defineBaseTypeSeqOfCompoundType(tpe) }
- override protected def defineBaseClassesOfCompoundType(tpe: CompoundType) =
+ override protected def defineBaseClassesOfCompoundType(tpe: CompoundType) =
tpe.synchronized { super.defineBaseClassesOfCompoundType(tpe) }
-
- override protected def defineParentsOfTypeRef(tpe: TypeRef) =
+
+ override protected def defineParentsOfTypeRef(tpe: TypeRef) =
tpe.synchronized { super.defineParentsOfTypeRef(tpe) }
-
- override protected def defineBaseTypeSeqOfTypeRef(tpe: TypeRef) =
+
+ override protected def defineBaseTypeSeqOfTypeRef(tpe: TypeRef) =
tpe.synchronized { super.defineBaseTypeSeqOfTypeRef(tpe) }
} \ No newline at end of file
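
A pattern worth noting in the file above: rather than one global lock, each family of operations gets its own lock object (uniqueLock, subsametypeLock, lubglbLock, ...), so unrelated queries don't contend, while the define... methods lock the type instance itself. A minimal sketch of the stackable-override form, with hypothetical names:

trait Ops {
  def unique(s: String): String  = s.intern
  def isSame(a: Int, b: Int)     = a == b
}
// Stackable wrapper: one private lock per concern, super does the work.
trait SynchronizedOpsSketch extends Ops {
  private val uniqueLock      = new Object
  private val subsametypeLock = new Object
  override def unique(s: String)      = uniqueLock.synchronized { super.unique(s) }
  override def isSame(a: Int, b: Int) = subsametypeLock.synchronized { super.isSame(a, b) }
}
object SyncDemo extends SynchronizedOpsSketch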
diff --git a/src/compiler/scala/tools/ant/Scaladoc.scala b/src/compiler/scala/tools/ant/Scaladoc.scala
index 92d6e6320c..c92474b33e 100644
--- a/src/compiler/scala/tools/ant/Scaladoc.scala
+++ b/src/compiler/scala/tools/ant/Scaladoc.scala
@@ -123,10 +123,10 @@ class Scaladoc extends ScalaMatchingTask {
/** Instruct the compiler to generate unchecked information. */
private var unchecked: Boolean = false
-
+
/** Instruct the ant task not to fail in the event of errors */
private var nofail: Boolean = false
-
+
/*============================================================================*\
** Properties setters **
\*============================================================================*/
@@ -356,7 +356,7 @@ class Scaladoc extends ScalaMatchingTask {
def setDocUncompilable(input: String) {
docUncompilable = Some(input)
}
-
+
/** Set the `nofail` info attribute.
*
   * @param input One of the flags `yes/no` or `on/off`. Default is no/off.
@@ -569,7 +569,7 @@ class Scaladoc extends ScalaMatchingTask {
}
def safeBuildError(message: String): Unit = if (nofail) log(message) else buildError(message)
-
+
/** Performs the compilation. */
override def execute() = {
val Pair(docSettings, sourceFiles) = initialize
diff --git a/src/compiler/scala/tools/ant/templates/tool-unix.tmpl b/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
index 7e51930fa4..599936f6f8 100644
--- a/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
+++ b/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
@@ -128,9 +128,11 @@ if [[ -z "$cygwin$mingw" ]]; then
usebootcp="true"
fi
+# If using the boot classpath, also pass an empty classpath
+# to java to suppress "." from materializing.
classpathArgs () {
if [[ -n $usebootcp ]]; then
- echo "-Xbootclasspath/a:$TOOL_CLASSPATH"
+ echo "-Xbootclasspath/a:$TOOL_CLASSPATH -classpath \"\""
else
echo "-classpath $TOOL_CLASSPATH"
fi
diff --git a/src/compiler/scala/tools/nsc/CompilationUnits.scala b/src/compiler/scala/tools/nsc/CompilationUnits.scala
index 940d115b2f..d6f57801e7 100644
--- a/src/compiler/scala/tools/nsc/CompilationUnits.scala
+++ b/src/compiler/scala/tools/nsc/CompilationUnits.scala
@@ -74,7 +74,7 @@ trait CompilationUnits { self: Global =>
* It is empty up to phase 'icode'.
*/
val icode: LinkedHashSet[icodes.IClass] = new LinkedHashSet
-
+
def echo(pos: Position, msg: String) =
reporter.echo(pos, msg)
diff --git a/src/compiler/scala/tools/nsc/Global.scala b/src/compiler/scala/tools/nsc/Global.scala
index 4493188b31..44dc2fe384 100644
--- a/src/compiler/scala/tools/nsc/Global.scala
+++ b/src/compiler/scala/tools/nsc/Global.scala
@@ -59,7 +59,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
type AbstractFileType = scala.tools.nsc.io.AbstractFile
def mkAttributedQualifier(tpe: Type, termSym: Symbol): Tree = gen.mkAttributedQualifier(tpe, termSym)
-
+
def picklerPhase: Phase = if (currentRun.isDefined) currentRun.picklerPhase else NoPhase
// platform specific elements
@@ -154,7 +154,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
/** Register top level class (called on entering the class)
*/
def registerTopLevelSym(sym: Symbol) {}
-
+
// ------------------ Reporting -------------------------------------
// not deprecated yet, but a method called "error" imported into
@@ -193,10 +193,6 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
if (settings.debug.value)
body
}
- @inline final override def debuglog(msg: => String) {
- if (settings.debug.value && (settings.log containsPhase globalPhase))
- inform("[log " + phase + "] " + msg)
- }
// Warnings issued only under -Ydebug. For messages which should reach
// developer ears, but are not adequately actionable by users.
@inline final override def debugwarn(msg: => String) {
@@ -213,10 +209,29 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
def informTime(msg: String, start: Long) = informProgress(elapsedMessage(msg, start))
def logError(msg: String, t: Throwable): Unit = ()
+
+ def logAfterEveryPhase[T](msg: String)(op: => T) {
+ log("Running operation '%s' after every phase.\n".format(msg) + describeAfterEveryPhase(op))
+ }
+
+ def shouldLogAtThisPhase = (
+ (settings.log.isSetByUser)
+ && ((settings.log containsPhase globalPhase) || (settings.log containsPhase phase))
+ )
+ def atPhaseStackMessage = atPhaseStack match {
+ case Nil => ""
+ case ps => ps.reverseMap("->" + _).mkString("(", " ", ")")
+ }
// Over 200 closure objects are eliminated by inlining this.
- @inline final def log(msg: => AnyRef): Unit =
- if (settings.log containsPhase globalPhase)
- inform("[log " + phase + "] " + msg)
+ @inline final def log(msg: => AnyRef) {
+ if (shouldLogAtThisPhase)
+ inform("[log %s%s] %s".format(globalPhase, atPhaseStackMessage, msg))
+ }
+
+ @inline final override def debuglog(msg: => String) {
+ if (settings.debug.value)
+ log(msg)
+ }
def logThrowable(t: Throwable): Unit = globalError(throwableAsString(t))
def throwableAsString(t: Throwable): String =
@@ -696,18 +711,18 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
private lazy val unitTimings = mutable.HashMap[CompilationUnit, Long]() withDefaultValue 0L // tracking time spent per unit
private def unitTimingsFormatted(): String = {
def toMillis(nanos: Long) = "%.3f" format nanos / 1000000d
-
+
val formatter = new util.TableDef[(String, String)] {
>> ("ms" -> (_._1)) >+ " "
<< ("path" -> (_._2))
}
"" + (
- new formatter.Table(unitTimings.toList sortBy (-_._2) map {
+ new formatter.Table(unitTimings.toList sortBy (-_._2) map {
case (unit, nanos) => (toMillis(nanos), unit.source.path)
})
)
}
-
+
protected def addToPhasesSet(sub: SubComponent, descr: String) {
phasesSet += sub
phasesDescMap(sub) = descr
@@ -754,6 +769,51 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
line1 :: line2 :: descs mkString
}
+ /** Returns List of (phase, value) pairs, including only those
+ * where the value compares unequal to the previous phase's value.
+ */
+ def afterEachPhase[T](op: => T): List[(Phase, T)] = {
+ phaseDescriptors.map(_.ownPhase).foldLeft(List[(Phase, T)]()) { (res, ph) =>
+ val value = afterPhase(ph)(op)
+ if (res.nonEmpty && res.head._2 == value) res
+ else ((ph, value)) :: res
+ } reverse
+ }
+
+ /** Returns List of ChangeAfterPhase objects, encapsulating those
+ * phase transitions where the result of the operation gave a different
+ * list than it had when run during the previous phase.
+ */
+ def changesAfterEachPhase[T](op: => List[T]): List[ChangeAfterPhase[T]] = {
+ val ops = ((NoPhase, Nil)) :: afterEachPhase(op)
+
+ ops sliding 2 map {
+ case (_, before) :: (ph, after) :: Nil =>
+ val lost = before filterNot (after contains _)
+ val gained = after filterNot (before contains _)
+ ChangeAfterPhase(ph, lost, gained)
+ case _ => ???
+ } toList
+ }
+ private def numberedPhase(ph: Phase) = "%2d/%s".format(ph.id, ph.name)
+
+ case class ChangeAfterPhase[+T](ph: Phase, lost: List[T], gained: List[T]) {
+ private def mkStr(what: String, xs: List[_]) = (
+ if (xs.isEmpty) ""
+ else xs.mkString(what + " after " + numberedPhase(ph) + " {\n ", "\n ", "\n}\n")
+ )
+ override def toString = mkStr("Lost", lost) + mkStr("Gained", gained)
+ }
+
+ def describeAfterEachPhase[T](op: => T): List[String] =
+ afterEachPhase(op) map { case (ph, t) => "[after %-15s] %s".format(numberedPhase(ph), t) }
+
+ def describeAfterEveryPhase[T](op: => T): String =
+ describeAfterEachPhase(op) map (" " + _ + "\n") mkString
+
+ def printAfterEachPhase[T](op: => T): Unit =
+ describeAfterEachPhase(op) foreach (m => println(" " + m))
+
// ----------- Runs ---------------------------------------
private var curRun: Run = null
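
The new afterEachPhase above folds op over every phase and records a (phase, value) pair only when the value differs from the previous phase's; changesAfterEachPhase then diffs consecutive snapshots with sliding(2). A standalone sketch of the dedup fold, with Int phase ids standing in for Phase:

object AfterEachPhaseSketch extends App {
  def afterEachPhase[T](phaseIds: List[Int])(op: Int => T): List[(Int, T)] =
    phaseIds.foldLeft(List[(Int, T)]()) { (res, ph) =>
      val value = op(ph)
      if (res.nonEmpty && res.head._2 == value) res     // unchanged: skip
      else ((ph, value)) :: res                         // changed: record
    }.reverse

  // A value that flips at phase 3: only the transitions survive.
  println(afterEachPhase(List(1, 2, 3, 4))(_ >= 3))  // List((1,false), (3,true))
}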
@@ -807,10 +867,28 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
def currentRun: Run = curRun
def currentUnit: CompilationUnit = if (currentRun eq null) NoCompilationUnit else currentRun.currentUnit
def currentSource: SourceFile = if (currentUnit.exists) currentUnit.source else lastSeenSourceFile
-
- @inline final def afterTyper[T](op: => T): T = afterPhase(currentRun.typerPhase)(op)
- @inline final def beforeErasure[T](op: => T): T = beforePhase(currentRun.erasurePhase)(op)
- @inline final def afterErasure[T](op: => T): T = afterPhase(currentRun.erasurePhase)(op)
+
+ // TODO - trim these to the absolute minimum.
+ @inline final def afterErasure[T](op: => T): T = afterPhase(currentRun.erasurePhase)(op)
+ @inline final def afterExplicitOuter[T](op: => T): T = afterPhase(currentRun.explicitouterPhase)(op)
+ @inline final def afterFlatten[T](op: => T): T = afterPhase(currentRun.flattenPhase)(op)
+ @inline final def afterIcode[T](op: => T): T = afterPhase(currentRun.icodePhase)(op)
+ @inline final def afterMixin[T](op: => T): T = afterPhase(currentRun.mixinPhase)(op)
+ @inline final def afterPickler[T](op: => T): T = afterPhase(currentRun.picklerPhase)(op)
+ @inline final def afterRefchecks[T](op: => T): T = afterPhase(currentRun.refchecksPhase)(op)
+ @inline final def afterSpecialize[T](op: => T): T = afterPhase(currentRun.specializePhase)(op)
+ @inline final def afterTyper[T](op: => T): T = afterPhase(currentRun.typerPhase)(op)
+ @inline final def afterUncurry[T](op: => T): T = afterPhase(currentRun.uncurryPhase)(op)
+ @inline final def beforeErasure[T](op: => T): T = beforePhase(currentRun.erasurePhase)(op)
+ @inline final def beforeExplicitOuter[T](op: => T): T = beforePhase(currentRun.explicitouterPhase)(op)
+ @inline final def beforeFlatten[T](op: => T): T = beforePhase(currentRun.flattenPhase)(op)
+ @inline final def beforeIcode[T](op: => T): T = beforePhase(currentRun.icodePhase)(op)
+ @inline final def beforeMixin[T](op: => T): T = beforePhase(currentRun.mixinPhase)(op)
+ @inline final def beforePickler[T](op: => T): T = beforePhase(currentRun.picklerPhase)(op)
+ @inline final def beforeRefchecks[T](op: => T): T = beforePhase(currentRun.refchecksPhase)(op)
+ @inline final def beforeSpecialize[T](op: => T): T = beforePhase(currentRun.specializePhase)(op)
+ @inline final def beforeTyper[T](op: => T): T = beforePhase(currentRun.typerPhase)(op)
+ @inline final def beforeUncurry[T](op: => T): T = beforePhase(currentRun.uncurryPhase)(op)
/** Don't want to introduce new errors trying to report errors,
* so swallow exceptions.
@@ -858,7 +936,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
/** Counts for certain classes of warnings during this run. */
var deprecationWarnings: List[(Position, String)] = Nil
var uncheckedWarnings: List[(Position, String)] = Nil
-
+
/** A flag whether macro expansions failed */
var macroExpansionFailed = false
@@ -919,16 +997,18 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
// Each subcomponent supplies a phase, which are chained together.
// If -Ystop:phase is given, neither that phase nor any beyond it is added.
// If -Yskip:phase is given, that phase will be skipped.
- val lastPhase = phaseDescriptors.tail .
- takeWhile (pd => !stopPhase(pd.phaseName)) .
- filterNot (pd => skipPhase(pd.phaseName)) .
- foldLeft (parserPhase) ((chain, ph) => ph newPhase chain)
-
- // Ensure there is a terminal phase at the end, since -Ystop may have limited the phases.
- terminalPhase =
- if (lastPhase.name == "terminal") lastPhase
- else terminal newPhase lastPhase
-
+ val phaseLinks = {
+ val phs = (
+ phaseDescriptors.tail
+ takeWhile (pd => !stopPhase(pd.phaseName))
+ filterNot (pd => skipPhase(pd.phaseName))
+ )
+ // Ensure there is a terminal phase at the end, since -Ystop may have limited the phases.
+ if (phs.isEmpty || (phs.last ne terminal)) phs :+ terminal
+ else phs
+ }
+ // Link them together.
+ phaseLinks.foldLeft(parserPhase)((chain, ph) => ph newPhase chain)
parserPhase
}
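For orientation (not part of the patch): each descriptor's newPhase links a new phase after the chain built so far, so the foldLeft above leaves the phases threaded from parserPhase through terminal. In miniature, with simplified stand-in types:

// Stand-in types, not the compiler's own:
trait Ph { def name: String }
trait Desc { def newPhase(prev: Ph): Ph }
def assemble(first: Ph, descs: List[Desc]): Ph =
  descs.foldLeft(first)((chain, d) => d.newPhase(chain))  // returns the last phase built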
@@ -1005,37 +1085,45 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
}
def cancel() { reporter.cancelled = true }
-
+
private def currentProgress = (phasec * size) + unitc
private def totalProgress = (phaseDescriptors.size - 1) * size // -1: drops terminal phase
private def refreshProgress() = if (size > 0) progress(currentProgress, totalProgress)
// ----- finding phases --------------------------------------------
- def phaseNamed(name: String): Phase = {
- var p: Phase = firstPhase
- while (p.next != p && p.name != name) p = p.next
- if (p.name != name) NoPhase else p
- }
-
- val parserPhase = phaseNamed("parser")
- val namerPhase = phaseNamed("namer")
- // packageobjects
- val typerPhase = phaseNamed("typer")
- // superaccessors
- val picklerPhase = phaseNamed("pickler")
- val refchecksPhase = phaseNamed("refchecks")
- val uncurryPhase = phaseNamed("uncurry")
- // tailcalls, specialize
- val explicitouterPhase = phaseNamed("explicitouter")
- val erasurePhase = phaseNamed("erasure")
- // lazyvals, lambdalift, constructors
- val flattenPhase = phaseNamed("flatten")
- val mixinPhase = phaseNamed("mixin")
- val cleanupPhase = phaseNamed("cleanup")
- val icodePhase = phaseNamed("icode")
- // inliner, closelim, dce
- val jvmPhase = phaseNamed("jvm")
+ def phaseNamed(name: String): Phase =
+ findOrElse(firstPhase.iterator)(_.name == name)(NoPhase)
+
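findOrElse is one of the compiler's small collection utilities; its assumed shape (a sketch, not quoted from this commit) is what makes the one-liner above read naturally:

// Assumed shape: first element satisfying p, else the default.
def findOrElse[A](xs: TraversableOnce[A])(p: A => Boolean)(orElse: => A): A =
  xs find p getOrElse orElse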
+ /** All phases as of 3/2012, listed here for convenience; the ones
+ * in active use are uncommented.
+ */
+ val parserPhase = phaseNamed("parser")
+ val namerPhase = phaseNamed("namer")
+ // val packageobjectsPhase = phaseNamed("packageobjects")
+ val typerPhase = phaseNamed("typer")
+ // val superaccessorsPhase = phaseNamed("superaccessors")
+ val picklerPhase = phaseNamed("pickler")
+ val refchecksPhase = phaseNamed("refchecks")
+ // val selectiveanfPhase = phaseNamed("selectiveanf")
+ // val selectivecpsPhase = phaseNamed("selectivecps")
+ val uncurryPhase = phaseNamed("uncurry")
+ // val tailcallsPhase = phaseNamed("tailcalls")
+ val specializePhase = phaseNamed("specialize")
+ val explicitouterPhase = phaseNamed("explicitouter")
+ val erasurePhase = phaseNamed("erasure")
+ // val lazyvalsPhase = phaseNamed("lazyvals")
+ val lambdaliftPhase = phaseNamed("lambdalift")
+ // val constructorsPhase = phaseNamed("constructors")
+ val flattenPhase = phaseNamed("flatten")
+ val mixinPhase = phaseNamed("mixin")
+ val cleanupPhase = phaseNamed("cleanup")
+ val icodePhase = phaseNamed("icode")
+ // val inlinerPhase = phaseNamed("inliner")
+ // val inlineExceptionHandlersPhase = phaseNamed("inlineExceptionHandlers")
+ // val closelimPhase = phaseNamed("closelim")
+ // val dcePhase = phaseNamed("dce")
+ val jvmPhase = phaseNamed("jvm")
def runIsAt(ph: Phase) = globalPhase.id == ph.id
def runIsPast(ph: Phase) = globalPhase.id > ph.id
@@ -1171,12 +1259,12 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
*/
def compileUnits(units: List[CompilationUnit], fromPhase: Phase) {
try compileUnitsInternal(units, fromPhase)
- catch { case ex =>
+ catch { case ex =>
globalError(supplementErrorMessage("uncaught exception during compilation: " + ex.getClass.getName))
throw ex
}
}
-
+
private def compileUnitsInternal(units: List[CompilationUnit], fromPhase: Phase) {
units foreach addUnit
if (opt.profileAll) {
@@ -1189,7 +1277,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
checkDeprecatedSettings(unitbuf.head)
globalPhase = fromPhase
- while (globalPhase != terminalPhase && !reporter.hasErrors) {
+ while (globalPhase.hasNext && !reporter.hasErrors) {
val startTime = currentTime
phase = globalPhase
@@ -1310,19 +1398,13 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
/** Compile abstract file until `globalPhase`, but at least to phase "namer".
*/
def compileLate(unit: CompilationUnit) {
- def stop(ph: Phase) = ph == null || ph.id >= (globalPhase.id max typerPhase.id)
- def loop(ph: Phase) {
- if (stop(ph)) refreshProgress
- else {
- atPhase(ph)(ph.asInstanceOf[GlobalPhase] applyPhase unit)
- loop(ph.next match {
- case `ph` => null // ph == ph.next implies terminal, and null ends processing
- case x => x
- })
- }
- }
+ val maxId = math.max(globalPhase.id, typerPhase.id)
addUnit(unit)
- loop(firstPhase)
+
+ firstPhase.iterator takeWhile (_.id < maxId) foreach (ph =>
+ atPhase(ph)(ph.asInstanceOf[GlobalPhase] applyPhase unit)
+ )
+ refreshProgress
}
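The recursive loop/stop pair collapses into this pipeline because firstPhase.iterator walks the next-links. A sketch of the assumed walk - the terminal phase is self-linked (ph.next eq ph), which is what ends it:

// Assumed behaviour of the phase iterator: yield phases in order,
// stopping after the self-linked terminal phase.
def phaseIterator(first: Phase): Iterator[Phase] = new Iterator[Phase] {
  private var ph: Phase = first
  def hasNext = ph ne NoPhase
  def next()  = { val res = ph; ph = if (ph.next eq ph) NoPhase else ph.next; res }
}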
/**
diff --git a/src/compiler/scala/tools/nsc/MacroContext.scala b/src/compiler/scala/tools/nsc/MacroContext.scala
index 72662291f8..9ea1f87125 100644
--- a/src/compiler/scala/tools/nsc/MacroContext.scala
+++ b/src/compiler/scala/tools/nsc/MacroContext.scala
@@ -3,8 +3,8 @@ package scala.tools.nsc
import symtab.Flags._
trait MacroContext extends reflect.macro.Context { self: Global =>
-
+
def captureVariable(vble: Symbol): Unit = vble setFlag CAPTURED
-
+
def referenceCapturedVariable(id: Ident): Tree = ReferenceToBoxed(id)
}
diff --git a/src/compiler/scala/tools/nsc/SubComponent.scala b/src/compiler/scala/tools/nsc/SubComponent.scala
index cd9fef117f..a3e451f32f 100644
--- a/src/compiler/scala/tools/nsc/SubComponent.scala
+++ b/src/compiler/scala/tools/nsc/SubComponent.scala
@@ -47,6 +47,9 @@ abstract class SubComponent {
private var ownPhaseCache: WeakReference[Phase] = new WeakReference(null)
private var ownPhaseRunId = global.NoRunId
+ @inline final def beforeOwnPhase[T](op: => T) = global.beforePhase(ownPhase)(op)
+ @inline final def afterOwnPhase[T](op: => T) = global.afterPhase(ownPhase)(op)
+
/** The phase corresponding to this subcomponent in the current compiler run */
def ownPhase: Phase = {
ownPhaseCache.get match {
diff --git a/src/compiler/scala/tools/nsc/ast/TreeDSL.scala b/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
index f361d45018..0d19b781e2 100644
--- a/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
+++ b/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
@@ -253,13 +253,11 @@ trait TreeDSL {
}
/** Top level accessible. */
- def MATCHERROR(arg: Tree) = Throw(New(MatchErrorClass, arg))
- /** !!! should generalize null guard from match error here. */
- def THROW(sym: Symbol): Throw = Throw(New(sym))
- def THROW(sym: Symbol, msg: Tree): Throw = Throw(New(sym, msg.TOSTRING()))
+ def MATCHERROR(arg: Tree) = Throw(MatchErrorClass.tpe, arg)
+ def THROW(sym: Symbol, msg: Tree): Throw = Throw(sym.tpe, msg.TOSTRING())
def NEW(tpt: Tree, args: Tree*): Tree = New(tpt, List(args.toList))
- def NEW(sym: Symbol, args: Tree*): Tree = New(sym, args: _*)
+ def NEW(sym: Symbol, args: Tree*): Tree = New(sym.tpe, args: _*)
def DEF(name: Name, tp: Type): DefTreeStart = DEF(name) withType tp
def DEF(name: Name): DefTreeStart = new DefTreeStart(name)
diff --git a/src/compiler/scala/tools/nsc/ast/TreeGen.scala b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
index c7414bf34b..d7159c5fa8 100644
--- a/src/compiler/scala/tools/nsc/ast/TreeGen.scala
+++ b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
@@ -30,7 +30,7 @@ abstract class TreeGen extends reflect.internal.TreeGen with TreeDSL {
else
tree
}
-
+
/** Builds a fully attributed wildcard import node.
*/
def mkWildcardImport(pkg: Symbol): Import = {
@@ -51,9 +51,8 @@ abstract class TreeGen extends reflect.internal.TreeGen with TreeDSL {
}
// wrap the given expression in a SoftReference so it can be gc-ed
- def mkSoftRef(expr: Tree): Tree = atPos(expr.pos) {
- New(SoftReferenceClass, expr)
- }
+ def mkSoftRef(expr: Tree): Tree = atPos(expr.pos)(New(SoftReferenceClass.tpe, expr))
+
// annotate the expression with @unchecked
def mkUnchecked(expr: Tree): Tree = atPos(expr.pos) {
// This can't be "Annotated(New(UncheckedClass), expr)" because annotations
@@ -161,7 +160,7 @@ abstract class TreeGen extends reflect.internal.TreeGen with TreeDSL {
def mkModuleVarDef(accessor: Symbol) = {
val inClass = accessor.owner.isClass
val extraFlags = if (inClass) PrivateLocal | SYNTHETIC else 0
-
+
val mval = (
accessor.owner.newVariable(nme.moduleVarName(accessor.name), accessor.pos.focus, MODULEVAR | extraFlags)
setInfo accessor.tpe.finalResultType
@@ -220,6 +219,18 @@ abstract class TreeGen extends reflect.internal.TreeGen with TreeDSL {
def mkSynchronized(monitor: Tree, body: Tree): Tree =
Apply(Select(monitor, Object_synchronized), List(body))
+ def mkAppliedTypeForCase(clazz: Symbol): Tree = {
+ val numParams = clazz.typeParams.size
+ if (clazz.typeParams.isEmpty) Ident(clazz)
+ else AppliedTypeTree(Ident(clazz), 1 to numParams map (_ => Bind(tpnme.WILDCARD, EmptyTree)) toList)
+ }
+ def mkBindForCase(patVar: Symbol, clazz: Symbol, targs: List[Type]): Tree = {
+ Bind(patVar, Typed(Ident(nme.WILDCARD),
+ if (targs.isEmpty) mkAppliedTypeForCase(clazz)
+ else AppliedTypeTree(Ident(clazz), targs map TypeTree)
+ ))
+ }
+
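Roughly what the two new helpers produce, written as the source-level patterns their trees correspond to (class C and binder x are illustrative):

// mkAppliedTypeForCase(C)            ~  the type pattern  C[_, _]   (one wildcard Bind per type param)
// mkBindForCase(x, C, Nil)           ~  the full pattern  x @ (_: C[_, _])
// mkBindForCase(x, C, List(T1, T2))  ~                    x @ (_: C[T1, T2])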
def wildcardStar(tree: Tree) =
atPos(tree.pos) { Typed(tree, Ident(tpnme.WILDCARD_STAR)) }
diff --git a/src/compiler/scala/tools/nsc/ast/Trees.scala b/src/compiler/scala/tools/nsc/ast/Trees.scala
index 855b55bb5e..ad87889145 100644
--- a/src/compiler/scala/tools/nsc/ast/Trees.scala
+++ b/src/compiler/scala/tools/nsc/ast/Trees.scala
@@ -79,16 +79,16 @@ trait Trees extends reflect.internal.Trees { self: Global =>
val (edefs, rest) = body span treeInfo.isEarlyDef
val (evdefs, etdefs) = edefs partition treeInfo.isEarlyValDef
val gvdefs = evdefs map {
- case vdef @ ValDef(mods, name, tpt, rhs) =>
- treeCopy.ValDef(
- vdef.duplicate, mods, name,
- atPos(focusPos(vdef.pos)) { TypeTree() setOriginal tpt setPos focusPos(tpt.pos) }, // atPos in case
- EmptyTree)
- }
- val lvdefs = evdefs map {
- case vdef @ ValDef(mods, name, tpt, rhs) =>
- treeCopy.ValDef(vdef, Modifiers(PRESUPER), name, tpt, rhs)
+ case vdef @ ValDef(_, _, tpt, _) => copyValDef(vdef)(
+ // !!! I know "atPos in case" wasn't intentionally planted to
+ // add an air of mystery to this file, but it is the sort of
+ // comment which only its author could love.
+ tpt = atPos(focusPos(vdef.pos))(TypeTree() setOriginal tpt setPos focusPos(tpt.pos)), // atPos in case
+ rhs = EmptyTree
+ )
}
+ val lvdefs = evdefs collect { case vdef: ValDef => copyValDef(vdef)(mods = Modifiers(PRESUPER)) }
+
val constrs = {
if (constrMods hasFlag TRAIT) {
if (body forall treeInfo.isInterfaceMember) List()
@@ -108,13 +108,11 @@ trait Trees extends reflect.internal.Trees { self: Global =>
DefDef(constrMods, nme.CONSTRUCTOR, List(), vparamss1, TypeTree(), Block(lvdefs ::: List(superCall), Literal(Constant())))))
}
}
- // println("typed template, gvdefs = "+gvdefs+", parents = "+parents+", constrs = "+constrs)
constrs foreach (ensureNonOverlapping(_, parents ::: gvdefs))
- // vparamss2 are used as field definitions for the class. remove defaults
- val vparamss2 = vparamss map (vps => vps map { vd =>
- treeCopy.ValDef(vd, vd.mods &~ DEFAULTPARAM, vd.name, vd.tpt, EmptyTree)
- })
- Template(parents, self, gvdefs ::: vparamss2.flatten ::: constrs ::: etdefs ::: rest)
+ // Field definitions for the class - remove defaults.
+ val fieldDefs = vparamss.flatten map (vd => copyValDef(vd)(mods = vd.mods &~ DEFAULTPARAM, rhs = EmptyTree))
+
+ Template(parents, self, gvdefs ::: fieldDefs ::: constrs ::: etdefs ::: rest)
}
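copyValDef, used here and in the parser hunks below, is a tree copier with named defaults; under its assumed shape (a sketch), each call site names only the fields it changes:

// Assumed shape: defaults pull unchanged fields from the original tree.
def copyValDef(vd: ValDef)(
  mods: Modifiers = vd.mods,
  name: Name      = vd.name,
  tpt:  Tree      = vd.tpt,
  rhs:  Tree      = vd.rhs
): ValDef = treeCopy.ValDef(vd, mods, name, tpt, rhs)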
/** Construct class definition with given class symbol, value parameters,
diff --git a/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala b/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
index e3a59058a3..0e5f9ee80e 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
@@ -38,7 +38,7 @@ trait ParsersCommon extends ScannersCommon {
def freshTypeName(prefix: String): TypeName
def deprecationWarning(off: Int, msg: String): Unit
def accept(token: Int): Int
-
+
/** Methods inParensOrError and similar take a second argument which, should
* the next token not be the expected opener (e.g. LPAREN), will be returned
* instead of the contents of the groupers. However, in all cases accept(LPAREN)
@@ -1141,7 +1141,7 @@ self =>
private def interpolatedString(): Tree = atPos(in.offset) {
val start = in.offset
val interpolator = in.name
-
+
val partsBuf = new ListBuffer[Tree]
val exprBuf = new ListBuffer[Tree]
in.nextToken()
@@ -1153,7 +1153,7 @@ self =>
}
}
if (in.token == STRINGLIT) partsBuf += literal()
-
+
val t1 = atPos(o2p(start)) { Ident(nme.StringContext) }
val t2 = atPos(start) { Apply(t1, partsBuf.toList) }
t2 setPos t2.pos.makeTransparent
@@ -1423,15 +1423,14 @@ self =>
def implicitClosure(start: Int, location: Int): Tree = {
val param0 = convertToParam {
atPos(in.offset) {
- var paramexpr: Tree = Ident(ident())
- if (in.token == COLON) {
- in.nextToken()
- paramexpr = Typed(paramexpr, typeOrInfixType(location))
+ Ident(ident()) match {
+ case expr if in.token == COLON =>
+ in.nextToken() ; Typed(expr, typeOrInfixType(location))
+ case expr => expr
}
- paramexpr
}
}
- val param = treeCopy.ValDef(param0, param0.mods | Flags.IMPLICIT, param0.name, param0.tpt, param0.rhs)
+ val param = copyValDef(param0)(mods = param0.mods | Flags.IMPLICIT)
atPos(start, in.offset) {
accept(ARROW)
Function(List(param), if (location != InBlock) expr() else block())
@@ -2689,8 +2688,8 @@ self =>
val (self, body) = templateBody(true)
if (in.token == WITH && self.isEmpty) {
val earlyDefs: List[Tree] = body flatMap {
- case vdef @ ValDef(mods, name, tpt, rhs) if !mods.isDeferred =>
- List(treeCopy.ValDef(vdef, mods | Flags.PRESUPER, name, tpt, rhs))
+ case vdef @ ValDef(mods, _, _, _) if !mods.isDeferred =>
+ List(copyValDef(vdef)(mods = mods | Flags.PRESUPER))
case tdef @ TypeDef(mods, name, tparams, rhs) =>
List(treeCopy.TypeDef(tdef, mods | Flags.PRESUPER, name, tparams, rhs))
case stat if !stat.isEmpty =>
@@ -2968,9 +2967,9 @@ self =>
val annots = annotations(true)
val pos = in.offset
val mods = (localModifiers() | implicitMod) withAnnotations annots
- val defs =
+ val defs = joinComment( // for SI-5527
if (!(mods hasFlag ~(Flags.IMPLICIT | Flags.LAZY))) defOrDcl(pos, mods)
- else List(tmplDef(pos, mods))
+ else List(tmplDef(pos, mods)))
in.token match {
case RBRACE | CASE => defs :+ (Literal(Constant()) setPos o2p(in.offset))
diff --git a/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala b/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
index f712c7411f..2895d02dfe 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
@@ -84,7 +84,7 @@ trait Scanners extends ScannersCommon {
abstract class Scanner extends CharArrayReader with TokenData with ScannerCommon {
private def isDigit(c: Char) = java.lang.Character isDigit c
-
+
def isAtEnd = charOffset >= buf.length
def flush = { charOffset = offset; nextChar(); this }
@@ -164,7 +164,7 @@ trait Scanners extends ScannersCommon {
* RBRACE if region starts with '{'
* ARROW if region starts with `case'
* STRINGLIT if region is a string interpolation expression starting with '${'
- * (the STRINGLIT appears twice in succession on the stack iff the
+ * (the STRINGLIT appears twice in succession on the stack iff the
* expression is a multiline string literal).
*/
var sepRegions: List[Int] = List()
@@ -173,15 +173,15 @@ trait Scanners extends ScannersCommon {
/** Are we directly in a string interpolation expression?
*/
- @inline private def inStringInterpolation =
+ @inline private def inStringInterpolation =
sepRegions.nonEmpty && sepRegions.head == STRINGLIT
-
+
/** Are we directly in a multiline string interpolation expression?
* @pre: inStringInterpolation
*/
- @inline private def inMultiLineInterpolation =
+ @inline private def inMultiLineInterpolation =
inStringInterpolation && sepRegions.tail.nonEmpty && sepRegions.tail.head == STRINGPART
-
+
/** read next token and return last offset
*/
def skipToken(): Offset = {
@@ -205,7 +205,7 @@ trait Scanners extends ScannersCommon {
case CASE =>
sepRegions = ARROW :: sepRegions
case RBRACE =>
- while (!sepRegions.isEmpty && sepRegions.head != RBRACE)
+ while (!sepRegions.isEmpty && sepRegions.head != RBRACE)
sepRegions = sepRegions.tail
if (!sepRegions.isEmpty) sepRegions = sepRegions.tail
docBuffer = null
@@ -223,7 +223,7 @@ trait Scanners extends ScannersCommon {
sepRegions = sepRegions.tail
case _ =>
}
-
+
// Read a token or copy it from `next` tokenData
if (next.token == EMPTY) {
lastOffset = charOffset - 1
@@ -327,8 +327,8 @@ trait Scanners extends ScannersCommon {
'z' =>
putChar(ch)
nextChar()
- getIdentRest()
- if (ch == '"' && token == IDENTIFIER && settings.Xexperimental.value)
+ getIdentRest()
+ if (ch == '"' && token == IDENTIFIER && settings.Xexperimental.value)
token = INTERPOLATIONID
case '<' => // is XMLSTART?
val last = if (charOffset >= 2) buf(charOffset - 2) else ' '
@@ -409,7 +409,7 @@ trait Scanners extends ScannersCommon {
token = STRINGLIT
strVal = ""
}
- } else {
+ } else {
getStringLit()
}
}
@@ -632,8 +632,8 @@ trait Scanners extends ScannersCommon {
else finishNamed()
}
}
-
-
+
+
// Literals -----------------------------------------------------------------
private def getStringLit() = {
@@ -661,21 +661,27 @@ trait Scanners extends ScannersCommon {
getRawStringLit()
}
}
-
+
@annotation.tailrec private def getStringPart(multiLine: Boolean): Unit = {
def finishStringPart() = {
setStrVal()
token = STRINGPART
next.lastOffset = charOffset - 1
next.offset = charOffset - 1
- }
+ }
if (ch == '"') {
- nextRawChar()
- if (!multiLine || isTripleQuote()) {
+ if (multiLine) {
+ nextRawChar()
+ if (isTripleQuote()) {
+ setStrVal()
+ token = STRINGLIT
+ } else
+ getStringPart(multiLine)
+ } else {
+ nextChar()
setStrVal()
token = STRINGLIT
- } else
- getStringPart(multiLine)
+ }
} else if (ch == '$') {
nextRawChar()
if (ch == '$') {
@@ -698,20 +704,23 @@ trait Scanners extends ScannersCommon {
} else {
syntaxError("invalid string interpolation")
}
- } else if ((ch == CR || ch == LF || ch == SU) && !isUnicodeEscape) {
- syntaxError("unclosed string literal")
} else {
- putChar(ch)
- nextRawChar()
- getStringPart(multiLine)
+ val isUnclosedLiteral = !isUnicodeEscape && (ch == SU || (!multiLine && (ch == CR || ch == LF)))
+ if (isUnclosedLiteral) {
+ syntaxError(if (!multiLine) "unclosed string literal" else "unclosed multi-line string literal")
+ } else {
+ putChar(ch)
+ nextRawChar()
+ getStringPart(multiLine)
+ }
}
}
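The isUnclosedLiteral test above also sharpens the error cases: a raw newline is only fatal in a single-line interpolated string, while end of input (SU) is fatal in both forms, each with its own message. Illustrative inputs:

// s"ab<newline>        => "unclosed string literal"  (CR/LF fatal only when !multiLine)
// s"""ab<newline>...   => fine; newlines are part of a multi-line literal
// s"""ab<end of input> => "unclosed multi-line string literal"  (SU fatal either way)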
-
+
private def fetchStringPart() = {
offset = charOffset - 1
getStringPart(multiLine = inMultiLineInterpolation)
}
-
+
private def isTripleQuote(): Boolean =
if (ch == '"') {
nextRawChar()
@@ -732,7 +741,7 @@ trait Scanners extends ScannersCommon {
false
}
- /** copy current character into cbuf, interpreting any escape sequences,
+ /** copy current character into cbuf, interpreting any escape sequences,
* and advance to next character.
*/
protected def getLitChar(): Unit =
diff --git a/src/compiler/scala/tools/nsc/ast/parser/Tokens.scala b/src/compiler/scala/tools/nsc/ast/parser/Tokens.scala
index 091f333c27..fb4daefd57 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/Tokens.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/Tokens.scala
@@ -58,7 +58,7 @@ object Tokens extends Tokens {
final val BACKQUOTED_IDENT = 11
def isIdentifier(code: Int) =
code >= IDENTIFIER && code <= BACKQUOTED_IDENT
-
+
@switch def canBeginExpression(code: Int) = code match {
case IDENTIFIER|BACKQUOTED_IDENT|USCORE => true
case LBRACE|LPAREN|LBRACKET|COMMENT => true
diff --git a/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala b/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
index 05571b2424..aab944f65a 100644
--- a/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
+++ b/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
@@ -565,7 +565,7 @@ abstract class ScalaPrimitives {
import definitions._
val code = getPrimitive(fun)
- def elementType = atPhase(currentRun.typerPhase) {
+ def elementType = beforeTyper {
val arrayParent = tpe :: tpe.parents collectFirst {
case TypeRef(_, ArrayClass, elem :: Nil) => elem
}
diff --git a/src/compiler/scala/tools/nsc/backend/icode/BasicBlocks.scala b/src/compiler/scala/tools/nsc/backend/icode/BasicBlocks.scala
index 4ab0eb0129..68c4ac03f6 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/BasicBlocks.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/BasicBlocks.scala
@@ -18,7 +18,7 @@ trait BasicBlocks {
import opcodes._
import global.{ ifDebug, settings, log, nme }
import nme.isExceptionResultName
-
+
object NoBasicBlock extends BasicBlock(-1, null)
/** This class represents a basic block. Each
@@ -182,7 +182,7 @@ trait BasicBlocks {
final def foreach[U](f: Instruction => U) = {
if (!closed) dumpMethodAndAbort(method, this)
else instrs foreach f
-
+
// !!! If I replace "instrs foreach f" with the following:
// var i = 0
// val len = instrs.length
diff --git a/src/compiler/scala/tools/nsc/backend/icode/Members.scala b/src/compiler/scala/tools/nsc/backend/icode/Members.scala
index 298c9171a1..36651541b2 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/Members.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/Members.scala
@@ -21,7 +21,7 @@ trait Members {
self: ICodes =>
import global._
-
+
object NoCode extends Code(null, "NoCode") {
override def blocksList: List[BasicBlock] = Nil
}
@@ -138,7 +138,7 @@ trait Members {
/** Represent a field in ICode */
class IField(val symbol: Symbol) extends IMember { }
-
+
object NoIMethod extends IMethod(NoSymbol) { }
/**
@@ -183,12 +183,7 @@ trait Members {
this
}
- def addLocal(l: Local): Local =
- locals find (_ == l) getOrElse {
- locals ::= l
- l
- }
-
+ def addLocal(l: Local): Local = findOrElse(locals)(_ == l) { locals ::= l ; l }
def addParam(p: Local): Unit =
if (params contains p) ()
@@ -213,6 +208,12 @@ trait Members {
override def toString() = symbol.fullName
+ def matchesSignature(other: IMethod) = {
+ (symbol.name == other.symbol.name) &&
+ (params corresponds other.params)(_.kind == _.kind) &&
+ (returnType == other.returnType)
+ }
+
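matchesSignature leans on corresponds, which pairs the two parameter lists element-wise and is false on a length mismatch, exactly what a signature comparison needs. A standalone illustration (hypothetical kind strings):

// true iff same length and the predicate holds for every aligned pair
val a = List("I", "J")
val b = List("I", "J")
assert((a corresponds b)(_ == _))        // true
assert(!(a corresponds b.tail)(_ == _))  // length mismatch => false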
import opcodes._
def checkLocals(): Unit = {
def localsSet = (code.blocks flatMap { bb =>
diff --git a/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala b/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
index a485272ca6..5eef02f2cb 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
@@ -145,7 +145,7 @@ trait TypeKinds { self: ICodes =>
* Here we make the adjustment by rewinding to a pre-erasure state and
* sifting through the parents for a class type.
*/
- def lub0(tk1: TypeKind, tk2: TypeKind): Type = atPhase(currentRun.uncurryPhase) {
+ def lub0(tk1: TypeKind, tk2: TypeKind): Type = beforeUncurry {
import definitions._
val tp = global.lub(List(tk1.toType, tk2.toType))
val (front, rest) = tp.parents span (_.typeSymbol.hasTraitFlag)
diff --git a/src/compiler/scala/tools/nsc/backend/icode/TypeStacks.scala b/src/compiler/scala/tools/nsc/backend/icode/TypeStacks.scala
index ba4b250303..8a2ec9a191 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/TypeStacks.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/TypeStacks.scala
@@ -21,7 +21,7 @@ trait TypeStacks {
* stack of the ICode.
*/
type Rep = List[TypeKind]
-
+
object NoTypeStack extends TypeStack(Nil) { }
class TypeStack(var types: Rep) {
diff --git a/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala b/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
index c06bd2e097..69de0dfa90 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
@@ -105,11 +105,9 @@ abstract class ReachingDefinitions {
def genAndKill(b: BasicBlock): (ListSet[Definition], ListSet[Local]) = {
var genSet = ListSet[Definition]()
var killSet = ListSet[Local]()
- for ((i, idx) <- b.toList.zipWithIndex) i match {
- case STORE_LOCAL(local) =>
- killSet = killSet + local
- genSet = updateReachingDefinition(b, idx, genSet)
- case _ => ()
+ for ((STORE_LOCAL(local), idx) <- b.toList.zipWithIndex) {
+ killSet = killSet + local
+ genSet = updateReachingDefinition(b, idx, genSet)
}
(genSet, killSet)
}
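The rewritten loop relies on pattern filtering in for-comprehensions: pairs whose first element is not a STORE_LOCAL are skipped silently, replacing the explicit case _ => () arm. In miniature (hypothetical instruction types):

sealed trait Instr
case class StoreLocal(name: String) extends Instr
case object Nop extends Instr

val instrs = List(StoreLocal("x"), Nop, StoreLocal("y"))
for ((StoreLocal(n), idx) <- instrs.zipWithIndex)
  println(n + " stored at index " + idx)  // x at 0, y at 2; Nop is filtered out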
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala b/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
index 865bacffaa..c217869a48 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
@@ -23,9 +23,7 @@ trait BytecodeWriters {
import global._
private def outputDirectory(sym: Symbol): AbstractFile = (
- settings.outputDirs.outputDirFor {
- atPhase(currentRun.flattenPhase.prev)(sym.sourceFile)
- }
+ settings.outputDirs.outputDirFor(beforeFlatten(sym.sourceFile))
)
private def getFile(base: AbstractFile, cls: JClass, suffix: String): AbstractFile = {
var dir = base
@@ -85,7 +83,7 @@ trait BytecodeWriters {
emitJavap(bytes, javapFile)
}
}
-
+
trait ClassBytecodeWriter extends BytecodeWriter {
def writeClass(label: String, jclass: JClass, sym: Symbol) {
val outfile = getFile(sym, jclass, ".class")
@@ -96,18 +94,18 @@ trait BytecodeWriters {
informProgress("wrote '" + label + "' to " + outfile)
}
}
-
+
trait DumpBytecodeWriter extends BytecodeWriter {
val baseDir = Directory(settings.Ydumpclasses.value).createDirectory()
-
+
abstract override def writeClass(label: String, jclass: JClass, sym: Symbol) {
super.writeClass(label, jclass, sym)
-
+
val pathName = jclass.getName()
var dumpFile = pathName.split("[./]").foldLeft(baseDir: Path) (_ / _) changeExtension "class" toFile;
dumpFile.parent.createDirectory()
val outstream = new DataOutputStream(new FileOutputStream(dumpFile.path))
-
+
try jclass writeTo outstream
finally outstream.close()
}
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala b/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
index 8238705cc3..387b7fb3d7 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
@@ -37,11 +37,9 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
/** Create a new phase */
override def newPhase(p: Phase): Phase = new JvmPhase(p)
- private def outputDirectory(sym: Symbol): AbstractFile = (
- settings.outputDirs.outputDirFor {
- atPhase(currentRun.flattenPhase.prev)(sym.sourceFile)
- }
- )
+ private def outputDirectory(sym: Symbol): AbstractFile =
+ settings.outputDirs outputDirFor beforeFlatten(sym.sourceFile)
+
private def getFile(base: AbstractFile, cls: JClass, suffix: String): AbstractFile = {
var dir = base
val pathParts = cls.getName().split("[./]").toList
@@ -87,7 +85,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
// succeed or warn that it isn't.
hasApproximate && {
// Before erasure so we can identify generic mains.
- atPhase(currentRun.erasurePhase) {
+ beforeErasure {
val companion = sym.linkedClassOfClass
val companionMain = companion.tpe.member(nme.main)
@@ -154,14 +152,14 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
if (settings.Ygenjavap.isDefault) {
if(settings.Ydumpclasses.isDefault)
new ClassBytecodeWriter { }
- else
+ else
new ClassBytecodeWriter with DumpBytecodeWriter { }
}
else new ClassBytecodeWriter with JavapBytecodeWriter { }
}
val codeGenerator = new BytecodeGenerator(bytecodeWriter)
- log("Created new bytecode generator for " + classes.size + " classes.")
+ debuglog("Created new bytecode generator for " + classes.size + " classes.")
sortedClasses foreach { c =>
try codeGenerator.genClass(c)
@@ -209,10 +207,11 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
val BeanInfoSkipAttr = definitions.getRequiredClass("scala.beans.BeanInfoSkip")
val BeanDisplayNameAttr = definitions.getRequiredClass("scala.beans.BeanDisplayName")
val BeanDescriptionAttr = definitions.getRequiredClass("scala.beans.BeanDescription")
-
+
final val ExcludedForwarderFlags = {
import Flags._
- ( CASE | SPECIALIZED | LIFTED | PROTECTED | STATIC | BridgeAndPrivateFlags )
+ // Should include DEFERRED but this breaks findMember.
+ ( CASE | SPECIALIZED | LIFTED | PROTECTED | STATIC | EXPANDEDNAME | BridgeAndPrivateFlags )
}
// Additional interface parents based on annotations and other cues
@@ -272,7 +271,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
* of inner class all until root class.
*/
def collectInnerClass(s: Symbol): Unit = {
- // TODO: something atPhase(currentRun.flattenPhase.prev) which accounts for
+ // TODO: some beforeFlatten { ... } which accounts for
// being nested in parameterized classes (if we're going to selectively flatten.)
val x = innerClassSymbolFor(s)
val isInner = x.isClass && !x.rawowner.isPackageClass
@@ -393,7 +392,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
// it must be a top level class (name contains no $s)
def isCandidateForForwarders(sym: Symbol): Boolean =
- atPhase(currentRun.picklerPhase.next) {
+ afterPickler {
!(sym.name.toString contains '$') && sym.hasModuleFlag && !sym.isImplClass && !sym.isNestedClass
}
@@ -433,7 +432,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
private def addEnclosingMethodAttribute(jclass: JClass, clazz: Symbol) {
val sym = clazz.originalEnclosingMethod
if (sym.isMethod) {
- log("enclosing method for %s is %s (in %s)".format(clazz, sym, sym.enclClass))
+ debuglog("enclosing method for %s is %s (in %s)".format(clazz, sym, sym.enclClass))
jclass addAttribute fjbgContext.JEnclosingMethodAttribute(
jclass,
javaName(sym.enclClass),
@@ -449,7 +448,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
enclClass, clazz)
)
else {
- log("enclosing method for %s is %s (in %s)".format(clazz, sym, enclClass))
+ debuglog("enclosing method for %s is %s (in %s)".format(clazz, sym, enclClass))
jclass addAttribute fjbgContext.JEnclosingMethodAttribute(
jclass,
javaName(enclClass),
@@ -681,7 +680,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
)
def addGenericSignature(jmember: JMember, sym: Symbol, owner: Symbol) {
if (needsGenericSignature(sym)) {
- val memberTpe = atPhase(currentRun.erasurePhase)(owner.thisType.memberInfo(sym))
+ val memberTpe = beforeErasure(owner.thisType.memberInfo(sym))
// println("addGenericSignature sym: " + sym.fullName + " : " + memberTpe + " sym.info: " + sym.info)
// println("addGenericSignature: "+ (sym.ownerChain map (x => (x.name, x.isImplClass))))
erasure.javaSig(sym, memberTpe) foreach { sig =>
@@ -700,7 +699,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
return
}
if ((settings.check.value contains "genjvm")) {
- val normalizedTpe = atPhase(currentRun.erasurePhase)(erasure.prepareSigMap(memberTpe))
+ val normalizedTpe = beforeErasure(erasure.prepareSigMap(memberTpe))
val bytecodeTpe = owner.thisType.memberInfo(sym)
if (!sym.isType && !sym.isConstructor && !(erasure.erasure(sym, normalizedTpe) =:= bytecodeTpe)) {
clasz.cunit.warning(sym.pos,
@@ -716,9 +715,8 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
}
val index = jmember.getConstantPool.addUtf8(sig).toShort
if (opt.verboseDebug)
- atPhase(currentRun.erasurePhase) {
- println("add generic sig "+sym+":"+sym.info+" ==> "+sig+" @ "+index)
- }
+ beforeErasure(println("add generic sig "+sym+":"+sym.info+" ==> "+sig+" @ "+index))
+
val buf = ByteBuffer.allocate(2)
buf putShort index
addAttribute(jmember, tpnme.SignatureATTR, buf)
@@ -793,14 +791,14 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
innerSym.rawname + innerSym.moduleSuffix
// add inner classes which might not have been referenced yet
- atPhase(currentRun.erasurePhase.next) {
+ afterErasure {
for (sym <- List(clasz.symbol, clasz.symbol.linkedClassOfClass); m <- sym.info.decls.map(innerClassSymbolFor) if m.isClass)
innerClassBuffer += m
}
val allInners = innerClassBuffer.toList
if (allInners.nonEmpty) {
- log(clasz.symbol.fullName('.') + " contains " + allInners.size + " inner classes.")
+ debuglog(clasz.symbol.fullName('.') + " contains " + allInners.size + " inner classes.")
val innerClassesAttr = jclass.getInnerClasses()
// sort them so inner classes succeed their enclosing class
// to satisfy the Eclipse Java compiler
@@ -1228,7 +1226,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
val jtype = javaType(method).asInstanceOf[JMethodType]
def emit(invoke: String) {
- log("%s %s %s.%s:%s".format(invoke, receiver.accessString, jowner, jname, jtype))
+ debuglog("%s %s %s.%s:%s".format(invoke, receiver.accessString, jowner, jname, jtype))
invoke match {
case "invokeinterface" => jcode.emitINVOKEINTERFACE(jowner, jname, jtype)
case "invokevirtual" => jcode.emitINVOKEVIRTUAL(jowner, jname, jtype)
@@ -1916,7 +1914,8 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
if (sym.isStaticMember) ACC_STATIC else 0,
if (sym.isBridge) ACC_BRIDGE | ACC_SYNTHETIC else 0,
if (sym.isClass && !sym.isInterface) ACC_SUPER else 0,
- if (sym.isVarargsMethod) ACC_VARARGS else 0
+ if (sym.isVarargsMethod) ACC_VARARGS else 0,
+ if (sym.hasFlag(Flags.SYNCHRONIZED)) JAVA_ACC_SYNCHRONIZED else 0
)
}
def javaFieldFlags(sym: Symbol) = {
@@ -1928,9 +1927,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
}
def isTopLevelModule(sym: Symbol): Boolean =
- atPhase (currentRun.picklerPhase.next) {
- sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass
- }
+ afterPickler { sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass }
def isStaticModule(sym: Symbol): Boolean = {
sym.isModuleClass && !sym.isImplClass && !sym.isLifted
diff --git a/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala b/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
index d2e54ff3f1..2fb615f893 100644
--- a/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
+++ b/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
@@ -1125,7 +1125,7 @@ abstract class GenMSIL extends SubComponent {
}
// method: implicit view(FunctionX[PType0, PType1, ...,PTypeN, ResType]):DelegateType
- val (isDelegateView, paramType, resType) = atPhase(currentRun.typerPhase) {
+ val (isDelegateView, paramType, resType) = beforeTyper {
msym.tpe match {
case MethodType(params, resultType)
if (params.length == 1 && msym.name == nme.view_) =>
@@ -1954,7 +1954,7 @@ abstract class GenMSIL extends SubComponent {
} // createClassMembers0
private def isTopLevelModule(sym: Symbol): Boolean =
- atPhase (currentRun.refchecksPhase) {
+ beforeRefchecks {
sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass
}
diff --git a/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala b/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
index 5fc7329955..95c371fa8b 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
@@ -225,9 +225,9 @@ abstract class DeadCodeElimination extends SubComponent {
m foreachBlock { bb =>
assert(bb.closed, "Open block in computeCompensations")
- for ((i, idx) <- bb.toList.zipWithIndex) {
+ foreachWithIndex(bb.toList) { (i, idx) =>
if (!useful(bb)(idx)) {
- for ((consumedType, depth) <- i.consumedTypes.reverse.zipWithIndex) {
+ foreachWithIndex(i.consumedTypes.reverse) { (consumedType, depth) =>
log("Finding definitions of: " + i + "\n\t" + consumedType + " at depth: " + depth)
val defs = rdef.findDefs(bb, idx, 1, depth)
for (d <- defs) {
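foreachWithIndex replaces the zipWithIndex loops here to avoid materializing the intermediate list of pairs. Assumed shape of the helper (a sketch):

// Assumed shape: track the index in a var instead of allocating
// the (elem, index) pairs that zipWithIndex would build.
def foreachWithIndex[A](xs: List[A])(f: (A, Int) => Unit) {
  var i = 0
  var ys = xs
  while (!ys.isEmpty) { f(ys.head, i); ys = ys.tail; i += 1 }
}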
diff --git a/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala b/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
index a3a19868e0..e91bab8367 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
@@ -466,8 +466,10 @@ abstract class Inliners extends SubComponent {
}
}
- private def isHigherOrderMethod(sym: Symbol) =
- sym.isMethod && atPhase(currentRun.erasurePhase.prev)(sym.info.paramTypes exists isFunctionType)
+ private def isHigherOrderMethod(sym: Symbol) = (
+ sym.isMethod
+ && beforeExplicitOuter(sym.info.paramTypes exists isFunctionType) // was "at erasurePhase.prev"
+ )
/** Should method 'sym' being called in 'receiver' be loaded from disk? */
def shouldLoadImplFor(sym: Symbol, receiver: Symbol): Boolean = {
@@ -705,7 +707,7 @@ abstract class Inliners extends SubComponent {
}
def isStampedForInlining(stackLength: Int) =
- !sameSymbols && inc.m.hasCode && shouldInline && isSafeToInline(stackLength)
+ !sameSymbols && inc.m.hasCode && shouldInline && isSafeToInline(stackLength) && !inc.m.symbol.hasFlag(Flags.SYNCHRONIZED)
def logFailure(stackLength: Int) = log(
"""|inline failed for %s:
@@ -722,6 +724,7 @@ abstract class Inliners extends SubComponent {
def failureReason(stackLength: Int) =
if (!inc.m.hasCode) "bytecode was unavailable"
+ else if (inc.m.symbol.hasFlag(Flags.SYNCHRONIZED)) "method is synchronized"
else if (!isSafeToInline(stackLength)) "it is unsafe (target may reference private fields)"
else "of a bug (run with -Ylog:inline -Ydebug for more information)"
diff --git a/src/compiler/scala/tools/nsc/dependencies/Changes.scala b/src/compiler/scala/tools/nsc/dependencies/Changes.scala
index 089ef9cf35..176c00c025 100644
--- a/src/compiler/scala/tools/nsc/dependencies/Changes.scala
+++ b/src/compiler/scala/tools/nsc/dependencies/Changes.scala
@@ -18,7 +18,7 @@ abstract class Changes {
abstract class Change
- private lazy val annotationsChecked =
+ private lazy val annotationsChecked =
List(definitions.SpecializedClass) // Any others that should be checked?
private val flagsToCheck = IMPLICIT | FINAL | PRIVATE | PROTECTED | SEALED |
diff --git a/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala b/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
index bd890b7194..02be916f59 100644
--- a/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
+++ b/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
@@ -145,10 +145,8 @@ trait DependencyAnalysis extends SubComponent with Files {
val name = d.toString
d.symbol match {
case s : ModuleClassSymbol =>
- val isTopLevelModule =
- atPhase (currentRun.picklerPhase.next) {
- !s.isImplClass && !s.isNestedClass
- }
+ val isTopLevelModule = afterPickler { !s.isImplClass && !s.isNestedClass }
+
if (isTopLevelModule && (s.companionModule != NoSymbol)) {
dependencies.emits(source, nameToFile(unit.source.file, name))
}
@@ -182,16 +180,18 @@ trait DependencyAnalysis extends SubComponent with Files {
|| (tree.symbol.sourceFile.path != file.path))
&& (!tree.symbol.isClassConstructor)) {
updateReferences(tree.symbol.fullName)
- atPhase(currentRun.uncurryPhase.prev) {
- checkType(tree.symbol.tpe)
- }
+ // was "at uncurryPhase.prev", which is actually non-deterministic
+ // because the continuations plugin may or may not supply uncurry's
+ // immediately preceding phase.
+ beforeRefchecks(checkType(tree.symbol.tpe))
}
tree match {
case cdef: ClassDef if !cdef.symbol.hasPackageFlag &&
!cdef.symbol.isAnonymousFunction =>
if (cdef.symbol != NoSymbol) buf += cdef.symbol
- atPhase(currentRun.erasurePhase.prev) {
+ // was "at erasurePhase.prev"
+ beforeExplicitOuter {
for (s <- cdef.symbol.info.decls)
s match {
case ts: TypeSymbol if !ts.isClass =>
@@ -202,9 +202,8 @@ trait DependencyAnalysis extends SubComponent with Files {
super.traverse(tree)
case ddef: DefDef =>
- atPhase(currentRun.typerPhase.prev) {
- checkType(ddef.symbol.tpe)
- }
+ // was "at typer.prev"
+ beforeTyper { checkType(ddef.symbol.tpe) }
super.traverse(tree)
case a @ Select(q, n) if ((a.symbol != NoSymbol) && (q.symbol != null)) => // #2556
if (!a.symbol.isConstructor &&
diff --git a/src/compiler/scala/tools/nsc/doc/model/ModelFactory.scala b/src/compiler/scala/tools/nsc/doc/model/ModelFactory.scala
index 7eb8c393f3..127faf8ed9 100644
--- a/src/compiler/scala/tools/nsc/doc/model/ModelFactory.scala
+++ b/src/compiler/scala/tools/nsc/doc/model/ModelFactory.scala
@@ -104,7 +104,7 @@ class ModelFactory(val global: Global, val settings: doc.Settings) {
case mb: NonTemplateMemberEntity if (mb.useCaseOf.isDefined) =>
mb.useCaseOf.get.inDefinitionTemplates
case _ =>
- if (inTpl == null)
+ if (inTpl == null)
makeRootPackage.toList
else
makeTemplate(sym.owner) :: (sym.allOverriddenSymbols map { inhSym => makeTemplate(inhSym.owner) })
@@ -123,14 +123,14 @@ class ModelFactory(val global: Global, val settings: doc.Settings) {
else Public()
}
}
- def flags = {
+ def flags = {
val fgs = mutable.ListBuffer.empty[Paragraph]
if (sym.isImplicit) fgs += Paragraph(Text("implicit"))
if (sym.isSealed) fgs += Paragraph(Text("sealed"))
if (!sym.isTrait && (sym hasFlag Flags.ABSTRACT)) fgs += Paragraph(Text("abstract"))
if (!sym.isTrait && (sym hasFlag Flags.DEFERRED)) fgs += Paragraph(Text("abstract"))
if (!sym.isModule && (sym hasFlag Flags.FINAL)) fgs += Paragraph(Text("final"))
- fgs.toList
+ fgs.toList
}
def deprecation =
if (sym.isDeprecated)
diff --git a/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala b/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
index 0539c885c2..bad181eb76 100644
--- a/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
+++ b/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
@@ -48,7 +48,7 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
protected def newCompiler(settings: Settings) = new BuilderGlobal(settings)
val compiler = newCompiler(settings)
- import compiler.{Symbol, Type, atPhase, currentRun}
+ import compiler.{ Symbol, Type, beforeErasure }
import compiler.dependencyAnalysis.Inherited
private case class SymWithHistory(sym: Symbol, befErasure: Type)
@@ -160,10 +160,8 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
isCorrespondingSym(s.sym, sym)) match {
case Some(SymWithHistory(oldSym, info)) =>
val changes = changeSet(oldSym.info, sym)
- val changesErasure =
- atPhase(currentRun.erasurePhase.prev) {
- changeSet(info, sym)
- }
+ val changesErasure = beforeErasure(changeSet(info, sym))
+
changesOf(oldSym) = (changes ++ changesErasure).distinct
case _ =>
// a new top level definition
@@ -333,11 +331,7 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
for (src <- files; localDefs = compiler.dependencyAnalysis.definitions(src)) {
definitions(src) = (localDefs map (s => {
this.classes += s.fullName -> src
- SymWithHistory(
- s.cloneSymbol,
- atPhase(currentRun.erasurePhase.prev) {
- s.info.cloneInfo(s)
- })
+ SymWithHistory(s.cloneSymbol, beforeErasure(s.info.cloneInfo(s)))
}))
}
this.references = compiler.dependencyAnalysis.references
diff --git a/src/compiler/scala/tools/nsc/interpreter/ExprTyper.scala b/src/compiler/scala/tools/nsc/interpreter/ExprTyper.scala
index 39a1a406ba..68c8f2fdb8 100644
--- a/src/compiler/scala/tools/nsc/interpreter/ExprTyper.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/ExprTyper.scala
@@ -92,7 +92,7 @@ trait ExprTyper {
case _ => NoType
}
}
-
+
def evaluate(): Type = {
typeOfExpressionDepth += 1
try typeOfTerm(expr) orElse asModule orElse asExpr orElse asQualifiedImport
diff --git a/src/compiler/scala/tools/nsc/interpreter/ILoop.scala b/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
index 7c71438b98..e1ea69842f 100644
--- a/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
@@ -324,7 +324,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
private def implicitsCommand(line: String): Result = {
val intp = ILoop.this.intp
import intp._
- import global.Symbol
+ import global.{ Symbol, afterTyper }
def p(x: Any) = intp.reporter.printMessage("" + x)
@@ -348,7 +348,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
// This groups the members by where the symbol is defined
val byOwner = syms groupBy (_.owner)
- val sortedOwners = byOwner.toList sortBy { case (owner, _) => intp.afterTyper(source.info.baseClasses indexOf owner) }
+ val sortedOwners = byOwner.toList sortBy { case (owner, _) => afterTyper(source.info.baseClasses indexOf owner) }
sortedOwners foreach {
case (owner, members) =>
@@ -382,7 +382,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
private def findToolsJar() = {
val jdkPath = Directory(jdkHome)
val jar = jdkPath / "lib" / "tools.jar" toFile;
-
+
if (jar isFile)
Some(jar)
else if (jdkPath.isDirectory)
@@ -440,7 +440,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
else {
val tp = intp.typeOfExpression(line, false)
if (tp == NoType) "" // the error message was already printed
- else intp.afterTyper(tp.toString)
+ else intp.global.afterTyper(tp.toString)
}
}
private def warningsCommand(): Result = {
diff --git a/src/compiler/scala/tools/nsc/interpreter/IMain.scala b/src/compiler/scala/tools/nsc/interpreter/IMain.scala
index de408f083f..9a12bc1471 100644
--- a/src/compiler/scala/tools/nsc/interpreter/IMain.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/IMain.scala
@@ -230,9 +230,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
} with MemberHandlers
import memberHandlers._
- def atPickler[T](op: => T): T = atPhase(currentRun.picklerPhase)(op)
- def afterTyper[T](op: => T): T = atPhase(currentRun.typerPhase.next)(op)
-
/** Temporarily be quiet */
def beQuietDuring[T](body: => T): T = {
val saved = printResults
@@ -787,10 +784,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
}
def compile(source: String): Boolean = compileAndSaveRun("<console>", source)
- def lineAfterTyper[T](op: => T): T = {
- assert(lastRun != null, "Internal error: trying to use atPhase, but Run is null." + this)
- atPhase(lastRun.typerPhase.next)(op)
- }
/** The innermost object inside the wrapper, found by
* following accessPath into the outer one.
@@ -799,7 +792,7 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
val readRoot = getRequiredModule(readPath) // the outermost wrapper
(accessPath split '.').foldLeft(readRoot) { (sym, name) =>
if (name == "") sym else
- lineAfterTyper(sym.info member newTermName(name))
+ afterTyper(sym.info member newTermName(name))
}
}
/** We get a bunch of repeated warnings for reasons I haven't
@@ -842,7 +835,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
// private
class Request(val line: String, val trees: List[Tree]) {
val lineRep = new ReadEvalPrint()
- import lineRep.lineAfterTyper
private var _originalLine: String = null
def withOriginalLine(s: String): this.type = { _originalLine = s ; this }
@@ -961,7 +953,7 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
}
lazy val resultSymbol = lineRep.resolvePathToSymbol(accessPath)
- def applyToResultMember[T](name: Name, f: Symbol => T) = lineAfterTyper(f(resultSymbol.info.nonPrivateDecl(name)))
+ def applyToResultMember[T](name: Name, f: Symbol => T) = afterTyper(f(resultSymbol.info.nonPrivateDecl(name)))
/* typeOf lookup with encoding */
def lookupTypeOf(name: Name) = typeOf.getOrElse(name, typeOf(global.encode(name.toString)))
diff --git a/src/compiler/scala/tools/nsc/interpreter/Imports.scala b/src/compiler/scala/tools/nsc/interpreter/Imports.scala
index d34ca8bbca..d579e0369e 100644
--- a/src/compiler/scala/tools/nsc/interpreter/Imports.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/Imports.scala
@@ -61,7 +61,7 @@ trait Imports {
def importedTypeSymbols = importedSymbols collect { case x: TypeSymbol => x }
def implicitSymbols = importedSymbols filter (_.isImplicit)
- def importedTermNamed(name: String): Symbol =
+ def importedTermNamed(name: String): Symbol =
importedTermSymbols find (_.name.toString == name) getOrElse NoSymbol
/** Tuples of (source, imported symbols) in the order they were imported.
@@ -191,5 +191,5 @@ trait Imports {
prevRequestList flatMap (req => req.handlers map (req -> _))
private def membersAtPickler(sym: Symbol): List[Symbol] =
- atPickler(sym.info.nonPrivateMembers)
+ beforePickler(sym.info.nonPrivateMembers)
}
\ No newline at end of file
diff --git a/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala b/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
index d96e8b07fc..f9c1907696 100644
--- a/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
@@ -18,7 +18,7 @@ class JLineCompletion(val intp: IMain) extends Completion with CompletionOutput
import global._
import definitions.{ PredefModule, RootClass, AnyClass, AnyRefClass, ScalaPackage, JavaLangPackage, getModuleIfDefined }
type ExecResult = Any
- import intp.{ debugging, afterTyper }
+ import intp.{ debugging }
// verbosity goes up with consecutive tabs
private var verbosity: Int = 0
@@ -61,7 +61,7 @@ class JLineCompletion(val intp: IMain) extends Completion with CompletionOutput
def packageNames = packages map tos
def aliasNames = aliases map tos
}
-
+
object NoTypeCompletion extends TypeMemberCompletion(NoType) {
override def memberNamed(s: String) = NoSymbol
override def members = Nil
@@ -165,11 +165,11 @@ class JLineCompletion(val intp: IMain) extends Completion with CompletionOutput
override def follow(id: String): Option[CompletionAware] = {
if (!completions(0).contains(id))
return None
-
+
val tpe = intp typeOfExpression id
if (tpe == NoType)
return None
-
+
def default = Some(TypeMemberCompletion(tpe))
// only rebinding vals in power mode for now.
diff --git a/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala b/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
index 48a5fa9e34..7e032753f2 100644
--- a/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
@@ -13,7 +13,7 @@ import scala.reflect.internal.Chars
trait MemberHandlers {
val intp: IMain
- import intp.{ Request, global, naming, atPickler }
+ import intp.{ Request, global, naming }
import global._
import naming._
@@ -200,10 +200,10 @@ trait MemberHandlers {
def importedSymbols = individualSymbols ++ wildcardSymbols
lazy val individualSymbols: List[Symbol] =
- atPickler(individualNames map (targetType nonPrivateMember _))
+ beforePickler(individualNames map (targetType nonPrivateMember _))
lazy val wildcardSymbols: List[Symbol] =
- if (importsWildcard) atPickler(targetType.nonPrivateMembers)
+ if (importsWildcard) beforePickler(targetType.nonPrivateMembers)
else Nil
/** Complete list of names imported by a wildcard */
diff --git a/src/compiler/scala/tools/nsc/interpreter/Power.scala b/src/compiler/scala/tools/nsc/interpreter/Power.scala
index 835fbb5638..14876425f4 100644
--- a/src/compiler/scala/tools/nsc/interpreter/Power.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/Power.scala
@@ -15,6 +15,31 @@ import scala.io.Codec
import java.net.{ URL, MalformedURLException }
import io.{ Path }
+/** Collecting some power mode examples.
+
+scala> trait F[@specialized(Int) T] { def f: T = ??? }
+defined trait F
+
+scala> trait G[@specialized(Long, Int) T] extends F[T] { override def f: T = super.f }
+defined trait G
+
+scala> changesAfterEachPhase(intp("G").info.members filter (_.name.toString contains "super")) >
+Gained after 1/parser {
+ method super$f
+}
+
+Gained after 12/specialize {
+ method super$f$mcJ$sp
+ method super$f$mcI$sp
+}
+
+Lost after 18/flatten {
+ method super$f$mcJ$sp
+ method super$f$mcI$sp
+ method super$f
+}
+*/
+
/** A class for methods to be injected into the intp in power mode.
*/
class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: ReplValsImpl) {
@@ -130,7 +155,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
( rutil.info[ReplValsImpl].membersDeclared
filter (m => m.isPublic && !m.hasModuleFlag && !m.isConstructor)
- sortBy (_.decodedName)
+ sortBy (_.decodedName)
map to_str
mkString ("Name and type of values imported into the repl in power mode.\n\n", "\n", "")
)
@@ -140,7 +165,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
implicit def apply[T: Manifest] : InternalInfo[T] = new InternalInfo[T](None)
}
object InternalInfo extends LowPriorityInternalInfo { }
-
+
/** Now dealing with the problem of accidentally calling a method on Type
* when you're holding a Symbol and seeing the Symbol converted to the
* type of Symbol rather than the type of the thing represented by the
@@ -151,7 +176,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
implicit def apply[T: Manifest] : InternalInfoWrapper[T] = new InternalInfoWrapper[T](None)
}
object InternalInfoWrapper extends LowPriorityInternalInfoWrapper {
-
+
}
class InternalInfoWrapper[T: Manifest](value: Option[T] = None) {
def ? : InternalInfo[T] = new InternalInfo[T](value)
@@ -165,7 +190,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
private def newInfo[U: Manifest](value: U): InternalInfo[U] = new InternalInfo[U](Some(value))
private def isSpecialized(s: Symbol) = s.name.toString contains "$mc"
private def isImplClass(s: Symbol) = s.name.toString endsWith "$class"
-
+
/** Standard noise reduction filter. */
def excludeMember(s: Symbol) = (
isSpecialized(s)
@@ -193,7 +218,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
def membersInherited = members filterNot (membersDeclared contains _)
def memberTypes = members filter (_.name.isTypeName)
def memberMethods = members filter (_.isMethod)
-
+
def pkg = symbol.enclosingPackage
def pkgName = pkg.fullName
def pkgClass = symbol.enclosingPackageClass
@@ -318,12 +343,12 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
def sigs = syms map (_.defString)
def infos = syms map (_.info)
}
-
+
trait Implicits1 {
// fallback
implicit def replPrinting[T](x: T)(implicit pretty: Prettifier[T] = Prettifier.default[T]) =
new SinglePrettifierClass[T](x)
-
+
implicit def liftToTypeName(s: String): TypeName = newTypeName(s)
}
trait Implicits2 extends Implicits1 {
@@ -350,7 +375,7 @@ class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: Repl
implicit def replInputStream(in: InputStream)(implicit codec: Codec) = new RichInputStream(in)
implicit def replEnhancedURLs(url: URL)(implicit codec: Codec): RichReplURL = new RichReplURL(url)(codec)
-
+
implicit def liftToTermName(s: String): TermName = newTermName(s)
implicit def replListOfSymbols(xs: List[Symbol]) = new RichSymbolList(xs)
}
diff --git a/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala b/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
index 6e5dec4205..a68392f0fb 100644
--- a/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
@@ -50,7 +50,7 @@ object ReplVals {
def mkManifestToType[T <: Global](global: T) = {
import global._
import definitions._
-
+
/** We can't use definitions.manifestToType directly because we're passing
* it to map and the compiler refuses to perform eta expansion on a method
* with a dependent return type. (Can this be relaxed?) To get around this
@@ -59,15 +59,17 @@ object ReplVals {
*/
def manifestToType(m: OptManifest[_]): Global#Type =
definitions.manifestToType(m)
-
+
class AppliedTypeFromManifests(sym: Symbol) {
def apply[M](implicit m1: Manifest[M]): Type =
- appliedType(sym.typeConstructor, List(m1) map (x => manifestToType(x).asInstanceOf[Type]))
+ if (sym eq NoSymbol) NoType
+ else appliedType(sym.typeConstructor, List(m1) map (x => manifestToType(x).asInstanceOf[Type]))
def apply[M1, M2](implicit m1: Manifest[M1], m2: Manifest[M2]): Type =
- appliedType(sym.typeConstructor, List(m1, m2) map (x => manifestToType(x).asInstanceOf[Type]))
+ if (sym eq NoSymbol) NoType
+ else appliedType(sym.typeConstructor, List(m1, m2) map (x => manifestToType(x).asInstanceOf[Type]))
}
-
+
(sym: Symbol) => new AppliedTypeFromManifests(sym)
}
}
diff --git a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
index 3490c1f5a8..06b06c50a6 100644
--- a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
+++ b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
@@ -393,7 +393,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
// assumed true unless we see public/private/protected
var isPackageAccess = true
var annots: List[Tree] = Nil
- def addAnnot(sym: Symbol) = annots :+= New(sym)
+ def addAnnot(sym: Symbol) = annots :+= New(sym.tpe)
while (true) {
in.token match {
@@ -653,15 +653,12 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
// leaves auxiliary constructors unable to access members of the companion object
// as unqualified identifiers.
def addCompanionObject(statics: List[Tree], cdef: ClassDef): List[Tree] = {
- def implWithImport(importStmt: Tree) = {
- import cdef.impl._
- treeCopy.Template(cdef.impl, parents, self, importStmt :: body)
- }
+ def implWithImport(importStmt: Tree) = deriveTemplate(cdef.impl)(importStmt :: _)
// if there are no statics we can use the original cdef, but we always
// create the companion so import A._ is not an error (see ticket #1700)
val cdefNew =
if (statics.isEmpty) cdef
- else treeCopy.ClassDef(cdef, cdef.mods, cdef.name, cdef.tparams, implWithImport(importCompanionObject(cdef)))
+ else deriveClassDef(cdef)(_ => implWithImport(importCompanionObject(cdef)))
List(makeCompanionObject(cdefNew, statics), cdefNew)
}
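
deriveTemplate and deriveClassDef, which replace the treeCopy calls above, rewrite one field of a tree and keep the rest. A sketch of the shape of such helpers over plain case classes (hypothetical simplifications of the real tree types):

    final case class Template(parents: List[String], body: List[String])
    final case class ClassDef(name: String, impl: Template)

    def deriveTemplate(t: Template)(f: List[String] => List[String]): Template =
      t.copy(body = f(t.body))
    def deriveClassDef(cd: ClassDef)(f: Template => Template): ClassDef =
      cd.copy(impl = f(cd.impl))

    // prepend an import to the class body, as addCompanionObject does:
    val cdef    = ClassDef("A", Template(Nil, List("stat1")))
    val cdefNew = deriveClassDef(cdef)(impl => deriveTemplate(impl)("import A._" :: _))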
diff --git a/src/compiler/scala/tools/nsc/reporters/Reporter.scala b/src/compiler/scala/tools/nsc/reporters/Reporter.scala
index f19a285d7c..309fc5733f 100644
--- a/src/compiler/scala/tools/nsc/reporters/Reporter.scala
+++ b/src/compiler/scala/tools/nsc/reporters/Reporter.scala
@@ -56,10 +56,10 @@ abstract class Reporter {
*/
def echo(msg: String): Unit = info(NoPosition, msg, true)
def echo(pos: Position, msg: String): Unit = info(pos, msg, true)
-
+
/** Informational messages, suppressed unless -verbose or force=true. */
def info(pos: Position, msg: String, force: Boolean): Unit = info0(pos, msg, INFO, force)
-
+
/** Warnings and errors. */
def warning(pos: Position, msg: String): Unit = withoutTruncating(info0(pos, msg, WARNING, false))
def error(pos: Position, msg: String): Unit = withoutTruncating(info0(pos, msg, ERROR, false))
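
For context on this hunk: every echo/info/warning/error call funnels into the single info0 sink with a severity and a force flag. A self-contained sketch of that funnel (the real Reporter also tracks counts and message truncation):

    object Severity extends Enumeration { val INFO, WARNING, ERROR = Value }

    abstract class MiniReporter {
      protected def info0(msg: String, severity: Severity.Value, force: Boolean): Unit

      def echo(msg: String): Unit    = info0(msg, Severity.INFO, force = true)
      def warning(msg: String): Unit = info0(msg, Severity.WARNING, force = false)
      def error(msg: String): Unit   = info0(msg, Severity.ERROR, force = false)
    }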
diff --git a/src/compiler/scala/tools/nsc/settings/MutableSettings.scala b/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
index f99d1399c0..e7959f36b2 100644
--- a/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
+++ b/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
@@ -533,7 +533,7 @@ class MutableSettings(val errorFn: String => Unit)
Some(rest)
}
override def tryToSetColon(args: List[String]) = tryToSet(args)
- override def tryToSetFromPropertyValue(s: String) = tryToSet(s.trim.split(" +").toList)
+ override def tryToSetFromPropertyValue(s: String) = tryToSet(s.trim.split(',').toList)
def unparse: List[String] = value map { name + ":" + _ }
withHelpSyntax(name + ":<" + arg + ">")
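
The one-character change above swaps the tokenizer for property values; splitting on commas keeps entries that contain spaces intact:

    val s = "alpha beta,gamma"
    s.trim.split(" +").toList // List("alpha", "beta,gamma") -- old behavior
    s.trim.split(',').toList  // List("alpha beta", "gamma") -- new behavior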
diff --git a/src/compiler/scala/tools/nsc/symtab/SymbolTable.scala b/src/compiler/scala/tools/nsc/symtab/SymbolTable.scala
index a47bfda8c1..fb85ebeeb0 100644
--- a/src/compiler/scala/tools/nsc/symtab/SymbolTable.scala
+++ b/src/compiler/scala/tools/nsc/symtab/SymbolTable.scala
@@ -9,4 +9,4 @@ package symtab
import ast.{Trees, TreePrinters, DocComments}
import util._
-abstract class SymbolTable extends reflect.internal.SymbolTable
\ No newline at end of file
+abstract class SymbolTable extends reflect.internal.SymbolTable
\ No newline at end of file
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
index a8083d7a2d..07d132f7dd 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
@@ -368,7 +368,7 @@ abstract class ClassfileParser {
case arr: Type => Constant(arr)
}
}
-
+
private def getSubArray(bytes: Array[Byte]): Array[Byte] = {
val decodedLength = ByteCodecs.decode(bytes)
val arr = new Array[Byte](decodedLength)
@@ -424,19 +424,21 @@ abstract class ClassfileParser {
def forceMangledName(name: Name, module: Boolean): Symbol = {
val parts = name.decode.toString.split(Array('.', '$'))
var sym: Symbol = definitions.RootClass
- atPhase(currentRun.flattenPhase.prev) {
+
+ // was "at flatten.prev"
+ beforeFlatten {
for (part0 <- parts; if !(part0 == ""); part = newTermName(part0)) {
- val sym1 = atPhase(currentRun.icodePhase) {
+ val sym1 = beforeIcode {
sym.linkedClassOfClass.info
sym.info.decl(part.encode)
}//.suchThat(module == _.isModule)
- if (sym1 == NoSymbol)
- sym = sym.info.decl(part.encode.toTypeName)
- else
- sym = sym1
+
+ sym = (
+ if (sym1 ne NoSymbol) sym1
+ else sym.info.decl(part.encode.toTypeName)
+ )
}
}
-// println("found: " + sym)
sym
}
@@ -719,7 +721,7 @@ abstract class ClassfileParser {
index += 1
val bounds = variance match {
case '+' => TypeBounds.upper(objToAny(sig2type(tparams, skiptvs)))
- case '-' =>
+ case '-' =>
val tp = sig2type(tparams, skiptvs)
// sig2type seems to return AnyClass regardless of the situation:
// we don't want Any as a LOWER bound.
@@ -1205,11 +1207,11 @@ abstract class ClassfileParser {
// if loading during initialization of `definitions` typerPhase is not yet set.
// in that case we simply load the member at the current phase
if (currentRun.typerPhase != null)
- atPhase(currentRun.typerPhase)(getMember(sym, innerName.toTypeName))
+ beforeTyper(getMember(sym, innerName.toTypeName))
else
getMember(sym, innerName.toTypeName)
- assert(s ne NoSymbol,
+ assert(s ne NoSymbol,
"" + ((externalName, outerName, innerName, sym.fullLocationString)) + " / " +
" while parsing " + ((in.file, busy)) +
sym + "." + innerName + " linkedModule: " + sym.companionModule + sym.companionModule.info.members
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
index 7d42dabc08..68af518d3a 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
@@ -179,7 +179,7 @@ abstract class ICodeReader extends ClassfileParser {
}
else {
forceMangledName(name, false)
- atPhase(currentRun.flattenPhase.next)(definitions.getClass(name))
+ afterFlatten(definitions.getClass(name.toTypeName))
}
if (sym.isModule)
sym.moduleClass
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/Pickler.scala b/src/compiler/scala/tools/nsc/symtab/classfile/Pickler.scala
index 25ae6f33d2..758f870d6b 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/Pickler.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/Pickler.scala
@@ -77,7 +77,7 @@ abstract class Pickler extends SubComponent {
private var entries = new Array[AnyRef](256)
private var ep = 0
private val index = new LinkedHashMap[AnyRef, Int]
- private lazy val nonClassRoot = root.ownersIterator.find(! _.isClass) getOrElse NoSymbol
+ private lazy val nonClassRoot = findOrElse(root.ownersIterator)(!_.isClass)(NoSymbol)
private def isRootSym(sym: Symbol) =
sym.name.toTermName == rootName && sym.owner == rootOwner
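
findOrElse, replacing the find/getOrElse chain above, presumably comes from the Collections utilities this merge also touches; a sketch of the shape it would need:

    def findOrElse[A](xs: Iterator[A])(p: A => Boolean)(orElse: => A): A = {
      while (xs.hasNext) {
        val x = xs.next()
        if (p(x)) return x // first match wins
      }
      orElse               // evaluated only if nothing matched
    }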
diff --git a/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala b/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
index d5a413337b..1abaf1c1d6 100644
--- a/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
+++ b/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
@@ -82,7 +82,9 @@ abstract class AddInterfaces extends InfoTransform {
implClassMap.getOrElse(iface, {
atPhase(implClassPhase) {
- log("%s.implClass == %s".format(iface, iface.implClass))
+ if (iface.implClass ne NoSymbol)
+ log("%s.implClass == %s".format(iface, iface.implClass))
+
val implName = nme.implClassName(iface.name)
var impl = if (iface.owner.isClass) iface.owner.info.decl(implName) else NoSymbol
@@ -193,7 +195,7 @@ abstract class AddInterfaces extends InfoTransform {
case PolyType(_, restpe) =>
implType(restpe)
}
- sym setInfo implType(atPhase(currentRun.erasurePhase)(iface.info))
+ sym setInfo implType(beforeErasure(iface.info))
}
override def load(clazz: Symbol) { complete(clazz) }
@@ -316,9 +318,9 @@ abstract class AddInterfaces extends InfoTransform {
override def transform(tree: Tree): Tree = {
val sym = tree.symbol
val tree1 = tree match {
- case ClassDef(mods, name, tparams, impl) if (sym.needsImplClass) =>
+ case ClassDef(mods, _, _, impl) if sym.needsImplClass =>
implClass(sym).initialize // to force lateDEFERRED flags
- treeCopy.ClassDef(tree, mods | INTERFACE, name, tparams, ifaceTemplate(impl))
+ copyClassDef(tree)(mods = mods | INTERFACE, impl = ifaceTemplate(impl))
case DefDef(_,_,_,_,_,_) if sym.isClassConstructor && sym.isPrimaryConstructor && sym.owner != ArrayClass =>
deriveDefDef(tree)(addMixinConstructorCalls(_, sym.owner)) // (3)
case Template(parents, self, body) =>
@@ -337,7 +339,7 @@ abstract class AddInterfaces extends InfoTransform {
val mix1 = mix
if (mix == tpnme.EMPTY) mix
else {
- val ps = atPhase(currentRun.erasurePhase) {
+ val ps = beforeErasure {
sym.info.parents dropWhile (p => p.symbol.name != mix)
}
assert(!ps.isEmpty, tree);
diff --git a/src/compiler/scala/tools/nsc/transform/CleanUp.scala b/src/compiler/scala/tools/nsc/transform/CleanUp.scala
index 4521ce9982..d04c6115ca 100644
--- a/src/compiler/scala/tools/nsc/transform/CleanUp.scala
+++ b/src/compiler/scala/tools/nsc/transform/CleanUp.scala
@@ -33,21 +33,21 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
private def savingStatics[T](body: => T): T = {
val savedNewStaticMembers : mutable.Buffer[Tree] = newStaticMembers.clone()
val savedNewStaticInits : mutable.Buffer[Tree] = newStaticInits.clone()
- val savedSymbolsStoredAsStatic : mutable.Map[String, Symbol] = symbolsStoredAsStatic.clone()
+ val savedSymbolsStoredAsStatic : mutable.Map[String, Symbol] = symbolsStoredAsStatic.clone()
val result = body
clearStatics()
newStaticMembers ++= savedNewStaticMembers
newStaticInits ++= savedNewStaticInits
symbolsStoredAsStatic ++= savedSymbolsStoredAsStatic
-
+
result
}
private def transformTemplate(tree: Tree) = {
val Template(parents, self, body) = tree
clearStatics()
val newBody = transformTrees(body)
- val templ = treeCopy.Template(tree, parents, self, transformTrees(newStaticMembers.toList) ::: newBody)
+ val templ = deriveTemplate(tree)(_ => transformTrees(newStaticMembers.toList) ::: newBody)
try addStaticInits(templ) // postprocess to include static ctors
finally clearStatics()
}
@@ -85,6 +85,11 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
case "poly-cache" => POLY_CACHE
}
+ def shouldRewriteTry(tree: Try) = {
+ val sym = tree.tpe.typeSymbol
+ forMSIL && (sym != UnitClass) && (sym != NothingClass)
+ }
+
private def typedWithPos(pos: Position)(tree: Tree) =
localTyper.typedPos(pos)(tree)
@@ -97,7 +102,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
/** The boxed type if it's a primitive; identity otherwise.
*/
def toBoxedType(tp: Type) = if (isJavaValueType(tp)) boxedClass(tp.typeSymbol).tpe else tp
-
+
override def transform(tree: Tree): Tree = tree match {
/* Transforms dynamic calls (i.e. calls to methods that are undefined
@@ -134,7 +139,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
case ad@ApplyDynamic(qual0, params) =>
if (settings.logReflectiveCalls.value)
unit.echo(ad.pos, "method invocation uses reflection")
-
+
val typedPos = typedWithPos(ad.pos) _
assert(ad.symbol.isPublic)
@@ -146,7 +151,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
val flags = PRIVATE | STATIC | SYNTHETIC | (
if (isFinal) FINAL else 0
)
-
+
val varSym = currentClass.newVariable(mkTerm("" + forName), ad.pos, flags) setInfoAndEnter forType
if (!isFinal)
varSym.addAnnotation(VolatileAttr)
@@ -488,7 +493,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
val t: Tree = ad.symbol.tpe match {
case MethodType(mparams, resType) =>
assert(params.length == mparams.length, mparams)
-
+
typedPos {
val sym = currentOwner.newValue(mkTerm("qual"), ad.pos) setInfo qual0.tpe
qual = safeREF(sym)
@@ -560,8 +565,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
* Hence, we here rewrite all try blocks with a result != {Unit, All} such that they
* store their result in a local variable. The catch blocks are adjusted as well.
   * The try tree is substituted by a block whose result expression is a read of that variable. */
- case theTry @ Try(block, catches, finalizer)
- if theTry.tpe.typeSymbol != definitions.UnitClass && theTry.tpe.typeSymbol != definitions.NothingClass =>
+ case theTry @ Try(block, catches, finalizer) if shouldRewriteTry(theTry) =>
val tpe = theTry.tpe.widen
val tempVar = currentOwner.newVariable(mkTerm(nme.EXCEPTION_RESULT_PREFIX), theTry.pos).setInfo(tpe)
def assignBlock(rhs: Tree) = super.transform(BLOCK(Ident(tempVar) === transform(rhs)))
@@ -686,7 +690,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
localTyper.typedPos(template.pos)(DefDef(staticCtorSym, rhs))
}
- treeCopy.Template(template, template.parents, template.self, newCtor :: template.body)
+ deriveTemplate(template)(newCtor :: _)
}
}
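
savingStatics, reindented above, snapshots three mutable buffers, runs the body, and then restores the snapshots. The same discipline in miniature, over a single buffer with a hypothetical name:

    import scala.collection.mutable

    val newStaticMembers = mutable.Buffer[String]()

    def savingStatics[T](body: => T): T = {
      val saved  = newStaticMembers.clone() // snapshot before the body runs
      val result = body
      newStaticMembers.clear()              // drop whatever the body added
      newStaticMembers ++= saved            // restore the snapshot
      result
    }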
diff --git a/src/compiler/scala/tools/nsc/transform/Constructors.scala b/src/compiler/scala/tools/nsc/transform/Constructors.scala
index c638d25114..d8f19f85c0 100644
--- a/src/compiler/scala/tools/nsc/transform/Constructors.scala
+++ b/src/compiler/scala/tools/nsc/transform/Constructors.scala
@@ -126,7 +126,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
if (from.name != nme.OUTER) result
else localTyper.typedPos(to.pos) {
- IF (from OBJ_EQ NULL) THEN THROW(NullPointerExceptionClass) ELSE result
+ IF (from OBJ_EQ NULL) THEN Throw(NullPointerExceptionClass.tpe) ELSE result
}
}
@@ -175,7 +175,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
else if (stat.symbol.isConstructor) auxConstructorBuf += stat
else defBuf += stat
}
- case ValDef(mods, name, tpt, rhs) =>
+ case ValDef(_, _, _, rhs) =>
// val defs with constant right-hand sides are eliminated.
// for all other val defs, an empty valdef goes into the template and
// the initializer goes as an assignment into the constructor
@@ -188,7 +188,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
(if (canBeMoved(stat)) constrPrefixBuf else constrStatBuf) += mkAssign(
stat.symbol, rhs1)
}
- defBuf += treeCopy.ValDef(stat, mods, name, tpt, EmptyTree)
+ defBuf += deriveValDef(stat)(_ => EmptyTree)
}
case ClassDef(_, _, _, _) =>
// classes are treated recursively, and left in the template
@@ -226,11 +226,11 @@ abstract class Constructors extends Transform with ast.TreeDSL {
tree match {
case DefDef(_, _, _, _, _, body)
if (tree.symbol.isOuterAccessor && tree.symbol.owner == clazz && clazz.isEffectivelyFinal) =>
- log("outerAccessors += " + tree.symbol.fullName)
+ debuglog("outerAccessors += " + tree.symbol.fullName)
outerAccessors ::= ((tree.symbol, body))
case Select(_, _) =>
if (!mustbeKept(tree.symbol)) {
- log("accessedSyms += " + tree.symbol.fullName)
+ debuglog("accessedSyms += " + tree.symbol.fullName)
accessedSyms addEntry tree.symbol
}
super.traverse(tree)
@@ -515,7 +515,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
}
def delayedInitCall(closure: Tree) = localTyper.typedPos(impl.pos) {
- gen.mkMethodCall(This(clazz), delayedInitMethod, Nil, List(New(closure.symbol, This(clazz))))
+ gen.mkMethodCall(This(clazz), delayedInitMethod, Nil, List(New(closure.symbol.tpe, This(clazz))))
}
/** Return a pair consisting of (all statements up to and including superclass and trait constr calls, rest) */
@@ -560,14 +560,13 @@ abstract class Constructors extends Transform with ast.TreeDSL {
clazz.info.decls unlink sym
// Eliminate all field definitions that can be dropped from template
- treeCopy.Template(impl, impl.parents, impl.self,
- defBuf.toList filter (stat => mustbeKept(stat.symbol)))
+ deriveTemplate(impl)(_ => defBuf.toList filter (stat => mustbeKept(stat.symbol)))
} // transformClassTemplate
override def transform(tree: Tree): Tree =
tree match {
- case ClassDef(mods, name, tparams, impl) if !tree.symbol.isInterface && !isValueClass(tree.symbol) =>
- treeCopy.ClassDef(tree, mods, name, tparams, transformClassTemplate(impl))
+ case ClassDef(_,_,_,_) if !tree.symbol.isInterface && !isValueClass(tree.symbol) =>
+ deriveClassDef(tree)(transformClassTemplate)
case _ =>
super.transform(tree)
}
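
A toy model of the ValDef case above: the initializer moves into the constructor as an assignment, while an emptied ValDef stays behind in the template:

    final case class ValDef(name: String, rhs: Option[String])

    def deriveValDef(vd: ValDef)(f: Option[String] => Option[String]): ValDef =
      vd.copy(rhs = f(vd.rhs))

    val stat       = ValDef("x", Some("compute()"))
    val constrStat = stat.rhs.map(r => s"this.${stat.name} = $r") // into the constructor
    val templStat  = deriveValDef(stat)(_ => None)                // empty rhs in the template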
diff --git a/src/compiler/scala/tools/nsc/transform/Erasure.scala b/src/compiler/scala/tools/nsc/transform/Erasure.scala
index c1ddd21e9d..2412c90962 100644
--- a/src/compiler/scala/tools/nsc/transform/Erasure.scala
+++ b/src/compiler/scala/tools/nsc/transform/Erasure.scala
@@ -223,7 +223,7 @@ abstract class Erasure extends AddInterfaces
/** The Java signature of type 'info', for symbol sym. The symbol is used to give the right return
* type for constructors.
*/
- def javaSig(sym0: Symbol, info: Type): Option[String] = atPhase(currentRun.erasurePhase) {
+ def javaSig(sym0: Symbol, info: Type): Option[String] = beforeErasure {
val isTraitSignature = sym0.enclClass.isTrait
def superSig(parents: List[Type]) = traceSig("superSig", parents) {
@@ -257,7 +257,7 @@ abstract class Erasure extends AddInterfaces
// Anything which could conceivably be a module (i.e. isn't known to be
// a type parameter or similar) must go through here or the signature is
// likely to end up with Foo<T>.Empty where it needs Foo<T>.Empty$.
- def fullNameInSig(sym: Symbol) = "L" + atPhase(currentRun.icodePhase)(sym.javaBinaryName)
+ def fullNameInSig(sym: Symbol) = "L" + beforeIcode(sym.javaBinaryName)
def jsig(tp0: Type, existentiallyBound: List[Symbol] = Nil, toplevel: Boolean = false, primitiveOK: Boolean = true): String = {
val tp = tp0.dealias
@@ -421,9 +421,9 @@ abstract class Erasure extends AddInterfaces
/** Box `tree` of unboxed type */
private def box(tree: Tree): Tree = tree match {
- case LabelDef(name, params, rhs) =>
- val rhs1 = box(rhs)
- treeCopy.LabelDef(tree, name, params, rhs1) setType rhs1.tpe
+ case LabelDef(_, _, _) =>
+ val ldef = deriveLabelDef(tree)(box)
+ ldef setType ldef.rhs.tpe
case _ =>
typedPos(tree.pos)(tree.tpe.typeSymbol match {
case UnitClass =>
@@ -440,7 +440,7 @@ abstract class Erasure extends AddInterfaces
* fields (see TupleX). (ID)
*/
case Apply(boxFun, List(arg)) if isUnbox(tree.symbol) && safeToRemoveUnbox(arg.tpe.typeSymbol) =>
- log("boxing an unbox: " + tree + " and replying with " + arg)
+ log("boxing an unbox: " + tree + "/" + tree.symbol + " and replying with " + arg + " of type " + arg.tpe)
arg
case _ =>
(REF(boxMethod(x)) APPLY tree) setPos (tree.pos) setType ObjectClass.tpe
@@ -460,9 +460,9 @@ abstract class Erasure extends AddInterfaces
println("unbox shorten: "+tree) // this never seems to kick in during build and test; therefore disabled.
adaptToType(unboxed, pt)
*/
- case LabelDef(name, params, rhs) =>
- val rhs1 = unbox(rhs, pt)
- treeCopy.LabelDef(tree, name, params, rhs1) setType rhs1.tpe
+ case LabelDef(_, _, _) =>
+ val ldef = deriveLabelDef(tree)(unbox(_, pt))
+ ldef setType ldef.rhs.tpe
case _ =>
typedPos(tree.pos)(pt.typeSymbol match {
case UnitClass =>
@@ -604,8 +604,8 @@ abstract class Erasure extends AddInterfaces
throw ex
}
def adaptCase(cdef: CaseDef): CaseDef = {
- val body1 = adaptToType(cdef.body, tree1.tpe)
- treeCopy.CaseDef(cdef, cdef.pat, cdef.guard, body1) setType body1.tpe
+ val newCdef = deriveCaseDef(cdef)(adaptToType(_, tree1.tpe))
+ newCdef setType newCdef.body.tpe
}
def adaptBranch(branch: Tree): Tree =
if (branch == EmptyTree) branch else adaptToType(branch, tree1.tpe);
@@ -648,21 +648,20 @@ abstract class Erasure extends AddInterfaces
private def checkNoDoubleDefs(root: Symbol) {
def doubleDefError(sym1: Symbol, sym2: Symbol) {
// the .toString must also be computed at the earlier phase
- def atRefc[T](op: => T) = atPhase[T](currentRun.refchecksPhase.next)(op)
- val tpe1 = atRefc(root.thisType.memberType(sym1))
- val tpe2 = atRefc(root.thisType.memberType(sym2))
+ val tpe1 = afterRefchecks(root.thisType.memberType(sym1))
+ val tpe2 = afterRefchecks(root.thisType.memberType(sym2))
if (!tpe1.isErroneous && !tpe2.isErroneous)
unit.error(
if (sym1.owner == root) sym1.pos else root.pos,
(if (sym1.owner == sym2.owner) "double definition:\n"
else if (sym1.owner == root) "name clash between defined and inherited member:\n"
else "name clash between inherited members:\n") +
- sym1 + ":" + atRefc(tpe1.toString) +
+ sym1 + ":" + afterRefchecks(tpe1.toString) +
(if (sym1.owner == root) "" else sym1.locationString) + " and\n" +
- sym2 + ":" + atRefc(tpe2.toString) +
+ sym2 + ":" + afterRefchecks(tpe2.toString) +
(if (sym2.owner == root) " at line " + (sym2.pos).line else sym2.locationString) +
"\nhave same type" +
- (if (atRefc(tpe1 =:= tpe2)) "" else " after erasure: " + atPhase(phase.next)(sym1.tpe)))
+ (if (afterRefchecks(tpe1 =:= tpe2)) "" else " after erasure: " + afterErasure(sym1.tpe)))
sym1.setInfo(ErrorType)
}
@@ -672,7 +671,7 @@ abstract class Erasure extends AddInterfaces
if (e.sym.isTerm) {
var e1 = decls.lookupNextEntry(e)
while (e1 ne null) {
- if (atPhase(phase.next)(e1.sym.info =:= e.sym.info)) doubleDefError(e.sym, e1.sym)
+ if (afterErasure(e1.sym.info =:= e.sym.info)) doubleDefError(e.sym, e1.sym)
e1 = decls.lookupNextEntry(e1)
}
}
@@ -686,10 +685,10 @@ abstract class Erasure extends AddInterfaces
|| !sym.hasTypeAt(currentRun.refchecksPhase.id))
override def matches(sym1: Symbol, sym2: Symbol): Boolean =
- atPhase(phase.next)(sym1.tpe =:= sym2.tpe)
+ afterErasure(sym1.tpe =:= sym2.tpe)
}
while (opc.hasNext) {
- if (!atPhase(currentRun.refchecksPhase.next)(
+ if (!afterRefchecks(
root.thisType.memberType(opc.overriding) matches
root.thisType.memberType(opc.overridden))) {
debuglog("" + opc.overriding.locationString + " " +
@@ -708,8 +707,8 @@ abstract class Erasure extends AddInterfaces
for (member <- root.info.nonPrivateMember(other.name).alternatives) {
if (member != other &&
!(member hasFlag BRIDGE) &&
- atPhase(phase.next)(member.tpe =:= other.tpe) &&
- !atPhase(refchecksPhase.next)(
+ afterErasure(member.tpe =:= other.tpe) &&
+ !afterRefchecks(
root.thisType.memberType(member) matches root.thisType.memberType(other))) {
debuglog("" + member.locationString + " " + member.infosString + other.locationString + " " + other.infosString);
doubleDefError(member, other)
@@ -733,13 +732,13 @@ abstract class Erasure extends AddInterfaces
*/
private def bridgeDefs(owner: Symbol): (List[Tree], immutable.Set[Symbol]) = {
var toBeRemoved: immutable.Set[Symbol] = immutable.Set()
- //println("computing bridges for " + owner)//DEBUG
- assert(phase == currentRun.erasurePhase)
+ debuglog("computing bridges for " + owner)//DEBUG
+ assert(phase == currentRun.erasurePhase, phase)
val site = owner.thisType
val bridgesScope = newScope
val bridgeTarget = new mutable.HashMap[Symbol, Symbol]
var bridges: List[Tree] = List()
- val opc = atPhase(currentRun.explicitouterPhase) {
+ val opc = beforeExplicitOuter {
new overridingPairs.Cursor(owner) {
override def parents: List[Type] = List(owner.info.parents.head)
override def exclude(sym: Symbol): Boolean =
@@ -750,9 +749,9 @@ abstract class Erasure extends AddInterfaces
val member = opc.overriding
val other = opc.overridden
//println("bridge? " + member + ":" + member.tpe + member.locationString + " to " + other + ":" + other.tpe + other.locationString)//DEBUG
- if (atPhase(currentRun.explicitouterPhase)(!member.isDeferred)) {
+ if (beforeExplicitOuter(!member.isDeferred)) {
val otpe = erasure(owner, other.tpe)
- val bridgeNeeded = atPhase(phase.next) (
+ val bridgeNeeded = afterErasure (
!(other.tpe =:= member.tpe) &&
!(deconstMap(other.tpe) =:= deconstMap(member.tpe)) &&
{ var e = bridgesScope.lookupEntry(member.name)
@@ -767,15 +766,15 @@ abstract class Erasure extends AddInterfaces
// the parameter symbols need to have the new owner
bridge.setInfo(otpe.cloneInfo(bridge))
bridgeTarget(bridge) = member
- atPhase(phase.next) { owner.info.decls.enter(bridge) }
+ afterErasure { owner.info.decls.enter(bridge) }
if (other.owner == owner) {
//println("bridge to same: "+other+other.locationString)//DEBUG
- atPhase(phase.next) { owner.info.decls.unlink(other) }
+ afterErasure { owner.info.decls.unlink(other) }
toBeRemoved += other
}
bridgesScope enter bridge
bridges =
- atPhase(phase.next) {
+ afterErasure {
atPos(bridge.pos) {
val bridgeDef =
DefDef(bridge,
@@ -789,7 +788,7 @@ abstract class Erasure extends AddInterfaces
if ( member.isSynthetic // TODO: should we do this for user-defined unapplies as well?
&& ((member.name == nme.unapply) || (member.name == nme.unapplySeq))
 //       && (bridge.paramss.nonEmpty && bridge.paramss.head.nonEmpty && bridge.paramss.head.tail.isEmpty) // does the first argument list have exactly one argument -- for user-defined unapplies we can't be sure
- && !(atPhase(phase.next)(member.tpe <:< other.tpe))) { // no static guarantees (TODO: is the subtype test ever true?)
+ && !(afterErasure(member.tpe <:< other.tpe))) { // no static guarantees (TODO: is the subtype test ever true?)
import CODE._
val typeTest = gen.mkIsInstanceOf(REF(bridge.firstParam), member.tpe.params.head.tpe, any = true, wrapInApply = true) // any = true since we're before erasure (?), wrapInapply is true since we're after uncurry
// println("unapp type test: "+ typeTest)
@@ -846,9 +845,9 @@ abstract class Erasure extends AddInterfaces
*/
private val preTransformer = new TypingTransformer(unit) {
def preErase(tree: Tree): Tree = tree match {
- case ClassDef(mods, name, tparams, impl) =>
+ case ClassDef(_,_,_,_) =>
debuglog("defs of " + tree.symbol + " = " + tree.symbol.info.decls)
- treeCopy.ClassDef(tree, mods, name, List(), impl)
+ copyClassDef(tree)(tparams = Nil)
case DefDef(_,_,_,_,_,_) =>
copyDefDef(tree)(tparams = Nil)
case TypeDef(_, _, _, _) =>
@@ -1055,7 +1054,7 @@ abstract class Erasure extends AddInterfaces
*/
override def transform(tree: Tree): Tree = {
val tree1 = preTransformer.transform(tree)
- atPhase(phase.next) {
+ afterErasure {
val tree2 = mixinTransformer.transform(tree1)
debuglog("tree after addinterfaces: \n" + tree2)
diff --git a/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala b/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
index bc35084a4c..595c1486b6 100644
--- a/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
+++ b/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
@@ -68,7 +68,7 @@ abstract class ExplicitOuter extends InfoTransform
result
}
-
+
private val innerClassConstructorParamName: TermName = newTermName("arg" + nme.OUTER)
class RemoveBindingsTransformer(toRemove: Set[Symbol]) extends Transformer {
@@ -89,13 +89,13 @@ abstract class ExplicitOuter extends InfoTransform
def outerAccessor(clazz: Symbol): Symbol = {
val firstTry = clazz.info.decl(nme.expandedName(nme.OUTER, clazz))
if (firstTry != NoSymbol && firstTry.outerSource == clazz) firstTry
- else clazz.info.decls find (_.outerSource == clazz) getOrElse NoSymbol
- }
+ else findOrElse(clazz.info.decls)(_.outerSource == clazz)(NoSymbol)
+ }
def newOuterAccessor(clazz: Symbol) = {
val accFlags = SYNTHETIC | METHOD | STABLE | ( if (clazz.isTrait) DEFERRED else 0 )
val sym = clazz.newMethodSymbol(nme.OUTER, clazz.pos, accFlags)
val restpe = if (clazz.isTrait) clazz.outerClass.tpe else clazz.outerClass.thisType
-
+
sym expandName clazz
sym.referenced = clazz
sym setInfo MethodType(Nil, restpe)
@@ -163,14 +163,14 @@ abstract class ExplicitOuter extends InfoTransform
decls1 = decls.cloneScope
val outerAcc = clazz.newMethod(nme.OUTER, clazz.pos) // 3
outerAcc expandName clazz
-
+
decls1 enter newOuterAccessor(clazz)
if (hasOuterField(clazz)) //2
decls1 enter newOuterField(clazz)
}
if (!clazz.isTrait && !parents.isEmpty) {
for (mc <- clazz.mixinClasses) {
- val mixinOuterAcc: Symbol = atPhase(phase.next)(outerAccessor(mc))
+ val mixinOuterAcc: Symbol = afterExplicitOuter(outerAccessor(mc))
if (mixinOuterAcc != NoSymbol) {
if (decls1 eq decls) decls1 = decls.cloneScope
val newAcc = mixinOuterAcc.cloneSymbol(clazz, mixinOuterAcc.flags & ~DEFERRED)
@@ -468,8 +468,10 @@ abstract class ExplicitOuter extends InfoTransform
}
}
super.transform(
- treeCopy.Template(tree, parents, self,
- if (newDefs.isEmpty) decls else decls ::: newDefs.toList)
+ deriveTemplate(tree)(decls =>
+ if (newDefs.isEmpty) decls
+ else decls ::: newDefs.toList
+ )
)
case DefDef(_, _, _, vparamss, _, rhs) =>
if (sym.isClassConstructor) {
@@ -559,7 +561,7 @@ abstract class ExplicitOuter extends InfoTransform
/** The transformation method for whole compilation units */
override def transformUnit(unit: CompilationUnit) {
- atPhase(phase.next)(super.transformUnit(unit))
+ afterExplicitOuter(super.transformUnit(unit))
}
}
diff --git a/src/compiler/scala/tools/nsc/transform/Flatten.scala b/src/compiler/scala/tools/nsc/transform/Flatten.scala
index aa37c966e3..8856024a30 100644
--- a/src/compiler/scala/tools/nsc/transform/Flatten.scala
+++ b/src/compiler/scala/tools/nsc/transform/Flatten.scala
@@ -20,16 +20,14 @@ abstract class Flatten extends InfoTransform {
/** Updates the owning scope with the given symbol; returns the old symbol.
*/
- private def replaceSymbolInCurrentScope(sym: Symbol): Symbol = {
- atPhase(phase.next) {
- val scope = sym.owner.info.decls
- val old = scope lookup sym.name
- if (old ne NoSymbol)
- scope unlink old
+ private def replaceSymbolInCurrentScope(sym: Symbol): Symbol = afterFlatten {
+ val scope = sym.owner.info.decls
+ val old = scope lookup sym.name
+ if (old ne NoSymbol)
+ scope unlink old
- scope enter sym
- old
- }
+ scope enter sym
+ old
}
private def liftClass(sym: Symbol) {
@@ -53,7 +51,8 @@ abstract class Flatten extends InfoTransform {
val clazz = pre.typeSymbol
clazz.isClass && !clazz.isPackageClass && {
// Cannot flatten here: class A[T] { object B }
- atPhase(currentRun.erasurePhase.prev)(clazz.typeParams.isEmpty)
+ // was "at erasurePhase.prev"
+ beforeErasure(clazz.typeParams.isEmpty)
}
}
@@ -67,10 +66,11 @@ abstract class Flatten extends InfoTransform {
val decls1 = scopeTransform(clazz) {
val decls1 = newScope
if (clazz.isPackageClass) {
- atPhase(phase.next)(decls foreach (decls1 enter _))
- } else {
+ afterFlatten { decls foreach (decls1 enter _) }
+ }
+ else {
val oldowner = clazz.owner
- atPhase(phase.next)(oldowner.info)
+ afterFlatten { oldowner.info }
parents1 = parents mapConserve (this)
for (sym <- decls) {
@@ -122,11 +122,7 @@ abstract class Flatten extends InfoTransform {
liftedDefs(sym.enclosingTopLevelClass.owner) += tree
EmptyTree
case Select(qual, name) if (sym.isStaticModule && !sym.owner.isPackageClass) =>
- atPhase(phase.next) {
- atPos(tree.pos) {
- gen.mkAttributedRef(sym)
- }
- }
+ afterFlatten(atPos(tree.pos)(gen.mkAttributedRef(sym)))
case _ =>
tree
}
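
replaceSymbolInCurrentScope, reshaped above, is an unlink-then-enter swap that hands back the displaced entry. Modeled on a mutable map, with Option standing in for the NoSymbol sentinel:

    import scala.collection.mutable

    val scope = mutable.Map[String, Int]()

    def replaceInScope(name: String, sym: Int): Option[Int] = {
      val old = scope.get(name) // remember what was there, if anything
      scope(name) = sym         // unlink + enter in one update
      old
    }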
diff --git a/src/compiler/scala/tools/nsc/transform/LambdaLift.scala b/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
index 99cc7393aa..570eaba3a9 100644
--- a/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
+++ b/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
@@ -18,7 +18,7 @@ abstract class LambdaLift extends InfoTransform {
/** the following two members override abstract members in Transform */
val phaseName: String = "lambdalift"
-
+
/** Converts types of captured variables to *Ref types.
*/
def boxIfCaptured(sym: Symbol, tpe: Type, erasedTypes: Boolean) =
@@ -75,10 +75,10 @@ abstract class LambdaLift extends InfoTransform {
/** Buffers for lifted out classes and methods */
private val liftedDefs = new LinkedHashMap[Symbol, List[Tree]]
-
+
/** True if we are transforming under a ReferenceToBoxed node */
private var isBoxedRef = false
-
+
private type SymSet = TreeSet[Symbol]
private def newSymSet = new TreeSet[Symbol](_ isLess _)
@@ -128,7 +128,7 @@ abstract class LambdaLift extends InfoTransform {
if (!ss(sym)) {
ss addEntry sym
renamable addEntry sym
- atPhase(currentRun.picklerPhase) {
+ beforePickler {
// The param symbol in the MethodType should not be renamed, only the symbol in scope. This way,
// parameter names for named arguments are not changed. Example: without cloning the MethodType,
// def closure(x: Int) = { () => x }
@@ -221,7 +221,7 @@ abstract class LambdaLift extends InfoTransform {
for (caller <- called.keys ; callee <- called(caller) ; fvs <- free get callee ; fv <- fvs)
markFree(fv, caller)
} while (changedFreeVars)
-
+
def renameSym(sym: Symbol) {
val originalName = sym.name
val base = sym.name + nme.NAME_JOIN_STRING + (
@@ -245,7 +245,7 @@ abstract class LambdaLift extends InfoTransform {
debuglog("renaming impl class in step with %s: %s => %s".format(traitSym, originalImplName, implSym.name))
}
-
+
for (sym <- renamable) {
// If we renamed a trait from Foo to Foo$1, we must rename the implementation
// class from Foo$class to Foo$1$class. (Without special consideration it would
@@ -262,7 +262,7 @@ abstract class LambdaLift extends InfoTransform {
}
}
- atPhase(phase.next) {
+ afterOwnPhase {
for ((owner, freeValues) <- free.toList) {
val newFlags = SYNTHETIC | ( if (owner.isClass) PARAMACCESSOR | PrivateLocal else PARAM )
debuglog("free var proxy: %s, %s".format(owner.fullLocationString, freeValues.toList.mkString(", ")))
@@ -324,9 +324,9 @@ abstract class LambdaLift extends InfoTransform {
val addParams = cloneSymbols(ps).map(_.setFlag(PARAM))
sym.updateInfo(
lifted(MethodType(sym.info.params ::: addParams, sym.info.resultType)))
-
+
copyDefDef(tree)(vparamss = List(vparams ++ freeParams))
- case ClassDef(mods, name, tparams, impl @ Template(parents, self, body)) =>
+ case ClassDef(_, _, _, _) =>
       // Disabled attempt to add getters to freeParams
// this does not work yet. Problem is that local symbols need local names
// and references to local symbols need to be transformed into
@@ -338,8 +338,7 @@ abstract class LambdaLift extends InfoTransform {
// DefDef(getter, rhs) setPos tree.pos setType NoType
// }
// val newDefs = if (sym.isTrait) freeParams ::: (ps map paramGetter) else freeParams
- treeCopy.ClassDef(tree, mods, name, tparams,
- treeCopy.Template(impl, parents, self, body ::: freeParams))
+ deriveClassDef(tree)(impl => deriveTemplate(impl)(_ ::: freeParams))
}
case None =>
tree
@@ -420,10 +419,10 @@ abstract class LambdaLift extends InfoTransform {
def refConstr(expr: Tree): Tree = expr match {
case Try(block, catches, finalizer) =>
Try(refConstr(block), catches map refConstrCase, finalizer)
- case _ =>
- New(sym, expr)
+ case _ =>
+ New(sym.tpe, expr)
}
- def refConstrCase(cdef: CaseDef): CaseDef =
+ def refConstrCase(cdef: CaseDef): CaseDef =
CaseDef(cdef.pat, cdef.guard, refConstr(cdef.body))
treeCopy.ValDef(tree, mods, name, tpt1, typer.typedPos(rhs.pos) {
refConstr(constructorArg)
@@ -468,7 +467,7 @@ abstract class LambdaLift extends InfoTransform {
tree
}
}
-
+
private def preTransform(tree: Tree) = super.transform(tree) setType lifted(tree.tpe)
override def transform(tree: Tree): Tree = tree match {
@@ -477,19 +476,18 @@ abstract class LambdaLift extends InfoTransform {
case _ =>
postTransform(preTransform(tree))
}
-
+
/** Transform statements and add lifted definitions to them. */
override def transformStats(stats: List[Tree], exprOwner: Symbol): List[Tree] = {
def addLifted(stat: Tree): Tree = stat match {
- case ClassDef(mods, name, tparams, impl @ Template(parents, self, body)) =>
+ case ClassDef(_, _, _, _) =>
val lifted = liftedDefs get stat.symbol match {
case Some(xs) => xs reverseMap addLifted
case _ => log("unexpectedly no lifted defs for " + stat.symbol) ; Nil
}
- val result = treeCopy.ClassDef(
- stat, mods, name, tparams, treeCopy.Template(impl, parents, self, body ::: lifted))
- liftedDefs -= stat.symbol
- result
+ try deriveClassDef(stat)(impl => deriveTemplate(impl)(_ ::: lifted))
+ finally liftedDefs -= stat.symbol
+
case DefDef(_, _, _, _, _, Block(Nil, expr)) if !stat.symbol.isConstructor =>
deriveDefDef(stat)(_ => expr)
case _ =>
@@ -500,7 +498,7 @@ abstract class LambdaLift extends InfoTransform {
override def transformUnit(unit: CompilationUnit) {
computeFreeVars
- atPhase(phase.next)(super.transformUnit(unit))
+ afterOwnPhase(super.transformUnit(unit))
assert(liftedDefs.isEmpty, liftedDefs.keys mkString ", ")
}
} // class LambdaLifter
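
The free-variable analysis glimpsed above runs to a fixed point: free variables of callees propagate to their callers until a full sweep changes nothing. A standalone sketch of that loop:

    import scala.collection.mutable

    val called = Map("outer" -> Set("inner"), "inner" -> Set.empty[String])
    val free   = mutable.Map(
      "inner" -> mutable.Set("x"),
      "outer" -> mutable.Set.empty[String]
    )

    var changed = true
    while (changed) {
      changed = false
      for {
        (caller, callees) <- called
        callee            <- callees
        fv                <- free(callee).toList // snapshot before mutating
      } if (free(caller).add(fv)) changed = true
    }
    // free("outer") now contains "x"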
diff --git a/src/compiler/scala/tools/nsc/transform/LazyVals.scala b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
index 38dfcd4307..85ba539993 100644
--- a/src/compiler/scala/tools/nsc/transform/LazyVals.scala
+++ b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
@@ -93,7 +93,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
deriveDefDef(tree)(_ => if (LocalLazyValFinder.find(res)) typed(addBitmapDefs(sym, res)) else res)
}
- case Template(parents, self, body) => atOwner(currentOwner) {
+ case Template(_, _, body) => atOwner(currentOwner) {
val body1 = super.transformTrees(body)
var added = false
val stats =
@@ -105,8 +105,8 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
added = true
typed(addBitmapDefs(sym, stat))
} else stat
- case ValDef(mods, name, tpt, rhs) =>
- typed(treeCopy.ValDef(stat, mods, name, tpt, addBitmapDefs(stat.symbol, rhs)))
+ case ValDef(_, _, _, _) =>
+ typed(deriveValDef(stat)(addBitmapDefs(stat.symbol, _)))
case _ =>
stat
}
@@ -121,29 +121,29 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
})
toAdd0
} else List()
- treeCopy.Template(tree, parents, self, innerClassBitmaps ++ stats)
+ deriveTemplate(tree)(_ => innerClassBitmaps ++ stats)
}
- case ValDef(mods, name, tpt, rhs0) if (!sym.owner.isModule && !sym.owner.isClass) =>
- val rhs = super.transform(rhs0)
- treeCopy.ValDef(tree, mods, name, tpt,
- if (LocalLazyValFinder.find(rhs)) typed(addBitmapDefs(sym, rhs)) else rhs)
+ case ValDef(_, _, _, _) if !sym.owner.isModule && !sym.owner.isClass =>
+ deriveValDef(tree) { rhs0 =>
+ val rhs = super.transform(rhs0)
+ if (LocalLazyValFinder.find(rhs)) typed(addBitmapDefs(sym, rhs)) else rhs
+ }
case l@LabelDef(name0, params0, ifp0@If(_, _, _)) if name0.startsWith(nme.WHILE_PREFIX) =>
val ifp1 = super.transform(ifp0)
val If(cond0, thenp0, elsep0) = ifp1
+
if (LocalLazyValFinder.find(thenp0))
- treeCopy.LabelDef(l, name0, params0,
- treeCopy.If(ifp1, cond0, typed(addBitmapDefs(sym.owner, thenp0)), elsep0))
+ deriveLabelDef(l)(_ => treeCopy.If(ifp1, cond0, typed(addBitmapDefs(sym.owner, thenp0)), elsep0))
else
l
- case l@LabelDef(name0, params0, block@Block(stats0, _))
+ case l@LabelDef(name0, params0, block@Block(stats0, expr))
if name0.startsWith(nme.WHILE_PREFIX) || name0.startsWith(nme.DO_WHILE_PREFIX) =>
val stats1 = super.transformTrees(stats0)
if (LocalLazyValFinder.find(stats1))
- treeCopy.LabelDef(l, name0, params0,
- treeCopy.Block(block, typed(addBitmapDefs(sym.owner, stats1.head))::stats1.tail, block.expr))
+ deriveLabelDef(l)(_ => treeCopy.Block(block, typed(addBitmapDefs(sym.owner, stats1.head))::stats1.tail, expr))
else
l
@@ -168,9 +168,9 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
def isMatch(params: List[Ident]) = (params.tail corresponds methSym.tpe.params)(_.tpe == _.tpe)
if (bmps.isEmpty) rhs else rhs match {
- case Block(assign, l @ LabelDef(name, params, rhs1))
+ case Block(assign, l @ LabelDef(name, params, _))
if name.toString == ("_" + methSym.name) && isMatch(params) =>
- Block(assign, treeCopy.LabelDef(l, name, params, typed(prependStats(bmps, rhs1))))
+ Block(assign, deriveLabelDef(l)(rhs => typed(prependStats(bmps, rhs))))
case _ => prependStats(bmps, rhs)
}
@@ -242,7 +242,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
bmps(n)
else {
val sym = meth.newVariable(nme.newBitmapName(nme.BITMAP_NORMAL, n), meth.pos).setInfo(IntClass.tpe)
- atPhase(currentRun.typerPhase) {
+ beforeTyper {
sym addAnnotation VolatileAttr
}
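
Both this phase and Mixin track lazy-val initialization in int-sized bitmaps; assuming 32 bits per bitmap, lazy val number n maps to bit n % 32 of bitmap n / 32. The arithmetic, standalone (and assuming enough bitmaps are allocated):

    val bitmaps = scala.collection.mutable.ArrayBuffer(0, 0)

    def index(n: Int): Int = n / 32
    def mask(n: Int): Int  = 1 << (n % 32)

    def isInitialized(n: Int): Boolean = (bitmaps(index(n)) & mask(n)) != 0
    def markInitialized(n: Int): Unit  = bitmaps(index(n)) |= mask(n)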
diff --git a/src/compiler/scala/tools/nsc/transform/Mixin.scala b/src/compiler/scala/tools/nsc/transform/Mixin.scala
index 0ee2f1cd5a..c9794cc20f 100644
--- a/src/compiler/scala/tools/nsc/transform/Mixin.scala
+++ b/src/compiler/scala/tools/nsc/transform/Mixin.scala
@@ -71,7 +71,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
* maps all other types to themselves.
*/
private def toInterface(tp: Type): Type =
- atPhase(currentRun.mixinPhase)(tp.typeSymbol.toInterface).tpe
+ beforeMixin(tp.typeSymbol.toInterface).tpe
private def isFieldWithBitmap(field: Symbol) = {
field.info // ensure that nested objects are transformed
@@ -103,7 +103,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
private val toInterfaceMap = new TypeMap {
def apply(tp: Type): Type = mapOver( tp match {
case TypeRef(pre, sym, args) if (sym.isImplClass) =>
- typeRef(pre, atPhase(currentRun.mixinPhase)(sym.toInterface), args)
+ typeRef(pre, beforeMixin(sym.toInterface), args)
case _ => tp
})
}
@@ -123,7 +123,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
* @param mixinClass The mixin class that produced the superaccessor
*/
private def rebindSuper(base: Symbol, member: Symbol, mixinClass: Symbol): Symbol =
- atPhase(currentRun.picklerPhase.next) {
+ afterPickler {
var bcs = base.info.baseClasses.dropWhile(mixinClass !=).tail
var sym: Symbol = NoSymbol
debuglog("starting rebindsuper " + base + " " + member + ":" + member.tpe +
@@ -131,7 +131,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
while (!bcs.isEmpty && sym == NoSymbol) {
if (settings.debug.value) {
val other = bcs.head.info.nonPrivateDecl(member.name);
- log("rebindsuper " + bcs.head + " " + other + " " + other.tpe +
+ debuglog("rebindsuper " + bcs.head + " " + other + " " + other.tpe +
" " + other.isDeferred)
}
sym = member.matchingSymbol(bcs.head, base.thisType).suchThat(sym => !sym.hasFlag(DEFERRED | BRIDGE))
@@ -147,7 +147,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
member.hasAccessorFlag && (!member.isDeferred || (member hasFlag lateDEFERRED))
/** Is member overridden (either directly or via a bridge) in base class sequence `bcs`? */
- def isOverriddenAccessor(member: Symbol, bcs: List[Symbol]): Boolean = atPhase(ownPhase) {
+ def isOverriddenAccessor(member: Symbol, bcs: List[Symbol]): Boolean = beforeOwnPhase {
def hasOverridingAccessor(clazz: Symbol) = {
clazz.info.nonPrivateDecl(member.name).alternatives.exists(
sym =>
@@ -155,8 +155,9 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
!sym.hasFlag(MIXEDIN) &&
matchesType(sym.tpe, member.tpe, true))
}
- bcs.head != member.owner &&
- (hasOverridingAccessor(bcs.head) || isOverriddenAccessor(member, bcs.tail))
+ ( bcs.head != member.owner
+ && (hasOverridingAccessor(bcs.head) || isOverriddenAccessor(member, bcs.tail))
+ )
}
/** Add given member to given class, and mark member as mixed-in.
@@ -202,7 +203,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
setter setInfo MethodType(setter.newSyntheticValueParams(List(field.info)), UnitClass.tpe)
if (needsExpandedSetterName(field))
setter.name = nme.expandedSetterName(setter.name, clazz)
-
+
setter
}
@@ -241,7 +242,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
*/
def addMixedinMembers(clazz: Symbol, unit : CompilationUnit) {
def cloneBeforeErasure(iface: Symbol, clazz: Symbol, imember: Symbol): Symbol = {
- val newSym = atPhase(currentRun.erasurePhase) {
+ val newSym = beforeErasure {
val res = imember.cloneSymbol(clazz)
// since we used the member (imember) from the interface that represents the trait that's being mixed in,
// have to instantiate the interface type params (that may occur in imember's info) as they are seen from the class
@@ -337,8 +338,8 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
case _ => // otherwise mixin a field as well
// atPhase: the private field is moved to the implementation class by erasure,
// so it can no longer be found in the member's owner (the trait)
- val accessed = atPhase(currentRun.picklerPhase)(member.accessed)
- val sym = atPhase(currentRun.erasurePhase){ // #3857, need to retain info before erasure when cloning (since cloning only carries over the current entry in the type history)
+ val accessed = beforePickler(member.accessed)
+ val sym = beforeErasure { // #3857, need to retain info before erasure when cloning (since cloning only carries over the current entry in the type history)
clazz.newValue(nme.getterToLocal(member.name), member.pos).setInfo(member.tpe.resultType) // so we have a type history entry before erasure
}
sym.updateInfo(member.tpe.resultType) // info at current phase
@@ -349,13 +350,15 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
setAnnotations accessed.annotations)
}
}
- } else if (member.isSuperAccessor) { // mixin super accessors
+ }
+ else if (member.isSuperAccessor) { // mixin super accessors
val member1 = addMember(clazz, member.cloneSymbol(clazz)) setPos clazz.pos
assert(member1.alias != NoSymbol, member1)
val alias1 = rebindSuper(clazz, member.alias, mixinClass)
member1.asInstanceOf[TermSymbol] setAlias alias1
- } else if (member.isMethod && member.isModule && member.hasNoFlags(LIFTED | BRIDGE)) {
+ }
+ else if (member.isMethod && member.isModule && member.hasNoFlags(LIFTED | BRIDGE)) {
// mixin objects: todo what happens with abstract objects?
addMember(clazz, member.cloneSymbol(clazz, member.flags & ~(DEFERRED | lateDEFERRED)) setPos clazz.pos)
}
@@ -383,7 +386,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
var parents1 = parents
var decls1 = decls
if (!clazz.isPackageClass) {
- atPhase(phase.next)(clazz.owner.info)
+ afterMixin(clazz.owner.info)
if (clazz.isImplClass) {
clazz setFlag lateMODULE
var sourceModule = clazz.owner.info.decls.lookup(sym.name.toTermName)
@@ -449,7 +452,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
&& sym.owner == templ.symbol.owner
&& !sym.isLazy
&& !tree.isDef) {
- log("added use in: " + currentOwner + " -- " + tree)
+ debuglog("added use in: " + currentOwner + " -- " + tree)
usedIn(sym) ::= currentOwner
}
@@ -459,7 +462,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
}
}
SingleUseTraverser(templ)
- log("usedIn: " + usedIn)
+ debuglog("usedIn: " + usedIn)
usedIn filter {
case (_, member :: Nil) => member.isValue && member.isLazy
case _ => false
@@ -515,7 +518,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
tree match {
case Template(parents, self, body) =>
localTyper = erasure.newTyper(rootContext.make(tree, currentOwner))
- atPhase(phase.next)(currentOwner.owner.info)//todo: needed?
+ afterMixin(currentOwner.owner.info)//todo: needed?
if (!currentOwner.isTrait && !isValueClass(currentOwner))
addMixedinMembers(currentOwner, unit)
@@ -530,11 +533,11 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
self = sym.newValueParameter(nme.SELF, sym.pos) setInfo toInterface(currentOwner.typeOfThis)
val selfdef = ValDef(self) setType NoType
copyDefDef(tree)(vparamss = List(selfdef :: vparams))
- }
+ }
else EmptyTree
}
else {
- if (currentOwner.isTrait && sym.isSetter && !atPhase(currentRun.picklerPhase)(sym.isDeferred)) {
+ if (currentOwner.isTrait && sym.isSetter && !beforePickler(sym.isDeferred)) {
sym.addAnnotation(TraitSetterAnnotationClass)
}
tree
@@ -703,13 +706,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val rhs0 = (Super(clazz, tpnme.EMPTY) DOT stat.symbol.alias)(vparams map (v => Ident(v.symbol)): _*)
val rhs1 = localTyped(stat.pos, rhs0, stat.symbol.tpe.resultType)
- debuglog(
- "complete super acc " + stat.symbol.fullLocationString +
- " " + rhs1 + " " + stat.symbol.alias.fullLocationString +
- "/" + stat.symbol.alias.owner.hasFlag(lateINTERFACE)
- )//debug
-
- deriveDefDef(stat)(_ => atPhase(currentRun.mixinPhase)(transform(rhs1)))
+ deriveDefDef(stat)(_ => beforeMixin(transform(rhs1)))
case _ =>
stat
}
@@ -740,7 +737,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
def createBitmap: Symbol = {
val sym = clazz0.newVariable(bitmapName, clazz0.pos) setInfo IntClass.tpe
- atPhase(currentRun.typerPhase)(sym addAnnotation VolatileAttr)
+ beforeTyper(sym addAnnotation VolatileAttr)
category match {
case nme.BITMAP_TRANSIENT | nme.BITMAP_CHECKINIT_TRANSIENT => sym addAnnotation TransientAttr
@@ -848,7 +845,9 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val nulls = lazyValNullables(lzyVal).toList sortBy (_.id) map nullify
def syncBody = init ::: List(mkSetFlag(clazz, offset, lzyVal), UNIT)
- log("nulling fields inside " + lzyVal + ": " + nulls)
+ if (nulls.nonEmpty)
+ log("nulling fields inside " + lzyVal + ": " + nulls)
+
val result = gen.mkDoubleCheckedLocking(clazz, cond, syncBody, nulls)
typedPos(init.head.pos)(BLOCK(result, retVal))
}
@@ -942,7 +941,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
private def checkedGetter(lhs: Tree) = {
val sym = clazz.info decl lhs.symbol.getterName suchThat (_.isGetter)
if (needsInitAndHasOffset(sym)) {
- log("adding checked getter for: " + sym + " " + lhs.symbol.defaultFlagString)
+ debuglog("adding checked getter for: " + sym + " " + lhs.symbol.defaultFlagString)
List(localTyper typed mkSetFlag(clazz, fieldOffset(sym), sym))
}
else Nil
@@ -1129,7 +1128,6 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
*/
private def postTransform(tree: Tree): Tree = {
val sym = tree.symbol
- // assert(tree.tpe ne null, tree.getClass +" : "+ tree +" in "+ localTyper.context.tree)
// change every node type that refers to an implementation class to its
// corresponding interface, unless the node's symbol is an implementation class.
if (tree.tpe.typeSymbol.isImplClass && ((sym eq null) || !sym.isImplClass))
@@ -1162,7 +1160,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
def implSym = implClass(sym.owner).info.member(sym.name)
assert(target ne NoSymbol,
List(sym + ":", sym.tpe, sym.owner, implClass(sym.owner), implSym,
- atPhase(phase.prev)(implSym.tpe), phase) mkString " "
+ beforePrevPhase(implSym.tpe), phase) mkString " "
)
typedPos(tree.pos)(Apply(staticRef(target), transformSuper(qual) :: args))
}
@@ -1187,7 +1185,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val sym1 = sym.overridingSymbol(currentOwner.enclClass)
typedPos(tree.pos)((transformSuper(qual) DOT sym1)())
} else {
- staticCall(atPhase(phase.prev)(sym.overridingSymbol(implClass(sym.owner))))
+ staticCall(beforePrevPhase(sym.overridingSymbol(implClass(sym.owner))))
}
} else {
assert(!currentOwner.enclClass.isImplClass, currentOwner.enclClass)
@@ -1236,7 +1234,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val tree1 = super.transform(preTransform(tree))
// localTyper needed when not flattening inner classes. parts after an
// inner class will otherwise be typechecked with a wrong scope
- try atPhase(phase.next)(postTransform(tree1))
+ try afterMixin(postTransform(tree1))
finally localTyper = saved
}
}
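
mkDoubleCheckedLocking, invoked in the hunk above, emits the classic lazy-val pattern: an unsynchronized fast path over a volatile bitmap, then a re-check under the lock before initializing. Roughly what the generated code amounts to, hand-written:

    class C {
      @volatile private[this] var bitmap0: Int = 0
      private[this] var x0: String = _

      def x: String = {
        if ((bitmap0 & 1) == 0) synchronized {
          if ((bitmap0 & 1) == 0) { // re-check under the lock
            x0 = "computed"         // the lazy val's right-hand side
            bitmap0 |= 1            // publish via the volatile write
          }
        }
        x0
      }
    }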
diff --git a/src/compiler/scala/tools/nsc/transform/OverridingPairs.scala b/src/compiler/scala/tools/nsc/transform/OverridingPairs.scala
index e49f8d7c0b..d8c18c2d50 100644
--- a/src/compiler/scala/tools/nsc/transform/OverridingPairs.scala
+++ b/src/compiler/scala/tools/nsc/transform/OverridingPairs.scala
@@ -50,7 +50,7 @@ abstract class OverridingPairs {
val result = sym1.isType || (self.memberType(sym1) matches self.memberType(sym2))
debuglog("overriding-pairs? %s matches %s (%s vs. %s) == %s".format(
sym1.fullLocationString, sym2.fullLocationString, tp_s(sym1), tp_s(sym2), result))
-
+
result
}
diff --git a/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala b/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
index 1e1c9efebb..0851dad0c2 100644
--- a/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
+++ b/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
@@ -97,7 +97,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
private val wasSpecializedForTypeVars = perRunCaches.newMap[Symbol, Set[Symbol]]() withDefaultValue Set()
/** Concrete methods that use a specialized type, or override such methods. */
- private val concreteSpecMethods = perRunCaches.newSet[Symbol]()
+ private val concreteSpecMethods = perRunCaches.newWeakSet[Symbol]()
private def isSpecialized(sym: Symbol) = sym hasAnnotation SpecializedClass
private def hasSpecializedFlag(sym: Symbol) = sym hasFlag SPECIALIZED
@@ -110,7 +110,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
tp baseType GroupOfSpecializable match {
case TypeRef(_, GroupOfSpecializable, arg :: Nil) =>
arg.typeArgs map (_.typeSymbol)
- case _ =>
+ case _ =>
List(tp.typeSymbol)
}
}
@@ -134,6 +134,16 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case _ => false
}
+ def unspecializedSymbol(sym: Symbol): Symbol = {
+ if (sym hasFlag SPECIALIZED) {
+ // add initialization from its generic class constructor
+ val genericName = nme.unspecializedName(sym.name)
+ val member = sym.owner.info.decl(genericName.toTypeName)
+ member
+ }
+ else NoSymbol
+ }
+
object TypeEnv {
/** Return a new type environment binding specialized type parameters of sym to
* the given args. Expects the lists to have the same length.
@@ -251,7 +261,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val stvTypeParams = specializedTypeVars(target.info.typeParams map (_.info))
val stvResult = specializedTypeVars(target.info.resultType)
- log("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
+ debuglog("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
(stvTypeParams -- stvResult).nonEmpty
}
@@ -308,7 +318,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
else specializedTypeVars(sym).intersect(env.keySet)
)
val (methparams, others) = tvars.toList sortBy ("" + _.name) partition (_.owner.isMethod)
- log("specName(" + sym + ") env: " + env + " tvars: " + tvars)
+ debuglog("specName(" + sym + ") env: " + env + " tvars: " + tvars)
specializedName(sym.name, methparams map env, others map env)
}
@@ -397,8 +407,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
tpes foreach (tp => buf ++= specializedTypeVars(tp))
buf.result
}
- def specializedTypeVars(sym: Symbol): immutable.Set[Symbol] =
- atPhase(currentRun.typerPhase)(specializedTypeVars(sym.info))
+ def specializedTypeVars(sym: Symbol): immutable.Set[Symbol] = beforeTyper(specializedTypeVars(sym.info))
/** Return the set of @specialized type variables mentioned by the given type.
* It only counts type variables that appear:
@@ -506,7 +515,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
def cloneInSpecializedClass(member: Symbol, flagFn: Long => Long) =
member.cloneSymbol(sClass, flagFn(member.flags | SPECIALIZED))
-
+
sClass.sourceFile = clazz.sourceFile
currentRun.symSource(sClass) = clazz.sourceFile // needed later on by mixin
@@ -539,7 +548,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
var res: List[Type] = Nil
// log(specializedClass + ": seeking specialized parents of class with parents: " + parents.map(_.typeSymbol))
for (p <- parents) {
- val stp = atPhase(phase.next)(specializedType(p))
+ val stp = afterSpecialize(specializedType(p))
if (stp != p)
if (p.typeSymbol.isTrait) res ::= stp
else if (currentRun.compiles(clazz))
@@ -549,7 +558,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
res
}
- var parents = List(applyContext(atPhase(currentRun.typerPhase)(clazz.tpe)))
+ var parents = List(applyContext(beforeTyper(clazz.tpe)))
// log("!!! Parents: " + parents + ", sym: " + parents.map(_.typeSymbol))
if (parents.head.typeSymbol.isTrait)
parents = parents.head.parents.head :: parents
@@ -563,7 +572,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
// as with the parents and assign it to typeOfThis.
if (clazz.typeOfThis.typeConstructor ne clazz.typeConstructor) {
sClass.typeOfThis = applyContext(clazz.typeOfThis)
- log("Rewriting self-type for specialized class:\n" +
+ debuglog("Rewriting self-type for specialized class:\n" +
" " + clazz.defStringSeenAs(clazz.typeOfThis) + "\n" +
" => " + sClass.defStringSeenAs(sClass.typeOfThis)
)
@@ -571,7 +580,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
polyType(newClassTParams, ClassInfoType(parents ::: extraSpecializedMixins, decls1, sClass))
}
- atPhase(phase.next)(sClass setInfo specializedInfoType)
+ afterSpecialize(sClass setInfo specializedInfoType)
val fullEnv = outerEnv ++ env
/** Enter 'sym' in the scope of the current specialized class. It's type is
@@ -645,7 +654,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
})
}
else
- log("conflicting env for " + m + " env: " + env)
+ debuglog("conflicting env for " + m + " env: " + env)
}
else if (m.isDeferred) { // abstract methods
val specMember = enterMember(cloneInSpecializedClass(m, _ | DEFERRED))
@@ -714,7 +723,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
typeEnv(specClass) = fullEnv
specClass.name = specializedName(specClass, fullEnv).toTypeName
enterMember(specClass)
- log("entered specialized class " + specClass.fullName)
+ debuglog("entered specialized class " + specClass.fullName)
info(specClass) = SpecializedInnerClass(m, fullEnv)
}
}
@@ -751,7 +760,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (existing != NoSymbol)
clazz.owner.info.decls.unlink(existing)
- atPhase(phase.next)(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
+ afterSpecialize(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
}
if (subclasses.nonEmpty) clazz.resetFlag(FINAL)
cleanAnyRefSpecCache(clazz, decls1)
@@ -770,7 +779,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
private def normalizeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv): List[Symbol] = {
debuglog("normalizeMember: " + sym.fullName)
sym :: (
- if (!sym.isMethod || atPhase(currentRun.typerPhase)(sym.typeParams.isEmpty)) Nil
+ if (!sym.isMethod || beforeTyper(sym.typeParams.isEmpty)) Nil
else {
var specializingOn = specializedParams(sym)
val unusedStvars = specializingOn filterNot specializedTypeVars(sym.info)
@@ -828,7 +837,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (sym.isPrivate/* || sym.isProtected*/) {
//sym.privateWithin = sym.enclosingPackage
sym.resetFlag(PRIVATE).setFlag(PROTECTED)
- log("-->d SETTING PRIVATE WITHIN TO " + sym.enclosingPackage + " for " + sym)
+ debuglog("-->d SETTING PRIVATE WITHIN TO " + sym.enclosingPackage + " for " + sym)
}
val specMember = subst(outerEnv)(specializedOverload(owner, sym, spec))
@@ -911,7 +920,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
checkOverriddenTParams(overridden)
val env = unify(overridden.info, overriding.info, emptyEnv, false)
- def atNext = atPhase(phase.next)(overridden.owner.info.decl(specializedName(overridden, env)))
+ def atNext = afterSpecialize(overridden.owner.info.decl(specializedName(overridden, env)))
debuglog("\t\tenv: " + env + "isValid: " + TypeEnv.isValid(env, overridden) + "found: " + atNext)
if (TypeEnv.restrict(env, stvars).nonEmpty && TypeEnv.isValid(env, overridden) && atNext != NoSymbol)
@@ -926,7 +935,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case (NoSymbol, _) => None
case (overridden, env) =>
val om = specializedOverload(clazz, overridden, env)
- log("Added specialized overload %s for %s in env: %s with type: %s".format(om, overriding.fullName, env, om.info))
+ debuglog("Added specialized overload %s for %s in env: %s with type: %s".format(om, overriding.fullName, env, om.info))
typeEnv(om) = env
addConcreteSpecMethod(overriding)
info(om) = (
@@ -951,7 +960,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
}
)
overloads(overriding) ::= Overload(om, env)
- ifDebug(atPhase(phase.next)(assert(
+ ifDebug(afterSpecialize(assert(
overridden.owner.info.decl(om.name) != NoSymbol,
"Could not find " + om.name + " in " + overridden.owner.info.decls))
)
@@ -1102,7 +1111,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case cinfo @ ClassInfoType(parents, decls, clazz) if !unspecializableClass(cinfo) =>
val tparams = tpe.typeParams
if (tparams.isEmpty)
- atPhase(phase.next)(parents map (_.typeSymbol.info))
+ afterSpecialize(parents map (_.typeSymbol.info))
val parents1 = parents map specializedType
debuglog("transformInfo %s %s with parents1 %s ph: %s".format(
@@ -1148,7 +1157,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (warnings)
reporter.warning(tvar.pos, "Bounds prevent specialization of " + tvar)
- log("specvars: " +
+ debuglog("specvars: " +
tvar.info.bounds.lo + ": " +
specializedTypeVars(tvar.info.bounds.lo) + " " +
subst(env, tvar.info.bounds.hi) + ": " +
@@ -1217,7 +1226,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
} else NoSymbol
def illegalSpecializedInheritance(clazz: Symbol): Boolean = (
- hasSpecializedFlag(clazz)
+ hasSpecializedFlag(clazz)
&& originalClass(clazz).parentSymbols.exists(p => hasSpecializedParams(p) && !p.isTrait)
)
@@ -1275,20 +1284,20 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
/** The specialized symbol of 'tree.symbol' for tree.tpe, if there is one */
def specSym(qual: Tree): Option[Symbol] = {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, false)
- log("[specSym] checking for rerouting: %s with \n\tsym.tpe: %s, \n\ttree.tpe: %s \n\tenv: %s \n\tname: %s"
+ debuglog("[specSym] checking for rerouting: %s with \n\tsym.tpe: %s, \n\ttree.tpe: %s \n\tenv: %s \n\tname: %s"
.format(tree, symbol.tpe, tree.tpe, env, specializedName(symbol, env)))
if (!env.isEmpty) { // a method?
val specCandidates = qual.tpe.member(specializedName(symbol, env))
val specMember = specCandidates suchThat { s =>
doesConform(symbol, tree.tpe, qual.tpe.memberType(s), env)
}
-
- log("[specSym] found: " + specCandidates.tpe + ", instantiated as: " + tree.tpe)
- log("[specSym] found specMember: " + specMember)
+
+ debuglog("[specSym] found: " + specCandidates.tpe + ", instantiated as: " + tree.tpe)
+ debuglog("[specSym] found specMember: " + specMember)
if (specMember ne NoSymbol)
if (TypeEnv.includes(typeEnv(specMember), env)) Some(specMember)
else {
- log("wrong environments for specialized member: \n\ttypeEnv(%s) = %s\n\tenv = %s".format(specMember, typeEnv(specMember), env))
+ debuglog("wrong environments for specialized member: \n\ttypeEnv(%s) = %s\n\tenv = %s".format(specMember, typeEnv(specMember), env))
None
}
else None
@@ -1298,11 +1307,11 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
curTree = tree
tree match {
case Apply(Select(New(tpt), nme.CONSTRUCTOR), args) =>
- log("Attempting to specialize new %s(%s)".format(tpt, args.mkString(", ")))
+ debuglog("Attempting to specialize new %s(%s)".format(tpt, args.mkString(", ")))
val found = findSpec(tpt.tpe)
if (found.typeSymbol ne tpt.tpe.typeSymbol) {
// the ctor can be specialized
- log("** instantiated specialized type: " + found)
+ debuglog("** instantiated specialized type: " + found)
try localTyper.typedPos(tree.pos)(New(found, transformTrees(args): _*))
catch {
case te: TypeError =>
@@ -1331,7 +1340,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
)
val tree1 = gen.mkTypeApply(Select(qual1, specMember), residualTargs)
- log("rewrote " + tree + " to " + tree1)
+ debuglog("rewrote " + tree + " to " + tree1)
localTyper.typedOperator(atPos(tree.pos)(tree1)) // being polymorphic, it must be a method
case None => super.transform(tree)
@@ -1339,8 +1348,8 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case Select(Super(_, _), name) if illegalSpecializedInheritance(currentClass) =>
val pos = tree.pos
- log(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.")
- log(pos.lineContent)
+ debuglog(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.")
+ debuglog(pos.lineContent)
tree
case Select(qual, name) =>
@@ -1396,7 +1405,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
(new CollectMethodBodies)(tree)
val parents1 = map2(currentOwner.info.parents, parents)((tpe, parent) =>
TypeTree(tpe) setPos parent.pos)
-
+
treeCopy.Template(tree,
parents1 /*currentOwner.info.parents.map(tpe => TypeTree(tpe) setPos parents.head.pos)*/ ,
self,
@@ -1410,7 +1419,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val superRef: Tree = Select(Super(This(tpnme.EMPTY), tpnme.EMPTY), nme.CONSTRUCTOR)
forwardCtorCall(tree.pos, superRef, vparamss, symbol.owner)
}
- if (symbol.isPrimaryConstructor)
+ if (symbol.isPrimaryConstructor)
localTyper.typedPos(symbol.pos)(deriveDefDef(tree)(_ => Block(List(t), Literal(Constant()))))
else // duplicate the original constructor
duplicateBody(ddef, info(symbol).target)
@@ -1424,7 +1433,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
deriveDefDef(tree1)(transform)
case NormalizedMember(target) =>
- log("Normalized member: " + symbol + ", target: " + target)
+ debuglog("Normalized member: " + symbol + ", target: " + target)
if (target.isDeferred || conflicting(typeEnv(symbol))) {
deriveDefDef(tree)(_ => localTyper typed gen.mkSysErrorCall("Fatal error in code generation: this should never be called."))
}
@@ -1438,7 +1447,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case SpecialOverride(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
//debuglog("moving implementation, body of target " + target + ": " + body(target))
- log("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
+ debuglog("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
// we have an rhs, specialize it
val tree1 = addBody(ddef, target)
(new ChangeOwnerTraverser(target, tree1.symbol))(tree1.rhs)
@@ -1447,24 +1456,24 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case SpecialOverload(original, env) =>
debuglog("completing specialized " + symbol.fullName + " calling " + original)
- log("special overload " + original + " -> " + env)
+ debuglog("special overload " + original + " -> " + env)
val t = DefDef(symbol, { vparamss =>
val fun = Apply(Select(This(symbol.owner), original),
makeArguments(original, vparamss.head))
- log("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
+ debuglog("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
gen.maybeMkAsInstanceOf(fun,
symbol.owner.thisType.memberType(symbol).finalResultType,
symbol.owner.thisType.memberType(original).finalResultType)
})
- log("created special overload tree " + t)
+ debuglog("created special overload tree " + t)
debuglog("created " + t)
localTyper.typed(t)
case fwd @ Forward(_) =>
- log("forward: " + fwd + ", " + ddef)
+ debuglog("forward: " + fwd + ", " + ddef)
val rhs1 = forwardCall(tree.pos, gen.mkAttributedRef(symbol.owner.thisType, fwd.target), vparamss)
- log("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
+ debuglog("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case SpecializedAccessor(target) =>
@@ -1472,43 +1481,34 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
gen.mkAttributedRef(target)
else
Assign(gen.mkAttributedRef(target), Ident(vparamss.head.head.symbol))
- log("specialized accessor: " + target + " -> " + rhs1)
+ debuglog("specialized accessor: " + target + " -> " + rhs1)
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case Abstract(targ) =>
- log("abstract: " + targ)
+ debuglog("abstract: " + targ)
localTyper.typed(deriveDefDef(tree)(rhs => rhs))
}
- case ValDef(mods, name, tpt, rhs) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
+ case ValDef(_, _, _, _) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
assert(body.isDefinedAt(symbol.alias), body)
- val tree1 = treeCopy.ValDef(tree, mods, name, tpt, body(symbol.alias).duplicate)
+ val tree1 = deriveValDef(tree)(_ => body(symbol.alias).duplicate)
debuglog("now typing: " + tree1 + " in " + tree.symbol.owner.fullName)
+
val d = new Duplicator
- val ValDef(mods1, name1, tpt1, rhs1) = d.retyped(
+ val newValDef = d.retyped(
localTyper.context1.asInstanceOf[d.Context],
tree1,
symbol.alias.enclClass,
symbol.enclClass,
typeEnv(symbol.alias) ++ typeEnv(tree.symbol)
)
- val t = treeCopy.ValDef(tree1, mods1, name1, tpt1, transform(rhs1))
- log("valdef " + tree + " -> " + t)
- t
-
-// val tree1 =
-// treeCopy.ValDef(tree, mods, name, tpt,
-// localTyper.typed(
-// Apply(Select(Super(currentClass, nme.EMPTY), symbol.alias.getter(symbol.alias.owner)),
-// List())))
-// debuglog("replaced ValDef: " + tree1 + " in " + tree.symbol.owner.fullName)
-// tree1
+ deriveValDef(newValDef)(transform)
case Apply(sel @ Select(sup @ Super(qual, name), name1), args)
- if (sup.symbol.info.parents != atPhase(phase.prev)(sup.symbol.info.parents)) =>
+ if (sup.symbol.info.parents != beforePrevPhase(sup.symbol.info.parents)) =>
def parents = sup.symbol.info.parents
- debuglog(tree + " parents changed from: " + atPhase(phase.prev)(parents) + " to: " + parents)
+ debuglog(tree + " parents changed from: " + beforePrevPhase(parents) + " to: " + parents)
val res = localTyper.typed(
Apply(Select(Super(qual, name) setPos sup.pos, name1) setPos sel.pos, transformTrees(args)) setPos tree.pos)
@@ -1525,7 +1525,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val meth = addBody(tree, source)
val d = new Duplicator
- log("-->d DUPLICATING: " + meth)
+ debuglog("-->d DUPLICATING: " + meth)
d.retyped(
localTyper.context1.asInstanceOf[d.Context],
meth,
@@ -1590,7 +1590,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if m.hasFlag(SPECIALIZED)
&& (m.sourceFile ne null)
&& satisfiable(typeEnv(m), !sClass.hasFlag(SPECIALIZED))) {
- log("creating tree for " + m.fullName)
+ debuglog("creating tree for " + m.fullName)
if (m.isMethod) {
if (info(m).target.hasAccessorFlag) hasSpecializedFields = true
if (m.isClassConstructor) {
@@ -1646,7 +1646,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
buf +=
ClassDef(specCls, atPos(impl.pos)(Template(parents, emptyValDef, List()))
.setSymbol(specCls.newLocalDummy(sym1.pos))) setPos tree.pos
- log("created synthetic class: " + specCls + " of " + sym1 + " in env: " + env)
+ debuglog("created synthetic class: " + specCls + " of " + sym1 + " in env: " + env)
}
case _ =>
}
@@ -1708,7 +1708,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
* that here are not garbage collected at the end of a compiler run!
*/
def addConcreteSpecMethod(m: Symbol) {
- if (!forInteractive && currentRun.compiles(m)) concreteSpecMethods += m
+ if (currentRun.compiles(m)) concreteSpecMethods += m
}
private def makeArguments(fun: Symbol, vparams: List[Symbol]): List[Tree] = (
@@ -1724,7 +1724,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
informProgress("specializing " + unit)
override def transform(tree: Tree) = {
val resultTree = if (settings.nospecialization.value) tree
- else atPhase(phase.next)(specializeCalls(unit).transform(tree))
+ else afterSpecialize(specializeCalls(unit).transform(tree))
// Remove the final modifier and @inline annotation from anything in the
// original class (since it's being overridden in at least one subclass).
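The beforeTyper/afterSpecialize/beforePrevPhase rewrites above replace raw atPhase arithmetic with named phase-positioning helpers. A minimal sketch of how such helpers could look, assuming the atPhase, phase and currentRun members available inside Global (the actual definitions ship elsewhere in this patch):

    // Hypothetical sketch: pin an expression to a well-known phase instead
    // of spelling out atPhase(...) at every call site.
    @inline final def beforeTyper[T](op: => T): T =
      atPhase(currentRun.typerPhase)(op)
    @inline final def afterSpecialize[T](op: => T): T =
      atPhase(currentRun.specializePhase.next)(op)
    @inline final def beforePrevPhase[T](op: => T): T =
      atPhase(phase.prev)(op)

Besides being shorter, the names make the intended phase explicit, where atPhase(phase.next) only reads correctly if you already know which phase is running.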
diff --git a/src/compiler/scala/tools/nsc/transform/TailCalls.scala b/src/compiler/scala/tools/nsc/transform/TailCalls.scala
index 93fcd27191..848d6be47b 100644
--- a/src/compiler/scala/tools/nsc/transform/TailCalls.scala
+++ b/src/compiler/scala/tools/nsc/transform/TailCalls.scala
@@ -204,7 +204,7 @@ abstract class TailCalls extends Transform {
fail(reason)
}
def rewriteTailCall(recv: Tree): Tree = {
- log("Rewriting tail recursive call: " + fun.pos.lineContent.trim)
+ debuglog("Rewriting tail recursive call: " + fun.pos.lineContent.trim)
ctx.accessed = true
typedPos(fun.pos)(Apply(Ident(ctx.label), recv :: transformArgs))
@@ -228,7 +228,7 @@ abstract class TailCalls extends Transform {
debuglog("Considering " + dd.name + " for tailcalls")
val newRHS = transform(rhs0, newCtx)
- deriveDefDef(tree)(rhs =>
+ deriveDefDef(tree)(rhs =>
if (newCtx.isTransformed) {
/** We have rewritten the tree, but there may be nested recursive calls remaining.
* If @tailrec is given we need to fail those now.
@@ -262,11 +262,7 @@ abstract class TailCalls extends Transform {
)
case CaseDef(pat, guard, body) =>
- treeCopy.CaseDef(tree,
- pat,
- guard,
- transform(body)
- )
+ deriveCaseDef(tree)(transform)
case If(cond, thenp, elsep) =>
treeCopy.If(tree,
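For context on rewriteTailCall above: a self-recursive call in tail position is replaced by a jump to a method-local label (Apply(Ident(ctx.label), ...)), so no new stack frame is allocated. A source-level illustration of the effect, not the literal phase output:

    import scala.annotation.tailrec
    @tailrec def sum(xs: List[Int], acc: Int): Int =
      if (xs.isEmpty) acc else sum(xs.tail, acc + xs.head)
    // conceptually becomes:
    //   def sum(xs: List[Int], acc: Int): Int = {
    //     <label> L(xs, acc):
    //       if (xs.isEmpty) acc else jump L(xs.tail, acc + xs.head)
    //   }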
diff --git a/src/compiler/scala/tools/nsc/transform/UnCurry.scala b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
index 4b587a3f41..ee565530b7 100644
--- a/src/compiler/scala/tools/nsc/transform/UnCurry.scala
+++ b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
@@ -144,13 +144,13 @@ abstract class UnCurry extends InfoTransform
* todo: maybe clone a pre-existing exception instead?
* (but what to do about exceptions that miss their targets?)
*/
- private def nonLocalReturnThrow(expr: Tree, meth: Symbol) =
- localTyper.typed {
- Throw(
- New(
- TypeTree(nonLocalReturnExceptionType(expr.tpe)),
- List(List(Ident(nonLocalReturnKey(meth)), expr))))
- }
+ private def nonLocalReturnThrow(expr: Tree, meth: Symbol) = localTyper typed {
+ Throw(
+ nonLocalReturnExceptionType(expr.tpe.widen),
+ Ident(nonLocalReturnKey(meth)),
+ expr
+ )
+ }
/** Transform (body, key) to:
*
@@ -166,31 +166,18 @@ abstract class UnCurry extends InfoTransform
* }
*/
private def nonLocalReturnTry(body: Tree, key: Symbol, meth: Symbol) = {
- localTyper.typed {
- val extpe = nonLocalReturnExceptionType(meth.tpe.finalResultType)
- val ex = meth.newValue(nme.ex, body.pos) setInfo extpe
- val pat = Bind(ex,
- Typed(Ident(nme.WILDCARD),
- AppliedTypeTree(Ident(NonLocalReturnControlClass),
- List(Bind(tpnme.WILDCARD,
- EmptyTree)))))
- val rhs =
- If(
- Apply(
- Select(
- Apply(Select(Ident(ex), "key"), List()),
- Object_eq),
- List(Ident(key))),
- Apply(
- TypeApply(
- Select(
- Apply(Select(Ident(ex), "value"), List()),
- Any_asInstanceOf),
- List(TypeTree(meth.tpe.finalResultType))),
- List()),
- Throw(Ident(ex)))
- val keyDef = ValDef(key, New(ObjectClass))
- val tryCatch = Try(body, List(CaseDef(pat, EmptyTree, rhs)), EmptyTree)
+ localTyper typed {
+ val extpe = nonLocalReturnExceptionType(meth.tpe.finalResultType)
+ val ex = meth.newValue(body.pos, nme.ex) setInfo extpe
+ val pat = gen.mkBindForCase(ex, NonLocalReturnControlClass, List(meth.tpe.finalResultType))
+ val rhs = (
+ IF ((ex DOT nme.key)() OBJ_EQ Ident(key))
+ THEN ((ex DOT nme.value)())
+ ELSE (Throw(Ident(ex)))
+ )
+ val keyDef = ValDef(key, New(ObjectClass.tpe))
+ val tryCatch = Try(body, pat -> rhs)
+
Block(List(keyDef), tryCatch)
}
}
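The nonLocalReturnThrow/nonLocalReturnTry pair rewritten above implements non-local returns: a return inside a closure throws a scala.runtime.NonLocalReturnControl carrying a per-method key object, and the enclosing method catches it, exactly as the IF (key OBJ_EQ ...) tree above spells out. A hedged source-level illustration (names invented):

    def firstPositive(xs: List[Int]): Int = {
      xs foreach { x => if (x > 0) return x }  // return escapes the closure
      -1
    }
    // compiles to approximately:
    //   def firstPositive(xs: List[Int]): Int = {
    //     val key = new Object
    //     try {
    //       xs foreach { x =>
    //         if (x > 0) throw new NonLocalReturnControl(key, x)
    //       }
    //       -1
    //     } catch {
    //       case ex: NonLocalReturnControl[_] =>
    //         if (ex.key eq key) ex.value.asInstanceOf[Int] else throw ex
    //     }
    //   }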
@@ -260,7 +247,7 @@ abstract class UnCurry extends InfoTransform
else List(ObjectClass.tpe, fun.tpe, SerializableClass.tpe)
anonClass setInfo ClassInfoType(parents, newScope, anonClass)
- val applyMethod = anonClass.newMethod(nme.apply, fun.pos, FINAL)
+ val applyMethod = anonClass.newMethod(nme.apply, fun.pos, FINAL)
applyMethod setInfoAndEnter MethodType(applyMethod newSyntheticValueParams formals, restpe)
anonClass addAnnotation serialVersionUIDAnnotation
@@ -357,7 +344,7 @@ abstract class UnCurry extends InfoTransform
localTyper.typedPos(fun.pos) {
Block(
List(ClassDef(anonClass, NoMods, List(List()), List(List()), members, fun.pos)),
- Typed(New(anonClass), TypeTree(fun.tpe)))
+ Typed(New(anonClass.tpe), TypeTree(fun.tpe)))
}
}
}
@@ -370,7 +357,7 @@ abstract class UnCurry extends InfoTransform
// when calling into scala varargs, make sure it's a sequence.
def arrayToSequence(tree: Tree, elemtp: Type) = {
- atPhase(phase.next) {
+ afterUncurry {
localTyper.typedPos(pos) {
val pt = arrayType(elemtp)
val adaptedTree = // might need to cast to Array[elemtp], as arrays are not covariant
@@ -394,7 +381,7 @@ abstract class UnCurry extends InfoTransform
else if (tp.bounds.hi ne tp) getManifest(tp.bounds.hi)
else localTyper.getManifestTree(tree, tp, false)
}
- atPhase(phase.next) {
+ afterUncurry {
localTyper.typedPos(pos) {
Apply(gen.mkAttributedSelect(tree, toArraySym),
List(getManifest(tree.tpe.baseType(TraversableClass).typeArgs.head)))
@@ -419,7 +406,7 @@ abstract class UnCurry extends InfoTransform
else arrayToSequence(mkArray, varargsElemType)
}
- atPhase(phase.next) {
+ afterUncurry {
if (isJava && isPrimitiveArray(suffix.tpe) && isArrayOfSymbol(fun.tpe.params.last.tpe, ObjectClass)) {
suffix = localTyper.typedPos(pos) {
gen.mkRuntimeCall(nme.toObjectArray, List(suffix))
@@ -465,6 +452,25 @@ abstract class UnCurry extends InfoTransform
}
}
+ private def isSelfSynchronized(ddef: DefDef) = ddef.rhs match {
+ case Apply(fn @ TypeApply(Select(sel, _), _), _) =>
+ fn.symbol == Object_synchronized && sel.symbol == ddef.symbol.enclClass && !ddef.symbol.enclClass.isTrait
+ case _ => false
+ }
+
+ /** If an eligible method is entirely wrapped in a call to synchronized
+ * locked on the same instance, remove the synchronized scaffolding and
+ * mark the method symbol SYNCHRONIZED for bytecode generation.
+ */
+ private def translateSynchronized(tree: Tree) = tree match {
+ case dd @ DefDef(_, _, _, _, _, Apply(fn, body :: Nil)) if isSelfSynchronized(dd) =>
+ log("Translating " + dd.symbol.defString + " into synchronized method")
+ dd.symbol setFlag SYNCHRONIZED
+ deriveDefDef(dd)(_ => body)
+ case _ => tree
+ }
+ def isNonLocalReturn(ret: Return) = ret.symbol != currentOwner.enclMethod || currentOwner.isLazy
+
// ------ The tree transformers --------------------------------------------------------
def mainTransform(tree: Tree): Tree = {
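translateSynchronized above unwraps a method body that is one big this.synchronized { ... } call and sets the SYNCHRONIZED flag on the method symbol instead, letting the backend emit an ACC_SYNCHRONIZED method rather than explicit monitor instructions. Illustration:

    class Counter {
      private var n = 0
      def incr(): Int = synchronized { n += 1; n }
    }
    // after this transform, conceptually:
    //   def incr(): Int = { n += 1; n }
    // with the symbol flagged SYNCHRONIZED; traits are excluded, as is any
    // synchronized call locked on anything other than the method's own this.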
@@ -508,9 +514,10 @@ abstract class UnCurry extends InfoTransform
// breakage until a reasonable interface is settled upon.
if ((sym ne null) && (sym.elisionLevel.exists (_ < settings.elidebelow.value || settings.noassertions.value)))
replaceElidableTree(tree)
- else tree match {
+ else translateSynchronized(tree) match {
case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
if (dd.symbol hasAnnotation VarargsClass) saveRepeatedParams(dd)
+
withNeedLift(false) {
if (dd.symbol.isClassConstructor) {
atOwner(sym) {
@@ -525,11 +532,11 @@ abstract class UnCurry extends InfoTransform
treeCopy.Block(rhs, presupers ::: supercalls ::: others, transform(expr))
}
treeCopy.DefDef(
- tree, mods, name, transformTypeDefs(tparams),
+ dd, mods, name, transformTypeDefs(tparams),
transformValDefss(vparamss), transform(tpt), rhs1)
}
} else {
- super.transform(tree)
+ super.transform(dd)
}
}
case ValDef(_, _, _, rhs) =>
@@ -564,6 +571,9 @@ abstract class UnCurry extends InfoTransform
case Assign(lhs, _) if lhs.symbol.owner != currentMethod || lhs.symbol.hasFlag(LAZY | ACCESSOR) =>
withNeedLift(true) { super.transform(tree) }
+ case ret @ Return(_) if (isNonLocalReturn(ret)) =>
+ withNeedLift(true) { super.transform(ret) }
+
case Try(block, catches, finalizer) =>
if (needTryLift || shouldBeLiftedAnyway(tree)) transform(liftTree(tree))
else super.transform(tree)
@@ -594,7 +604,7 @@ abstract class UnCurry extends InfoTransform
result setType uncurryTreeType(result.tpe)
}
- def postTransform(tree: Tree): Tree = atPhase(phase.next) {
+ def postTransform(tree: Tree): Tree = afterUncurry {
def applyUnary(): Tree = {
// TODO_NMT: verify that the inner tree of a type-apply also gets parens if the
// whole tree is a polymorphic nullary method application
@@ -620,13 +630,11 @@ abstract class UnCurry extends InfoTransform
* In particular, this case will add:
* - synthetic Java varargs forwarders for repeated parameters
*/
- case Template(parents, self, body) =>
+ case Template(_, _, _) =>
localTyper = typer.atOwner(tree, currentClass)
- val tmpl = if (!forMSIL || forMSIL) {
- treeCopy.Template(tree, parents, self, transformTrees(newMembers.toList) ::: body)
- } else super.transform(tree).asInstanceOf[Template]
- newMembers.clear
- tmpl
+ try deriveTemplate(tree)(transformTrees(newMembers.toList) ::: _)
+ finally newMembers.clear()
+
case dd @ DefDef(_, _, _, vparamss0, _, rhs0) =>
val flatdd = copyDefDef(dd)(
vparamss = List(vparamss0.flatten),
@@ -671,9 +679,9 @@ abstract class UnCurry extends InfoTransform
applyUnary()
case Select(_, _) | TypeApply(_, _) =>
applyUnary()
- case Return(expr) if (tree.symbol != currentOwner.enclMethod || currentOwner.isLazy) =>
- debuglog("non local return in "+tree.symbol+" from "+currentOwner.enclMethod)
- atPos(tree.pos)(nonLocalReturnThrow(expr, tree.symbol))
+ case ret @ Return(expr) if (isNonLocalReturn(ret)) =>
+ debuglog("non local return in "+ret.symbol+" from "+currentOwner.enclMethod)
+ atPos(ret.pos)(nonLocalReturnThrow(expr, ret.symbol))
case TypeTree() =>
tree
case _ =>
@@ -777,7 +785,7 @@ abstract class UnCurry extends InfoTransform
// add the method to `newMembers`
newMembers += forwtree
}
-
+
flatdd
}
}
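The DefDef case above (vparamss = List(vparamss0.flatten)) is the phase's namesake: every method's parameter lists are collapsed into a single list. Illustration:

    def adjust(base: Int)(delta: Int): Int = base + delta
    // after uncurry, conceptually:
    //   def adjust(base: Int, delta: Int): Int = base + delta
    // Function literals, including those produced by eta-expansion, were
    // already turned into anonymous FunctionN subclasses by the
    // anonClass/applyMethod code earlier in this file.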
diff --git a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
index ea5223e32f..afe0b42167 100644
--- a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
@@ -31,14 +31,14 @@ trait ContextErrors {
case class NormalTypeError(underlyingTree: Tree, errMsg: String, kind: ErrorKind = ErrorKinds.Normal)
extends AbsTypeError {
-
+
def errPos:Position = underlyingTree.pos
override def toString() = "[Type error at:" + underlyingTree.pos + "] " + errMsg
}
case class SymbolTypeError(underlyingSym: Symbol, errMsg: String, kind: ErrorKind = ErrorKinds.Normal)
extends AbsTypeError {
-
+
def errPos = underlyingSym.pos
}
@@ -76,7 +76,7 @@ trait ContextErrors {
}
def issueTypeError(err: AbsTypeError)(implicit context: Context) { context.issue(err) }
-
+
def typeErrorMsg(found: Type, req: Type, possiblyMissingArgs: Boolean) = {
def missingArgsMsg = if (possiblyMissingArgs) "\n possible cause: missing arguments for method or constructor" else ""
"type mismatch" + foundReqMsg(found, req) + missingArgsMsg
@@ -143,12 +143,12 @@ trait ContextErrors {
found
}
assert(!found.isErroneous && !req.isErroneous, (found, req))
-
+
issueNormalTypeError(tree, withAddendum(tree.pos)(typeErrorMsg(found, req, infer.isPossiblyMissingArgs(found, req))) )
if (settings.explaintypes.value)
explainTypes(found, req)
}
-
+
def WithFilterError(tree: Tree, ex: AbsTypeError) = {
issueTypeError(ex)
setError(tree)
@@ -177,13 +177,13 @@ trait ContextErrors {
val calcSimilar = (
name.length > 2 && (
startingIdentCx.reportErrors
- || startingIdentCx.enclClassOrMethod.reportErrors
+ || startingIdentCx.enclClassOrMethod.reportErrors
)
)
- // avoid calculating if we're in "silent" mode.
- // name length check to limit unhelpful suggestions for e.g. "x" and "b1"
+ // avoid calculating if we're in "silent" mode.
+ // name length check to limit unhelpful suggestions for e.g. "x" and "b1"
val similar = {
- if (!calcSimilar) ""
+ if (!calcSimilar) ""
else {
val allowed = (
startingIdentCx.enclosingContextChain
@@ -624,11 +624,21 @@ trait ContextErrors {
setError(tree)
}
- // checkNoDoubleDefs...
- def DefDefinedTwiceError(sym0: Symbol, sym1: Symbol) =
- issueSymbolTypeError(sym0, sym1+" is defined twice"+
- {if(!settings.debug.value) "" else " in "+context0.unit}+
- {if (sym0.isMacro && sym1.isMacro) " \n(note that macros cannot be overloaded)" else ""})
+ // checkNoDoubleDefs...
+ // @PP: I hacked the filename in (context0.unit) to work around SI-4893. It would be
+ // much better if every symbol could offer some idea of where it came from, else
+ // the obviously untrue claim that something has been defined twice can only frustrate.
+ // There's no direct test because partest doesn't work, but to reproduce, separately
+ // compile the next two lines:
+ // package object foo { val x: Class[_] = null }
+ // package foo
+ def DefDefinedTwiceError(sym0: Symbol, sym1: Symbol) = {
+ val isBug = sym0.isAbstractType && sym1.isAbstractType && (sym0.name startsWith "_$")
+ issueSymbolTypeError(sym0, sym1+" is defined twice in " + context0.unit
+ + ( if (sym0.isMacro && sym1.isMacro) "\n(note that macros cannot be overloaded)" else "" )
+ + ( if (isBug) "\n(this error is likely due to a bug in the scala compiler involving wildcards in package objects)" else "" )
+ )
+ }
// cyclic errors
def CyclicAliasingOrSubtypingError(errPos: Position, sym0: Symbol) =
@@ -662,7 +672,7 @@ trait ContextErrors {
type ErrorType = Value
val WrongNumber, NoParams, ArgsDoNotConform = Value
}
-
+
private def ambiguousErrorMsgPos(pos: Position, pre: Type, sym1: Symbol, sym2: Symbol, rest: String) =
if (sym1.hasDefaultFlag && sym2.hasDefaultFlag && sym1.enclClass == sym2.enclClass) {
val methodName = nme.defaultGetterToMethod(sym1.name)
@@ -708,11 +718,11 @@ trait ContextErrors {
"constructor cannot be instantiated to expected type" + foundReqMsg(restpe, pt))
setError(tree)
}
-
+
def NoBestMethodAlternativeError(tree: Tree, argtpes: List[Type], pt: Type) = {
issueNormalTypeError(tree,
applyErrorMsg(tree, " cannot be applied to ", argtpes, pt))
- // since inferMethodAlternative modifies the state of the tree
+ // since inferMethodAlternative modifies the state of the tree
// we have to set the type of tree to ErrorType only in the very last
// fallback action that is done in the inference (tracking it manually is error prone).
// This avoids entering infinite loop in doTypeApply.
@@ -832,14 +842,14 @@ trait ContextErrors {
object NamerErrorGen {
implicit val context0 = context
-
+
object SymValidateErrors extends Enumeration {
val ImplicitConstr, ImplicitNotTerm, ImplicitTopObject,
OverrideClass, SealedNonClass, AbstractNonClass,
OverrideConstr, AbstractOverride, LazyAndEarlyInit,
ByNameParameter, AbstractVar = Value
}
-
+
object DuplicatesErrorKinds extends Enumeration {
val RenamedTwice, AppearsTwice = Value
}
@@ -847,7 +857,7 @@ trait ContextErrors {
import SymValidateErrors._
import DuplicatesErrorKinds._
import symtab.Flags
-
+
def TypeSigError(tree: Tree, ex: TypeError) = {
ex match {
case CyclicReference(sym, info: TypeCompleter) =>
@@ -856,7 +866,7 @@ trait ContextErrors {
context0.issue(TypeErrorWithUnderlyingTree(tree, ex))
}
}
-
+
def GetterDefinedTwiceError(getter: Symbol) =
issueSymbolTypeError(getter, getter+" is defined twice")
@@ -899,37 +909,37 @@ trait ContextErrors {
val msg = errKind match {
case ImplicitConstr =>
"`implicit' modifier not allowed for constructors"
-
+
case ImplicitNotTerm =>
"`implicit' modifier can be used only for values, variables and methods"
-
+
case ImplicitTopObject =>
"`implicit' modifier cannot be used for top-level objects"
-
+
case OverrideClass =>
"`override' modifier not allowed for classes"
-
+
case SealedNonClass =>
"`sealed' modifier can be used only for classes"
-
+
case AbstractNonClass =>
"`abstract' modifier can be used only for classes; it should be omitted for abstract members"
-
+
case OverrideConstr =>
"`override' modifier not allowed for constructors"
-
+
case AbstractOverride =>
"`abstract override' modifier only allowed for members of traits"
-
+
case LazyAndEarlyInit =>
"`lazy' definitions may not be initialized early"
-
+
case ByNameParameter =>
"pass-by-name arguments not allowed for case class parameters"
-
+
case AbstractVar =>
"only classes can have declared but undefined members" + abstractVarMessage(sym)
-
+
}
issueSymbolTypeError(sym, msg)
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
index 8586ebf0d4..90e07023bb 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
@@ -21,7 +21,7 @@ trait Contexts { self: Analyzer =>
outer = this
enclClass = this
enclMethod = this
-
+
override def nextEnclosing(p: Context => Boolean): Context = this
override def enclosingContextChain: List[Context] = Nil
override def implicitss: List[List[ImplicitInfo]] = Nil
@@ -128,7 +128,7 @@ trait Contexts { self: Analyzer =>
var typingIndentLevel: Int = 0
def typingIndent = " " * typingIndentLevel
-
+
var buffer: Set[AbsTypeError] = _
def enclClassOrMethod: Context =
@@ -179,7 +179,7 @@ trait Contexts { self: Analyzer =>
buffer.clear()
current
}
-
+
def logError(err: AbsTypeError) = buffer += err
def withImplicitsDisabled[T](op: => T): T = {
@@ -240,7 +240,7 @@ trait Contexts { self: Analyzer =>
c.implicitsEnabled = true
c
}
-
+
def makeNewImport(sym: Symbol): Context =
makeNewImport(gen.mkWildcardImport(sym))
diff --git a/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala b/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
index 179bea0035..29831c8469 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
@@ -36,7 +36,7 @@ abstract class Duplicators extends Analyzer {
} else resetClassOwners
envSubstitution = new SubstSkolemsTypeMap(env.keysIterator.toList, env.valuesIterator.toList)
- log("retyped with env: " + env)
+ debuglog("retyped with env: " + env)
(new BodyDuplicator(context)).typed(tree)
}
@@ -82,14 +82,14 @@ abstract class Duplicators extends Analyzer {
val sym1 = context.scope.lookup(sym.name)
// assert(sym1 ne NoSymbol, tpe)
if ((sym1 ne NoSymbol) && (sym1 ne sym)) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
typeRef(NoPrefix, sym1, mapOverArgs(args, sym1.typeParams))
} else super.mapOver(tpe)
case TypeRef(pre, sym, args) =>
val newsym = updateSym(sym)
if (newsym ne sym) {
- log("fixing " + sym + " -> " + newsym)
+ debuglog("fixing " + sym + " -> " + newsym)
typeRef(mapOver(pre), newsym, mapOverArgs(args, newsym.typeParams))
} else
super.mapOver(tpe)
@@ -97,7 +97,7 @@ abstract class Duplicators extends Analyzer {
case SingleType(pre, sym) =>
val sym1 = updateSym(sym)
if (sym1 ne sym) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
singleType(mapOver(pre), sym1)
} else
super.mapOver(tpe)
@@ -105,7 +105,7 @@ abstract class Duplicators extends Analyzer {
case ThisType(sym) =>
val sym1 = updateSym(sym)
if (sym1 ne sym) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
ThisType(sym1)
} else
super.mapOver(tpe)
@@ -136,26 +136,26 @@ abstract class Duplicators extends Analyzer {
private def invalidate(tree: Tree) {
debuglog("attempting to invalidate " + tree.symbol + ", owner - " + (if (tree.symbol ne null) tree.symbol.owner else "<NULL>"))
if (tree.isDef && tree.symbol != NoSymbol) {
- log("invalid " + tree.symbol)
+ debuglog("invalid " + tree.symbol)
invalidSyms(tree.symbol) = tree
tree match {
case ldef @ LabelDef(name, params, rhs) =>
- log("LabelDef " + name + " sym.info: " + ldef.symbol.info)
+ debuglog("LabelDef " + name + " sym.info: " + ldef.symbol.info)
invalidSyms(ldef.symbol) = ldef
// breakIf(true, this, ldef, context)
val newsym = ldef.symbol.cloneSymbol(context.owner)
newsym.setInfo(fixType(ldef.symbol.info))
ldef.symbol = newsym
- log("newsym: " + newsym + " info: " + newsym.info)
+ debuglog("newsym: " + newsym + " info: " + newsym.info)
case vdef @ ValDef(mods, name, _, rhs) if mods.hasFlag(Flags.LAZY) =>
- log("ValDef " + name + " sym.info: " + vdef.symbol.info)
+ debuglog("ValDef " + name + " sym.info: " + vdef.symbol.info)
invalidSyms(vdef.symbol) = vdef
val newsym = vdef.symbol.cloneSymbol(context.owner)
newsym.setInfo(fixType(vdef.symbol.info))
vdef.symbol = newsym
- log("newsym: " + newsym + " info: " + newsym.info)
+ debuglog("newsym: " + newsym + " info: " + newsym.info)
case DefDef(_, name, tparams, vparamss, _, rhs) =>
// invalidate parameters
@@ -182,7 +182,7 @@ abstract class Duplicators extends Analyzer {
}
ddef.symbol = NoSymbol
enterSym(context, ddef)
- log("remapping this of " + oldClassOwner + " to " + newClassOwner)
+ debuglog("remapping this of " + oldClassOwner + " to " + newClassOwner)
typed(ddef)
}
@@ -228,7 +228,7 @@ abstract class Duplicators extends Analyzer {
ttree
case Block(stats, res) =>
- log("invalidating block")
+ debuglog("invalidating block")
invalidate(stats)
invalidate(res)
tree.tpe = null
@@ -256,7 +256,7 @@ abstract class Duplicators extends Analyzer {
case ldef @ LabelDef(name, params, rhs) =>
// log("label def: " + ldef)
ldef.tpe = null
- val params1 = params map { p => Ident(updateSym(p.symbol)) }
+ val params1 = params map (p => Ident(updateSym(p.symbol)))
super.typed(treeCopy.LabelDef(tree, name, params1, rhs), mode, pt)
case Bind(name, _) =>
@@ -266,13 +266,13 @@ abstract class Duplicators extends Analyzer {
super.typed(tree, mode, pt)
case Ident(_) if tree.symbol.isLabel =>
- log("Ident to labeldef " + tree + " switched to ")
+ debuglog("Ident to labeldef " + tree + " switched to ")
tree.symbol = updateSym(tree.symbol)
tree.tpe = null
super.typed(tree, mode, pt)
case Ident(_) if (origtreesym ne null) && origtreesym.isLazy =>
- log("Ident to a lazy val " + tree + ", " + tree.symbol + " updated to " + origtreesym)
+ debuglog("Ident to a lazy val " + tree + ", " + tree.symbol + " updated to " + origtreesym)
tree.symbol = updateSym(origtreesym)
tree.tpe = null
super.typed(tree, mode, pt)
@@ -336,7 +336,7 @@ abstract class Duplicators extends Analyzer {
tree
case _ =>
- log("default: " + tree)
+ debuglog("Duplicators default case: " + tree.summaryString)
if (tree.hasSymbol && tree.symbol != NoSymbol && (tree.symbol.owner == definitions.AnyClass)) {
tree.symbol = NoSymbol // maybe we can find a more specific member in a subclass of Any (see AnyVal members, like ==)
}
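Background for these Duplicators hunks: specialization clones method bodies into specialized variants, then BodyDuplicator invalidates the stale symbols and retypes the tree so locals, labels and lazy vals get fresh symbols with substituted types. A conceptual illustration (specialized name approximate):

    class Box[@specialized(Int) T](val x: T) {
      def get: T = { val tmp = x; tmp }
    }
    // the Int variant reuses the same body tree, retyped with T -> Int:
    //   def get$mcI$sp: Int = { val tmp = x; tmp }
    // where tmp is a fresh symbol of type Int, not the original tmp: T.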
diff --git a/src/compiler/scala/tools/nsc/typechecker/Implicits.scala b/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
index 3d2f86d54d..0ddacf7d36 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
@@ -215,7 +215,7 @@ trait Implicits {
object HasMethodMatching {
val dummyMethod = NoSymbol.newTermSymbol(newTermName("typer$dummy"))
def templateArgType(argtpe: Type) = new BoundedWildcardType(TypeBounds.lower(argtpe))
-
+
def apply(name: Name, argtpes: List[Type], restpe: Type): Type = {
val mtpe = MethodType(dummyMethod.newSyntheticValueParams(argtpes map templateArgType), restpe)
memberWildcardType(name, mtpe)
@@ -571,7 +571,7 @@ trait Implicits {
else {
val tvars = undetParams map freshVar
def ptInstantiated = pt.instantiateTypeParams(undetParams, tvars)
-
+
printInference("[search] considering %s (pt contains %s) trying %s against pt=%s".format(
if (undetParams.isEmpty) "no tparams" else undetParams.map(_.name).mkString(", "),
typeVarsInType(ptInstantiated) filterNot (_.isGround) match { case Nil => "no tvars" ; case tvs => tvs.mkString(", ") },
@@ -594,7 +594,7 @@ trait Implicits {
// we must be conservative in leaving type params in undetparams
// prototype == WildcardType: want to remove all inferred Nothings
val AdjustedTypeArgs(okParams, okArgs) = adjustTypeArgs(undetParams, tvars, targs)
-
+
val subst: TreeTypeSubstituter =
if (okParams.isEmpty) EmptyTreeTypeSubstituter
else {
@@ -621,7 +621,7 @@ trait Implicits {
case Apply(TypeApply(fun, args), _) => typedTypeApply(itree2, EXPRmode, fun, args) // t2421c
case t => t
}
-
+
if (context.hasErrors)
fail("typing TypeApply reported errors for the implicit tree")
else {
@@ -780,13 +780,13 @@ trait Implicits {
val newPending = undoLog undo {
is filterNot (alt => alt == i || {
try improves(i, alt)
- catch {
- case e: CyclicReference =>
+ catch {
+ case e: CyclicReference =>
if (printInfers) {
println(i+" discarded because cyclic reference occurred")
e.printStackTrace()
}
- true
+ true
}
})
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/Infer.scala b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
index e1aa8b46eb..c09e535117 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Infer.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
@@ -210,9 +210,9 @@ trait Infer {
def getContext = context
def issue(err: AbsTypeError): Unit = context.issue(err)
-
- def isPossiblyMissingArgs(found: Type, req: Type) = (found.resultApprox ne found) && isWeaklyCompatible(found.resultApprox, req)
-
+
+ def isPossiblyMissingArgs(found: Type, req: Type) = (found.resultApprox ne found) && isWeaklyCompatible(found.resultApprox, req)
+
def explainTypes(tp1: Type, tp2: Type) =
withDisambiguation(List(), tp1, tp2)(global.explainTypes(tp1, tp2))
@@ -465,7 +465,7 @@ trait Infer {
*/
def adjustTypeArgs(tparams: List[Symbol], tvars: List[TypeVar], targs: List[Type], restpe: Type = WildcardType): AdjustedTypeArgs.Result = {
val buf = AdjustedTypeArgs.Result.newBuilder[Symbol, Option[Type]]
-
+
foreach3(tparams, tvars, targs) { (tparam, tvar, targ) =>
val retract = (
targ.typeSymbol == NothingClass // only retract Nothings
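On the adjustTypeArgs hunk just above: type arguments inferred as Nothing are retracted (the builder records None instead of Some(targ)) unless Nothing is genuinely warranted, keeping the type parameter undetermined for a later, better-informed inference step. A sketch of the intent:

    def make[A](): List[A] = Nil
    // typing  make()  with no useful expected type would infer A = Nothing;
    // retracting A leaves it undetermined so a subsequent context such as
    //   val xs: List[Int] = make()
    // can still instantiate A = Int instead of being stuck with Nothing.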
diff --git a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
index 915d7a98db..088a56cd7b 100644
--- a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
@@ -18,7 +18,7 @@ trait MethodSynthesis {
import global._
import definitions._
import CODE._
-
+
object synthesisUtil {
type M[T] = Manifest[T]
type CM[T] = ClassManifest[T]
@@ -39,7 +39,7 @@ trait MethodSynthesis {
typeRef(container.typeConstructor.prefix, container, args map (_.tpe))
}
-
+
def companionType[T](implicit m: M[T]) =
getRequiredModule(m.erasure.getName).tpe
@@ -71,7 +71,7 @@ trait MethodSynthesis {
class ClassMethodSynthesis(val clazz: Symbol, localTyper: Typer) {
private def isOverride(name: TermName) =
clazzMember(name).alternatives exists (sym => !sym.isDeferred && (sym.owner != clazz))
-
+
def newMethodFlags(name: TermName) = {
val overrideFlag = if (isOverride(name)) OVERRIDE else 0L
overrideFlag | SYNTHETIC
@@ -82,7 +82,7 @@ trait MethodSynthesis {
}
private def finishMethod(method: Symbol, f: Symbol => Tree): Tree =
- logResult("finishMethod")(localTyper typed ValOrDefDef(method, f(method)))
+ localTyper typed ValOrDefDef(method, f(method))
private def createInternal(name: Name, f: Symbol => Tree, info: Type): Tree = {
val m = clazz.newMethod(name.toTermName, clazz.pos.focus, newMethodFlags(name))
@@ -200,7 +200,7 @@ trait MethodSynthesis {
map (acc => atPos(vd.pos.focus)(acc derive annotations))
filterNot (_ eq EmptyTree)
)
- log(trees.mkString("Accessor trees:\n ", "\n ", "\n"))
+ // log(trees.mkString("Accessor trees:\n ", "\n ", "\n"))
if (vd.symbol.isLazy) List(stat)
else trees
case _ =>
@@ -282,7 +282,7 @@ trait MethodSynthesis {
}
}
private def logDerived(result: Tree): Tree = {
- log("[+derived] " + ojoin(mods.defaultFlagString, basisSym.accurateKindString, basisSym.getterName.decode)
+ debuglog("[+derived] " + ojoin(mods.defaultFlagString, basisSym.accurateKindString, basisSym.getterName.decode)
+ " (" + derivedSym + ")\n " + result)
result
@@ -344,7 +344,7 @@ trait MethodSynthesis {
if (mods.isDeferred)
tpt setOriginal tree.tpt
- // TODO - reconcile this with the DefDef creator in Trees (which
+ // TODO - reconcile this with the DefDef creator in Trees (which
// at this writing presented no way to pass a tree in for tpt.)
atPos(derivedSym.pos) {
DefDef(
@@ -376,7 +376,7 @@ trait MethodSynthesis {
override def keepClean = !mods.isParamAccessor
override def derivedTree = (
if (mods.isDeferred) EmptyTree
- else treeCopy.ValDef(tree, mods | flagsExtra, name, tree.tpt, tree.rhs)
+ else copyValDef(tree)(mods = mods | flagsExtra, name = this.name)
)
}
case class Param(tree: ValDef) extends DerivedFromValDef {
diff --git a/src/compiler/scala/tools/nsc/typechecker/Namers.scala b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
index 3347d2e767..82bcb93965 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Namers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
@@ -113,7 +113,7 @@ trait Namers extends MethodSynthesis {
private def contextFile = context.unit.source.file
private def typeErrorHandler[T](tree: Tree, alt: T): PartialFunction[Throwable, T] = {
case ex: TypeError =>
- // H@ need to ensure that we handle only cyclic references
+ // H@ need to ensure that we handle only cyclic references
TypeSigError(tree, ex)
alt
}
@@ -277,12 +277,16 @@ trait Namers extends MethodSynthesis {
def assignAndEnterFinishedSymbol(tree: MemberDef): Symbol = {
val sym = assignAndEnterSymbol(tree)
sym setInfo completerOf(tree)
- log("[+info] " + sym.fullLocationString)
+ // log("[+info] " + sym.fullLocationString)
sym
}
private def logAssignSymbol(tree: Tree, sym: Symbol): Symbol = {
- log("[+symbol] " + sym.debugLocationString)
+ sym.name.toTermName match {
+ case nme.IMPORT | nme.OUTER | nme.ANON_CLASS_NAME | nme.ANON_FUN_NAME | nme.CONSTRUCTOR => ()
+ case _ =>
+ log("[+symbol] " + sym.debugLocationString)
+ }
tree.symbol = sym
sym
}
@@ -296,7 +300,7 @@ trait Namers extends MethodSynthesis {
val pos = tree.pos
val isParameter = tree.mods.isParameter
val flags = tree.mods.flags & mask
-
+
tree match {
case TypeDef(_, _, _, _) if isParameter => owner.newTypeParameter(name.toTypeName, pos, flags)
case TypeDef(_, _, _, _) => owner.newTypeSymbol(name.toTypeName, pos, flags)
diff --git a/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala b/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
index 3a3c244d1c..e8d3b7a7de 100644
--- a/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
@@ -37,21 +37,17 @@ trait NamesDefaults { self: Analyzer =>
}
def isNamed(arg: Tree) = nameOf(arg).isDefined
- /** @param pos maps indicies from old to new */
+ /** @param pos maps indices from old to new */
def reorderArgs[T: ClassManifest](args: List[T], pos: Int => Int): List[T] = {
val res = new Array[T](args.length)
- // (hopefully) faster than zipWithIndex
- (0 /: args) { case (index, arg) => res(pos(index)) = arg; index + 1 }
+ foreachWithIndex(args)((arg, index) => res(pos(index)) = arg)
res.toList
}
- /** @param pos maps indicies from new to old (!) */
+ /** @param pos maps indices from new to old (!) */
def reorderArgsInv[T: ClassManifest](args: List[T], pos: Int => Int): List[T] = {
val argsArray = args.toArray
- val res = new mutable.ListBuffer[T]
- for (i <- 0 until argsArray.length)
- res += argsArray(pos(i))
- res.toList
+ argsArray.indices map (i => argsArray(pos(i))) toList
}
/** returns `true` if every element is equal to its index */
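The rewritten reorderArgs above swaps a fold for foreachWithIndex; behaviour is unchanged. A self-contained sketch of the same logic under standalone names (not the compiler's own):

    // pos maps an argument's old (call-site) index to its new
    // (definition-site) index; it must be a permutation of the indices.
    def reorder[T: scala.reflect.ClassManifest](args: List[T], pos: Int => Int): List[T] = {
      val res = new Array[T](args.length)
      args.zipWithIndex foreach { case (arg, i) => res(pos(i)) = arg }
      res.toList
    }
    // reorder(List("b", "a"), Map(0 -> 1, 1 -> 0)) == List("a", "b")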
@@ -432,11 +428,11 @@ trait NamesDefaults { self: Analyzer =>
}
} else NoSymbol
}
-
+
private def savingUndeterminedTParams[T](context: Context)(fn: List[Symbol] => T): T = {
val savedParams = context.extractUndetparams()
val savedReporting = context.ambiguousErrors
-
+
context.setAmbiguousErrors(false)
try fn(savedParams)
finally {
@@ -455,7 +451,7 @@ trait NamesDefaults { self: Analyzer =>
|| (ctx.owner.rawInfo.member(name) != NoSymbol)
)
)
-
+
/** A full type check is very expensive; let's make sure there's a name
* somewhere which could potentially be ambiguous before we go that route.
*/
@@ -507,7 +503,7 @@ trait NamesDefaults { self: Analyzer =>
/**
* Removes name assignments from args. Additionally, returns an array mapping
- * argument indicies from call-site-order to definition-site-order.
+ * argument indices from call-site-order to definition-site-order.
*
* Verifies that names are not specified twice, positional args don't appear
* after named ones.
@@ -523,7 +519,7 @@ trait NamesDefaults { self: Analyzer =>
def matchesName(param: Symbol) = !param.isSynthetic && (
(param.name == name) || (param.deprecatedParamName match {
case Some(`name`) =>
- context0.unit.deprecationWarning(arg.pos,
+ context0.unit.deprecationWarning(arg.pos,
"the parameter name "+ name +" has been deprecated. Use "+ param.name +" instead.")
true
case _ => false
diff --git a/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala b/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
index 25dcc302ce..b060fd7121 100644
--- a/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
@@ -253,13 +253,14 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
withSubPats(typeTestTreeMaker :+ extractor.treeMaker(patBinderOrCasted, pos), extractor.subBindersAndPatterns: _*)
}
- /** Decompose the pattern in `tree`, of shape C(p_1, ..., p_N), into a list of N symbols, and a list of its N sub-trees
- * The list of N symbols contains symbols for every bound name as well as the un-named sub-patterns (fresh symbols are generated here for these)
- *
- * @arg patBinder symbol used to refer to the result of the previous pattern's extractor (will later be replaced by the outer tree with the correct tree to refer to that patterns result)
- */
+
object MaybeBoundTyped {
- // the returned type is the one inferred by inferTypedPattern (`owntype`)
+ /** Decompose the pattern in `tree`, of shape C(p_1, ..., p_N), into a list of N symbols, and a list of its N sub-trees
+ * The list of N symbols contains symbols for every bound name as well as the un-named sub-patterns (fresh symbols are generated here for these).
+ * The returned type is the one inferred by inferTypedPattern (`owntype`)
+ *
+      * @arg patBinder symbol used to refer to the result of the previous pattern's extractor (will later be replaced by the outer tree with the correct tree to refer to that pattern's result)
+ */
def unapply(tree: Tree): Option[(Symbol, Type)] = tree match {
case Bound(subpatBinder, typed@Typed(expr, tpt)) => Some((subpatBinder, typed.tpe))
case Bind(_, typed@Typed(expr, tpt)) => Some((patBinder, typed.tpe))
@@ -1159,7 +1160,7 @@ class Foo(x: Other) { x._1 } // no error in this order
def _match(n: Name): SelectStart = matchStrategy DOT n
private lazy val oneSig: Type =
- typer.typed(_match(vpmName.one), EXPRmode | POLYmode | TAPPmode | FUNmode, WildcardType).tpe // TODO: error message
+ typer.typed(_match(vpmName.one), EXPRmode | POLYmode | TAPPmode | FUNmode, WildcardType).tpe // TODO: error message
}
trait PureCodegen extends CodegenCore with PureMatchMonadInterface {
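For the relocated MaybeBoundTyped doc above, a concrete instance of the shape it decomposes:

    def describe(x: Any): Int = x match {
      case s @ (_: String) => s.length  // decomposed into binder s and type String
      case _               => 0
    }

The extractor returns the binder plus the type inferred for the test, so the caller can emit a single type-test tree maker for the bound variable.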
diff --git a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
index 89617e6f2c..507ffd55d7 100644
--- a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
@@ -150,7 +150,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
}
// Override checking ------------------------------------------------------------
-
+
def isJavaVarargsAncestor(clazz: Symbol) = (
clazz.isClass
&& clazz.isJavaDefined
@@ -167,14 +167,14 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
log("Found java varargs ancestor in " + clazz.fullLocationString + ".")
val self = clazz.thisType
val bridges = new ListBuffer[Tree]
-
+
def varargBridge(member: Symbol, bridgetpe: Type): Tree = {
log("Generating varargs bridge for " + member.fullLocationString + " of type " + bridgetpe)
-
+
val bridge = member.cloneSymbolImpl(clazz, member.flags | VBRIDGE) setPos clazz.pos
bridge.setInfo(bridgetpe.cloneInfo(bridge))
clazz.info.decls enter bridge
-
+
val params = bridge.paramss.head
val elemtp = params.last.tpe.typeArgs.head
val idents = params map Ident
@@ -183,7 +183,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
localTyper typed DefDef(bridge, body)
}
-
+
// For all concrete non-private members that have a (Scala) repeated parameter:
// compute the corresponding method type `jtpe` with a Java repeated parameter
// if a method with type `jtpe` exists and that method is not a varargs bridge
@@ -203,7 +203,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
}
}
}
-
+
bridges.toList
}
else Nil
@@ -277,7 +277,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
*/
def checkOverride(member: Symbol, other: Symbol) {
debuglog("Checking validity of %s overriding %s".format(member.fullLocationString, other.fullLocationString))
-
+
def memberTp = self.memberType(member)
def otherTp = self.memberType(other)
def noErrorType = other.tpe != ErrorType && member.tpe != ErrorType
@@ -337,7 +337,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
def deferredCheck = member.isDeferred || !other.isDeferred
def subOther(s: Symbol) = s isSubClass other.owner
def subMember(s: Symbol) = s isSubClass member.owner
-
+
if (subOther(member.owner) && deferredCheck) {
//Console.println(infoString(member) + " shadows1 " + infoString(other) " in " + clazz);//DEBUG
return
@@ -430,12 +430,12 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
if( !(sameLength(member.typeParams, other.typeParams) && (memberTp.substSym(member.typeParams, other.typeParams) =:= otherTp)) ) // (1.6)
overrideTypeError();
- }
+ }
else if (other.isAbstractType) {
//if (!member.typeParams.isEmpty) // (1.7) @MAT
// overrideError("may not be parameterized");
val otherTp = self.memberInfo(other)
-
+
if (!(otherTp.bounds containsType memberTp)) { // (1.7.1)
overrideTypeError(); // todo: do an explaintypes with bounds here
explainTypes(_.bounds containsType _, otherTp, memberTp)
@@ -528,13 +528,13 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
def uncurryAndErase(tp: Type) = erasure.erasure(sym, uncurry.transformInfo(sym, tp))
val tp1 = uncurryAndErase(clazz.thisType.memberType(sym))
val tp2 = uncurryAndErase(clazz.thisType.memberType(other))
- atPhase(currentRun.erasurePhase.next)(tp1 matches tp2)
+ afterErasure(tp1 matches tp2)
})
def ignoreDeferred(member: Symbol) = (
(member.isAbstractType && !member.isFBounded) || (
member.isJavaDefined &&
- // the test requires atPhase(erasurePhase.next) so shouldn't be
+ // the test requires afterErasure so shouldn't be
// done if the compiler has no erasure phase available
(currentRun.erasurePhase == NoPhase || javaErasedOverridingSym(member) != NoSymbol)
)
@@ -1175,7 +1175,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
case vsym => ValDef(vsym)
}
}
- def createStaticModuleAccessor() = atPhase(phase.next) {
+ def createStaticModuleAccessor() = afterRefchecks {
val method = (
sym.owner.newMethod(sym.name.toTermName, sym.pos, (sym.flags | STABLE) & ~MODULE)
setInfoAndEnter NullaryMethodType(sym.moduleClass.tpe)
@@ -1186,7 +1186,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
vdef,
localTyper.typedPos(tree.pos) {
val vsym = vdef.symbol
- atPhase(phase.next) {
+ afterRefchecks {
val rhs = gen.newModule(sym, vsym.tpe)
val body = if (sym.owner.isTrait) rhs else gen.mkAssignAndReturn(vsym, rhs)
DefDef(sym, body.changeOwner(vsym -> sym))
@@ -1222,12 +1222,12 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
else gen.mkAssignAndReturn(vsym, rhs)
)
val lazyDef = atPos(tree.pos)(DefDef(lazySym, body.changeOwner(vsym -> lazySym)))
- log("Made lazy def: " + lazyDef)
+ debuglog("Created lazy accessor: " + lazyDef)
if (hasUnitType) List(typed(lazyDef))
else List(
typed(ValDef(vsym)),
- atPhase(phase.next)(typed(lazyDef))
+ afterRefchecks(typed(lazyDef))
)
}
@@ -1548,8 +1548,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
checkOverloadedRestrictions(currentOwner)
val bridges = addVarargBridges(currentOwner)
checkAllOverrides(currentOwner)
- if (bridges.nonEmpty) treeCopy.Template(tree, parents, self, body ::: bridges)
- else tree
+ if (bridges.nonEmpty) deriveTemplate(tree)(_ ::: bridges) else tree
case dc@TypeTreeWithDeferredRefCheck() => assert(false, "adapt should have turned dc: TypeTreeWithDeferredRefCheck into tpt: TypeTree, with tpt.original == dc"); dc
case tpt@TypeTree() =>
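addVarargBridges, whitespace-touched above, covers Scala methods that override Java varargs: the Scala side sees a Seq parameter while Java callers expect an array, so a forwarder flagged VBRIDGE is synthesized. Hedged illustration (bridge shape approximate):

    // given a Java ancestor:
    //   class J { void greet(String... names) { } }
    // and a Scala subclass:
    //   class S extends J { override def greet(names: String*) { } }
    // refchecks adds to S, roughly:
    //   def greet(names: Array[String]) { greet(names: _*) }  // flagged VBRIDGE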
diff --git a/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala b/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
index 0ab09b4fec..5318268bf2 100644
--- a/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
@@ -177,7 +177,7 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
case ModuleDef(_, _, _) =>
checkCompanionNameClashes(sym)
super.transform(tree)
- case Template(parents, self, body) =>
+ case Template(_, _, body) =>
val ownAccDefs = new ListBuffer[Tree]
accDefs(currentOwner) = ownAccDefs
@@ -189,7 +189,7 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
val body1 = atOwner(currentOwner)(transformTrees(body))
accDefs -= currentOwner
ownAccDefs ++= body1
- treeCopy.Template(tree, parents, self, ownAccDefs.toList)
+ deriveTemplate(tree)(_ => ownAccDefs.toList)
case TypeApply(sel @ Select(This(_), name), args) =>
mayNeedProtectedAccessor(sel, args, false)
@@ -334,7 +334,7 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
}
val selection = Select(This(clazz), protAcc)
def mkApply(fn: Tree) = Apply(fn, qual :: Nil)
- val res = atPos(tree.pos) {
+ val res = atPos(tree.pos) {
targs.head match {
case EmptyTree => mkApply(selection)
case _ => mkApply(TypeApply(selection, targs))
@@ -376,18 +376,18 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
val clazz = hostForAccessorOf(field, currentOwner.enclClass)
assert(clazz != NoSymbol, field)
debuglog("Decided for host class: " + clazz)
-
+
val accName = nme.protSetterName(field.originalName)
val protectedAccessor = clazz.info decl accName orElse {
val protAcc = clazz.newMethod(accName, field.pos)
val paramTypes = List(clazz.typeOfThis, field.tpe)
val params = protAcc newSyntheticValueParams paramTypes
val accessorType = MethodType(params, UnitClass.tpe)
-
+
protAcc setInfoAndEnter accessorType
val obj :: value :: Nil = params
storeAccessorDefinition(clazz, DefDef(protAcc, Assign(Select(Ident(obj), field.name), Ident(value))))
-
+
protAcc
}
atPos(tree.pos)(Select(This(clazz), protectedAccessor))
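The deriveTemplate calls above replace four-argument treeCopy.Template invocations at sites that only rewrite the body. A sketch of the shape of such a helper (signature assumed, not quoted from the commit):

    // Sketch: copy a Template while transforming only its body, so callers
    // cannot accidentally reorder or drop the parents/self constituents.
    def deriveTemplate(templ: Tree)(applyToBody: List[Tree] => List[Tree]): Template =
      templ match {
        case Template(parents, self, body) =>
          treeCopy.Template(templ, parents, self, applyToBody(body))
        case t =>
          sys.error("Not a Template: " + t)
      }
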
diff --git a/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala b/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
index cf90577959..7559b78db3 100644
--- a/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
@@ -115,7 +115,7 @@ trait SyntheticMethods extends ast.TreeDSL {
* def canEqual(that: Any) = that.isInstanceOf[This]
*/
def canEqualMethod: Tree = (
- createMethod(nme.canEqual_, List(AnyClass.tpe), BooleanClass.tpe)(m =>
+ createMethod(nme.canEqual_, List(AnyClass.tpe), BooleanClass.tpe)(m =>
Ident(m.firstParam) IS_OBJ classExistentialType(clazz))
)
@@ -248,11 +248,11 @@ trait SyntheticMethods extends ast.TreeDSL {
}
if (phase.id > currentRun.typerPhase.id) templ
- else treeCopy.Template(templ, templ.parents, templ.self,
+ else deriveTemplate(templ)(body =>
if (clazz.isCase) caseTemplateBody()
else synthesize() match {
- case Nil => templ.body // avoiding unnecessary copy
- case ms => templ.body ++ ms
+ case Nil => body // avoiding unnecessary copy
+ case ms => body ++ ms
}
)
}
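For orientation, the canEqual documented above is one of a family of synthesized members; hand-written, they behave roughly like this (illustrative code, ours, not the synthesizer's output):

    // Roughly what gets synthesized for: case class Point(x: Int, y: Int)
    class Point(val x: Int, val y: Int) {
      def canEqual(that: Any): Boolean = that.isInstanceOf[Point]
      override def equals(that: Any): Boolean = that match {
        case p: Point => (p canEqual this) && p.x == x && p.y == y
        case _        => false
      }
    }
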
diff --git a/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala b/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
index 4f4087a953..1434002121 100644
--- a/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
@@ -415,7 +415,7 @@ trait TypeDiagnostics {
"\nIf applicable, you may wish to try moving some members into another object."
)
}
-
+
/** Report a type error.
*
* @param pos0 The position where to report the error
diff --git a/src/compiler/scala/tools/nsc/typechecker/Typers.scala b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
index 1c1f35aac2..556c680cda 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Typers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
@@ -156,7 +156,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
case ErrorType =>
fun
}
-
+
def inferView(tree: Tree, from: Type, to: Type, reportAmbiguous: Boolean): Tree =
inferView(tree, from, to, reportAmbiguous, true)
@@ -276,7 +276,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
tp match {
case TypeRef(pre, sym, args) =>
- checkNotLocked(sym) &&
+ checkNotLocked(sym) &&
((!sym.isNonClassType) || checkNonCyclic(pos, appliedType(pre.memberInfo(sym), args), sym))
// @M! info for a type ref to a type parameter now returns a polytype
// @M was: checkNonCyclic(pos, pre.memberInfo(sym).subst(sym.typeParams, args), sym)
@@ -614,7 +614,12 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (tree.isErrorTyped) tree
else if ((mode & (PATTERNmode | FUNmode)) == PATTERNmode && tree.isTerm) { // (1)
- if (sym.isValue) checkStable(tree)
+ if (sym.isValue) {
+ val tree1 = checkStable(tree)
+ // A module reference in a pattern has type Foo.type, not "object Foo"
+ if (sym.isModule && !sym.isMethod) tree1 setType singleType(pre, sym)
+ else tree1
+ }
else fail()
} else if ((mode & (EXPRmode | QUALmode)) == EXPRmode && !sym.isValue && !phase.erasedTypes) { // (2)
fail()
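The new branch above types a module reference in a pattern as the singleton type Foo.type instead of the module class's type. The kind of code affected (example ours):

    object Foo
    def classify(x: Any): String = x match {
      case Foo => "matched the Foo module"  // this reference now has type Foo.type
      case _   => "anything else"
    }
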
@@ -1092,7 +1097,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
// Note: implicit arguments are still inferred (this kind of "chaining" is allowed)
)
}
-
+
def adaptToMember(qual: Tree, searchTemplate: Type): Tree =
adaptToMember(qual, searchTemplate, true, true)
def adaptToMember(qual: Tree, searchTemplate: Type, reportAmbiguous: Boolean): Tree =
@@ -1107,12 +1112,12 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
inferView(qual, qual.tpe, searchTemplate, reportAmbiguous, saveErrors) match {
case EmptyTree => qual
- case coercion =>
+ case coercion =>
if (settings.logImplicitConv.value)
unit.echo(qual.pos,
"applied implicit conversion from %s to %s = %s".format(
qual.tpe, searchTemplate, coercion.symbol.defString))
-
+
typedQualifier(atPos(qual.pos)(new ApplyImplicitView(coercion, List(qual))))
}
}
@@ -1503,12 +1508,12 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (templ.symbol == NoSymbol)
templ setSymbol clazz.newLocalDummy(templ.pos)
val self1 = templ.self match {
- case vd @ ValDef(mods, name, tpt, EmptyTree) =>
+ case vd @ ValDef(_, _, tpt, EmptyTree) =>
val tpt1 = checkNoEscaping.privates(
clazz.thisSym,
treeCopy.TypeTree(tpt).setOriginal(tpt) setType vd.symbol.tpe
)
- treeCopy.ValDef(vd, mods, name, tpt1, EmptyTree) setType NoType
+ copyValDef(vd)(tpt = tpt1, rhs = EmptyTree) setType NoType
}
// was:
// val tpt1 = checkNoEscaping.privates(clazz.thisSym, typedType(tpt))
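The copyValDef(vd)(tpt = tpt1, rhs = EmptyTree) call relies on named, defaulted parameters so the call site only spells out what changes. One way such a copier can be written (a sketch; the real signature may differ):

    // Sketch: each constituent defaults to the original tree's value, so
    // callers override only the pieces they mean to change.
    def copyValDef(tree: ValDef)(
        mods: Modifiers = tree.mods,
        name: Name      = tree.name,
        tpt: Tree       = tree.tpt,
        rhs: Tree       = tree.rhs): ValDef =
      treeCopy.ValDef(tree, mods, name, tpt, rhs)
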
@@ -1861,8 +1866,9 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
val restpe = ldef.symbol.tpe.resultType
val rhs1 = typed(ldef.rhs, restpe)
ldef.params foreach (param => param.tpe = param.symbol.tpe)
- treeCopy.LabelDef(ldef, ldef.name, ldef.params, rhs1) setType restpe
- } else {
+ deriveLabelDef(ldef)(_ => rhs1) setType restpe
+ }
+ else {
val initpe = ldef.symbol.tpe.resultType
val rhs1 = typed(ldef.rhs)
val restpe = rhs1.tpe
@@ -1875,7 +1881,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
context.owner.newLabel(ldef.name, ldef.pos) setInfo MethodType(List(), restpe))
val rhs2 = typed(resetAllAttrs(ldef.rhs), restpe)
ldef.params foreach (param => param.tpe = param.symbol.tpe)
- treeCopy.LabelDef(ldef, ldef.name, ldef.params, rhs2) setSymbol sym2 setType restpe
+ deriveLabelDef(ldef)(_ => rhs2) setSymbol sym2 setType restpe
}
}
}
@@ -2174,9 +2180,9 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (!e.sym.isErroneous && !e1.sym.isErroneous && !e.sym.hasDefaultFlag &&
!e.sym.hasAnnotation(BridgeClass) && !e1.sym.hasAnnotation(BridgeClass)) {
log("Double definition detected:\n " +
- ((e.sym.getClass, e.sym.info, e.sym.ownerChain)) + "\n " +
+ ((e.sym.getClass, e.sym.info, e.sym.ownerChain)) + "\n " +
((e1.sym.getClass, e1.sym.info, e1.sym.ownerChain)))
-
+
DefDefinedTwiceError(e.sym, e1.sym)
scope.unlink(e1) // need to unlink to avoid later problems with lub; see #2779
}
@@ -2861,7 +2867,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
def isRawParameter(sym: Symbol) = // is it a type parameter leaked by a raw type?
sym.isTypeParameter && sym.owner.isJavaDefined
-
+
/** If we map a set of hidden symbols to their existential bounds, we
* have a problem: the bounds may themselves contain references to the
* hidden symbols. So this recursively calls existentialBound until
@@ -2888,7 +2894,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
})
}).toMap
}
-
+
/** Given a set `rawSyms` of term- and type-symbols, and a type
* `tp`, produce a set of fresh type parameters and a type so that
* it can be abstracted to an existential type. Every type symbol
@@ -2932,10 +2938,10 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
def packSymbols(hidden: List[Symbol], tp: Type): Type =
if (hidden.isEmpty) tp
else existentialTransform(hidden, tp)(existentialAbstraction)
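To see the problem packSymbols addresses: a type inferred for an expression can mention symbols that do not exist outside it, and those must be quantified away existentially (illustration ours):

    // The inferred result type mentions the local class, so it is packed
    // into (roughly) "Local forSome { type Local <: AnyRef }".
    def roundTrip = { class Local; new Local }
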
-
+
def isReferencedFrom(ctx: Context, sym: Symbol): Boolean =
- ctx.owner.isTerm &&
- (ctx.scope.exists { dcl => dcl.isInitialized && (dcl.info contains sym) }) ||
+ ctx.owner.isTerm &&
+ (ctx.scope.exists { dcl => dcl.isInitialized && (dcl.info contains sym) }) ||
{
var ctx1 = ctx.outer
while ((ctx1 != NoContext) && (ctx1.scope eq ctx.scope)) ctx1 = ctx1.outer
@@ -3644,8 +3650,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (ps.isEmpty)
ps = site.parents filter (_.typeSymbol.toInterface.name == mix)
if (ps.isEmpty) {
- if (settings.debug.value)
- Console.println(site.parents map (_.typeSymbol.name))//debug
+ debuglog("Fatal: couldn't find site " + site + " in " + site.parents.map(_.typeSymbol.name))
if (phase.erasedTypes && context.enclClass.owner.isImplClass) {
// println(qual1)
// println(clazz)
@@ -3888,7 +3893,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
reallyExists(sym) &&
((mode & PATTERNmode | FUNmode) != (PATTERNmode | FUNmode) || !sym.isSourceMethod || sym.hasFlag(ACCESSOR))
}
-
+
if (defSym == NoSymbol) {
var defEntry: ScopeEntry = null // the scope entry of defSym, if defined in a local scope
@@ -4112,7 +4117,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
def adaptCase(cdef: CaseDef, tpe: Type): CaseDef =
- treeCopy.CaseDef(cdef, cdef.pat, cdef.guard, adapt(cdef.body, mode, tpe))
+ deriveCaseDef(cdef)(adapt(_, mode, tpe))
// begin typed1
val sym: Symbol = tree.symbol
@@ -4384,7 +4389,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
case ReferenceToBoxed(idt @ Ident(_)) =>
val id1 = typed1(idt, mode, pt) match { case id: Ident => id }
- treeCopy.ReferenceToBoxed(tree, id1) setType AnyRefClass.tpe
+ treeCopy.ReferenceToBoxed(tree, id1) setType AnyRefClass.tpe
case Literal(value) =>
tree setType (
@@ -4630,7 +4635,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
case None => typed(tree, mode, pt)
}
- def findManifest(tp: Type, full: Boolean) = atPhase(currentRun.typerPhase) {
+ def findManifest(tp: Type, full: Boolean) = beforeTyper {
inferImplicit(
EmptyTree,
appliedType((if (full) FullManifestClass else PartialManifestClass).typeConstructor, List(tp)),
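findManifest runs an implicit search for a full or partial Manifest, viewing types as of the typer phase. At the user level, this is the search behind code like the following (illustrative):

    // User-level view of the implicit search findManifest performs:
    val m = implicitly[Manifest[List[Int]]]
    println(m)               // prints (roughly): scala.collection.immutable.List[Int]
    println(m.typeArguments) // prints (roughly): List(Int)
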
diff --git a/src/compiler/scala/tools/nsc/typechecker/Unapplies.scala b/src/compiler/scala/tools/nsc/typechecker/Unapplies.scala
index 19b8632ed7..312958feca 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Unapplies.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Unapplies.scala
@@ -22,7 +22,7 @@ trait Unapplies extends ast.TreeDSL
import treeInfo.{ isRepeatedParamType, isByNameParamType }
private val unapplyParamName = nme.x_0
-
+
/** returns type list for return type of the extraction */
def unapplyTypeList(ufn: Symbol, ufntpe: Type) = {
assert(ufn.isMethod, ufn)
diff --git a/src/compiler/scala/tools/nsc/util/DocStrings.scala b/src/compiler/scala/tools/nsc/util/DocStrings.scala
index 24c9926ad8..fbe92e5d84 100755
--- a/src/compiler/scala/tools/nsc/util/DocStrings.scala
+++ b/src/compiler/scala/tools/nsc/util/DocStrings.scala
@@ -71,10 +71,10 @@ object DocStrings {
* Every section starts with a `@` and extends to the next `@`, or
* to the end of the comment string, but excluding the final two
* characters which terminate the comment.
- *
- * Also take usecases into account - they need to expand until the next
- * usecase or the end of the string, as they might include other sections
- * of their own
+ *
+ * Also take usecases into account - they need to expand until the next
+ * usecase or the end of the string, as they might include other sections
+ * of their own
*/
def tagIndex(str: String, p: Int => Boolean = (idx => true)): List[(Int, Int)] =
findAll(str, 0) (idx => str(idx) == '@' && p(idx)) match {
@@ -84,10 +84,10 @@ object DocStrings {
idxs2 zip (idxs2.tail ::: List(str.length - 2))
}
}
-
+
/**
- * Merge sections following an usecase into the usecase comment, so they
- * can override the parent symbol's sections
+ * Merge sections following a usecase into the usecase comment, so they
+ * can override the parent symbol's sections
*/
def mergeUsecaseSections(str: String, idxs: List[Int]): List[Int] = {
idxs.find(str.substring(_).startsWith("@usecase")) match {
@@ -99,7 +99,7 @@ object DocStrings {
idxs
}
}
-
+
/** Does interval `iv` start with given `tag`?
*/
def startsWithTag(str: String, section: (Int, Int), tag: String): Boolean =
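To make the usecase merging concrete, an illustrative call (example ours, using the tagIndex signature shown above; exact interval boundaries are hedged):

    import scala.tools.nsc.util.DocStrings

    val comment =
      "/** Sums the elements.\n" +
      " *  @param xs the list\n" +
      " *  @usecase def sum: Int\n" +
      " *  @return the sum\n" +
      " */"

    // One (start, end) interval per `@section`; with the merging above, the
    // @return section is folded into the preceding @usecase interval.
    DocStrings.tagIndex(comment) foreach {
      case (start, end) => println(comment.substring(start, end))
    }
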
diff --git a/src/compiler/scala/tools/nsc/util/ProxyReport.scala b/src/compiler/scala/tools/nsc/util/ProxyReport.scala
index 2f4f029308..4fc86c3a32 100644
--- a/src/compiler/scala/tools/nsc/util/ProxyReport.scala
+++ b/src/compiler/scala/tools/nsc/util/ProxyReport.scala
@@ -141,6 +141,6 @@ object ProxyReportRunner {
s.processArguments(args.toList.tail, true)
val g = new ProxyGlobal(s)
val run = new g.Run()
- g.atPhase(run.typerPhase.next)(g.proxyReport.generate(dir))
+ g.afterTyper(g.proxyReport.generate(dir))
}
}
diff --git a/src/compiler/scala/tools/nsc/util/Statistics.scala b/src/compiler/scala/tools/nsc/util/Statistics.scala
index f7c27dceb5..d1cdd30dd8 100644
--- a/src/compiler/scala/tools/nsc/util/Statistics.scala
+++ b/src/compiler/scala/tools/nsc/util/Statistics.scala
@@ -20,7 +20,7 @@ class Statistics extends scala.reflect.internal.util.Statistics {
val typedSelectCount = new Counter
val typerNanos = new Timer
val classReadNanos = new Timer
-
+
val failedApplyNanos = new Timer
val failedOpEqNanos = new Timer
val failedSilentNanos = new Timer
diff --git a/src/compiler/scala/tools/nsc/util/WeakHashSet.scala b/src/compiler/scala/tools/nsc/util/WeakHashSet.scala
new file mode 100644
index 0000000000..6a10422b00
--- /dev/null
+++ b/src/compiler/scala/tools/nsc/util/WeakHashSet.scala
@@ -0,0 +1,60 @@
+package scala.tools.nsc.util
+
+import scala.collection.mutable
+import scala.collection.mutable.ArrayBuffer
+import scala.collection.mutable.Builder
+import scala.collection.mutable.SetBuilder
+import scala.runtime.AbstractFunction1
+
+/** A bare-bones implementation of a mutable `Set` that uses weak references
+ * to hold the elements.
+ *
+ * This implementation offers only add/remove/test operations,
+ * therefore it does not fulfill the contract of Scala collection sets.
+ */
+class WeakHashSet[T <: AnyRef] extends AbstractFunction1[T, Boolean] {
+ private val underlying = mutable.HashSet[WeakReferenceWithEquals[T]]()
+
+ /** Add the given element to this set. */
+ def +=(elem: T): this.type = {
+ underlying += new WeakReferenceWithEquals(elem)
+ this
+ }
+
+ /** Remove the given element from this set. */
+ def -=(elem: T): this.type = {
+ underlying -= new WeakReferenceWithEquals(elem)
+ this
+ }
+
+ /** Does the given element belong to this set? */
+ def contains(elem: T): Boolean =
+ underlying.contains(new WeakReferenceWithEquals(elem))
+
+ /** Does the given element belong to this set? */
+ def apply(elem: T): Boolean = contains(elem)
+
+ /** Return the number of elements in this set, including reclaimed elements. */
+ def size = underlying.size
+
+ /** Remove all elements in this set. */
+ def clear() = underlying.clear()
+}
+
+/** A WeakReference implementation that implements equals and hashCode by
+ * delegating to the referent.
+ */
+class WeakReferenceWithEquals[T <: AnyRef](ref: T) {
+ def get(): T = underlying.get()
+
+ override val hashCode = ref.hashCode
+
+ override def equals(other: Any): Boolean = other match {
+ case wf: WeakReferenceWithEquals[_] =>
+ underlying.get() == wf.get()
+ case _ =>
+ false
+ }
+
+ private val underlying = new java.lang.ref.WeakReference(ref)
+}
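A usage sketch of the new set (example ours): apply is membership, per the AbstractFunction1 parent, and size may still count reclaimed elements, as documented above.

    import scala.tools.nsc.util.WeakHashSet

    val cache = new WeakHashSet[String]
    val key   = new String("owner")  // a fresh reference, not an interned literal

    cache += key
    assert(cache contains key)  // equality delegates to the referent
    assert(cache(key))          // apply is the same test as contains
    cache -= key
    assert(!cache(key))
    assert(cache.size == 0)
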
diff --git a/src/compiler/scala/tools/util/EditDistance.scala b/src/compiler/scala/tools/util/EditDistance.scala
index 5f152ecabb..0af34020a8 100644
--- a/src/compiler/scala/tools/util/EditDistance.scala
+++ b/src/compiler/scala/tools/util/EditDistance.scala
@@ -8,7 +8,7 @@ package util
object EditDistance {
import java.lang.Character.{ toLowerCase => lower }
-
+
def similarString(name: String, allowed: TraversableOnce[String]): String = {
val suggested = suggestions(name, allowed.toSeq, maxDistance = 1, maxSuggestions = 2)
if (suggested.isEmpty) ""
diff --git a/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala b/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
index 9930f28229..0382304bad 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
@@ -53,23 +53,23 @@ abstract class CPSAnnotationChecker extends CPSUtils {
if ((annots1 corresponds annots2)(_.atp <:< _.atp))
return true
- // Need to handle uninstantiated type vars specially:
-
+ // Need to handle uninstantiated type vars specially:
+
// g map (x => x) with expected type List[Int] @cps
// results in comparison ?That <:< List[Int] @cps
-
+
// Instantiating ?That to an annotated type would fail during
// transformation.
-
+
// Instead we force-compare tpe1 <:< tpe2.withoutAnnotations
// to trigger instantiation of the TypeVar to the base type
-
+
// This is a bit unorthodox (we're only supposed to look at
// annotations here) but seems to work.
-
+
if (!annots2.isEmpty && !tpe1.isGround)
return tpe1 <:< tpe2.withoutAnnotations
-
+
false
}
@@ -355,7 +355,7 @@ abstract class CPSAnnotationChecker extends CPSUtils {
}
case _ => Nil
}
-
+
val types = cpsParamAnnotation(t.tpe)
// TODO: check that it has been adapted and if so correctly
extra ++ (if (types.isEmpty) Nil else List(single(types)))
diff --git a/src/continuations/plugin/scala/tools/selectivecps/CPSUtils.scala b/src/continuations/plugin/scala/tools/selectivecps/CPSUtils.scala
index 8bbda5dd05..075009ce5e 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/CPSUtils.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/CPSUtils.scala
@@ -12,7 +12,7 @@ trait CPSUtils {
var cpsEnabled = true
val verbose: Boolean = System.getProperty("cpsVerbose", "false") == "true"
def vprintln(x: =>Any): Unit = if (verbose) println(x)
-
+
object cpsNames {
val catches = newTermName("$catches")
val ex = newTermName("$ex")
diff --git a/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala b/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
index cbb33e68c3..d98169f21a 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
@@ -47,20 +47,20 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
// ValDef case here.
case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
- log("transforming " + dd.symbol)
+ debuglog("transforming " + dd.symbol)
atOwner(dd.symbol) {
val rhs1 = transExpr(rhs, None, getExternalAnswerTypeAnn(tpt.tpe))
- log("result "+rhs1)
- log("result is of type "+rhs1.tpe)
+ debuglog("result "+rhs1)
+ debuglog("result is of type "+rhs1.tpe)
treeCopy.DefDef(dd, mods, name, transformTypeDefs(tparams), transformValDefss(vparamss),
transform(tpt), rhs1)
}
case ff @ Function(vparams, body) =>
- log("transforming anon function " + ff.symbol)
+ debuglog("transforming anon function " + ff.symbol)
atOwner(ff.symbol) {
@@ -88,14 +88,14 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
transExpr(body, None, ext)
}
- log("result "+body1)
- log("result is of type "+body1.tpe)
+ debuglog("result "+body1)
+ debuglog("result is of type "+body1.tpe)
treeCopy.Function(ff, transformValDefs(vparams), body1)
}
case vd @ ValDef(mods, name, tpt, rhs) => // object-level valdefs
- log("transforming valdef " + vd.symbol)
+ debuglog("transforming valdef " + vd.symbol)
atOwner(vd.symbol) {
@@ -298,8 +298,8 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
if (!expr.isEmpty && (expr.tpe.typeSymbol ne NothingClass)) {
// must convert!
- log("cps type conversion (has: " + cpsA + "/" + spc + "/" + expr.tpe + ")")
- log("cps type conversion (expected: " + cpsR.get + "): " + expr)
+ debuglog("cps type conversion (has: " + cpsA + "/" + spc + "/" + expr.tpe + ")")
+ debuglog("cps type conversion (expected: " + cpsR.get + "): " + expr)
if (!hasPlusMarker(expr.tpe))
unit.warning(tree.pos, "expression " + tree + " is cps-transformed unexpectedly")
@@ -322,7 +322,7 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
} else if (!cpsR.isDefined && bot.isDefined) {
// error!
- log("cps type error: " + expr)
+ debuglog("cps type error: " + expr)
//println("cps type error: " + expr + "/" + expr.tpe + "/" + getAnswerTypeAnn(expr.tpe))
//println(cpsR + "/" + spc + "/" + bot)
diff --git a/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala b/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
index a90dc36639..6453671eac 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
@@ -39,10 +39,10 @@ abstract class SelectiveCPSTransform extends PluginComponent with
val newtp = transformCPSType(tp)
if (newtp != tp)
- log("transformInfo changed type for " + sym + " to " + newtp);
+ debuglog("transformInfo changed type for " + sym + " to " + newtp);
if (sym == MethReifyR)
- log("transformInfo (not)changed type for " + sym + " to " + newtp);
+ debuglog("transformInfo (not)changed type for " + sym + " to " + newtp);
newtp
}
@@ -83,13 +83,13 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case Apply(TypeApply(fun, targs), args)
if (fun.symbol == MethShift) =>
- log("found shift: " + tree)
+ debuglog("found shift: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethShiftR) // TODO: correct?
//gen.mkAttributedSelect(gen.mkAttributedSelect(gen.mkAttributedSelect(gen.mkAttributedIdent(ScalaPackage),
//ScalaPackage.tpe.member("util")), ScalaPackage.tpe.member("util").tpe.member("continuations")), MethShiftR)
//gen.mkAttributedRef(ModCPS.tpe, MethShiftR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, targs).setType(appliedType(funR.tpe, targs.map((t:Tree) => t.tpe))),
args.map(transform(_))
@@ -98,10 +98,10 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case Apply(TypeApply(fun, targs), args)
if (fun.symbol == MethShiftUnit) =>
- log("found shiftUnit: " + tree)
+ debuglog("found shiftUnit: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethShiftUnitR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, List(targs(0), targs(1))).setType(appliedType(funR.tpe,
List(targs(0).tpe, targs(1).tpe))),
@@ -114,7 +114,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
log("found reify: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethReifyR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, targs).setType(appliedType(funR.tpe, targs.map((t:Tree) => t.tpe))),
args.map(transform(_))
@@ -258,17 +258,17 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case vd @ ValDef(mods, name, tpt, rhs)
if (vd.symbol.hasAnnotation(MarkerCPSSym)) =>
- log("found marked ValDef "+name+" of type " + vd.symbol.tpe)
+ debuglog("found marked ValDef "+name+" of type " + vd.symbol.tpe)
val tpe = vd.symbol.tpe
val rhs1 = atOwner(vd.symbol) { transform(rhs) }
rhs1.changeOwner(vd.symbol -> currentOwner) // TODO: don't traverse twice
- log("valdef symbol " + vd.symbol + " has type " + tpe)
- log("right hand side " + rhs1 + " has type " + rhs1.tpe)
+ debuglog("valdef symbol " + vd.symbol + " has type " + tpe)
+ debuglog("right hand side " + rhs1 + " has type " + rhs1.tpe)
- log("currentOwner: " + currentOwner)
- log("currentMethod: " + currentMethod)
+ debuglog("currentOwner: " + currentOwner)
+ debuglog("currentMethod: " + currentMethod)
val (bodyStms, bodyExpr) = transBlock(rest, expr)
// FIXME: result will later be traversed again by TreeSymSubstituter and
@@ -308,12 +308,12 @@ abstract class SelectiveCPSTransform extends PluginComponent with
// see note about multiple traversals above
- log("fun.symbol: "+fun.symbol)
- log("fun.symbol.owner: "+fun.symbol.owner)
- log("arg.owner: "+arg.owner)
+ debuglog("fun.symbol: "+fun.symbol)
+ debuglog("fun.symbol.owner: "+fun.symbol.owner)
+ debuglog("arg.owner: "+arg.owner)
- log("fun.tpe:"+fun.tpe)
- log("return type of fun:"+body1.tpe)
+ debuglog("fun.tpe:"+fun.tpe)
+ debuglog("return type of fun:"+body1.tpe)
var methodName = nme.map
@@ -324,7 +324,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
else
unit.error(rhs.pos, "cannot compute type for CPS-transformed function result")
- log("will use method:"+methodName)
+ debuglog("will use method:"+methodName)
localTyper.typed(atPos(vd.symbol.pos) {
Apply(Select(ctxR, ctxR.tpe.member(methodName)), List(fun))
@@ -335,7 +335,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
try {
if (specialCaseTrivial) {
- log("will optimize possible tail call: " + bodyExpr)
+ debuglog("will optimize possible tail call: " + bodyExpr)
// FIXME: flatMap impl has become more complicated due to
// exceptions. do we need to put a try/catch in the then part??
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
index 3fad92cbf1..e9389e9acb 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
@@ -1,669 +1,2324 @@
/*
+
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.locks.*;
-import java.util.concurrent.atomic.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+//import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+//import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
+import java.util.concurrent.locks.Condition;
+
+interface RunnableFuture<T> extends Runnable {
+ //TR placeholder for java.util.concurrent.RunnableFuture
+}
/**
- * An {@link ExecutorService} for running {@link ForkJoinTask}s. A
- * ForkJoinPool provides the entry point for submissions from
- * non-ForkJoinTasks, as well as management and monitoring operations.
- * Normally a single ForkJoinPool is used for a large number of
- * submitted tasks. Otherwise, use would not usually outweigh the
- * construction and bookkeeping overhead of creating a large set of
- * threads.
+ * An {@link ExecutorService} for running {@link ForkJoinTask}s.
+ * A {@code ForkJoinPool} provides the entry point for submissions
+ * from non-{@code ForkJoinTask} clients, as well as management and
+ * monitoring operations.
*
- * <p>ForkJoinPools differ from other kinds of Executors mainly in
- * that they provide <em>work-stealing</em>: all threads in the pool
- * attempt to find and execute subtasks created by other active tasks
- * (eventually blocking if none exist). This makes them efficient when
- * most tasks spawn other subtasks (as do most ForkJoinTasks), as well
- * as the mixed execution of some plain Runnable- or Callable- based
- * activities along with ForkJoinTasks. When setting
- * <tt>setAsyncMode</tt>, a ForkJoinPools may also be appropriate for
- * use with fine-grained tasks that are never joined. Otherwise, other
- * ExecutorService implementations are typically more appropriate
- * choices.
+ * <p>A {@code ForkJoinPool} differs from other kinds of {@link
+ * ExecutorService} mainly by virtue of employing
+ * <em>work-stealing</em>: all threads in the pool attempt to find and
+ * execute tasks submitted to the pool and/or created by other active
+ * tasks (eventually blocking waiting for work if none exist). This
+ * enables efficient processing when most tasks spawn other subtasks
+ * (as do most {@code ForkJoinTask}s), as well as when many small
+ * tasks are submitted to the pool from external clients. Especially
+ * when setting <em>asyncMode</em> to true in constructors, {@code
+ * ForkJoinPool}s may also be appropriate for use with event-style
+ * tasks that are never joined.
*
- * <p>A ForkJoinPool may be constructed with a given parallelism level
- * (target pool size), which it attempts to maintain by dynamically
- * adding, suspending, or resuming threads, even if some tasks are
- * waiting to join others. However, no such adjustments are performed
- * in the face of blocked IO or other unmanaged synchronization. The
- * nested <code>ManagedBlocker</code> interface enables extension of
- * the kinds of synchronization accommodated. The target parallelism
- * level may also be changed dynamically (<code>setParallelism</code>)
- * and thread construction can be limited using methods
- * <code>setMaximumPoolSize</code> and/or
- * <code>setMaintainsParallelism</code>.
+ * <p>A {@code ForkJoinPool} is constructed with a given target
+ * parallelism level; by default, equal to the number of available
+ * processors. The pool attempts to maintain enough active (or
+ * available) threads by dynamically adding, suspending, or resuming
+ * internal worker threads, even if some tasks are stalled waiting to
+ * join others. However, no such adjustments are guaranteed in the
+ * face of blocked IO or other unmanaged synchronization. The nested
+ * {@link ManagedBlocker} interface enables extension of the kinds of
+ * synchronization accommodated.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
- * <code>getStealCount</code>) that are intended to aid in developing,
+ * {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
- * <code>toString</code> returns indications of pool state in a
+ * {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
+ * <p> As is the case with other ExecutorServices, there are three
+ * main task execution methods summarized in the following table.
+ * These are designed to be used primarily by clients not already
+ * engaged in fork/join computations in the current pool. The main
+ * forms of these methods accept instances of {@code ForkJoinTask},
+ * but overloaded forms also allow mixed execution of plain {@code
+ * Runnable}- or {@code Callable}- based activities as well. However,
+ * tasks that are already executing in a pool should normally instead
+ * use the within-computation forms listed in the table unless using
+ * async event-style tasks that are not usually joined, in which case
+ * there is little difference among choice of methods.
+ *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ * <tr>
+ * <td></td>
+ * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange async execution</td>
+ * <td> {@link #execute(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Await and obtain result</td>
+ * <td> {@link #invoke(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#invoke}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange exec and obtain Future</td>
+ * <td> {@link #submit(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ * </tr>
+ * </table>
+ *
+ * <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
+ * used for all parallel task execution in a program or subsystem.
+ * Otherwise, use would not usually outweigh the construction and
+ * bookkeeping overhead of creating a large set of threads. For
+ * example, a common pool could be used for the {@code SortTasks}
+ * illustrated in {@link RecursiveAction}. Because {@code
+ * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
+ * daemon} mode, there is typically no need to explicitly {@link
+ * #shutdown} such a pool upon program exit.
+ *
+ * <pre> {@code
+ * static final ForkJoinPool mainPool = new ForkJoinPool();
+ * ...
+ * public void sort(long[] array) {
+ * mainPool.invoke(new SortTask(array, 0, array.length));
+ * }}</pre>
+ *
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of running threads to 32767. Attempts to create
- * pools with greater than the maximum result in
- * IllegalArgumentExceptions.
+ * pools with greater than the maximum number result in
+ * {@code IllegalArgumentException}.
+ *
+ * <p>This implementation rejects submitted tasks (that is, by throwing
+ * {@link RejectedExecutionException}) only when the pool is shut down
+ * or internal resources have been exhausted.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public class ForkJoinPool /*extends AbstractExecutorService*/ {
/*
- * See the extended comments interspersed below for design,
- * rationale, and walkthroughs.
+ * Implementation Overview
+ *
+ * This class and its nested classes provide the main
+ * functionality and control for a set of worker threads:
+ * Submissions from non-FJ threads enter into submission queues.
+ * Workers take these tasks and typically split them into subtasks
+ * that may be stolen by other workers. Preference rules give
+ * first priority to processing tasks from their own queues (LIFO
+ * or FIFO, depending on mode), then to randomized FIFO steals of
+ * tasks in other queues.
+ *
+ * WorkQueues
+ * ==========
+ *
+ * Most operations occur within work-stealing queues (in nested
+ * class WorkQueue). These are special forms of Deques that
+ * support only three of the four possible end-operations -- push,
+ * pop, and poll (aka steal), under the further constraints that
+ * push and pop are called only from the owning thread (or, as
+ * extended here, under a lock), while poll may be called from
+ * other threads. (If you are unfamiliar with them, you probably
+ * want to read Herlihy and Shavit's book "The Art of
+ * Multiprocessor Programming", chapter 16 describing these in
+ * more detail before proceeding.) The main work-stealing queue
+ * design is roughly similar to those in the papers "Dynamic
+ * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
+ * (http://research.sun.com/scalable/pubs/index.html) and
+ * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
+ * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
+ * The main differences ultimately stem from GC requirements that
+ * we null out taken slots as soon as we can, to maintain as small
+ * a footprint as possible even in programs generating huge
+ * numbers of tasks. To accomplish this, we shift the CAS
+ * arbitrating pop vs poll (steal) from being on the indices
+ * ("base" and "top") to the slots themselves. So, both a
+ * successful pop and poll mainly entail a CAS of a slot from
+ * non-null to null. Because we rely on CASes of references, we
+ * do not need tag bits on base or top. They are simple ints as
+ * used in any circular array-based queue (see for example
+ * ArrayDeque). Updates to the indices must still be ordered in a
+ * way that guarantees that top == base means the queue is empty,
+ * but otherwise may err on the side of possibly making the queue
+ * appear nonempty when a push, pop, or poll have not fully
+ * committed. Note that this means that the poll operation,
+ * considered individually, is not wait-free. One thief cannot
+ * successfully continue until another in-progress one (or, if
+ * previously empty, a push) completes. However, in the
+ * aggregate, we ensure at least probabilistic non-blockingness.
+ * If an attempted steal fails, a thief always chooses a different
+ * random victim target to try next. So, in order for one thief to
+ * progress, it suffices for any in-progress poll or new push on
+ * any empty queue to complete. (This is why we normally use
+ * method pollAt and its variants that try once at the apparent
+ * base index, else consider alternative actions, rather than
+ * method poll.)
+ *
+ * This approach also enables support of a user mode in which local
+ * task processing is in FIFO, not LIFO order, simply by using
+ * poll rather than pop. This can be useful in message-passing
+ * frameworks in which tasks are never joined. However neither
+ * mode considers affinities, loads, cache localities, etc, so
+ * rarely provides the best possible performance on a given
+ * machine, but portably provides good throughput by averaging over
+ * these factors. (Further, even if we did try to use such
+ * information, we do not usually have a basis for exploiting it.
+ * For example, some sets of tasks profit from cache affinities,
+ * but others are harmed by cache pollution effects.)
+ *
+ * WorkQueues are also used in a similar way for tasks submitted
+ * to the pool. We cannot mix these tasks in the same queues used
+ * for work-stealing (this would contaminate lifo/fifo
+ * processing). Instead, we loosely associate submission queues
+ * with submitting threads, using a form of hashing. The
+ * ThreadLocal Submitter class contains a value initially used as
+ * a hash code for choosing existing queues, but may be randomly
+ * repositioned upon contention with other submitters. In
+ * essence, submitters act like workers except that they never
+ * take tasks, and they are multiplexed on to a finite number of
+ * shared work queues. However, classes are set up so that future
+ * extensions could allow submitters to optionally help perform
+ * tasks as well. Insertion of tasks in shared mode requires a
+ * lock (mainly to protect in the case of resizing) but we use
+ * only a simple spinlock (using bits in field runState), because
+ * submitters encountering a busy queue move on to try or create
+ * other queues -- they block only when creating and registering
+ * new queues.
+ *
+ * Management
+ * ==========
+ *
+ * The main throughput advantages of work-stealing stem from
+ * decentralized control -- workers mostly take tasks from
+ * themselves or each other. We cannot negate this in the
+ * implementation of other management responsibilities. The main
+ * tactic for avoiding bottlenecks is packing nearly all
+ * essentially atomic control state into two volatile variables
+ * that are by far most often read (not written) as status and
+ * consistency checks.
+ *
+ * Field "ctl" contains 64 bits holding all the information needed
+ * to atomically decide to add, inactivate, enqueue (on an event
+ * queue), dequeue, and/or re-activate workers. To enable this
+ * packing, we restrict maximum parallelism to (1<<15)-1 (which is
+ * far in excess of normal operating range) to allow ids, counts,
+ * and their negations (used for thresholding) to fit into 16bit
+ * fields.
+ *
+ * Field "runState" contains 32 bits needed to register and
+ * deregister WorkQueues, as well as to enable shutdown. It is
+ * only modified under a lock (normally briefly held, but
+ * occasionally protecting allocations and resizings) but even
+ * when locked remains available to check consistency.
+ *
+ * Recording WorkQueues. WorkQueues are recorded in the
+ * "workQueues" array that is created upon pool construction and
+ * expanded if necessary. Updates to the array while recording
+ * new workers and unrecording terminated ones are protected from
+ * each other by a lock but the array is otherwise concurrently
+ * readable, and accessed directly. To simplify index-based
+ * operations, the array size is always a power of two, and all
+ * readers must tolerate null slots. Shared (submission) queues
+ * are at even indices, worker queues at odd indices. Grouping
+ * them together in this way simplifies and speeds up task
+ * scanning.
+ *
+ * All worker thread creation is on-demand, triggered by task
+ * submissions, replacement of terminated workers, and/or
+ * compensation for blocked workers. However, all other support
+ * code is set up to work with other policies. To ensure that we
+ * do not hold on to worker references that would prevent GC, ALL
+ * accesses to workQueues are via indices into the workQueues
+ * array (which is one source of some of the messy code
+ * constructions here). In essence, the workQueues array serves as
+ * a weak reference mechanism. Thus for example the wait queue
+ * field of ctl stores indices, not references. Access to the
+ * workQueues in associated methods (for example signalWork) must
+ * both index-check and null-check the IDs. All such accesses
+ * ignore bad IDs by returning out early from what they are doing,
+ * since this can only be associated with termination, in which
+ * case it is OK to give up. All uses of the workQueues array
+ * also check that it is non-null (even if previously
+ * non-null). This allows nulling during termination, which is
+ * currently not necessary, but remains an option for
+ * resource-revocation-based shutdown schemes. It also helps
+ * reduce JIT issuance of uncommon-trap code, which tends to
+ * unnecessarily complicate control flow in some methods.
+ *
+ * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
+ * let workers spin indefinitely scanning for tasks when none can
+ * be found immediately, and we cannot start/resume workers unless
+ * there appear to be tasks available. On the other hand, we must
+ * quickly prod them into action when new tasks are submitted or
+ * generated. In many usages, ramp-up time to activate workers is
+ * the main limiting factor in overall performance (this is
+ * compounded at program start-up by JIT compilation and
+ * allocation). So we try to streamline this as much as possible.
+ * We park/unpark workers after placing in an event wait queue
+ * when they cannot find work. This "queue" is actually a simple
+ * Treiber stack, headed by the "id" field of ctl, plus a 15bit
+ * counter value (that reflects the number of times a worker has
+ * been inactivated) to avoid ABA effects (we need only as many
+ * version numbers as worker threads). Successors are held in
+ * field WorkQueue.nextWait. Queuing deals with several intrinsic
+ * races, mainly that a task-producing thread can miss seeing (and
+ * signalling) another thread that gave up looking for work but
+ * has not yet entered the wait queue. We solve this by requiring
+ * a full sweep of all workers (via repeated calls to method
+ * scan()) both before and after a newly waiting worker is added
+ * to the wait queue. During a rescan, the worker might release
+ * some other queued worker rather than itself, which has the same
+ * net effect. Because enqueued workers may actually be rescanning
+ * rather than waiting, we set and clear the "parker" field of
+ * WorkQueues to reduce unnecessary calls to unpark. (This
+ * requires a secondary recheck to avoid missed signals.) Note
+ * the unusual conventions about Thread.interrupts surrounding
+ * parking and other blocking: Because interrupts are used solely
+ * to alert threads to check termination, which is checked anyway
+ * upon blocking, we clear status (using Thread.interrupted)
+ * before any call to park, so that park does not immediately
+ * return due to status being set via some other unrelated call to
+ * interrupt in user code.
+ *
+ * Signalling. We create or wake up workers only when there
+ * appears to be at least one task they might be able to find and
+ * execute. When a submission is added or another worker adds a
+ * task to a queue that previously had fewer than two tasks, they
+ * signal waiting workers (or trigger creation of new ones if
+ * fewer than the given parallelism level -- see signalWork).
+ * These primary signals are buttressed by signals during rescans;
+ * together these cover the signals needed in cases when more
+ * tasks are pushed but untaken, and improve performance compared
+ * to having one thread wake up all workers.
+ *
+ * Trimming workers. To release resources after periods of lack of
+ * use, a worker starting to wait when the pool is quiescent will
+ * time out and terminate if the pool has remained quiescent for
+ * SHRINK_RATE nanosecs. This will slowly propagate, eventually
+ * terminating all workers after long periods of non-use.
+ *
+ * Shutdown and Termination. A call to shutdownNow atomically sets
+ * a runState bit and then (non-atomically) sets each worker's
+ * runState status, cancels all unprocessed tasks, and wakes up
+ * all waiting workers. Detecting whether termination should
+ * commence after a non-abrupt shutdown() call requires more work
+ * and bookkeeping. We need consensus about quiescence (i.e., that
+ * there is no more work). The active count provides a primary
+ * indication but non-abrupt shutdown still requires a rechecking
+ * scan for any workers that are inactive but not queued.
+ *
+ * Joining Tasks
+ * =============
+ *
+ * Any of several actions may be taken when one worker is waiting
+ * to join a task stolen (or always held) by another. Because we
+ * are multiplexing many tasks on to a pool of workers, we can't
+ * just let them block (as in Thread.join). We also cannot just
+ * reassign the joiner's run-time stack with another and replace
+ * it later, which would be a form of "continuation", that even if
+ * possible is not necessarily a good idea since we sometimes need
+ * both an unblocked task and its continuation to progress.
+ * Instead we combine two tactics:
+ *
+ * Helping: Arranging for the joiner to execute some task that it
+ * would be running if the steal had not occurred.
+ *
+ * Compensating: Unless there are already enough live threads,
+ * method tryCompensate() may create or re-activate a spare
+ * thread to compensate for blocked joiners until they unblock.
+ *
+ * A third form (implemented in tryRemoveAndExec and
+ * tryPollForAndExec) amounts to helping a hypothetical
+ * compensator: If we can readily tell that a possible action of a
+ * compensator is to steal and execute the task being joined, the
+ * joining thread can do so directly, without the need for a
+ * compensation thread (although at the expense of larger run-time
+ * stacks, but the tradeoff is typically worthwhile).
+ *
+ * The ManagedBlocker extension API can't use helping so relies
+ * only on compensation in method awaitBlocker.
+ *
+ * The algorithm in tryHelpStealer entails a form of "linear"
+ * helping: Each worker records (in field currentSteal) the most
+ * recent task it stole from some other worker. Plus, it records
+ * (in field currentJoin) the task it is currently actively
+ * joining. Method tryHelpStealer uses these markers to try to
+ * find a worker to help (i.e., steal back a task from and execute
+ * it) that could hasten completion of the actively joined task.
+ * In essence, the joiner executes a task that would be on its own
+ * local deque had the to-be-joined task not been stolen. This may
+ * be seen as a conservative variant of the approach in Wagner &
+ * Calder "Leapfrogging: a portable technique for implementing
+ * efficient futures" SIGPLAN Notices, 1993
+ * (http://portal.acm.org/citation.cfm?id=155354). It differs in
+ * that: (1) We only maintain dependency links across workers upon
+ * steals, rather than use per-task bookkeeping. This sometimes
+ * requires a linear scan of workQueues array to locate stealers,
+ * but often doesn't because stealers leave hints (that may become
+ * stale/wrong) of where to locate them. A stealHint is only a
+ * hint because a worker might have had multiple steals and the
+ * hint records only one of them (usually the most current).
+ * Hinting isolates cost to when it is needed, rather than adding
+ * to per-task overhead. (2) It is "shallow", ignoring nesting
+ * and potentially cyclic mutual steals. (3) It is intentionally
+ * racy: field currentJoin is updated only while actively joining,
+ * which means that we miss links in the chain during long-lived
+ * tasks, GC stalls etc (which is OK since blocking in such cases
+ * is usually a good idea). (4) We bound the number of attempts
+ * to find work (see MAX_HELP) and fall back to suspending the
+ * worker and if necessary replacing it with another.
+ *
+ * It is impossible to keep exactly the target parallelism number
+ * of threads running at any given time. Determining the
+ * existence of conservatively safe helping targets, the
+ * availability of already-created spares, and the apparent need
+ * to create new spares are all racy, so we rely on multiple
+ * retries of each. Compensation in the apparent absence of
+ * helping opportunities is challenging to control on JVMs, where
+ * GC and other activities can stall progress of tasks that in
+ * turn stall out many other dependent tasks, without us being
+ * able to determine whether they will ever require compensation.
+ * Even though work-stealing otherwise encounters little
+ * degradation in the presence of more threads than cores,
+ * aggressively adding new threads in such cases entails risk of
+ * unwanted positive feedback control loops in which more threads
+ * cause more dependent stalls (as well as delayed progress of
+ * unblocked threads to the point that we know they are available)
+ * leading to more situations requiring more threads, and so
+ * on. This aspect of control can be seen as an (analytically
+ * intractable) game with an opponent that may choose the worst
+ * (for us) active thread to stall at any time. We take several
+ * precautions to bound losses (and thus bound gains), mainly in
+ * methods tryCompensate and awaitJoin: (1) We only try
+ * compensation after attempting enough helping steps (measured
+ * via counting and timing) that we have already consumed the
+ * estimated cost of creating and activating a new thread. (2) We
+ * allow up to 50% of threads to be blocked before initially
+ * adding any others, and unless completely saturated, check that
+ * some work is available for a new worker before adding. Also, we
+ * create up to only 50% more threads until entering a mode that
+ * only adds a thread if all others are possibly blocked. All
+ * together, this means that we might be half as fast to react,
+ * and create half as many threads as possible in the ideal case,
+ * but present vastly fewer anomalies in all other cases compared
+ * to both more aggressive and more conservative alternatives.
+ *
+ * Style notes: There is a lot of representation-level coupling
+ * among classes ForkJoinPool, ForkJoinWorkerThread, and
+ * ForkJoinTask. The fields of WorkQueue maintain data structures
+ * managed by ForkJoinPool, so are directly accessed. There is
+ * little point trying to reduce this, since any associated future
+ * changes in representations will need to be accompanied by
+ * algorithmic changes anyway. Several methods intrinsically
+ * sprawl because they must accumulate sets of consistent reads of
+ * volatiles held in local variables. Methods signalWork() and
+ * scan() are the main bottlenecks, so are especially heavily
+ * micro-optimized/mangled. There are lots of inline assignments
+ * (of form "while ((local = field) != 0)") which are usually the
+ * simplest way to ensure the required read orderings (which are
+ * sometimes critical). This leads to a "C"-like style of listing
+ * declarations of these locals at the heads of methods or blocks.
+ * There are several occurrences of the unusual "do {} while
+ * (!cas...)" which is the simplest way to force an update of a
+ * CAS'ed variable. There are also other coding oddities that help
+ * some methods perform reasonably even when interpreted (not
+ * compiled).
+ *
+ * The order of declarations in this file is:
+ * (1) Static utility functions
+ * (2) Nested (static) classes
+ * (3) Static fields
+ * (4) Fields, along with constants used when unpacking some of them
+ * (5) Internal control methods
+ * (6) Callbacks and other support for ForkJoinTask methods
+ * (7) Exported methods
+ * (8) Static block initializing statics in minimally dependent order
*/
- /** Mask for packing and unpacking shorts */
- private static final int shortMask = 0xffff;
-
- /** Max pool size -- must be a power of two minus 1 */
- private static final int MAX_THREADS = 0x7FFF;
+ // Static utilities
- // placeholder for java.util.concurrent.RunnableFuture
- interface RunnableFuture<T> extends Runnable {
+ /**
+ * If there is a security manager, makes sure caller has
+ * permission to modify threads.
+ */
+ private static void checkPermission() {
+ SecurityManager security = System.getSecurityManager();
+ if (security != null)
+ security.checkPermission(modifyThreadPermission);
}
+ // Nested classes
+
/**
- * Factory for creating new ForkJoinWorkerThreads. A
- * ForkJoinWorkerThreadFactory must be defined and used for
- * ForkJoinWorkerThread subclasses that extend base functionality
- * or initialize threads with different contexts.
+ * Factory for creating new {@link ForkJoinWorkerThread}s.
+ * A {@code ForkJoinWorkerThreadFactory} must be defined and used
+ * for {@code ForkJoinWorkerThread} subclasses that extend base
+ * functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
- * @throws NullPointerException if pool is null;
+ * @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
- * Default ForkJoinWorkerThreadFactory implementation, creates a
+ * Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
- static class DefaultForkJoinWorkerThreadFactory
+ static class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
- try {
- return new ForkJoinWorkerThread(pool);
- } catch (OutOfMemoryError oom) {
- return null;
- }
+ return new ForkJoinWorkerThread(pool);
}
}
/**
- * Creates a new ForkJoinWorkerThread. This factory is used unless
- * overridden in ForkJoinPool constructors.
+ * A simple non-reentrant lock used for exclusion when managing
+ * queues and workers. We use a custom lock so that we can readily
+ * probe lock state in constructions that check among alternative
+ * actions. The lock is normally only very briefly held, and
+ * sometimes treated as a spinlock, but other usages block to
+ * reduce overall contention in those cases where locked code
+ * bodies perform allocation/resizing.
*/
- public static final ForkJoinWorkerThreadFactory
- defaultForkJoinWorkerThreadFactory =
- new DefaultForkJoinWorkerThreadFactory();
-
- /**
- * Permission required for callers of methods that may start or
- * kill threads.
- */
- private static final RuntimePermission modifyThreadPermission =
- new RuntimePermission("modifyThread");
+ static final class Mutex extends AbstractQueuedSynchronizer {
+ public final boolean tryAcquire(int ignore) {
+ return compareAndSetState(0, 1);
+ }
+ public final boolean tryRelease(int ignore) {
+ setState(0);
+ return true;
+ }
+ public final void lock() { acquire(0); }
+ public final void unlock() { release(0); }
+ public final boolean isHeldExclusively() { return getState() == 1; }
+ public final Condition newCondition() { return new ConditionObject(); }
+ }
/**
- * If there is a security manager, makes sure caller has
- * permission to modify threads.
+ * Class for artificial tasks that are used to replace the target
+ * of local joins if they are removed from an interior queue slot
+ * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
+ * actually do anything beyond having a unique identity.
*/
- private static void checkPermission() {
- SecurityManager security = System.getSecurityManager();
- if (security != null)
- security.checkPermission(modifyThreadPermission);
+ static final class EmptyTask extends ForkJoinTask<Void> {
+ EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void x) {}
+ public final boolean exec() { return true; }
}
/**
- * Generator for assigning sequence numbers as pool names.
- */
- private static final AtomicInteger poolNumberGenerator =
- new AtomicInteger();
+ * Queues supporting work-stealing as well as external task
+ * submission. See above for main rationale and algorithms.
+ * Implementation relies heavily on "Unsafe" intrinsics
+ * and selective use of "volatile":
+ *
+ * Field "base" is the index (mod array.length) of the least valid
+ * queue slot, which is always the next position to steal (poll)
+ * from if nonempty. Reads and writes require volatile orderings
+ * but not CAS, because updates are only performed after slot
+ * CASes.
+ *
+ * Field "top" is the index (mod array.length) of the next queue
+ * slot to push to or pop from. It is written only by owner thread
+ * for push, or under lock for trySharedPush, and accessed by
+ * other threads only after reading (volatile) base. Both top and
+ * base are allowed to wrap around on overflow, but (top - base)
+ * (or more commonly -(base - top) to force volatile read of base
+ * before top) still estimates size.
+ *
+ * The array slots are read and written using the emulation of
+ * volatiles/atomics provided by Unsafe. Insertions must in
+ * general use putOrderedObject as a form of releasing store to
+ * ensure that all writes to the task object are ordered before
+ * its publication in the queue. (Although we can avoid one case
+ * of this when locked in trySharedPush.) All removals entail a
+ * CAS to null. The array is always a power of two. To ensure
+ * safety of Unsafe array operations, all accesses perform
+ * explicit null checks and implicit bounds checks via
+ * power-of-two masking.
+ *
+ * In addition to basic queuing support, this class contains
+ * fields described elsewhere to control execution. It turns out
+ * to work better memory-layout-wise to include them in this
+ * class rather than a separate class.
+ *
+ * Performance on most platforms is very sensitive to placement of
+ * instances of both WorkQueues and their arrays -- we absolutely
+ * do not want multiple WorkQueue instances or multiple queue
+ * arrays sharing cache lines. (It would be best for queue objects
+ * and their arrays to share, but there is nothing available to
+ * help arrange that). Unfortunately, because they are recorded
+ * in a common array, WorkQueue instances are often moved to be
+ * adjacent by garbage collectors. To reduce impact, we use field
+ * padding that works OK on common platforms; this effectively
+ * trades off slightly slower average field access for the sake of
+ * avoiding really bad worst-case access. (Until better JVM
+ * support is in place, this padding is dependent on transient
+ * properties of JVM field layout rules.) We also take care in
+ * allocating, sizing and resizing the array. Non-shared queue
+ * arrays are initialized (via method growArray) by workers before
+ * use. Others are allocated on first use.
+ */
+ static final class WorkQueue {
+ /**
+ * Capacity of work-stealing queue array upon initialization.
+ * Must be a power of two; at least 4, but should be larger to
+ * reduce or eliminate cacheline sharing among queues.
+ * Currently, it is much larger, as a partial workaround for
+ * the fact that JVMs often place arrays in locations that
+ * share GC bookkeeping (especially cardmarks) such that
+ * per-write accesses encounter serious memory contention.
+ */
+ static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
- /**
- * Array holding all worker threads in the pool. Initialized upon
- * first use. Array size must be a power of two. Updates and
- * replacements are protected by workerLock, but it is always kept
- * in a consistent enough state to be randomly accessed without
- * locking by workers performing work-stealing.
- */
- public volatile ForkJoinWorkerThread[] workers;
+ /**
+ * Maximum size for queue arrays. Must be a power of two less
+ * than or equal to 1 << (31 - width of array entry) to ensure
+ * lack of wraparound of index calculations, but defined to a
+ * value a bit less than this to help users trap runaway
+ * programs before saturating systems.
+ */
+ static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
+
+ volatile long totalSteals; // cumulative number of steals
+ int seed; // for random scanning; initialize nonzero
+ volatile int eventCount; // encoded inactivation count; < 0 if inactive
+ int nextWait; // encoded record of next event waiter
+ int rescans; // remaining scans until block
+ int nsteals; // top-level task executions since last idle
+ final int mode; // lifo, fifo, or shared
+ int poolIndex; // index of this queue in pool (or 0)
+ int stealHint; // index of most recent known stealer
+ volatile int runState; // 1: locked, -1: terminate; else 0
+ volatile int base; // index of next slot for poll
+ int top; // index of next slot for push
+ ForkJoinTask<?>[] array; // the elements (initially unallocated)
+ final ForkJoinPool pool; // the containing pool (may be null)
+ final ForkJoinWorkerThread owner; // owning thread or null if shared
+ volatile Thread parker; // == owner during call to park; else null
+ ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
+ ForkJoinTask<?> currentSteal; // current non-local task being executed
+ // Heuristic padding to ameliorate unfortunate memory placements
+ Object p00, p01, p02, p03, p04, p05, p06, p07;
+ Object p08, p09, p0a, p0b, p0c, p0d, p0e;
+
+ WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) {
+ this.mode = mode;
+ this.pool = pool;
+ this.owner = owner;
+ // Place indices in the center of array (that is not yet allocated)
+ base = top = INITIAL_QUEUE_CAPACITY >>> 1;
+ }
- /**
- * Lock protecting access to workers.
- */
- private final ReentrantLock workerLock;
+ /**
+ * Returns the approximate number of tasks in the queue.
+ */
+ final int queueSize() {
+ int n = base - top; // non-owner callers must read base first
+ return (n >= 0) ? 0 : -n; // ignore transient negative
+ }
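
A quick arithmetic sketch (hypothetical values, not pool code) of why this estimate survives the index wraparound discussed in the class comment above: Java int subtraction is modulo 2^32, so the difference stays correct even after top overflows:

class WrapAroundDemo {
    public static void main(String[] args) {
        int base = Integer.MAX_VALUE - 1; // about to overflow
        int top  = base + 5;              // wraps to a large negative int
        System.out.println(top);          // -2147483645
        System.out.println(top - base);   // 5 -- the size estimate is still right
    }
}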
- /**
- * Condition for awaitTermination.
- */
- private final Condition termination;
+ /**
+ * Provides a more accurate estimate of whether this queue has
+ * any tasks than does queueSize, by checking whether a
+ * near-empty queue has at least one unclaimed task.
+ */
+ final boolean isEmpty() {
+ ForkJoinTask<?>[] a; int m, s;
+ int n = base - (s = top);
+ return (n >= 0 ||
+ (n == -1 &&
+ ((a = array) == null ||
+ (m = a.length - 1) < 0 ||
+ U.getObjectVolatile
+ (a, ((m & (s - 1)) << ASHIFT) + ABASE) == null)));
+ }
+
+ /**
+ * Pushes a task. Called only by the owner in unshared queues.
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @throws RejectedExecutionException if array cannot be resized
+ */
+ final void push(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; ForkJoinPool p;
+ int s = top, m, n;
+ if ((a = array) != null) { // ignore if queue removed
+ U.putOrderedObject
+ (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task);
+ if ((n = (top = s + 1) - base) <= 2) {
+ if ((p = pool) != null)
+ p.signalWork();
+ }
+ else if (n >= m)
+ growArray(true);
+ }
+ }
+
+ /**
+ * Pushes a task if lock is free and array is either big
+ * enough or can be resized to be big enough.
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @return true if submitted
+ */
+ final boolean trySharedPush(ForkJoinTask<?> task) {
+ boolean submitted = false;
+ if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
+ ForkJoinTask<?>[] a = array;
+ int s = top;
+ try {
+ if ((a != null && a.length > s + 1 - base) ||
+ (a = growArray(false)) != null) { // must presize
+ int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
+ U.putObject(a, (long)j, task); // don't need "ordered"
+ top = s + 1;
+ submitted = true;
+ }
+ } finally {
+ runState = 0; // unlock
+ }
+ }
+ return submitted;
+ }
+
+ /**
+ * Takes next task, if one exists, in LIFO order. Called only
+ * by the owner in unshared queues. (We do not have a shared
+ * version of this method because it is never needed.)
+ */
+ final ForkJoinTask<?> pop() {
+ ForkJoinTask<?> t; int m;
+ ForkJoinTask<?>[] a = array;
+ if (a != null && (m = a.length - 1) >= 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ int j = ((m & s) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null)
+ break;
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ return t;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes a task in FIFO order if b is base of queue and a task
+ * can be claimed without contention. Specialized versions
+ * appear in ForkJoinPool methods scan and tryHelpStealer.
+ */
+ final ForkJoinTask<?> pollAt(int b) {
+ ForkJoinTask<?> t; ForkJoinTask<?>[] a;
+ if ((a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
+ base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in FIFO order.
+ */
+ final ForkJoinTask<?> poll() {
+ ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
+ while ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> nextLocalTask() {
+ return mode == 0 ? pop() : poll();
+ }
+
+ /**
+ * Returns next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> peek() {
+ ForkJoinTask<?>[] a = array; int m;
+ if (a == null || (m = a.length - 1) < 0)
+ return null;
+ int i = mode == 0 ? top - 1 : base;
+ int j = ((i & m) << ASHIFT) + ABASE;
+ return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ }
+
+ /**
+ * Pops the given task only if it is at the current top.
+ */
+ final boolean tryUnpush(ForkJoinTask<?> t) {
+ ForkJoinTask<?>[] a; int s;
+ if ((a = array) != null && (s = top) != base &&
+ U.compareAndSwapObject
+ (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+ top = s;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Polls the given task only if it is at the current base.
+ */
+ final boolean pollFor(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; int b;
+ if ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if (U.getObjectVolatile(a, j) == task && base == b &&
+ U.compareAndSwapObject(a, j, task, null)) {
+ base = b + 1;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * If present, removes from queue and executes the given task, or
+ * any other cancelled task. Returns (true) immediately on any CAS
+ * or consistency check failure so caller can retry.
+ *
+ * @return false if no progress can be made
+ */
+ final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
+ boolean removed = false, empty = true, progress = true;
+ ForkJoinTask<?>[] a; int m, s, b, n;
+ if ((a = array) != null && (m = a.length - 1) >= 0 &&
+ (n = (s = top) - (b = base)) > 0) {
+ for (ForkJoinTask<?> t;;) { // traverse from s to b
+ int j = ((--s & m) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t == null) // inconsistent length
+ break;
+ else if (t == task) {
+ if (s + 1 == top) { // pop
+ if (!U.compareAndSwapObject(a, j, task, null))
+ break;
+ top = s;
+ removed = true;
+ }
+ else if (base == b) // replace with proxy
+ removed = U.compareAndSwapObject(a, j, task,
+ new EmptyTask());
+ break;
+ }
+ else if (t.status >= 0)
+ empty = false;
+ else if (s + 1 == top) { // pop and throw away
+ if (U.compareAndSwapObject(a, j, t, null))
+ top = s;
+ break;
+ }
+ if (--n == 0) {
+ if (!empty && base == b)
+ progress = false;
+ break;
+ }
+ }
+ }
+ if (removed)
+ task.doExec();
+ return progress;
+ }
+
+ /**
+ * Initializes or doubles the capacity of array. Called either
+ * by the owner or with lock held -- it is OK for base, but not
+ * top, to move while resizings are in progress.
+ *
+ * @param rejectOnFailure if true, throw exception if capacity
+ * exceeded (relayed ultimately to user); else return null.
+ */
+ final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) {
+ ForkJoinTask<?>[] oldA = array;
+ int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
+ if (size <= MAXIMUM_QUEUE_CAPACITY) {
+ int oldMask, t, b;
+ ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
+ if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
+ (t = top) - (b = base) > 0) {
+ int mask = size - 1;
+ do {
+ ForkJoinTask<?> x;
+ int oldj = ((b & oldMask) << ASHIFT) + ABASE;
+ int j = ((b & mask) << ASHIFT) + ABASE;
+ x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
+ if (x != null &&
+ U.compareAndSwapObject(oldA, oldj, x, null))
+ U.putObjectVolatile(a, j, x);
+ } while (++b != t);
+ }
+ return a;
+ }
+ else if (!rejectOnFailure)
+ return null;
+ else
+ throw new RejectedExecutionException("Queue capacity exceeded");
+ }
+
+ /**
+ * Removes and cancels all known tasks, ignoring any exceptions.
+ */
+ final void cancelAll() {
+ ForkJoinTask.cancelIgnoringExceptions(currentJoin);
+ ForkJoinTask.cancelIgnoringExceptions(currentSteal);
+ for (ForkJoinTask<?> t; (t = poll()) != null; )
+ ForkJoinTask.cancelIgnoringExceptions(t);
+ }
+
+ /**
+ * Computes next value for random probes. Scans don't require
+ * a very high quality generator, but also not a crummy one.
+ * Marsaglia xor-shift is cheap and works well enough. Note:
+ * This is manually inlined in its usages in ForkJoinPool to
+ * avoid writes inside busy scan loops.
+ */
+ final int nextSeed() {
+ int r = seed;
+ r ^= r << 13;
+ r ^= r >>> 17;
+ return seed = r ^= r << 5;
+ }
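
For reference, a standalone Marsaglia xorshift sketch (illustrative, using the same 13/17/5 shift triple as nextSeed above); with a full-period triple such as this, any non-zero seed cycles through all 2^32-1 non-zero ints, which is why the seed must be initialized non-zero:

class XorShiftDemo {
    static int next(int r) { // same shifts as nextSeed
        r ^= r << 13;
        r ^= r >>> 17;
        r ^= r << 5;
        return r;
    }
    public static void main(String[] args) {
        int r = 0x9E3779B9; // any non-zero seed; zero is a fixed point
        for (int i = 0; i < 4; ++i)
            System.out.printf("%08x%n", r = next(r));
    }
}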
+
+ // Execution methods
+
+ /**
+ * Removes and runs tasks until empty, using local mode
+ * ordering. Normally called only after checking for apparent
+ * non-emptiness.
+ */
+ final void runLocalTasks() {
+ // hoist checks from repeated pop/poll
+ ForkJoinTask<?>[] a; int m;
+ if ((a = array) != null && (m = a.length - 1) >= 0) {
+ if (mode == 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ int j = ((m & s) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ t.doExec();
+ }
+ }
+ else
+ break;
+ }
+ }
+ else {
+ for (int b; (b = base) - top < 0;) {
+ int j = ((m & b) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ t.doExec();
+ }
+ } else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Executes a top-level task and any local tasks remaining
+ * after execution.
+ *
+ * @return true unless terminating
+ */
+ final boolean runTask(ForkJoinTask<?> t) {
+ boolean alive = true;
+ if (t != null) {
+ currentSteal = t;
+ t.doExec();
+ if (top != base) // conservative guard
+ runLocalTasks();
+ ++nsteals;
+ currentSteal = null;
+ }
+ else if (runState < 0) // terminating
+ alive = false;
+ return alive;
+ }
+
+ /**
+ * Executes a non-top-level (stolen) task.
+ */
+ final void runSubtask(ForkJoinTask<?> t) {
+ if (t != null) {
+ ForkJoinTask<?> ps = currentSteal;
+ currentSteal = t;
+ t.doExec();
+ currentSteal = ps;
+ }
+ }
+
+ /**
+ * Returns true if owned and not known to be blocked.
+ */
+ final boolean isApparentlyUnblocked() {
+ Thread wt; Thread.State s;
+ return (eventCount >= 0 &&
+ (wt = owner) != null &&
+ (s = wt.getState()) != Thread.State.BLOCKED &&
+ s != Thread.State.WAITING &&
+ s != Thread.State.TIMED_WAITING);
+ }
+
+ /**
+ * If this is owned and the owner is not already interrupted,
+ * tries to interrupt and/or unpark, ignoring exceptions.
+ */
+ final void interruptOwner() {
+ Thread wt, p;
+ if ((wt = owner) != null && !wt.isInterrupted()) {
+ try {
+ wt.interrupt();
+ } catch (SecurityException ignore) {
+ }
+ }
+ if ((p = parker) != null)
+ U.unpark(p);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long RUNSTATE;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ static {
+ int s;
+ try {
+ U = getUnsafe();
+ Class<?> k = WorkQueue.class;
+ Class<?> ak = ForkJoinTask[].class;
+ RUNSTATE = U.objectFieldOffset
+ (k.getDeclaredField("runState"));
+ ABASE = U.arrayBaseOffset(ak);
+ s = U.arrayIndexScale(ak);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ if ((s & (s-1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+ }
+ }
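
A small arithmetic sketch (plain Java with made-up layout numbers; no real Unsafe access) of the slot-offset expression ((i & mask) << ASHIFT) + ABASE used throughout WorkQueue above: power-of-two masking stands in for both the modulus and the bounds check, and the result is a raw byte offset into the array:

class SlotOffsetDemo {
    public static void main(String[] args) {
        int ABASE  = 16; // hypothetical array header size (arrayBaseOffset)
        int scale  = 4;  // hypothetical bytes per reference (arrayIndexScale)
        int ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); // 2
        int length = 1 << 13;       // power-of-two capacity
        int mask   = length - 1;
        int index  = (1 << 13) + 3; // logically wrapped position
        long off = ((long)(index & mask) << ASHIFT) + ABASE;
        System.out.println(index & mask); // 3: masking == index mod length
        System.out.println(off);          // 16 + 3*4 = 28: byte offset of slot 3
    }
}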
/**
- * The uncaught exception handler used when any worker
- * abrupty terminates
- */
- private Thread.UncaughtExceptionHandler ueh;
+ * Per-thread records for threads that submit to pools. Currently
+ * holds only pseudo-random seed / index that is used to choose
+ * submission queues in method doSubmit. In the future, this may
+ * also incorporate a means to implement different task rejection
+ * and resubmission policies.
+ *
+ * Seeds for submitters and workers/workQueues work in basically
+ * the same way but are initialized and updated using slightly
+ * different mechanics. Both are initialized using the same
+ * approach as in class ThreadLocal, where successive values are
+ * unlikely to collide with previous values. This is done during
+ * registration for workers, but requires a separate AtomicInteger
+ * for submitters. Seeds are then randomly modified upon
+ * collisions using xorshifts, which requires a non-zero seed.
+ */
+ static final class Submitter {
+ int seed;
+ Submitter() {
+ int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT);
+ seed = (s == 0) ? 1 : s; // ensure non-zero
+ }
+ }
+
+ /** ThreadLocal class for Submitters */
+ static final class ThreadSubmitter extends ThreadLocal<Submitter> {
+ public Submitter initialValue() { return new Submitter(); }
+ }
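
A hedged sketch of the shared seeding scheme (names below are illustrative, not the patch's fields): seeds step by the same golden-ratio increment ThreadLocal uses, so successive registrants hash far apart in a power-of-two table, and a zero seed is bumped to 1 exactly as in Submitter:

import java.util.concurrent.atomic.AtomicInteger;

class SeedDemo {
    static final int SEED_INCREMENT = 0x61c88647; // golden-ratio increment
    static final AtomicInteger nextSeed = new AtomicInteger();

    public static void main(String[] args) {
        int mask = 16 - 1; // hypothetical 16-slot power-of-two table
        for (int i = 0; i < 6; ++i) {
            int s = nextSeed.getAndAdd(SEED_INCREMENT);
            s = (s == 0) ? 1 : s;         // ensure non-zero, as in Submitter
            System.out.println(s & mask); // prints 1 7 14 5 12 3: well scattered
        }
    }
}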
+
+ // static fields (initialized in static initializer below)
/**
- * Creation factory for worker threads.
+ * Creates a new ForkJoinWorkerThread. This factory is used unless
+ * overridden in ForkJoinPool constructors.
*/
- private final ForkJoinWorkerThreadFactory factory;
+ public static final ForkJoinWorkerThreadFactory
+ defaultForkJoinWorkerThreadFactory;
/**
- * Head of stack of threads that were created to maintain
- * parallelism when other threads blocked, but have since
- * suspended when the parallelism level rose.
+ * Generator for assigning sequence numbers as pool names.
*/
- private volatile WaitQueueNode spareStack;
+ private static final AtomicInteger poolNumberGenerator;
/**
- * Sum of per-thread steal counts, updated only when threads are
- * idle or terminating.
+ * Generator for initial hashes/seeds for submitters. Accessed by
+ * Submitter class constructor.
*/
- private final AtomicLong stealCount;
+ static final AtomicInteger nextSubmitterSeed;
/**
- * Queue for external submissions.
+ * Permission required for callers of methods that may start or
+ * kill threads.
*/
- private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue;
+ private static final RuntimePermission modifyThreadPermission;
/**
- * Head of Treiber stack for barrier sync. See below for explanation
+ * Per-thread submission bookkeeping. Shared across all pools
+ * to reduce ThreadLocal pollution and because random motion
+ * to avoid contention in one pool is likely to hold for others.
*/
- private volatile WaitQueueNode syncStack;
+ private static final ThreadSubmitter submitters;
+
+ // static constants
/**
- * The count for event barrier
+ * The wakeup interval (in nanoseconds) for a worker waiting for a
+ * task when the pool is quiescent to instead try to shrink the
+ * number of workers. The exact value does not matter too
+ * much. It must be short enough to release resources during
+ * sustained periods of idleness, but not so short that threads
+ * are continually re-created.
*/
- private volatile long eventCount;
+ private static final long SHRINK_RATE =
+ 4L * 1000L * 1000L * 1000L; // 4 seconds
/**
- * Pool number, just for assigning useful names to worker threads
+ * The timeout value for attempted shrinkage, includes
+ * some slop to cope with system timer imprecision.
*/
- private final int poolNumber;
+ private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10);
/**
- * The maximum allowed pool size
+ * The maximum stolen->joining link depth allowed in method
+ * tryHelpStealer. Must be a power of two. This value also
+ * controls the maximum number of times to try to help join a task
+ * without any apparent progress or change in pool state before
+ * giving up and blocking (see awaitJoin). Depths for legitimate
+ * chains are unbounded, but we use a fixed constant to avoid
+ * (otherwise unchecked) cycles and to bound staleness of
+ * traversal parameters at the expense of sometimes blocking when
+ * we could be helping.
*/
- private volatile int maxPoolSize;
+ private static final int MAX_HELP = 32;
/**
- * The desired parallelism level, updated only under workerLock.
+ * Secondary time-based bound (in nanosecs) for helping attempts
+ * before trying compensated blocking in awaitJoin. Used in
+ * conjunction with MAX_HELP to reduce variance due to different
+ * polling rates associated with different helping options. The
+ * value should roughly approximate the time required to create
+ * and/or activate a worker thread.
*/
- private volatile int parallelism;
+ private static final long COMPENSATION_DELAY = 100L * 1000L; // 0.1 millisec
/**
- * True if use local fifo, not default lifo, for local polling
+ * Increment for seed generators. See class ThreadLocal for
+ * explanation.
*/
- private volatile boolean locallyFifo;
+ private static final int SEED_INCREMENT = 0x61c88647;
/**
- * Holds number of total (i.e., created and not yet terminated)
- * and running (i.e., not blocked on joins or other managed sync)
- * threads, packed into one int to ensure consistent snapshot when
- * making decisions about creating and suspending spare
- * threads. Updated only by CAS. Note: CASes in
- * updateRunningCount and preJoin running active count is in low
- * word, so need to be modified if this changes
- */
- private volatile int workerCounts;
+ * Bits and masks for control variables
+ *
+ * Field ctl is a long packed with:
+ * AC: Number of active running workers minus target parallelism (16 bits)
+ * TC: Number of total workers minus target parallelism (16 bits)
+ * ST: true if pool is terminating (1 bit)
+ * EC: the wait count of top waiting thread (15 bits)
+ * ID: poolIndex of top of Treiber stack of waiters (16 bits)
+ *
+ * When convenient, we can extract the upper 32 bits of counts and
+ * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
+ * (int)ctl. The ec field is never accessed alone, but always
+ * together with id and st. The offsets of counts by the target
+ * parallelism and the positioning of fields make it possible to
+ * perform the most common checks via sign tests of fields: When
+ * ac is negative, there are not enough active workers, when tc is
+ * negative, there are not enough total workers, and when e is
+ * negative, the pool is terminating. To deal with these possibly
+ * negative fields, we use casts in and out of "short" and/or
+ * signed shifts to maintain signedness.
+ *
+ * When a thread is queued (inactivated), its eventCount field is
+ * set negative, which is the only way to tell if a worker is
+ * prevented from executing tasks, even though it must continue to
+ * scan for them to avoid queuing races. Note however that
+ * eventCount updates lag releases, so usage requires care.
+ *
+ * Field runState is an int packed with:
+ * SHUTDOWN: true if shutdown is enabled (1 bit)
+ * SEQ: a sequence number updated upon (de)registering workers (30 bits)
+ * INIT: set true after workQueues array construction (1 bit)
+ *
+ * The sequence number enables simple consistency checks:
+ * Staleness of read-only operations on the workQueues array can
+ * be checked by comparing runState before vs after the reads.
+ */
+
+ // bit positions/shifts for fields
+ private static final int AC_SHIFT = 48;
+ private static final int TC_SHIFT = 32;
+ private static final int ST_SHIFT = 31;
+ private static final int EC_SHIFT = 16;
+
+ // bounds
+ private static final int SMASK = 0xffff; // short bits
+ private static final int MAX_CAP = 0x7fff; // max #workers - 1
+ private static final int SQMASK = 0xfffe; // even short bits
+ private static final int SHORT_SIGN = 1 << 15;
+ private static final int INT_SIGN = 1 << 31;
+
+ // masks
+ private static final long STOP_BIT = 0x0001L << ST_SHIFT;
+ private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
+ private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
+
+ // units for incrementing and decrementing
+ private static final long TC_UNIT = 1L << TC_SHIFT;
+ private static final long AC_UNIT = 1L << AC_SHIFT;
+
+ // masks and units for dealing with u = (int)(ctl >>> 32)
+ private static final int UAC_SHIFT = AC_SHIFT - 32;
+ private static final int UTC_SHIFT = TC_SHIFT - 32;
+ private static final int UAC_MASK = SMASK << UAC_SHIFT;
+ private static final int UTC_MASK = SMASK << UTC_SHIFT;
+ private static final int UAC_UNIT = 1 << UAC_SHIFT;
+ private static final int UTC_UNIT = 1 << UTC_SHIFT;
+
+ // masks and units for dealing with e = (int)ctl
+ private static final int E_MASK = 0x7fffffff; // no STOP_BIT
+ private static final int E_SEQ = 1 << EC_SHIFT;
+
+ // runState bits
+ private static final int SHUTDOWN = 1 << 31;
+
+ // access mode for WorkQueue
+ static final int LIFO_QUEUE = 0;
+ static final int FIFO_QUEUE = 1;
+ static final int SHARED_QUEUE = -1;
+
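
To make the ctl encoding above concrete, a decoding sketch (hypothetical values; mirrors the shifts above but is not pool code). Because both counts are stored offset by the target parallelism, the common "too few workers" checks reduce to sign tests after sign-extending through short:

class CtlDemo {
    static final int AC_SHIFT = 48, TC_SHIFT = 32;

    static long pack(int ac, int tc) { // event word left zero for simplicity
        return ((long)(ac & 0xffff) << AC_SHIFT) | ((long)(tc & 0xffff) << TC_SHIFT);
    }

    public static void main(String[] args) {
        int parallelism = 4;
        long ctl = pack(3 - parallelism, 4 - parallelism); // 3 active, 4 total
        int u  = (int)(ctl >>> 32); // upper word holds both counts
        int ac = (short)(u >> 16);  // sign-extended active count: -1
        int tc = (short)u;          // sign-extended total count: 0
        System.out.println(ac < 0); // true: not enough active workers
        System.out.println(tc < 0); // false: enough total workers
    }
}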
+ // Instance fields
- private static int totalCountOf(int s) { return s >>> 16; }
- private static int runningCountOf(int s) { return s & shortMask; }
- private static int workerCountsFor(int t, int r) { return (t << 16) + r; }
+ /*
+ * Field layout order in this class tends to matter more than one
+ * would like. Runtime layout order is only loosely related to
+ * declaration order and may differ across JVMs, but the following
+ * empirically works OK on current JVMs.
+ */
+
+ volatile long ctl; // main pool control
+ final int parallelism; // parallelism level
+ final int localMode; // per-worker scheduling mode
+ final int submitMask; // submit queue index bound
+ int nextSeed; // for initializing worker seeds
+ volatile int runState; // shutdown status and seq
+ WorkQueue[] workQueues; // main registry
+ final Mutex lock; // for registration
+ final Condition termination; // for awaitTermination
+ final ForkJoinWorkerThreadFactory factory; // factory for new workers
+ final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
+ final AtomicLong stealCount; // collect counts when terminated
+ final AtomicInteger nextWorkerNumber; // to create worker name string
+ final String workerNamePrefix; // to create worker name string
+
+ // Creating, registering, and deregistering workers
+
+ /**
+ * Tries to create and start a worker
+ */
+ private void addWorker() {
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((wt = factory.newThread(this)) != null) {
+ wt.start();
+ return;
+ }
+ } catch (Throwable e) {
+ ex = e;
+ }
+ deregisterWorker(wt, ex); // adjust counts etc on failure
+ }
/**
- * Add delta (which may be negative) to running count. This must
- * be called before (with negative arg) and after (with positive)
- * any managed synchronization (i.e., mainly, joins)
- * @param delta the number to add
+ * Callback from ForkJoinWorkerThread constructor to assign a
+ * public name. This must be separate from registerWorker because
+ * it is called during the "super" constructor call in
+ * ForkJoinWorkerThread.
*/
- final void updateRunningCount(int delta) {
- int s;
- do;while (!casWorkerCounts(s = workerCounts, s + delta));
+ final String nextWorkerName() {
+ return workerNamePrefix.concat
+ (Integer.toString(nextWorkerNumber.addAndGet(1)));
}
/**
- * Add delta (which may be negative) to both total and running
- * count. This must be called upon creation and termination of
- * worker threads.
- * @param delta the number to add
+ * Callback from ForkJoinWorkerThread constructor to establish its
+ * poolIndex and record its WorkQueue. To avoid scanning bias due
+ * to packing entries in front of the workQueues array, we treat
+ * the array as a simple power-of-two hash table using per-thread
+ * seed as hash, expanding as needed.
+ *
+ * @param w the worker's queue
*/
- private void updateWorkerCount(int delta) {
- int d = delta + (delta << 16); // add to both lo and hi parts
- int s;
- do;while (!casWorkerCounts(s = workerCounts, s + d));
+ final void registerWorker(WorkQueue w) {
+ Mutex lock = this.lock;
+ lock.lock();
+ try {
+ WorkQueue[] ws = workQueues;
+ if (w != null && ws != null) { // skip on shutdown/failure
+ int rs, n;
+ while ((n = ws.length) < // ensure can hold total
+ (parallelism + (short)(ctl >>> TC_SHIFT) << 1))
+ workQueues = ws = Arrays.copyOf(ws, n << 1);
+ int m = n - 1;
+ int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence
+ w.seed = (s == 0) ? 1 : s; // ensure non-zero seed
+ int r = (s << 1) | 1; // use odd-numbered indices
+ while (ws[r &= m] != null) // step by approx half size
+ r += ((n >>> 1) & SQMASK) + 2;
+ w.eventCount = w.poolIndex = r; // establish before recording
+ ws[r] = w; // also update seq
+ runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN);
+ }
+ } finally {
+ lock.unlock();
+ }
}
/**
- * Lifecycle control. High word contains runState, low word
- * contains the number of workers that are (probably) executing
- * tasks. This value is atomically incremented before a worker
- * gets a task to run, and decremented when worker has no tasks
- * and cannot find any. These two fields are bundled together to
- * support correct termination triggering. Note: activeCount
- * CAS'es cheat by assuming active count is in low word, so need
- * to be modified if this changes
- */
- private volatile int runControl;
-
- // RunState values. Order among values matters
- private static final int RUNNING = 0;
- private static final int SHUTDOWN = 1;
- private static final int TERMINATING = 2;
- private static final int TERMINATED = 3;
+ * Final callback from terminating worker, as well as upon failure
+ * to construct or start a worker in addWorker. Removes record of
+ * worker from array, and adjusts counts. If pool is shutting
+ * down, tries to complete termination.
+ *
+ * @param wt the worker thread or null if addWorker failed
+ * @param ex the exception causing failure, or null if none
+ */
+ final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
+ Mutex lock = this.lock;
+ WorkQueue w = null;
+ if (wt != null && (w = wt.workQueue) != null) {
+ w.runState = -1; // ensure runState is set
+ stealCount.getAndAdd(w.totalSteals + w.nsteals);
+ int idx = w.poolIndex;
+ lock.lock();
+ try { // remove record from array
+ WorkQueue[] ws = workQueues;
+ if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
+ ws[idx] = null;
+ } finally {
+ lock.unlock();
+ }
+ }
- private static int runStateOf(int c) { return c >>> 16; }
- private static int activeCountOf(int c) { return c & shortMask; }
- private static int runControlFor(int r, int a) { return (r << 16) + a; }
+ long c; // adjust ctl counts
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
+ ((c - TC_UNIT) & TC_MASK) |
+ (c & ~(AC_MASK|TC_MASK)))));
+
+ if (!tryTerminate(false, false) && w != null) {
+ w.cancelAll(); // cancel remaining tasks
+ if (w.array != null) // suppress signal if never ran
+ signalWork(); // wake up or create replacement
+ if (ex == null) // help clean refs on way out
+ ForkJoinTask.helpExpungeStaleExceptions();
+ }
- /**
- * Try incrementing active count; fail on contention. Called by
- * workers before/during executing tasks.
- * @return true on success;
- */
- final boolean tryIncrementActiveCount() {
- int c = runControl;
- return casRunControl(c, c+1);
+ if (ex != null) // rethrow
+ U.throwException(ex);
}
+
+ // Submissions
+
/**
- * Try decrementing active count; fail on contention.
- * Possibly trigger termination on success
- * Called by workers when they can't find tasks.
- * @return true on success
- */
- final boolean tryDecrementActiveCount() {
- int c = runControl;
- int nextc = c - 1;
- if (!casRunControl(c, nextc))
- return false;
- if (canTerminateOnShutdown(nextc))
- terminateOnShutdown();
- return true;
+ * Unless shutting down, adds the given task to a submission queue
+ * at submitter's current queue index (modulo submission
+ * range). If no queue exists at the index, one is created. If
+ * the queue is busy, another index is randomly chosen. The
+ * submitMask bounds the effective number of queues to the
+ * (nearest power of two for) parallelism level.
+ *
+ * @param task the task. Caller must ensure non-null.
+ */
+ private void doSubmit(ForkJoinTask<?> task) {
+ Submitter s = submitters.get();
+ for (int r = s.seed, m = submitMask;;) {
+ WorkQueue[] ws; WorkQueue q;
+ int k = r & m & SQMASK; // use only even indices
+ if (runState < 0 || (ws = workQueues) == null || ws.length <= k)
+ throw new RejectedExecutionException(); // shutting down
+ else if ((q = ws[k]) == null) { // create new queue
+ WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE);
+ Mutex lock = this.lock; // construct outside lock
+ lock.lock();
+ try { // recheck under lock
+ int rs = runState; // to update seq
+ if (ws == workQueues && ws[k] == null) {
+ ws[k] = nq;
+ runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN));
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ else if (q.trySharedPush(task)) {
+ signalWork();
+ return;
+ }
+ else if (m > 1) { // move to a different index
+ r ^= r << 13; // same xorshift as WorkQueues
+ r ^= r >>> 17;
+ s.seed = r ^= r << 5;
+ }
+ else
+ Thread.yield(); // yield if no alternatives
+ }
}
+ // Maintaining ctl counts
+
/**
- * Return true if argument represents zero active count and
- * nonzero runstate, which is the triggering condition for
- * terminating on shutdown.
+ * Increments active count; mainly called upon return from blocking.
*/
- private static boolean canTerminateOnShutdown(int c) {
- return ((c & -c) >>> 16) != 0; // i.e. least bit is nonzero runState bit
+ final void incrementActiveCount() {
+ long c;
+ do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}
/**
- * Transition run state to at least the given state. Return true
- * if not already at least given state.
+ * Tries to activate or create a worker if too few are active.
*/
- private boolean transitionRunStateTo(int state) {
- for (;;) {
- int c = runControl;
- if (runStateOf(c) >= state)
- return false;
- if (casRunControl(c, runControlFor(state, activeCountOf(c))))
- return true;
+ final void signalWork() {
+ long c; int u;
+ while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active
+ WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p;
+ if ((e = (int)c) > 0) { // at least one waiting
+ if (ws != null && (i = e & SMASK) < ws.length &&
+ (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
+ long nc = (((long)(w.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p); // activate and release
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total
+ long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
+ ((u + UAC_UNIT) & UAC_MASK)) << 32;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ addWorker();
+ break;
+ }
+ }
+ else
+ break;
}
}
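
The waiter list released here is a Treiber stack threaded through the workers themselves (each nextWait records the previous top, and the top's index lives in ctl's low bits with a sequence number to defeat ABA). As a simplified, hedged analogue using an explicit node and a plain CAS'd reference rather than the ctl encoding:

import java.util.concurrent.atomic.AtomicReference;

// Illustrative only: same push/pop shape, none of the pool's encoding tricks.
class TreiberStack<T> {
    private static final class Node<T> {
        final T item; Node<T> next;
        Node(T item) { this.item = item; }
    }
    private final AtomicReference<Node<T>> top = new AtomicReference<>();

    void push(T item) {
        Node<T> n = new Node<>(item);
        do { n.next = top.get(); } while (!top.compareAndSet(n.next, n));
    }
    T pop() { // returns null if empty, i.e. nobody to signal
        for (Node<T> t; (t = top.get()) != null; )
            if (top.compareAndSet(t, t.next))
                return t.item;
        return null;
    }
}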
- /**
- * Controls whether to add spares to maintain parallelism
- */
- private volatile boolean maintainsParallelism;
- // Constructors
+ // Scanning for tasks
/**
- * Creates a ForkJoinPool with a pool size equal to the number of
- * processors available on the system and using the default
- * ForkJoinWorkerThreadFactory,
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
- public ForkJoinPool() {
- this(Runtime.getRuntime().availableProcessors(),
- defaultForkJoinWorkerThreadFactory);
+ final void runWorker(WorkQueue w) {
+ w.growArray(false); // initialize queue array in this thread
+ do {} while (w.runTask(scan(w)));
}
/**
- * Creates a ForkJoinPool with the indicated parellelism level
- * threads, and using the default ForkJoinWorkerThreadFactory,
- * @param parallelism the number of worker threads
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(int parallelism) {
- this(parallelism, defaultForkJoinWorkerThreadFactory);
+ * Scans for and, if found, returns one task, else possibly
+ * inactivates the worker. This method operates on single reads of
+ * volatile state and is designed to be re-invoked continuously,
+ * in part because it returns upon detecting inconsistencies,
+ * contention, or state changes that indicate possible success on
+ * re-invocation.
+ *
+ * The scan searches for tasks across a random permutation of
+ * queues (starting at a random index and stepping by a random
+ * relative prime, checking each at least once). The scan
+ * terminates upon either finding a non-empty queue, or completing
+ * the sweep. If the worker is not inactivated, it takes and
+ * returns a task from this queue. On failure to find a task, we
+ * take one of the following actions, after which the caller will
+ * retry calling this method unless terminated.
+ *
+ * * If pool is terminating, terminate the worker.
+ *
+ * * If not a complete sweep, try to release a waiting worker. If
+ * the scan terminated because the worker is inactivated, then the
+ * released worker will often be the calling worker, and it can
+ * succeed obtaining a task on the next call. Or maybe it is
+ * another worker, but with same net effect. Releasing in other
+ * cases as well ensures that we have enough workers running.
+ *
+ * * If not already enqueued, try to inactivate and enqueue the
+ * worker on wait queue. Or, if inactivating has caused the pool
+ * to be quiescent, relay to idleAwaitWork to check for
+ * termination and possibly shrink pool.
+ *
+ * * If already inactive, and the caller has run a task since the
+ * last empty scan, return (to allow rescan) unless others are
+ * also inactivated. Field WorkQueue.rescans counts down on each
+ * scan to ensure eventual inactivation and blocking.
+ *
+ * * If already enqueued and none of the above apply, park
+ * awaiting signal.
+ *
+ * @param w the worker (via its WorkQueue)
+ * @return a task, or null if none found
+ */
+ private final ForkJoinTask<?> scan(WorkQueue w) {
+ WorkQueue[] ws; // first update random seed
+ int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
+ int rs = runState, m; // volatile read order matters
+ if ((ws = workQueues) != null && (m = ws.length - 1) > 0) {
+ int ec = w.eventCount; // ec is negative if inactive
+ int step = (r >>> 16) | 1; // relative prime
+ for (int j = (m + 1) << 2; ; r += step) {
+ WorkQueue q; ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b;
+ if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 &&
+ (a = q.array) != null) { // probably nonempty
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (q.base == b && ec >= 0 && t != null &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ q.base = b + 1; // specialization of pollAt
+ return t;
+ }
+ else if ((t != null || b + 1 != q.top) &&
+ (ec < 0 || j <= m)) {
+ rs = 0; // mark scan as incomplete
+ break; // caller can retry after release
+ }
+ }
+ if (--j < 0)
+ break;
+ }
+ long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
+ if (e < 0) // decode ctl on empty scan
+ w.runState = -1; // pool is terminating
+ else if (rs == 0 || rs != runState) { // incomplete scan
+ WorkQueue v; Thread p; // try to release a waiter
+ if (e > 0 && a < 0 && w.eventCount == ec &&
+ (v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) {
+ long nc = ((long)(v.nextWait & E_MASK) |
+ ((c + AC_UNIT) & (AC_MASK|TC_MASK)));
+ if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) {
+ v.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
+ else if (ec >= 0) { // try to enqueue/inactivate
+ long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
+ w.nextWait = e;
+ w.eventCount = ec | INT_SIGN; // mark as inactive
+ if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
+ w.eventCount = ec; // unmark on CAS failure
+ else {
+ if ((ns = w.nsteals) != 0) {
+ w.nsteals = 0; // set rescans if ran task
+ w.rescans = (a > 0) ? 0 : a + parallelism;
+ w.totalSteals += ns;
+ }
+ if (a == 1 - parallelism) // quiescent
+ idleAwaitWork(w, nc, c);
+ }
+ }
+ else if (w.eventCount < 0) { // already queued
+ if ((nr = w.rescans) > 0) { // continue rescanning
+ int ac = a + parallelism;
+ if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0)
+ Thread.yield(); // yield before block
+ }
+ else {
+ Thread.interrupted(); // clear status
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt; // emulate LockSupport.park
+ if (w.eventCount < 0) // recheck
+ U.park(false, 0L);
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ }
+ }
+ }
+ return null;
}
/**
- * Creates a ForkJoinPool with parallelism equal to the number of
- * processors available on the system and using the given
- * ForkJoinWorkerThreadFactory,
- * @param factory the factory for creating new threads
- * @throws NullPointerException if factory is null
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(ForkJoinWorkerThreadFactory factory) {
- this(Runtime.getRuntime().availableProcessors(), factory);
+ * If inactivating worker w has caused the pool to become
+ * quiescent, checks for pool termination, and, so long as this is
+ * not the only worker, waits for an event for up to SHRINK_RATE
+ * nanosecs. On timeout, if ctl has not changed, terminates the
+ * worker, which will in turn wake up another worker to possibly
+ * repeat this process.
+ *
+ * @param w the calling worker
+ * @param currentCtl the ctl value triggering possible quiescence
+ * @param prevCtl the ctl value to restore if thread is terminated
+ */
+ private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
+ if (w.eventCount < 0 && !tryTerminate(false, false) &&
+ (int)prevCtl != 0 && ctl == currentCtl) {
+ Thread wt = Thread.currentThread();
+ Thread.yield(); // yield before block
+ while (ctl == currentCtl) {
+ long startTime = System.nanoTime();
+ Thread.interrupted(); // timed variant of version in scan()
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt;
+ if (ctl == currentCtl)
+ U.park(false, SHRINK_RATE);
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (ctl != currentCtl)
+ break;
+ if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
+ U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
+ w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
+ w.runState = -1; // shrink
+ break;
+ }
+ }
+ }
}
/**
- * Creates a ForkJoinPool with the given parallelism and factory.
+ * Tries to locate and execute tasks for a stealer of the given
+ * task, or in turn one of its stealers. Traces currentSteal ->
+ * currentJoin links looking for a thread working on a descendant
+ * of the given task and with a non-empty queue to steal back and
+ * execute tasks from. The first call to this method upon a
+ * waiting join will often entail scanning/search (which is OK
+ * because the joiner has nothing better to do), but this method
+ * leaves hints in workers to speed up subsequent calls. The
+ * implementation is very branchy to cope with potential
+ * inconsistencies or loops encountering chains that are stale,
+ * unknown, or so long that they are likely cyclic. All of these
+ * cases are dealt with by just retrying by caller.
*
- * @param parallelism the targeted number of worker threads
- * @param factory the factory for creating new threads
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero, or greater than implementation limit.
- * @throws NullPointerException if factory is null
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory) {
- if (parallelism <= 0 || parallelism > MAX_THREADS)
- throw new IllegalArgumentException();
- if (factory == null)
- throw new NullPointerException();
- checkPermission();
- this.factory = factory;
- this.parallelism = parallelism;
- this.maxPoolSize = MAX_THREADS;
- this.maintainsParallelism = true;
- this.poolNumber = poolNumberGenerator.incrementAndGet();
- this.workerLock = new ReentrantLock();
- this.termination = workerLock.newCondition();
- this.stealCount = new AtomicLong();
- this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>();
- // worker array and workers are lazily constructed
- }
+ * @param joiner the joining worker
+ * @param task the task to join
+ * @return true if found or ran a task (and so is immediately retryable)
+ */
+ private boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
+ WorkQueue[] ws;
+ int m, depth = MAX_HELP; // remaining chain depth
+ boolean progress = false;
+ if ((ws = workQueues) != null && (m = ws.length - 1) > 0 &&
+ task.status >= 0) {
+ ForkJoinTask<?> subtask = task; // current target
+ outer: for (WorkQueue j = joiner;;) {
+ WorkQueue stealer = null; // find stealer of subtask
+ WorkQueue v = ws[j.stealHint & m]; // try hint
+ if (v != null && v.currentSteal == subtask)
+ stealer = v;
+ else { // scan
+ for (int i = 1; i <= m; i += 2) {
+ if ((v = ws[i]) != null && v.currentSteal == subtask &&
+ v != joiner) {
+ stealer = v;
+ j.stealHint = i; // save hint
+ break;
+ }
+ }
+ if (stealer == null)
+ break;
+ }
- /**
- * Create new worker using factory.
- * @param index the index to assign worker
- * @return new worker, or null of factory failed
- */
- private ForkJoinWorkerThread createWorker(int index) {
- Thread.UncaughtExceptionHandler h = ueh;
- ForkJoinWorkerThread w = factory.newThread(this);
- if (w != null) {
- w.poolIndex = index;
- w.setDaemon(true);
- w.setAsyncMode(locallyFifo);
- w.setName("ForkJoinPool-" + poolNumber + "-worker-" + index);
- if (h != null)
- w.setUncaughtExceptionHandler(h);
+ for (WorkQueue q = stealer;;) { // try to help stealer
+ ForkJoinTask[] a; ForkJoinTask<?> t; int b;
+ if (task.status < 0)
+ break outer;
+ if ((b = q.base) - q.top < 0 && (a = q.array) != null) {
+ progress = true;
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (subtask.status < 0) // must recheck before taking
+ break outer;
+ if (t != null &&
+ q.base == b &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ q.base = b + 1;
+ joiner.runSubtask(t);
+ }
+ else if (q.base == b)
+ break outer; // possibly stalled
+ }
+ else { // descend
+ ForkJoinTask<?> next = stealer.currentJoin;
+ if (--depth <= 0 || subtask.status < 0 ||
+ next == null || next == subtask)
+ break outer; // stale, dead-end, or cyclic
+ subtask = next;
+ j = stealer;
+ break;
+ }
+ }
+ }
}
- return w;
+ return progress;
}
/**
- * Return a good size for worker array given pool size.
- * Currently requires size to be a power of two.
+ * If task is at base of some steal queue, steals and executes it.
+ *
+ * @param joiner the joining worker
+ * @param task the task
*/
- private static int arraySizeFor(int ps) {
- return ps <= 1? 1 : (1 << (32 - Integer.numberOfLeadingZeros(ps-1)));
+ private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
+ WorkQueue[] ws;
+ if ((ws = workQueues) != null) {
+ for (int j = 1; j < ws.length && task.status >= 0; j += 2) {
+ WorkQueue q = ws[j];
+ if (q != null && q.pollFor(task)) {
+ joiner.runSubtask(task);
+ break;
+ }
+ }
+ }
}
- public static ForkJoinWorkerThread[] copyOfWorkers(ForkJoinWorkerThread[] original, int newLength) {
- ForkJoinWorkerThread[] copy = new ForkJoinWorkerThread[newLength];
- System.arraycopy(original, 0, copy, 0, Math.min(newLength, original.length));
- return copy;
+ /**
+ * Tries to decrement active count (sometimes implicitly) and
+ * possibly release or create a compensating worker in preparation
+ * for blocking. Fails on contention or termination. Otherwise,
+ * adds a new thread if no idle workers are available and either
+ * the pool would become completely starved or (at least half
+ * starved, and fewer than 50% spares exist, and there is at least
+ * one task apparently available). Even though the availability
+ * check requires a full scan, it is worthwhile in reducing false
+ * alarms.
+ *
+ * @param task if non-null, a task being waited for
+ * @param blocker if non-null, a blocker being waited for
+ * @return true if the caller can block, else should recheck and retry
+ */
+ final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) {
+ int pc = parallelism, e;
+ long c = ctl;
+ WorkQueue[] ws = workQueues;
+ if ((e = (int)c) >= 0 && ws != null) {
+ int u, a, ac, hc;
+ int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc;
+ boolean replace = false;
+ if ((a = u >> UAC_SHIFT) <= 0) {
+ if ((ac = a + pc) <= 1)
+ replace = true;
+ else if ((e > 0 || (task != null &&
+ ac <= (hc = pc >>> 1) && tc < pc + hc))) {
+ WorkQueue w;
+ for (int j = 0; j < ws.length; ++j) {
+ if ((w = ws[j]) != null && !w.isEmpty()) {
+ replace = true;
+ break; // in compensation range and tasks available
+ }
+ }
+ }
+ }
+ if ((task == null || task.status >= 0) && // recheck need to block
+ (blocker == null || !blocker.isReleasable()) && ctl == c) {
+ if (!replace) { // no compensation
+ long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ return true;
+ }
+ else if (e != 0) { // release an idle worker
+ WorkQueue w; Thread p; int i;
+ if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ (c & (AC_MASK|TC_MASK)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ return true;
+ }
+ }
+ }
+ else if (tc < MAX_CAP) { // create replacement
+ long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ addWorker();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
}
/**
- * Create or resize array if necessary to hold newLength.
- * Call only under exlusion or lock
- * @return the array
- */
- private ForkJoinWorkerThread[] ensureWorkerArrayCapacity(int newLength) {
- ForkJoinWorkerThread[] ws = workers;
- if (ws == null)
- return workers = new ForkJoinWorkerThread[arraySizeFor(newLength)];
- else if (newLength > ws.length)
- return workers = copyOfWorkers(ws, arraySizeFor(newLength));
- else
- return ws;
+ * Helps and/or blocks until the given task is done.
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
+ */
+ final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ long startTime = 0L;
+ for (int k = 0, s; ; ++k) {
+ if ((joiner.isEmpty() ? // try to help
+ !tryHelpStealer(joiner, task) :
+ !joiner.tryRemoveAndExec(task))) {
+ if (k == 0) {
+ startTime = System.nanoTime();
+ tryPollForAndExec(joiner, task); // check uncommon case
+ }
+ else if ((k & (MAX_HELP - 1)) == 0 &&
+ System.nanoTime() - startTime >= COMPENSATION_DELAY &&
+ tryCompensate(task, null)) {
+ if (task.trySetSignal() && task.status >= 0) {
+ synchronized (task) {
+ if (task.status >= 0) {
+ try { // see ForkJoinTask
+ task.wait(); // for explanation
+ } catch (InterruptedException ie) {
+ }
+ }
+ else
+ task.notifyAll();
+ }
+ }
+ long c; // re-activate
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ }
+ if ((s = task.status) < 0) {
+ joiner.currentJoin = prevJoin;
+ return s;
+ }
+ else if ((k & (MAX_HELP - 1)) == MAX_HELP >>> 1)
+ Thread.yield(); // for politeness
+ }
}
/**
- * Try to shrink workers into smaller array after one or more terminate
+ * Stripped-down variant of awaitJoin used by timed joins. Tries
+ * to help join only while there is continuous progress. (Caller
+ * will then enter a timed wait.)
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
*/
- private void tryShrinkWorkerArray() {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- int len = ws.length;
- int last = len - 1;
- while (last >= 0 && ws[last] == null)
- --last;
- int newLength = arraySizeFor(last+1);
- if (newLength < len)
- workers = copyOfWorkers(ws, newLength);
+ final int helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s;
+ while ((s = task.status) >= 0 &&
+ (joiner.isEmpty() ?
+ tryHelpStealer(joiner, task) :
+ joiner.tryRemoveAndExec(task)))
+ ;
+ return s;
+ }
+
+ /**
+ * Returns a (probably) non-empty steal queue, if one is found
+ * during a random, then cyclic scan, else null. This method must
+ * be retried by caller if, by the time it tries to use the queue,
+ * it is empty.
+ */
+ private WorkQueue findNonEmptyStealQueue(WorkQueue w) {
+ // Similar to loop in scan(), but ignoring submissions
+ int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
+ int step = (r >>> 16) | 1;
+ for (WorkQueue[] ws;;) {
+ int rs = runState, m;
+ if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
+ return null;
+ for (int j = (m + 1) << 2; ; r += step) {
+ WorkQueue q = ws[((r << 1) | 1) & m];
+ if (q != null && !q.isEmpty())
+ return q;
+ else if (--j < 0) {
+ if (runState == rs)
+ return null;
+ break;
+ }
+ }
}
}
/**
- * Initialize workers if necessary
- */
- final void ensureWorkerInitialization() {
- ForkJoinWorkerThread[] ws = workers;
- if (ws == null) {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ws = workers;
- if (ws == null) {
- int ps = parallelism;
- ws = ensureWorkerArrayCapacity(ps);
- for (int i = 0; i < ps; ++i) {
- ForkJoinWorkerThread w = createWorker(i);
- if (w != null) {
- ws[i] = w;
- w.start();
- updateWorkerCount(1);
- }
- }
+ * Runs tasks until {@code isQuiescent()}. We piggyback on
+ * active count ctl maintenance, but rather than blocking
+ * when tasks cannot be found, we rescan until all others cannot
+ * find tasks either.
+ */
+ final void helpQuiescePool(WorkQueue w) {
+ for (boolean active = true;;) {
+ if (w.base - w.top < 0)
+ w.runLocalTasks(); // exhaust local queue
+ WorkQueue q = findNonEmptyStealQueue(w);
+ if (q != null) {
+ ForkJoinTask<?> t; int b;
+ if (!active) { // re-establish active count
+ long c;
+ active = true;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
+ w.runSubtask(t);
+ }
+ else {
+ long c;
+ if (active) { // decrement active count without queuing
+ active = false;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c -= AC_UNIT));
+ }
+ else
+ c = ctl; // re-increment on exit
+ if ((int)(c >> AC_SHIFT) + parallelism == 0) {
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ break;
}
- } finally {
- lock.unlock();
}
}
}
/**
- * Worker creation and startup for threads added via setParallelism.
+ * Gets and removes a local or stolen task for the given worker.
+ *
+ * @return a task, if available
+ */
+ final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
+ for (ForkJoinTask<?> t;;) {
+ WorkQueue q; int b;
+ if ((t = w.nextLocalTask()) != null)
+ return t;
+ if ((q = findNonEmptyStealQueue(w)) == null)
+ return null;
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
+ return t;
+ }
+ }
+
+ /**
+ * Returns the approximate (non-atomic) number of idle threads per
+ * active thread to offset steal queue size for method
+ * ForkJoinTask.getSurplusQueuedTaskCount().
*/
- private void createAndStartAddedWorkers() {
- resumeAllSpares(); // Allow spares to convert to nonspare
- int ps = parallelism;
- ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(ps);
- int len = ws.length;
- // Sweep through slots, to keep lowest indices most populated
- int k = 0;
- while (k < len) {
- if (ws[k] != null) {
- ++k;
- continue;
+ final int idlePerActive() {
+ // Approximate at powers of two for small values, saturate past 4
+ int p = parallelism;
+ int a = p + (int)(ctl >> AC_SHIFT);
+ return (a > (p >>>= 1) ? 0 :
+ a > (p >>>= 1) ? 1 :
+ a > (p >>>= 1) ? 2 :
+ a > (p >>>= 1) ? 4 :
+ 8);
+ }
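
A worked check of the saturating ladder above, assuming parallelism 8: 6 active workers map to 0 idle per active, 3 to 1, 2 to 2, 1 to 4, and 0 to 8:

    class IdlePerActiveDemo {
        static int idlePerActive(int parallelism, int active) {
            int p = parallelism, a = active;
            return (a > (p >>>= 1) ? 0 :
                    a > (p >>>= 1) ? 1 :
                    a > (p >>>= 1) ? 2 :
                    a > (p >>>= 1) ? 4 :
                    8);
        }
        public static void main(String[] args) {
            for (int a : new int[] {6, 3, 2, 1, 0})
                System.out.println(a + " active -> " + idlePerActive(8, a));
        }
    }
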
+
+ // Termination
+
+ /**
+ * Possibly initiates and/or completes termination. The caller
+ * triggering termination runs three passes through workQueues:
+ * (0) Setting termination status, followed by wakeups of queued
+ * workers; (1) cancelling all tasks; (2) interrupting lagging
+ * threads (likely in external tasks, but possibly also blocked in
+ * joins). Each pass repeats previous steps because of potential
+ * lagging thread creation.
+ *
+ * @param now if true, unconditionally terminate, else only
+ * if no work and no active workers
+ * @param enable if true, enable shutdown when next possible
+ * @return true if now terminating or terminated
+ */
+ private boolean tryTerminate(boolean now, boolean enable) {
+ Mutex lock = this.lock;
+ for (long c;;) {
+ if (((c = ctl) & STOP_BIT) != 0) { // already terminating
+ if ((short)(c >>> TC_SHIFT) == -parallelism) {
+ lock.lock(); // don't need try/finally
+ termination.signalAll(); // signal when 0 workers
+ lock.unlock();
+ }
+ return true;
}
- int s = workerCounts;
- int tc = totalCountOf(s);
- int rc = runningCountOf(s);
- if (rc >= ps || tc >= ps)
- break;
- if (casWorkerCounts (s, workerCountsFor(tc+1, rc+1))) {
- ForkJoinWorkerThread w = createWorker(k);
- if (w != null) {
- ws[k++] = w;
- w.start();
+ if (runState >= 0) { // not yet enabled
+ if (!enable)
+ return false;
+ lock.lock();
+ runState |= SHUTDOWN;
+ lock.unlock();
+ }
+ if (!now) { // check if idle & no tasks
+ if ((int)(c >> AC_SHIFT) != -parallelism ||
+ hasQueuedSubmissions())
+ return false;
+ // Check for unqueued inactive workers. One pass suffices.
+ WorkQueue[] ws = workQueues; WorkQueue w;
+ if (ws != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.eventCount >= 0)
+ return false;
+ }
}
- else {
- updateWorkerCount(-1); // back out on failed creation
- break;
+ }
+ if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
+ for (int pass = 0; pass < 3; ++pass) {
+ WorkQueue[] ws = workQueues;
+ if (ws != null) {
+ WorkQueue w;
+ int n = ws.length;
+ for (int i = 0; i < n; ++i) {
+ if ((w = ws[i]) != null) {
+ w.runState = -1;
+ if (pass > 0) {
+ w.cancelAll();
+ if (pass > 1)
+ w.interruptOwner();
+ }
+ }
+ }
+ // Wake up workers parked on event queue
+ int i, e; long cc; Thread p;
+ while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
+ (i = e & SMASK) < n &&
+ (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ ((cc + AC_UNIT) & AC_MASK) |
+ (cc & (TC_MASK|STOP_BIT)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, cc, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ w.runState = -1;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
}
}
}
}
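
The ctl tests above all decode the same packed 64-bit word. A sketch of the two extractions, with the shift constants restated from this version of the class (AC occupies the top 16 bits, TC the next 16; both are stored as deltas from the configured parallelism):

    class CtlDecodeDemo {
        static final int AC_SHIFT = 48, TC_SHIFT = 32; // as declared privately

        static int activeDelta(long ctl) {   // active count minus parallelism
            return (int) (ctl >> AC_SHIFT);  // arithmetic shift keeps the sign
        }
        static int totalDelta(long ctl) {    // total count minus parallelism
            return (short) (ctl >>> TC_SHIFT); // short cast re-signs 16 bits
        }
        // Right after construction both deltas are -parallelism, so
        // activeDelta(ctl) + parallelism == 0: the pool reads as quiescent.
    }
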
- // Execution methods
+ // Exported methods
+
+ // Constructors
+
+ /**
+ * Creates a {@code ForkJoinPool} with parallelism equal to {@link
+ * java.lang.Runtime#availableProcessors}, using the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool() {
+ this(Runtime.getRuntime().availableProcessors(),
+ defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the indicated parallelism
+ * level, the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @param parallelism the parallelism level
+ * @throws IllegalArgumentException if parallelism is less than or
+ * equal to zero, or greater than the implementation limit
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism) {
+ this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
+ }
/**
- * Common code for execute, invoke and submit
+ * Creates a {@code ForkJoinPool} with the given parameters.
+ *
+ * @param parallelism the parallelism level. For default value,
+ * use {@link java.lang.Runtime#availableProcessors}.
+ * @param factory the factory for creating new threads. For default value,
+ * use {@link #defaultForkJoinWorkerThreadFactory}.
+ * @param handler the handler for internal worker threads that
+ * terminate due to unrecoverable errors encountered while executing
+ * tasks. For default value, use {@code null}.
+ * @param asyncMode if true,
+ * establishes local first-in-first-out scheduling mode for forked
+ * tasks that are never joined. This mode may be more appropriate
+ * than default locally stack-based mode in applications in which
+ * worker threads only process event-style asynchronous tasks.
+ * For default value, use {@code false}.
+ * @throws IllegalArgumentException if parallelism is less than or
+ * equal to zero, or greater than the implementation limit
+ * @throws NullPointerException if the factory is null
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
- private <T> void doSubmit(ForkJoinTask<T> task) {
- if (isShutdown())
- throw new RejectedExecutionException();
- if (workers == null)
- ensureWorkerInitialization();
- submissionQueue.offer(task);
- signalIdleWorkers();
+ public ForkJoinPool(int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ Thread.UncaughtExceptionHandler handler,
+ boolean asyncMode) {
+ checkPermission();
+ if (factory == null)
+ throw new NullPointerException();
+ if (parallelism <= 0 || parallelism > MAX_CAP)
+ throw new IllegalArgumentException();
+ this.parallelism = parallelism;
+ this.factory = factory;
+ this.ueh = handler;
+ this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE;
+ long np = (long)(-parallelism); // offset ctl counts
+ this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+ // Use nearest power of 2 for workQueues size. See Hacker's Delight, sec 3.2.
+ int n = parallelism - 1;
+ n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
+ int size = (n + 1) << 1; // #slots = 2*#workers
+ this.submitMask = size - 1; // room for max # of submit queues
+ this.workQueues = new WorkQueue[size];
+ this.termination = (this.lock = new Mutex()).newCondition();
+ this.stealCount = new AtomicLong();
+ this.nextWorkerNumber = new AtomicInteger();
+ int pn = poolNumberGenerator.incrementAndGet();
+ StringBuilder sb = new StringBuilder("ForkJoinPool-");
+ sb.append(Integer.toString(pn));
+ sb.append("-worker-");
+ this.workerNamePrefix = sb.toString();
+ lock.lock();
+ this.runState = 1; // set init flag
+ lock.unlock();
}
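
The bit-smearing in the constructor rounds up to a power of two; in isolation (valid for 1 <= v <= 2^30):

    class NextPow2Demo {
        static int nextPowerOfTwo(int v) {   // e.g. 6 -> 8, 8 -> 8, 9 -> 16
            int n = v - 1;                   // so exact powers map to themselves
            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
            return n + 1;                    // all low bits set, then +1 carries
        }
    }
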
+ // Execution methods
+
/**
- * Performs the given task; returning its result upon completion
+ * Performs the given task, returning its result upon completion.
+ * If the computation encounters an unchecked Exception or Error,
+ * it is rethrown as the outcome of this invocation. Rethrown
+ * exceptions behave in the same way as regular exceptions, but,
+ * when possible, contain stack traces (as displayed for example
+ * using {@code ex.printStackTrace()}) of both the current thread
+ * as well as the thread actually encountering the exception;
+ * minimally only the latter.
+ *
* @param task the task
* @return the task's result
- * @throws NullPointerException if task is null
- * @throws RejectedExecutionException if pool is shut down
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
doSubmit(task);
return task.join();
}
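
Typical client-side use of invoke(): submit a divide-and-conquer task from a non-pool thread and block for its result. The naive Fibonacci, and its lack of a sequential cutoff, are illustrative only:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    class Fib extends RecursiveTask<Long> {
        final int n;
        Fib(int n) { this.n = n; }
        @Override protected Long compute() {
            if (n <= 1)
                return (long) n;
            Fib f1 = new Fib(n - 1);
            f1.fork();                       // run one half asynchronously
            return new Fib(n - 2).compute() + f1.join();
        }
    }
    // long r = new ForkJoinPool().invoke(new Fib(20)); // blocks for the result
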
/**
* Arranges for (asynchronous) execution of the given task.
+ *
* @param task the task
- * @throws NullPointerException if task is null
- * @throws RejectedExecutionException if pool is shut down
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
*/
- public <T> void execute(ForkJoinTask<T> task) {
+ public void execute(ForkJoinTask<?> task) {
+ if (task == null)
+ throw new NullPointerException();
doSubmit(task);
}
// AbstractExecutorService methods
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public void execute(Runnable task) {
- doSubmit(new AdaptedRunnable<Void>(task, null));
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
+ doSubmit(job);
+ }
+
+ /**
+ * Submits a ForkJoinTask for execution.
+ *
+ * @param task the task to submit
+ * @return the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ doSubmit(task);
+ return task;
}
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public <T> ForkJoinTask<T> submit(Callable<T> task) {
- ForkJoinTask<T> job = new AdaptedCallable<T>(task);
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
doSubmit(job);
return job;
}
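
Since every ForkJoinTask is also a Future, the returned job supports the usual submit-then-get idiom unchanged (SubmitDemo is illustrative):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class SubmitDemo {
        static int run(ForkJoinPool pool) throws Exception {
            ForkJoinTask<Integer> f = pool.submit(new Callable<Integer>() {
                public Integer call() { return 6 * 7; }
            });
            return f.get();      // or f.join() to avoid checked exceptions
        }
    }
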
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
- ForkJoinTask<T> job = new AdaptedRunnable<T>(task, result);
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
doSubmit(job);
return job;
}
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public ForkJoinTask<?> submit(Runnable task) {
- ForkJoinTask<Void> job = new AdaptedRunnable<Void>(task, null);
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
doSubmit(job);
return job;
}
/**
- * Adaptor for Runnables. This implements RunnableFuture
- * to be compliant with AbstractExecutorService constraints
+ * @throws NullPointerException {@inheritDoc}
+ * @throws RejectedExecutionException {@inheritDoc}
*/
- static final class AdaptedRunnable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Runnable runnable;
- final T resultOnCompletion;
- T result;
- AdaptedRunnable(Runnable runnable, T result) {
- if (runnable == null) throw new NullPointerException();
- this.runnable = runnable;
- this.resultOnCompletion = result;
- }
- public T getRawResult() { return result; }
- public void setRawResult(T v) { result = v; }
- public boolean exec() {
- runnable.run();
- result = resultOnCompletion;
- return true;
- }
- public void run() { invoke(); }
- }
-
- /**
- * Adaptor for Callables
- */
- static final class AdaptedCallable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Callable<T> callable;
- T result;
- AdaptedCallable(Callable<T> callable) {
- if (callable == null) throw new NullPointerException();
- this.callable = callable;
- }
- public T getRawResult() { return result; }
- public void setRawResult(T v) { result = v; }
- public boolean exec() {
- try {
- result = callable.call();
- return true;
- } catch (Error err) {
- throw err;
- } catch (RuntimeException rex) {
- throw rex;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- public void run() { invoke(); }
- }
-
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
- ArrayList<ForkJoinTask<T>> ts =
- new ArrayList<ForkJoinTask<T>>(tasks.size());
- for (Callable<T> c : tasks)
- ts.add(new AdaptedCallable<T>(c));
- invoke(new InvokeAll<T>(ts));
- return (List<Future<T>>)(List)ts;
- }
-
- static final class InvokeAll<T> extends RecursiveAction {
- final ArrayList<ForkJoinTask<T>> tasks;
- InvokeAll(ArrayList<ForkJoinTask<T>> tasks) { this.tasks = tasks; }
- public void compute() {
- try { invokeAll(tasks); } catch(Exception ignore) {}
+ // In previous versions of this class, this method constructed
+ // a task to run ForkJoinTask.invokeAll, but now external
+ // invocation of multiple tasks is at least as efficient.
+ List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
+ // Workaround needed because method wasn't declared with
+ // wildcards in return type but should have been.
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ List<Future<T>> futures = (List<Future<T>>) (List) fs;
+
+ boolean done = false;
+ try {
+ for (Callable<T> t : tasks) {
+ ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
+ doSubmit(f);
+ fs.add(f);
+ }
+ for (ForkJoinTask<T> f : fs)
+ f.quietlyJoin();
+ done = true;
+ return futures;
+ } finally {
+ if (!done)
+ for (ForkJoinTask<T> f : fs)
+ f.cancel(false);
}
}
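
Because the loop above quietly joins every task before returning, the futures handed back are already complete. A usage sketch in the standard ExecutorService style (InvokeAllDemo is illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.Future;

    class InvokeAllDemo {
        static List<Integer> squares(ForkJoinPool pool) throws Exception {
            List<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
            for (int i = 0; i < 4; ++i) {
                final int k = i;
                tasks.add(new Callable<Integer>() {
                    public Integer call() { return k * k; }
                });
            }
            List<Integer> out = new ArrayList<Integer>();
            for (Future<Integer> f : pool.invokeAll(tasks))
                out.add(f.get());        // already done; get() won't block
            return out;
        }
    }
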
- // Configuration and status settings and queries
-
/**
- * Returns the factory used for constructing new workers
+ * Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
@@ -674,92 +2329,17 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
- * @return the handler, or null if none
- */
- public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
- Thread.UncaughtExceptionHandler h;
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- h = ueh;
- } finally {
- lock.unlock();
- }
- return h;
- }
-
- /**
- * Sets the handler for internal worker threads that terminate due
- * to unrecoverable errors encountered while executing tasks.
- * Unless set, the current default or ThreadGroup handler is used
- * as handler.
*
- * @param h the new handler
- * @return the old handler, or null if none
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * @return the handler, or {@code null} if none
*/
- public Thread.UncaughtExceptionHandler
- setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler h) {
- checkPermission();
- Thread.UncaughtExceptionHandler old = null;
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- old = ueh;
- ueh = h;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null)
- w.setUncaughtExceptionHandler(h);
- }
- }
- } finally {
- lock.unlock();
- }
- return old;
- }
-
-
- /**
- * Sets the target paralleism level of this pool.
- * @param parallelism the target parallelism
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero or greater than maximum size bounds.
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public void setParallelism(int parallelism) {
- checkPermission();
- if (parallelism <= 0 || parallelism > maxPoolSize)
- throw new IllegalArgumentException();
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- if (!isTerminating()) {
- int p = this.parallelism;
- this.parallelism = parallelism;
- if (parallelism > p)
- createAndStartAddedWorkers();
- else
- trimSpares();
- }
- } finally {
- lock.unlock();
- }
- signalIdleWorkers();
+ public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
+ return ueh;
}
/**
- * Returns the targeted number of worker threads in this pool.
+ * Returns the targeted parallelism level of this pool.
*
- * @return the targeted number of worker threads in this pool
+ * @return the targeted parallelism level of this pool
*/
public int getParallelism() {
return parallelism;
@@ -767,141 +2347,71 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
/**
* Returns the number of worker threads that have started but not
- * yet terminated. This result returned by this method may differ
- * from <code>getParallelism</code> when threads are created to
+ * yet terminated. The result returned by this method may differ
+ * from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
- return totalCountOf(workerCounts);
+ return parallelism + (short)(ctl >>> TC_SHIFT);
}
/**
- * Returns the maximum number of threads allowed to exist in the
- * pool, even if there are insufficient unblocked running threads.
- * @return the maximum
- */
- public int getMaximumPoolSize() {
- return maxPoolSize;
- }
-
- /**
- * Sets the maximum number of threads allowed to exist in the
- * pool, even if there are insufficient unblocked running threads.
- * Setting this value has no effect on current pool size. It
- * controls construction of new threads.
- * @throws IllegalArgumentException if negative or greater then
- * internal implementation limit.
- */
- public void setMaximumPoolSize(int newMax) {
- if (newMax < 0 || newMax > MAX_THREADS)
- throw new IllegalArgumentException();
- maxPoolSize = newMax;
- }
-
-
- /**
- * Returns true if this pool dynamically maintains its target
- * parallelism level. If false, new threads are added only to
- * avoid possible starvation.
- * This setting is by default true;
- * @return true if maintains parallelism
- */
- public boolean getMaintainsParallelism() {
- return maintainsParallelism;
- }
-
- /**
- * Sets whether this pool dynamically maintains its target
- * parallelism level. If false, new threads are added only to
- * avoid possible starvation.
- * @param enable true to maintains parallelism
- */
- public void setMaintainsParallelism(boolean enable) {
- maintainsParallelism = enable;
- }
-
- /**
- * Establishes local first-in-first-out scheduling mode for forked
- * tasks that are never joined. This mode may be more appropriate
- * than default locally stack-based mode in applications in which
- * worker threads only process asynchronous tasks. This method is
- * designed to be invoked only when pool is quiescent, and
- * typically only before any tasks are submitted. The effects of
- * invocations at ather times may be unpredictable.
- *
- * @param async if true, use locally FIFO scheduling
- * @return the previous mode.
- */
- public boolean setAsyncMode(boolean async) {
- boolean oldMode = locallyFifo;
- locallyFifo = async;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.setAsyncMode(async);
- }
- }
- return oldMode;
- }
-
- /**
- * Returns true if this pool uses local first-in-first-out
+ * Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
- * @return true if this pool uses async mode.
+ * @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
- return locallyFifo;
+ return localMode != 0;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
- * synchronization.
+ * synchronization. This method may overestimate the
+ * number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
- return runningCountOf(workerCounts);
+ int rc = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
- * @return the number of active threads.
+ *
+ * @return the number of active threads
*/
public int getActiveThreadCount() {
- return activeCountOf(runControl);
- }
-
- /**
- * Returns an estimate of the number of threads that are currently
- * idle waiting for tasks. This method may underestimate the
- * number of idle threads.
- * @return the number of idle threads.
- */
- final int getIdleThreadCount() {
- int c = runningCountOf(workerCounts) - activeCountOf(runControl);
- return (c <= 0)? 0 : c;
+ int r = parallelism + (int)(ctl >> AC_SHIFT);
+ return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
- * Returns true if all worker threads are currently idle. An idle
- * worker is one that cannot obtain a task to execute because none
- * are available to steal from other threads, and there are no
- * pending submissions to the pool. This method is conservative:
- * It might not return true immediately upon idleness of all
- * threads, but will eventually become true if threads remain
- * inactive.
- * @return true if all threads are currently idle
+ * Returns {@code true} if all worker threads are currently idle.
+ * An idle worker is one that cannot obtain a task to execute
+ * because none are available to steal from other threads, and
+ * there are no pending submissions to the pool. This method is
+ * conservative; it might not return {@code true} immediately upon
+ * idleness of all threads, but will eventually become true if
+ * threads remain inactive.
+ *
+ * @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
- return activeCountOf(runControl) == 0;
+ return (int)(ctl >> AC_SHIFT) + parallelism == 0;
}
/**
@@ -909,23 +2419,22 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
- * tuning fork/join programs: In general, steal counts should be
+ * tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
- * @return the number of steals.
+ *
+ * @return the number of steals
*/
public long getStealCount() {
- return stealCount.get();
- }
-
- /**
- * Accumulate steal count from a worker. Call only
- * when worker known to be idle.
- */
- private void updateStealCount(ForkJoinWorkerThread w) {
- int sc = w.getAndClearStealCount();
- if (sc != 0)
- stealCount.addAndGet(sc);
+ long count = stealCount.get();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.totalSteals;
+ }
+ }
+ return count;
}
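
These counters are estimates intended for monitoring and tuning rather than for synchronization decisions; a periodic sampler might read them like this (PoolStats is illustrative):

    import java.util.concurrent.ForkJoinPool;

    class PoolStats {
        static void log(ForkJoinPool pool) {
            System.out.printf("size=%d active=%d running=%d steals=%d tasks=%d%n",
                              pool.getPoolSize(),
                              pool.getActiveThreadCount(),
                              pool.getRunningThreadCount(),
                              pool.getStealCount(),
                              pool.getQueuedTaskCount());
        }
    }
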
/**
@@ -935,77 +2444,106 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
- * @return the number of queued tasks.
+ *
+ * @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- count += t.getQueueSize();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
}
}
return count;
}
/**
- * Returns an estimate of the number tasks submitted to this pool
- * that have not yet begun executing. This method takes time
- * proportional to the number of submissions.
- * @return the number of queued submissions.
+ * Returns an estimate of the number of tasks submitted to this
+ * pool that have not yet begun executing. This method may take
+ * time proportional to the number of submissions.
+ *
+ * @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
- return submissionQueue.size();
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
}
/**
- * Returns true if there are any tasks submitted to this pool
- * that have not yet begun executing.
- * @return <code>true</code> if there are any queued submissions.
+ * Returns {@code true} if there are any tasks submitted to this
+ * pool that have not yet begun executing.
+ *
+ * @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
- return !submissionQueue.isEmpty();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && !w.isEmpty())
+ return true;
+ }
+ }
+ return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
- * @return the next submission, or null if none
+ *
+ * @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
- return submissionQueue.poll();
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && (t = w.poll()) != null)
+ return t;
+ }
+ }
+ return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
- * artifically generated or wrapped tasks. This method id designed
- * to be invoked only when the pool is known to be
+ * artificially generated or wrapped tasks. This method is
+ * designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
- * to collection <tt>c</tt> may result in elements being in
+ * to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
+ *
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
- protected int drainTasksTo(Collection<ForkJoinTask<?>> c) {
- int n = submissionQueue.drainTo(c);
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
+ protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null)
- n += w.drainTasksTo(c);
+ if ((w = ws[i]) != null) {
+ while ((t = w.poll()) != null) {
+ c.add(t);
+ ++count;
+ }
+ }
}
}
- return n;
+ return count;
}
/**
@@ -1016,101 +2554,124 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* @return a string identifying this pool, as well as its state
*/
public String toString() {
- int ps = parallelism;
- int wc = workerCounts;
- int rc = runControl;
- long st = getStealCount();
- long qt = getQueuedTaskCount();
- long qs = getQueuedSubmissionCount();
+ // Use a single pass through workQueues to collect counts
+ long qt = 0L, qs = 0L; int rc = 0;
+ long st = stealCount.get();
+ long c = ctl;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ int size = w.queueSize();
+ if ((i & 1) == 0)
+ qs += size;
+ else {
+ qt += size;
+ st += w.totalSteals;
+ if (w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ }
+ }
+ int pc = parallelism;
+ int tc = pc + (short)(c >>> TC_SHIFT);
+ int ac = pc + (int)(c >> AC_SHIFT);
+ if (ac < 0) // ignore transient negative
+ ac = 0;
+ String level;
+ if ((c & STOP_BIT) != 0)
+ level = (tc == 0) ? "Terminated" : "Terminating";
+ else
+ level = runState < 0 ? "Shutting down" : "Running";
return super.toString() +
- "[" + runStateToString(runStateOf(rc)) +
- ", parallelism = " + ps +
- ", size = " + totalCountOf(wc) +
- ", active = " + activeCountOf(rc) +
- ", running = " + runningCountOf(wc) +
+ "[" + level +
+ ", parallelism = " + pc +
+ ", size = " + tc +
+ ", active = " + ac +
+ ", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
- private static String runStateToString(int rs) {
- switch(rs) {
- case RUNNING: return "Running";
- case SHUTDOWN: return "Shutting down";
- case TERMINATING: return "Terminating";
- case TERMINATED: return "Terminated";
- default: throw new Error("Unknown run state");
- }
- }
-
- // lifecycle control
-
/**
* Initiates an orderly shutdown in which previously submitted
* tasks are executed, but no new tasks will be accepted.
* Invocation has no additional effect if already shut down.
* Tasks that are in the process of being submitted concurrently
* during the course of this method may or may not be rejected.
+ *
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
- transitionRunStateTo(SHUTDOWN);
- if (canTerminateOnShutdown(runControl))
- terminateOnShutdown();
+ tryTerminate(false, true);
}
/**
- * Attempts to stop all actively executing tasks, and cancels all
- * waiting tasks. Tasks that are in the process of being
- * submitted or executed concurrently during the course of this
- * method may or may not be rejected. Unlike some other executors,
- * this method cancels rather than collects non-executed tasks
- * upon termination, so always returns an empty list. However, you
- * can use method <code>drainTasksTo</code> before invoking this
- * method to transfer unexecuted tasks to another collection.
+ * Attempts to cancel and/or stop all tasks, and reject all
+ * subsequently submitted tasks. Tasks that are in the process of
+ * being submitted or executed concurrently during the course of
+ * this method may or may not be rejected. This method cancels
+ * both existing and unexecuted tasks, in order to permit
+ * termination in the presence of task dependencies. So the method
+ * always returns an empty list (unlike the case for some other
+ * Executors).
+ *
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
- terminate();
+ tryTerminate(true, true);
return Collections.emptyList();
}
/**
- * Returns <code>true</code> if all tasks have completed following shut down.
+ * Returns {@code true} if all tasks have completed following shut down.
*
- * @return <code>true</code> if all tasks have completed following shut down
+ * @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
- return runStateOf(runControl) == TERMINATED;
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) == -parallelism);
}
/**
- * Returns <code>true</code> if the process of termination has
- * commenced but possibly not yet completed.
+ * Returns {@code true} if the process of termination has
+ * commenced but not yet completed. This method may be useful for
+ * debugging. A return of {@code true} reported a sufficient
+ * period after shutdown may indicate that submitted tasks have
+ * ignored or suppressed interruption, or are waiting for IO,
+ * causing this executor not to properly terminate. (See the
+ * advisory notes for class {@link ForkJoinTask} stating that
+ * tasks should not normally entail blocking operations. But if
+ * they do, they must abort them on interrupt.)
*
- * @return <code>true</code> if terminating
+ * @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
- return runStateOf(runControl) >= TERMINATING;
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) != -parallelism);
}
/**
- * Returns <code>true</code> if this pool has been shut down.
+ * Returns {@code true} if this pool has been shut down.
*
- * @return <code>true</code> if this pool has been shut down
+ * @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
- return runStateOf(runControl) >= SHUTDOWN;
+ return runState < 0;
}
/**
@@ -1120,14 +2681,14 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
- * @return <code>true</code> if this executor terminated and
- * <code>false</code> if the timeout elapsed before termination
+ * @return {@code true} if this executor terminated and
+ * {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
long nanos = unit.toNanos(timeout);
- final ReentrantLock lock = this.workerLock;
+ final Mutex lock = this.lock;
lock.lock();
try {
for (;;) {
@@ -1142,729 +2703,189 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
}
}
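
The lifecycle methods compose into the conventional orderly-shutdown idiom, unchanged from other ExecutorServices (the 60-second grace period is an arbitrary choice):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.TimeUnit;

    class ShutdownDemo {
        static void shutdownAndWait(ForkJoinPool pool)
                throws InterruptedException {
            pool.shutdown();                          // refuse new submissions
            if (!pool.awaitTermination(60, TimeUnit.SECONDS))
                pool.shutdownNow();                   // cancel stragglers
        }
    }
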
- // Shutdown and termination support
-
- /**
- * Callback from terminating worker. Null out the corresponding
- * workers slot, and if terminating, try to terminate, else try to
- * shrink workers array.
- * @param w the worker
- */
- final void workerTerminated(ForkJoinWorkerThread w) {
- updateStealCount(w);
- updateWorkerCount(-1);
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- int idx = w.poolIndex;
- if (idx >= 0 && idx < ws.length && ws[idx] == w)
- ws[idx] = null;
- if (totalCountOf(workerCounts) == 0) {
- terminate(); // no-op if already terminating
- transitionRunStateTo(TERMINATED);
- termination.signalAll();
- }
- else if (!isTerminating()) {
- tryShrinkWorkerArray();
- tryResumeSpare(true); // allow replacement
- }
- }
- } finally {
- lock.unlock();
- }
- signalIdleWorkers();
- }
-
- /**
- * Initiate termination.
- */
- private void terminate() {
- if (transitionRunStateTo(TERMINATING)) {
- stopAllWorkers();
- resumeAllSpares();
- signalIdleWorkers();
- cancelQueuedSubmissions();
- cancelQueuedWorkerTasks();
- interruptUnterminatedWorkers();
- signalIdleWorkers(); // resignal after interrupt
- }
- }
-
- /**
- * Possibly terminate when on shutdown state
- */
- private void terminateOnShutdown() {
- if (!hasQueuedSubmissions() && canTerminateOnShutdown(runControl))
- terminate();
- }
-
- /**
- * Clear out and cancel submissions
- */
- private void cancelQueuedSubmissions() {
- ForkJoinTask<?> task;
- while ((task = pollSubmission()) != null)
- task.cancel(false);
- }
-
- /**
- * Clean out worker queues.
- */
- private void cancelQueuedWorkerTasks() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.cancelTasks();
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Set each worker's status to terminating. Requires lock to avoid
- * conflicts with add/remove
- */
- private void stopAllWorkers() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.shutdownNow();
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Interrupt all unterminated workers. This is not required for
- * sake of internal control, but may help unstick user code during
- * shutdown.
- */
- private void interruptUnterminatedWorkers() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null && !t.isTerminated()) {
- try {
- t.interrupt();
- } catch (SecurityException ignore) {
- }
- }
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
-
- /*
- * Nodes for event barrier to manage idle threads. Queue nodes
- * are basic Treiber stack nodes, also used for spare stack.
- *
- * The event barrier has an event count and a wait queue (actually
- * a Treiber stack). Workers are enabled to look for work when
- * the eventCount is incremented. If they fail to find work, they
- * may wait for next count. Upon release, threads help others wake
- * up.
- *
- * Synchronization events occur only in enough contexts to
- * maintain overall liveness:
- *
- * - Submission of a new task to the pool
- * - Resizes or other changes to the workers array
- * - pool termination
- * - A worker pushing a task on an empty queue
- *
- * The case of pushing a task occurs often enough, and is heavy
- * enough compared to simple stack pushes, to require special
- * handling: Method signalWork returns without advancing count if
- * the queue appears to be empty. This would ordinarily result in
- * races causing some queued waiters not to be woken up. To avoid
- * this, the first worker enqueued in method sync (see
- * syncIsReleasable) rescans for tasks after being enqueued, and
- * helps signal if any are found. This works well because the
- * worker has nothing better to do, and so might as well help
- * alleviate the overhead and contention on the threads actually
- * doing work. Also, since event counts increments on task
- * availability exist to maintain liveness (rather than to force
- * refreshes etc), it is OK for callers to exit early if
- * contending with another signaller.
- */
- static final class WaitQueueNode {
- WaitQueueNode next; // only written before enqueued
- volatile ForkJoinWorkerThread thread; // nulled to cancel wait
- final long count; // unused for spare stack
-
- WaitQueueNode(long c, ForkJoinWorkerThread w) {
- count = c;
- thread = w;
- }
-
- /**
- * Wake up waiter, returning false if known to already
- */
- boolean signal() {
- ForkJoinWorkerThread t = thread;
- if (t == null)
- return false;
- thread = null;
- LockSupport.unpark(t);
- return true;
- }
-
- /**
- * Await release on sync
- */
- void awaitSyncRelease(ForkJoinPool p) {
- while (thread != null && !p.syncIsReleasable(this))
- LockSupport.park(this);
- }
-
- /**
- * Await resumption as spare
- */
- void awaitSpareRelease() {
- while (thread != null) {
- if (!Thread.interrupted())
- LockSupport.park(this);
- }
- }
- }
-
- /**
- * Ensures that no thread is waiting for count to advance from the
- * current value of eventCount read on entry to this method, by
- * releasing waiting threads if necessary.
- * @return the count
- */
- final long ensureSync() {
- long c = eventCount;
- WaitQueueNode q;
- while ((q = syncStack) != null && q.count < c) {
- if (casBarrierStack(q, null)) {
- do {
- q.signal();
- } while ((q = q.next) != null);
- break;
- }
- }
- return c;
- }
-
- /**
- * Increments event count and releases waiting threads.
- */
- private void signalIdleWorkers() {
- long c;
- do;while (!casEventCount(c = eventCount, c+1));
- ensureSync();
- }
-
- /**
- * Signal threads waiting to poll a task. Because method sync
- * rechecks availability, it is OK to only proceed if queue
- * appears to be non-empty, and OK to skip under contention to
- * increment count (since some other thread succeeded).
- */
- final void signalWork() {
- long c;
- WaitQueueNode q;
- if (syncStack != null &&
- casEventCount(c = eventCount, c+1) &&
- (((q = syncStack) != null && q.count <= c) &&
- (!casBarrierStack(q, q.next) || !q.signal())))
- ensureSync();
- }
-
- /**
- * Waits until event count advances from last value held by
- * caller, or if excess threads, caller is resumed as spare, or
- * caller or pool is terminating. Updates caller's event on exit.
- * @param w the calling worker thread
- */
- final void sync(ForkJoinWorkerThread w) {
- updateStealCount(w); // Transfer w's count while it is idle
-
- while (!w.isShutdown() && !isTerminating() && !suspendIfSpare(w)) {
- long prev = w.lastEventCount;
- WaitQueueNode node = null;
- WaitQueueNode h;
- while (eventCount == prev &&
- ((h = syncStack) == null || h.count == prev)) {
- if (node == null)
- node = new WaitQueueNode(prev, w);
- if (casBarrierStack(node.next = h, node)) {
- node.awaitSyncRelease(this);
- break;
- }
- }
- long ec = ensureSync();
- if (ec != prev) {
- w.lastEventCount = ec;
- break;
- }
- }
- }
-
- /**
- * Returns true if worker waiting on sync can proceed:
- * - on signal (thread == null)
- * - on event count advance (winning race to notify vs signaller)
- * - on Interrupt
- * - if the first queued node, we find work available
- * If node was not signalled and event count not advanced on exit,
- * then we also help advance event count.
- * @return true if node can be released
- */
- final boolean syncIsReleasable(WaitQueueNode node) {
- long prev = node.count;
- if (!Thread.interrupted() && node.thread != null &&
- (node.next != null ||
- !ForkJoinWorkerThread.hasQueuedTasks(workers)) &&
- eventCount == prev)
- return false;
- if (node.thread != null) {
- node.thread = null;
- long ec = eventCount;
- if (prev <= ec) // help signal
- casEventCount(ec, ec+1);
- }
- return true;
- }
-
- /**
- * Returns true if a new sync event occurred since last call to
- * sync or this method, if so, updating caller's count.
- */
- final boolean hasNewSyncEvent(ForkJoinWorkerThread w) {
- long lc = w.lastEventCount;
- long ec = ensureSync();
- if (ec == lc)
- return false;
- w.lastEventCount = ec;
- return true;
- }
-
- // Parallelism maintenance
-
- /**
- * Decrement running count; if too low, add spare.
- *
- * Conceptually, all we need to do here is add or resume a
- * spare thread when one is about to block (and remove or
- * suspend it later when unblocked -- see suspendIfSpare).
- * However, implementing this idea requires coping with
- * several problems: We have imperfect information about the
- * states of threads. Some count updates can and usually do
- * lag run state changes, despite arrangements to keep them
- * accurate (for example, when possible, updating counts
- * before signalling or resuming), especially when running on
- * dynamic JVMs that don't optimize the infrequent paths that
- * update counts. Generating too many threads can make these
- * problems become worse, because excess threads are more
- * likely to be context-switched with others, slowing them all
- * down, especially if there is no work available, so all are
- * busy scanning or idling. Also, excess spare threads can
- * only be suspended or removed when they are idle, not
- * immediately when they aren't needed. So adding threads will
- * raise parallelism level for longer than necessary. Also,
- * FJ applications often enounter highly transient peaks when
- * many threads are blocked joining, but for less time than it
- * takes to create or resume spares.
- *
- * @param joinMe if non-null, return early if done
- * @param maintainParallelism if true, try to stay within
- * target counts, else create only to avoid starvation
- * @return true if joinMe known to be done
- */
- final boolean preJoin(ForkJoinTask<?> joinMe, boolean maintainParallelism) {
- maintainParallelism &= maintainsParallelism; // overrride
- boolean dec = false; // true when running count decremented
- while (spareStack == null || !tryResumeSpare(dec)) {
- int counts = workerCounts;
- if (dec || (dec = casWorkerCounts(counts, --counts))) { // CAS cheat
- if (!needSpare(counts, maintainParallelism))
- break;
- if (joinMe.status < 0)
- return true;
- if (tryAddSpare(counts))
- break;
- }
- }
- return false;
- }
-
- /**
- * Same idea as preJoin
- */
- final boolean preBlock(ManagedBlocker blocker, boolean maintainParallelism){
- maintainParallelism &= maintainsParallelism;
- boolean dec = false;
- while (spareStack == null || !tryResumeSpare(dec)) {
- int counts = workerCounts;
- if (dec || (dec = casWorkerCounts(counts, --counts))) {
- if (!needSpare(counts, maintainParallelism))
- break;
- if (blocker.isReleasable())
- return true;
- if (tryAddSpare(counts))
- break;
- }
- }
- return false;
- }
-
- /**
- * Returns true if a spare thread appears to be needed. If
- * maintaining parallelism, returns true when the deficit in
- * running threads is more than the surplus of total threads, and
- * there is apparently some work to do. This self-limiting rule
- * means that the more threads that have already been added, the
- * less parallelism we will tolerate before adding another.
- * @param counts current worker counts
- * @param maintainParallelism try to maintain parallelism
- */
- private boolean needSpare(int counts, boolean maintainParallelism) {
- int ps = parallelism;
- int rc = runningCountOf(counts);
- int tc = totalCountOf(counts);
- int runningDeficit = ps - rc;
- int totalSurplus = tc - ps;
- return (tc < maxPoolSize &&
- (rc == 0 || totalSurplus < 0 ||
- (maintainParallelism &&
- runningDeficit > totalSurplus &&
- ForkJoinWorkerThread.hasQueuedTasks(workers))));
- }
-
- /**
- * Add a spare worker if lock available and no more than the
- * expected numbers of threads exist
- * @return true if successful
- */
- private boolean tryAddSpare(int expectedCounts) {
- final ReentrantLock lock = this.workerLock;
- int expectedRunning = runningCountOf(expectedCounts);
- int expectedTotal = totalCountOf(expectedCounts);
- boolean success = false;
- boolean locked = false;
- // confirm counts while locking; CAS after obtaining lock
- try {
- for (;;) {
- int s = workerCounts;
- int tc = totalCountOf(s);
- int rc = runningCountOf(s);
- if (rc > expectedRunning || tc > expectedTotal)
- break;
- if (!locked && !(locked = lock.tryLock()))
- break;
- if (casWorkerCounts(s, workerCountsFor(tc+1, rc+1))) {
- createAndStartSpare(tc);
- success = true;
- break;
- }
- }
- } finally {
- if (locked)
- lock.unlock();
- }
- return success;
- }
-
- /**
- * Add the kth spare worker. On entry, pool coounts are already
- * adjusted to reflect addition.
- */
- private void createAndStartSpare(int k) {
- ForkJoinWorkerThread w = null;
- ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(k + 1);
- int len = ws.length;
- // Probably, we can place at slot k. If not, find empty slot
- if (k < len && ws[k] != null) {
- for (k = 0; k < len && ws[k] != null; ++k)
- ;
- }
- if (k < len && !isTerminating() && (w = createWorker(k)) != null) {
- ws[k] = w;
- w.start();
- }
- else
- updateWorkerCount(-1); // adjust on failure
- signalIdleWorkers();
- }
-
- /**
- * Suspend calling thread w if there are excess threads. Called
- * only from sync. Spares are enqueued in a Treiber stack
- * using the same WaitQueueNodes as barriers. They are resumed
- * mainly in preJoin, but are also woken on pool events that
- * require all threads to check run state.
- * @param w the caller
- */
- private boolean suspendIfSpare(ForkJoinWorkerThread w) {
- WaitQueueNode node = null;
- int s;
- while (parallelism < runningCountOf(s = workerCounts)) {
- if (node == null)
- node = new WaitQueueNode(0, w);
- if (casWorkerCounts(s, s-1)) { // representation-dependent
- // push onto stack
- do;while (!casSpareStack(node.next = spareStack, node));
- // block until released by resumeSpare
- node.awaitSpareRelease();
- return true;
- }
- }
- return false;
- }
-
- /**
- * Try to pop and resume a spare thread.
- * @param updateCount if true, increment running count on success
- * @return true if successful
- */
- private boolean tryResumeSpare(boolean updateCount) {
- WaitQueueNode q;
- while ((q = spareStack) != null) {
- if (casSpareStack(q, q.next)) {
- if (updateCount)
- updateRunningCount(1);
- q.signal();
- return true;
- }
- }
- return false;
- }
-
- /**
- * Pop and resume all spare threads. Same idea as ensureSync.
- * @return true if any spares released
- */
- private boolean resumeAllSpares() {
- WaitQueueNode q;
- while ( (q = spareStack) != null) {
- if (casSpareStack(q, null)) {
- do {
- updateRunningCount(1);
- q.signal();
- } while ((q = q.next) != null);
- return true;
- }
- }
- return false;
- }
-
- /**
- * Pop and shutdown excessive spare threads. Call only while
- * holding lock. This is not guaranteed to eliminate all excess
- * threads, only those suspended as spares, which are the ones
- * unlikely to be needed in the future.
- */
- private void trimSpares() {
- int surplus = totalCountOf(workerCounts) - parallelism;
- WaitQueueNode q;
- while (surplus > 0 && (q = spareStack) != null) {
- if (casSpareStack(q, null)) {
- do {
- updateRunningCount(1);
- ForkJoinWorkerThread w = q.thread;
- if (w != null && surplus > 0 &&
- runningCountOf(workerCounts) > 0 && w.shutdown())
- --surplus;
- q.signal();
- } while ((q = q.next) != null);
- }
- }
- }
-
/**
* Interface for extending managed parallelism for tasks running
- * in ForkJoinPools. A ManagedBlocker provides two methods.
- * Method <code>isReleasable</code> must return true if blocking is not
- * necessary. Method <code>block</code> blocks the current thread
- * if necessary (perhaps internally invoking isReleasable before
- * actually blocking.).
+ * in {@link ForkJoinPool}s.
+ *
+ * <p>A {@code ManagedBlocker} provides two methods. Method
+ * {@code isReleasable} must return {@code true} if blocking is
+ * not necessary. Method {@code block} blocks the current thread
+ * if necessary (perhaps internally invoking {@code isReleasable}
+ * before actually blocking). These actions are performed by any
+ * thread invoking {@link ForkJoinPool#managedBlock}. The
+ * unusual methods in this API accommodate synchronizers that may,
+ * but don't usually, block for long periods. Similarly, they
+ * allow more efficient internal handling of cases in which
+ * additional workers may be, but usually are not, needed to
+ * ensure sufficient parallelism. Toward this end,
+ * implementations of method {@code isReleasable} must be amenable
+ * to repeated invocation.
+ *
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
- * <pre>
- * class ManagedLocker implements ManagedBlocker {
- * final ReentrantLock lock;
- * boolean hasLock = false;
- * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
- * public boolean block() {
- * if (!hasLock)
- * lock.lock();
- * return true;
- * }
- * public boolean isReleasable() {
- * return hasLock || (hasLock = lock.tryLock());
- * }
+ * <pre> {@code
+ * class ManagedLocker implements ManagedBlocker {
+ * final ReentrantLock lock;
+ * boolean hasLock = false;
+ * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+ * public boolean block() {
+ * if (!hasLock)
+ * lock.lock();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return hasLock || (hasLock = lock.tryLock());
+ * }
+ * }}</pre>
+ *
+ * <p>Here is a class that possibly blocks waiting for an
+ * item on a given queue:
+ * <pre> {@code
+ * class QueueTaker<E> implements ManagedBlocker {
+ * final BlockingQueue<E> queue;
+ * volatile E item = null;
+ * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
+ * public boolean block() throws InterruptedException {
+ * if (item == null)
+ * item = queue.take();
+ * return true;
* }
- * </pre>
+ * public boolean isReleasable() {
+ * return item != null || (item = queue.poll()) != null;
+ * }
+ * public E getItem() { // call after pool.managedBlock completes
+ * return item;
+ * }
+ * }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
- * @return true if no additional blocking is necessary (i.e.,
- * if isReleasable would return true).
+ *
+ * @return {@code true} if no additional blocking is necessary
+ * (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
- * (the method is not required to do so, but is allowe to).
+ * (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
- * Returns true if blocking is unnecessary.
+ * Returns {@code true} if blocking is unnecessary.
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
- * is a ForkJoinWorkerThread, this method possibly arranges for a
- * spare thread to be activated if necessary to ensure parallelism
- * while the current thread is blocked. If
- * <code>maintainParallelism</code> is true and the pool supports
- * it ({@link #getMaintainsParallelism}), this method attempts to
- * maintain the pool's nominal parallelism. Otherwise if activates
- * a thread only if necessary to avoid complete starvation. This
- * option may be preferable when blockages use timeouts, or are
- * almost always brief.
- *
- * <p> If the caller is not a ForkJoinTask, this method is behaviorally
- * equivalent to
- * <pre>
- * while (!blocker.isReleasable())
- * if (blocker.block())
- * return;
- * </pre>
- * If the caller is a ForkJoinTask, then the pool may first
- * be expanded to ensure parallelism, and later adjusted.
+ * is a {@link ForkJoinWorkerThread}, this method possibly
+ * arranges for a spare thread to be activated if necessary to
+ * ensure sufficient parallelism while the current thread is blocked.
+ *
+ * <p>If the caller is not a {@link ForkJoinTask}, this method is
+ * behaviorally equivalent to
+ * <pre> {@code
+ * while (!blocker.isReleasable())
+ * if (blocker.block())
+ * return;
+ * }</pre>
+ *
+ * If the caller is a {@code ForkJoinTask}, then the pool may
+ * first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
- * @param maintainParallelism if true and supported by this pool,
- * attempt to maintain the pool's nominal parallelism; otherwise
- * activate a thread only if necessary to avoid complete
- * starvation.
- * @throws InterruptedException if blocker.block did so.
- */
- public static void managedBlock(ManagedBlocker blocker,
- boolean maintainParallelism)
+ * @throws InterruptedException if blocker.block did so
+ */
+ public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
- ForkJoinPool pool = (t instanceof ForkJoinWorkerThread?
- ((ForkJoinWorkerThread)t).pool : null);
- if (!blocker.isReleasable()) {
- try {
- if (pool == null ||
- !pool.preBlock(blocker, maintainParallelism))
- awaitBlocker(blocker);
- } finally {
- if (pool != null)
- pool.updateRunningCount(1);
+ ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).pool : null);
+ while (!blocker.isReleasable()) {
+ if (p == null || p.tryCompensate(null, blocker)) {
+ try {
+ do {} while (!blocker.isReleasable() && !blocker.block());
+ } finally {
+ if (p != null)
+ p.incrementActiveCount();
+ }
+ break;
}
}
}
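
Tying managedBlock to the QueueTaker sketch in the ManagedBlocker javadoc above: the wrapper lets the pool compensate with a spare worker while a ForkJoinWorkerThread waits on the queue (ManagedTake is illustrative; QueueTaker is the class from that javadoc):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ForkJoinPool;

    class ManagedTake {
        static <E> E take(BlockingQueue<E> queue) throws InterruptedException {
            QueueTaker<E> taker = new QueueTaker<E>(queue); // from the javadoc
            ForkJoinPool.managedBlock(taker);
            return taker.getItem();
        }
    }
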
- private static void awaitBlocker(ManagedBlocker blocker)
- throws InterruptedException {
- do;while (!blocker.isReleasable() && !blocker.block());
- }
-
- // AbstractExecutorService overrides
+ // AbstractExecutorService overrides. These rely on the undocumented
+ // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
+ // implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
- return new AdaptedRunnable(runnable, value);
+ return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
- return new AdaptedCallable(callable);
+ return new ForkJoinTask.AdaptedCallable<T>(callable);
}
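
Through these overrides, the inherited ExecutorService surface behaves as expected; an illustrative submission, not part of the patch:

    ForkJoinPool pool = new ForkJoinPool();
    Future<String> f = pool.submit(new Callable<String>() {
        public String call() { return "done"; }
    });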
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long CTL;
+ private static final long PARKBLOCKER;
+ private static final int ABASE;
+ private static final int ASHIFT;
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ static {
+ poolNumberGenerator = new AtomicInteger();
+ nextSubmitterSeed = new AtomicInteger(0x55555555);
+ modifyThreadPermission = new RuntimePermission("modifyThread");
+ defaultForkJoinWorkerThreadFactory =
+ new DefaultForkJoinWorkerThreadFactory();
+ submitters = new ThreadSubmitter();
+ int s;
try {
- return Unsafe.getUnsafe();
+ U = getUnsafe();
+ Class<?> k = ForkJoinPool.class;
+ Class<?> ak = ForkJoinTask[].class;
+ CTL = U.objectFieldOffset
+ (k.getDeclaredField("ctl"));
+ Class<?> tk = Thread.class;
+ PARKBLOCKER = U.objectFieldOffset
+ (tk.getDeclaredField("parkBlocker"));
+ ABASE = U.arrayBaseOffset(ak);
+ s = U.arrayIndexScale(ak);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ if ((s & (s-1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+ }
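
For orientation, an editorial sketch of what ABASE and ASHIFT buy: the byte offset of slot i of a ForkJoinTask[] is ABASE + ((long)i << ASHIFT); for example, with a 4-byte reference scale, ASHIFT == 2, so slot 3 sits at ABASE + 12. The helper name below is hypothetical; the real code inlines this arithmetic.

    // Volatile read of a[i] via raw offsets (illustration only).
    static ForkJoinTask<?> taskAt(ForkJoinTask<?>[] a, int i) {
        return (ForkJoinTask<?>)U.getObjectVolatile(a, ABASE + ((long)i << ASHIFT));
    }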
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (ForkJoinPool.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long eventCountOffset;
- static final long workerCountsOffset;
- static final long runControlOffset;
- static final long syncStackOffset;
- static final long spareStackOffset;
-
- static {
- try {
- _unsafe = getUnsafe();
- eventCountOffset = fieldOffset("eventCount");
- workerCountsOffset = fieldOffset("workerCounts");
- runControlOffset = fieldOffset("runControl");
- syncStackOffset = fieldOffset("syncStack");
- spareStackOffset = fieldOffset("spareStack");
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
-
- private boolean casEventCount(long cmp, long val) {
- return _unsafe.compareAndSwapLong(this, eventCountOffset, cmp, val);
- }
- private boolean casWorkerCounts(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, workerCountsOffset, cmp, val);
- }
- private boolean casRunControl(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, runControlOffset, cmp, val);
- }
- private boolean casSpareStack(WaitQueueNode cmp, WaitQueueNode val) {
- return _unsafe.compareAndSwapObject(this, spareStackOffset, cmp, val);
- }
- private boolean casBarrierStack(WaitQueueNode cmp, WaitQueueNode val) {
- return _unsafe.compareAndSwapObject(this, syncStackOffset, cmp, val);
- }
}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
index dc1a6bcccc..344f6887a6 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
@@ -1,470 +1,597 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
import java.io.Serializable;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+import java.util.Collection;
+import java.util.List;
+import java.util.RandomAccess;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+//import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.lang.reflect.Constructor;
/**
- * Abstract base class for tasks that run within a {@link
- * ForkJoinPool}. A ForkJoinTask is a thread-like entity that is much
+ * Abstract base class for tasks that run within a {@link ForkJoinPool}.
+ * A {@code ForkJoinTask} is a thread-like entity that is much
* lighter weight than a normal thread. Huge numbers of tasks and
* subtasks may be hosted by a small number of actual threads in a
* ForkJoinPool, at the price of some usage limitations.
*
- * <p> A "main" ForkJoinTask begins execution when submitted to a
- * {@link ForkJoinPool}. Once started, it will usually in turn start
- * other subtasks. As indicated by the name of this class, many
- * programs using ForkJoinTasks employ only methods <code>fork</code>
- * and <code>join</code>, or derivatives such as
- * <code>invokeAll</code>. However, this class also provides a number
- * of other methods that can come into play in advanced usages, as
- * well as extension mechanics that allow support of new forms of
- * fork/join processing.
+ * <p>A "main" {@code ForkJoinTask} begins execution when submitted
+ * to a {@link ForkJoinPool}. Once started, it will usually in turn
+ * start other subtasks. As indicated by the name of this class,
+ * many programs using {@code ForkJoinTask} employ only methods
+ * {@link #fork} and {@link #join}, or derivatives such as {@link
+ * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
+ * provides a number of other methods that can come into play in
+ * advanced usages, as well as extension mechanics that allow
+ * support of new forms of fork/join processing.
*
- * <p>A ForkJoinTask is a lightweight form of {@link Future}. The
- * efficiency of ForkJoinTasks stems from a set of restrictions (that
- * are only partially statically enforceable) reflecting their
- * intended use as computational tasks calculating pure functions or
- * operating on purely isolated objects. The primary coordination
- * mechanisms are {@link #fork}, that arranges asynchronous execution,
- * and {@link #join}, that doesn't proceed until the task's result has
- * been computed. Computations should avoid <code>synchronized</code>
- * methods or blocks, and should minimize other blocking
- * synchronization apart from joining other tasks or using
- * synchronizers such as Phasers that are advertised to cooperate with
- * fork/join scheduling. Tasks should also not perform blocking IO,
- * and should ideally access variables that are completely independent
- * of those accessed by other running tasks. Minor breaches of these
- * restrictions, for example using shared output streams, may be
- * tolerable in practice, but frequent use may result in poor
- * performance, and the potential to indefinitely stall if the number
- * of threads not waiting for IO or other external synchronization
- * becomes exhausted. This usage restriction is in part enforced by
- * not permitting checked exceptions such as <code>IOExceptions</code>
- * to be thrown. However, computations may still encounter unchecked
- * exceptions, that are rethrown to callers attempting join
- * them. These exceptions may additionally include
- * RejectedExecutionExceptions stemming from internal resource
- * exhaustion such as failure to allocate internal task queues.
+ * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
+ * The efficiency of {@code ForkJoinTask}s stems from a set of
+ * restrictions (that are only partially statically enforceable)
+ * reflecting their main use as computational tasks calculating pure
+ * functions or operating on purely isolated objects. The primary
+ * coordination mechanisms are {@link #fork}, that arranges
+ * asynchronous execution, and {@link #join}, that doesn't proceed
+ * until the task's result has been computed. Computations should
+ * ideally avoid {@code synchronized} methods or blocks, and should
+ * minimize other blocking synchronization apart from joining other
+ * tasks or using synchronizers such as Phasers that are advertised to
+ * cooperate with fork/join scheduling. Subdividable tasks should also
+ * not perform blocking IO, and should ideally access variables that
+ * are completely independent of those accessed by other running
+ * tasks. These guidelines are loosely enforced by not permitting
+ * checked exceptions such as {@code IOExceptions} to be
+ * thrown. However, computations may still encounter unchecked
+ * exceptions, that are rethrown to callers attempting to join
+ * them. These exceptions may additionally include {@link
+ * RejectedExecutionException} stemming from internal resource
+ * exhaustion, such as failure to allocate internal task
+ * queues. Rethrown exceptions behave in the same way as regular
+ * exceptions, but, when possible, contain stack traces (as displayed
+ * for example using {@code ex.printStackTrace()}) of both the thread
+ * that initiated the computation and the thread actually
+ * encountering the exception; minimally only the latter.
+ *
+ * <p>It is possible to define and use ForkJoinTasks that may block,
+ * but doing so requires three further considerations: (1) Completion
+ * of few if any <em>other</em> tasks should be dependent on a task
+ * that blocks on external synchronization or IO. Event-style async
+ * tasks that are never joined often fall into this category. (2) To
+ * minimize resource impact, tasks should be small; ideally performing
+ * only the (possibly) blocking action. (3) Unless the {@link
+ * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
+ * blocked tasks is known to be less than the pool's {@link
+ * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
+ * enough threads will be available to ensure progress or good
+ * performance.
*
* <p>The primary method for awaiting completion and extracting
* results of a task is {@link #join}, but there are several variants:
* The {@link Future#get} methods support interruptible and/or timed
- * waits for completion and report results using <code>Future</code>
- * conventions. Method {@link #helpJoin} enables callers to actively
- * execute other tasks while awaiting joins, which is sometimes more
- * efficient but only applies when all subtasks are known to be
- * strictly tree-structured. Method {@link #invoke} is semantically
- * equivalent to <code>fork(); join()</code> but always attempts to
- * begin execution in the current thread. The "<em>quiet</em>" forms
- * of these methods do not extract results or report exceptions. These
+ * waits for completion and report results using {@code Future}
+ * conventions. Method {@link #invoke} is semantically
+ * equivalent to {@code fork(); join()} but always attempts to begin
+ * execution in the current thread. The "<em>quiet</em>" forms of
+ * these methods do not extract results or report exceptions. These
* may be useful when a set of tasks are being executed, and you need
* to delay processing of results or exceptions until all complete.
- * Method <code>invokeAll</code> (available in multiple versions)
+ * Method {@code invokeAll} (available in multiple versions)
* performs the most common form of parallel invocation: forking a set
* of tasks and joining them all.
*
- * <p> The ForkJoinTask class is not usually directly subclassed.
+ * <p>In the most typical usages, a fork-join pair act like a call
+ * (fork) and return (join) from a parallel recursive function. As is
+ * the case with other forms of recursive calls, returns (joins)
+ * should be performed innermost-first. For example, {@code a.fork();
+ * b.fork(); b.join(); a.join();} is likely to be substantially more
+ * efficient than joining {@code a} before {@code b}.
+ *
+ * <p>The execution status of tasks may be queried at several levels
+ * of detail: {@link #isDone} is true if a task completed in any way
+ * (including the case where a task was cancelled without executing);
+ * {@link #isCompletedNormally} is true if a task completed without
+ * cancellation or encountering an exception; {@link #isCancelled} is
+ * true if the task was cancelled (in which case {@link #getException}
+ * returns a {@link java.util.concurrent.CancellationException}); and
+ * {@link #isCompletedAbnormally} is true if a task was either
+ * cancelled or encountered an exception, in which case {@link
+ * #getException} will return either the encountered exception or
+ * {@link java.util.concurrent.CancellationException}.
+ *
+ * <p>The ForkJoinTask class is not usually directly subclassed.
* Instead, you subclass one of the abstract classes that support a
- * particular style of fork/join processing. Normally, a concrete
+ * particular style of fork/join processing, typically {@link
+ * RecursiveAction} for computations that do not return results, or
+ * {@link RecursiveTask} for those that do. Normally, a concrete
* ForkJoinTask subclass declares fields comprising its parameters,
- * established in a constructor, and then defines a <code>compute</code>
+ * established in a constructor, and then defines a {@code compute}
* method that somehow uses the control methods supplied by this base
- * class. While these methods have <code>public</code> access (to allow
- * instances of different task subclasses to call each others
+ * class. While these methods have {@code public} access (to allow
+ * instances of different task subclasses to call each other's
* methods), some of them may only be called from within other
- * ForkJoinTasks. Attempts to invoke them in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * ForkJoinTasks (as may be determined using method {@link
+ * #inForkJoinPool}). Attempts to invoke them in other contexts
+ * result in exceptions or errors, possibly including
+ * {@code ClassCastException}.
*
- * <p>Most base support methods are <code>final</code> because their
- * implementations are intrinsically tied to the underlying
- * lightweight task scheduling framework, and so cannot be overridden.
- * Developers creating new basic styles of fork/join processing should
- * minimally implement <code>protected</code> methods
- * <code>exec</code>, <code>setRawResult</code>, and
- * <code>getRawResult</code>, while also introducing an abstract
- * computational method that can be implemented in its subclasses,
- * possibly relying on other <code>protected</code> methods provided
- * by this class.
+ * <p>Method {@link #join} and its variants are appropriate for use
+ * only when completion dependencies are acyclic; that is, the
+ * parallel computation can be described as a directed acyclic graph
+ * (DAG). Otherwise, executions may encounter a form of deadlock as
+ * tasks cyclically wait for each other. However, this framework
+ * supports other methods and techniques (for example the use of
+ * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
+ * may be of use in constructing custom subclasses for problems that
+ * are not statically structured as DAGs. To support such usages a
+ * ForkJoinTask may be atomically <em>marked</em> using {@link
+ * #markForkJoinTask} and checked for marking using {@link
+ * #isMarkedForkJoinTask}. The ForkJoinTask implementation does not
+ * use these {@code protected} methods or marks for any purpose, but
+ * they may be of use in the construction of specialized subclasses.
+ * For example, parallel graph traversals can use the supplied methods
+ * to avoid revisiting nodes/tasks that have already been processed.
+ * Also, completion based designs can use them to record that one
+ * subtask has completed. (Method names for marking are bulky in part
+ * to encourage definition of methods that reflect their usage
+ * patterns.)
+ *
+ * <p>Most base support methods are {@code final}, to prevent
+ * overriding of implementations that are intrinsically tied to the
+ * underlying lightweight task scheduling framework. Developers
+ * creating new basic styles of fork/join processing should minimally
+ * implement {@code protected} methods {@link #exec}, {@link
+ * #setRawResult}, and {@link #getRawResult}, while also introducing
+ * an abstract computational method that can be implemented in its
+ * subclasses, possibly relying on other {@code protected} methods
+ * provided by this class.
*
* <p>ForkJoinTasks should perform relatively small amounts of
- * computations, othewise splitting into smaller tasks. As a very
- * rough rule of thumb, a task should perform more than 100 and less
- * than 10000 basic computational steps. If tasks are too big, then
- * parellelism cannot improve throughput. If too small, then memory
- * and internal task maintenance overhead may overwhelm processing.
+ * computation. Large tasks should be split into smaller subtasks,
+ * usually via recursive decomposition. As a very rough rule of thumb,
+ * a task should perform more than 100 and less than 10000 basic
+ * computational steps, and should avoid indefinite looping. If tasks
+ * are too big, then parallelism cannot improve throughput. If too
+ * small, then memory and internal task maintenance overhead may
+ * overwhelm processing.
+ *
+ * <p>This class provides {@code adapt} methods for {@link Runnable}
+ * and {@link Callable}, that may be of use when mixing execution of
+ * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
+ * of this form, consider using a pool constructed in <em>asyncMode</em>.
*
- * <p>ForkJoinTasks are <code>Serializable</code>, which enables them
- * to be used in extensions such as remote execution frameworks. It is
- * in general sensible to serialize tasks only before or after, but
- * not during execution. Serialization is not relied on during
- * execution itself.
+ * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
+ * used in extensions such as remote execution frameworks. It is
+ * sensible to serialize tasks only before or after, but not during,
+ * execution. Serialization is not relied on during execution itself.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
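
To make the innermost-first joining advice above concrete, a minimal RecursiveTask sketch, illustrative only and not part of this file; note that b, forked last, is joined first:

    import scala.concurrent.forkjoin.RecursiveTask;

    class Fib extends RecursiveTask<Integer> {
        final int n;
        Fib(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= 1)
                return n;
            Fib a = new Fib(n - 1);
            Fib b = new Fib(n - 2);
            a.fork();                    // runs asynchronously
            b.fork();
            return b.join() + a.join();  // join in reverse order of forking
        }
    }

In practice one would usually fork a and compute b directly in the current thread, saving a deque push and pop; the two-fork form above just isolates the ordering point.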
public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
- /**
- * Run control status bits packed into a single int to minimize
- * footprint and to ensure atomicity (via CAS). Status is
- * initially zero, and takes on nonnegative values until
- * completed, upon which status holds COMPLETED. CANCELLED, or
- * EXCEPTIONAL, which use the top 3 bits. Tasks undergoing
- * blocking waits by other threads have SIGNAL_MASK bits set --
- * bit 15 for external (nonFJ) waits, and the rest a count of
- * waiting FJ threads. (This representation relies on
- * ForkJoinPool max thread limits). Completion of a stolen task
- * with SIGNAL_MASK bits set awakens waiter via notifyAll. Even
- * though suboptimal for some purposes, we use basic builtin
- * wait/notify to take advantage of "monitor inflation" in JVMs
- * that we would otherwise need to emulate to avoid adding further
- * per-task bookkeeping overhead. Note that bits 16-28 are
- * currently unused. Also value 0x80000000 is available as spare
- * completion value.
+ /*
+ * See the internal documentation of class ForkJoinPool for a
+ * general implementation overview. ForkJoinTasks are mainly
+ * responsible for maintaining their "status" field amidst relays
+ * to methods in ForkJoinWorkerThread and ForkJoinPool.
+ *
+ * The methods of this class are more-or-less layered into
+ * (1) basic status maintenance
+ * (2) execution and awaiting completion
+ * (3) user-level methods that additionally report results.
+ * This is sometimes hard to see because this file orders exported
+ * methods in a way that flows well in javadocs.
*/
- volatile int status; // accessed directy by pool and workers
- static final int COMPLETION_MASK = 0xe0000000;
- static final int NORMAL = 0xe0000000; // == mask
- static final int CANCELLED = 0xc0000000;
- static final int EXCEPTIONAL = 0xa0000000;
- static final int SIGNAL_MASK = 0x0000ffff;
- static final int INTERNAL_SIGNAL_MASK = 0x00007fff;
- static final int EXTERNAL_SIGNAL = 0x00008000; // top bit of low word
-
- /**
- * Table of exceptions thrown by tasks, to enable reporting by
- * callers. Because exceptions are rare, we don't directly keep
- * them with task objects, but instead us a weak ref table. Note
- * that cancellation exceptions don't appear in the table, but are
- * instead recorded as status values.
- * Todo: Use ConcurrentReferenceHashMap
+ /*
+ * The status field holds run control status bits packed into a
+ * single int to minimize footprint and to ensure atomicity (via
+ * CAS). Status is initially zero, and takes on nonnegative
+ * values until completed, upon which status (anded with
+ * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
+ * undergoing blocking waits by other threads have the SIGNAL bit
+ * set. Completion of a stolen task with SIGNAL set awakens any
+ * waiters via notifyAll. Even though suboptimal for some
+ * purposes, we use basic builtin wait/notify to take advantage of
+ * "monitor inflation" in JVMs that we would otherwise need to
+ * emulate to avoid adding further per-task bookkeeping overhead.
+ * We want these monitors to be "fat", i.e., not use biasing or
+ * thin-lock techniques, so use some odd coding idioms that tend
+ * to avoid them, mainly by arranging that every synchronized
+ * block performs a wait, notifyAll or both.
*/
- static final Map<ForkJoinTask<?>, Throwable> exceptionMap =
- Collections.synchronizedMap
- (new WeakHashMap<ForkJoinTask<?>, Throwable>());
- // within-package utilities
+ /** The run status of this task */
+ volatile int status; // accessed directly by pool and workers
+ static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
+ static final int NORMAL = 0xf0000000; // must be negative
+ static final int CANCELLED = 0xc0000000; // must be < NORMAL
+ static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
+ static final int SIGNAL = 0x00000001;
+ static final int MARKED = 0x00000002;
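
An editorial gloss on this encoding, not part of the patch: every completion value has the sign bit set, so the signed comparisons used throughout fall out of the chosen constants.

    int s = status & DONE_MASK;
    boolean done        = status < 0;        // some completion value is set
    boolean normal      = s == NORMAL;       // 0xf0000000
    boolean cancelled   = s == CANCELLED;    // 0xc0000000, < NORMAL as an int
    boolean exceptional = s == EXCEPTIONAL;  // 0x80000000, < CANCELLED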
/**
- * Get current worker thread, or null if not a worker thread
- */
- static ForkJoinWorkerThread getWorker() {
- Thread t = Thread.currentThread();
- return ((t instanceof ForkJoinWorkerThread)?
- (ForkJoinWorkerThread)t : null);
- }
-
- final boolean casStatus(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, statusOffset, cmp, val);
- }
-
- /**
- * Workaround for not being able to rethrow unchecked exceptions.
- */
- static void rethrowException(Throwable ex) {
- if (ex != null)
- _unsafe.throwException(ex);
- }
-
- // Setting completion status
-
- /**
- * Mark completion and wake up threads waiting to join this task.
+ * Marks completion and wakes up threads waiting to join this
+ * task. A specialization for NORMAL completion is in method
+ * doExec.
+ *
* @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+ * @return completion status on exit
*/
- final void setCompletion(int completion) {
- ForkJoinPool pool = getPool();
- if (pool != null) {
- int s; // Clear signal bits while setting completion status
- do;while ((s = status) >= 0 && !casStatus(s, completion));
-
- if ((s & SIGNAL_MASK) != 0) {
- if ((s &= INTERNAL_SIGNAL_MASK) != 0)
- pool.updateRunningCount(s);
- synchronized(this) { notifyAll(); }
+ private int setCompletion(int completion) {
+ for (int s;;) {
+ if ((s = status) < 0)
+ return s;
+ if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
+ if ((s & SIGNAL) != 0)
+ synchronized (this) { notifyAll(); }
+ return completion;
}
}
- else
- externallySetCompletion(completion);
- }
-
- /**
- * Version of setCompletion for non-FJ threads. Leaves signal
- * bits for unblocked threads to adjust, and always notifies.
- */
- private void externallySetCompletion(int completion) {
- int s;
- do;while ((s = status) >= 0 &&
- !casStatus(s, (s & SIGNAL_MASK) | completion));
- synchronized(this) { notifyAll(); }
- }
-
- /**
- * Sets status to indicate normal completion
- */
- final void setNormalCompletion() {
- // Try typical fast case -- single CAS, no signal, not already done.
- // Manually expand casStatus to improve chances of inlining it
- if (!_unsafe.compareAndSwapInt(this, statusOffset, 0, NORMAL))
- setCompletion(NORMAL);
- }
-
- // internal waiting and notification
-
- /**
- * Performs the actual monitor wait for awaitDone
- */
- private void doAwaitDone() {
- // Minimize lock bias and in/de-flation effects by maximizing
- // chances of waiting inside sync
- try {
- while (status >= 0)
- synchronized(this) { if (status >= 0) wait(); }
- } catch (InterruptedException ie) {
- onInterruptedWait();
- }
}
/**
- * Performs the actual monitor wait for awaitDone
+ * Primary execution method for stolen tasks. Unless done, calls
+ * exec and records status if completed, but doesn't wait for
+ * completion otherwise.
+ *
+ * @return status on exit from this method
*/
- private void doAwaitDone(long startTime, long nanos) {
- synchronized(this) {
+ final int doExec() {
+ int s; boolean completed;
+ if ((s = status) >= 0) {
try {
- while (status >= 0) {
- long nt = nanos - System.nanoTime() - startTime;
- if (nt <= 0)
- break;
- wait(nt / 1000000, (int)(nt % 1000000));
+ completed = exec();
+ } catch (Throwable rex) {
+ return setExceptionalCompletion(rex);
+ }
+ while ((s = status) >= 0 && completed) {
+ if (U.compareAndSwapInt(this, STATUS, s, s | NORMAL)) {
+ if ((s & SIGNAL) != 0)
+ synchronized (this) { notifyAll(); }
+ return NORMAL;
}
- } catch (InterruptedException ie) {
- onInterruptedWait();
}
}
+ return s;
}
- // Awaiting completion
+ /**
+ * Tries to set SIGNAL status. Used by ForkJoinPool. Other
+ * variants are directly incorporated into externalAwaitDone etc.
+ *
+ * @return true if successful
+ */
+ final boolean trySetSignal() {
+ int s;
+ return U.compareAndSwapInt(this, STATUS, s = status, s | SIGNAL);
+ }
/**
- * Sets status to indicate there is joiner, then waits for join,
- * surrounded with pool notifications.
- * @return status upon exit
+ * Blocks a non-worker-thread until completion.
+ * @return status upon completion
*/
- private int awaitDone(ForkJoinWorkerThread w, boolean maintainParallelism) {
- ForkJoinPool pool = w == null? null : w.pool;
+ private int externalAwaitDone() {
+ boolean interrupted = false;
int s;
while ((s = status) >= 0) {
- if (casStatus(s, pool == null? s|EXTERNAL_SIGNAL : s+1)) {
- if (pool == null || !pool.preJoin(this, maintainParallelism))
- doAwaitDone();
- if (((s = status) & INTERNAL_SIGNAL_MASK) != 0)
- adjustPoolCountsOnUnblock(pool);
- break;
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
}
}
+ if (interrupted)
+ Thread.currentThread().interrupt();
return s;
}
/**
- * Timed version of awaitDone
- * @return status upon exit
+ * Blocks a non-worker-thread until completion or interruption.
*/
- private int awaitDone(ForkJoinWorkerThread w, long nanos) {
- ForkJoinPool pool = w == null? null : w.pool;
+ private int externalInterruptibleAwaitDone() throws InterruptedException {
int s;
+ if (Thread.interrupted())
+ throw new InterruptedException();
while ((s = status) >= 0) {
- if (casStatus(s, pool == null? s|EXTERNAL_SIGNAL : s+1)) {
- long startTime = System.nanoTime();
- if (pool == null || !pool.preJoin(this, false))
- doAwaitDone(startTime, nanos);
- if ((s = status) >= 0) {
- adjustPoolCountsOnCancelledWait(pool);
- s = status;
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0)
+ wait();
+ else
+ notifyAll();
}
- if (s < 0 && (s & INTERNAL_SIGNAL_MASK) != 0)
- adjustPoolCountsOnUnblock(pool);
- break;
}
}
return s;
}
- /**
- * Notify pool that thread is unblocked. Called by signalled
- * threads when woken by non-FJ threads (which is atypical).
- */
- private void adjustPoolCountsOnUnblock(ForkJoinPool pool) {
- int s;
- do;while ((s = status) < 0 && !casStatus(s, s & COMPLETION_MASK));
- if (pool != null && (s &= INTERNAL_SIGNAL_MASK) != 0)
- pool.updateRunningCount(s);
- }
/**
- * Notify pool to adjust counts on cancelled or timed out wait
+ * Implementation for join, get, quietlyJoin. Directly handles
+ * only cases of already-completed, external wait, and
+ * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
+ *
+ * @return status upon completion
*/
- private void adjustPoolCountsOnCancelledWait(ForkJoinPool pool) {
- if (pool != null) {
- int s;
- while ((s = status) >= 0 && (s & INTERNAL_SIGNAL_MASK) != 0) {
- if (casStatus(s, s - 1)) {
- pool.updateRunningCount(1);
- break;
- }
+ private int doJoin() {
+ int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
+ if ((s = status) >= 0) {
+ if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
+ if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue).
+ tryUnpush(this) || (s = doExec()) >= 0)
+ s = wt.pool.awaitJoin(w, this);
}
+ else
+ s = externalAwaitDone();
}
+ return s;
}
/**
- * Handle interruptions during waits.
+ * Implementation for invoke, quietlyInvoke.
+ *
+ * @return status upon completion
*/
- private void onInterruptedWait() {
- ForkJoinWorkerThread w = getWorker();
- if (w == null)
- Thread.currentThread().interrupt(); // re-interrupt
- else if (w.isTerminating())
- cancelIgnoringExceptions();
- // else if FJworker, ignore interrupt
+ private int doInvoke() {
+ int s; Thread t; ForkJoinWorkerThread wt;
+ if ((s = doExec()) >= 0) {
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ s = (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue,
+ this);
+ else
+ s = externalAwaitDone();
+ }
+ return s;
}
- // Recording and reporting exceptions
+ // Exception table support
- private void setDoneExceptionally(Throwable rex) {
- exceptionMap.put(this, rex);
- setCompletion(EXCEPTIONAL);
- }
+ /**
+ * Table of exceptions thrown by tasks, to enable reporting by
+ * callers. Because exceptions are rare, we don't directly keep
+ * them with task objects, but instead use a weak ref table. Note
+ * that cancellation exceptions don't appear in the table, but are
+ * instead recorded as status values.
+ *
+ * Note: These statics are initialized below in the static block.
+ */
+ private static final ExceptionNode[] exceptionTable;
+ private static final ReentrantLock exceptionTableLock;
+ private static final ReferenceQueue<Object> exceptionTableRefQueue;
/**
- * Throws the exception associated with status s;
- * @throws the exception
+ * Fixed capacity for exceptionTable.
*/
- private void reportException(int s) {
- if ((s &= COMPLETION_MASK) < NORMAL) {
- if (s == CANCELLED)
- throw new CancellationException();
- else
- rethrowException(exceptionMap.get(this));
+ private static final int EXCEPTION_MAP_CAPACITY = 32;
+
+ /**
+ * Key-value nodes for exception table. The chained hash table
+ * uses identity comparisons, full locking, and weak references
+ * for keys. The table has a fixed capacity because it only
+ * maintains task exceptions long enough for joiners to access
+ * them, so should never become very large for sustained
+ * periods. However, since we do not know when the last joiner
+ * completes, we must use weak references and expunge them. We do
+ * so on each operation (hence full locking). Also, some thread in
+ * any ForkJoinPool will call helpExpungeStaleExceptions when its
+ * pool becomes quiescent (i.e., when isQuiescent reports true).
+ */
+ static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
+ final Throwable ex;
+ ExceptionNode next;
+ final long thrower; // use id not ref to avoid weak cycles
+ ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
+ super(task, exceptionTableRefQueue);
+ this.ex = ex;
+ this.next = next;
+ this.thrower = Thread.currentThread().getId();
}
}
/**
- * Returns result or throws exception using j.u.c.Future conventions
- * Only call when isDone known to be true.
+ * Records exception and sets exceptional completion.
+ *
+ * @return status on exit
*/
- private V reportFutureResult()
- throws ExecutionException, InterruptedException {
- int s = status & COMPLETION_MASK;
- if (s < NORMAL) {
- Throwable ex;
- if (s == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
- throw new ExecutionException(ex);
- if (Thread.interrupted())
- throw new InterruptedException();
+ private int setExceptionalCompletion(Throwable ex) {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ for (ExceptionNode e = t[i]; ; e = e.next) {
+ if (e == null) {
+ t[i] = new ExceptionNode(this, ex, t[i]);
+ break;
+ }
+ if (e.get() == this) // already present
+ break;
+ }
+ } finally {
+ lock.unlock();
}
- return getRawResult();
+ return setCompletion(EXCEPTIONAL);
}
/**
- * Returns result or throws exception using j.u.c.Future conventions
- * with timeouts
+ * Cancels, ignoring any exceptions thrown by cancel. Used during
+ * worker and pool shutdown. Cancel is spec'ed not to throw any
+ * exceptions, but if it does anyway, we have no recourse during
+ * shutdown, so guard against this case.
*/
- private V reportTimedFutureResult()
- throws InterruptedException, ExecutionException, TimeoutException {
- Throwable ex;
- int s = status & COMPLETION_MASK;
- if (s == NORMAL)
- return getRawResult();
- if (s == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
- throw new ExecutionException(ex);
- if (Thread.interrupted())
- throw new InterruptedException();
- throw new TimeoutException();
+ static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
+ if (t != null && t.status >= 0) {
+ try {
+ t.cancel(false);
+ } catch (Throwable ignore) {
+ }
+ }
}
- // internal execution methods
-
/**
- * Calls exec, recording completion, and rethrowing exception if
- * encountered. Caller should normally check status before calling
- * @return true if completed normally
+ * Removes exception node and clears status
*/
- private boolean tryExec() {
- try { // try block must contain only call to exec
- if (!exec())
- return false;
- } catch (Throwable rex) {
- setDoneExceptionally(rex);
- rethrowException(rex);
- return false; // not reached
+ private void clearExceptionalCompletion() {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e.get() == this) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ expungeStaleExceptions();
+ status = 0;
+ } finally {
+ lock.unlock();
}
- setNormalCompletion();
- return true;
}
/**
- * Main execution method used by worker threads. Invokes
- * base computation unless already complete
+ * Returns a rethrowable exception for the given task, if
+ * available. To provide accurate stack traces, if the exception
+ * was not thrown by the current thread, we try to create a new
+ * exception of the same type as the one thrown, but with the
+ * recorded exception as its cause. If there is no such
+ * constructor, we instead try to use a no-arg constructor,
+ * followed by initCause, to the same effect. If none of these
+ * apply, or any fail due to other exceptions, we return the
+ * recorded exception, which is still correct, although it may
+ * contain a misleading stack trace.
+ *
+ * @return the exception, or null if none
*/
- final void quietlyExec() {
- if (status >= 0) {
+ private Throwable getThrowableException() {
+ if ((status & DONE_MASK) != EXCEPTIONAL)
+ return null;
+ int h = System.identityHashCode(this);
+ ExceptionNode e;
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ e = t[h & (t.length - 1)];
+ while (e != null && e.get() != this)
+ e = e.next;
+ } finally {
+ lock.unlock();
+ }
+ Throwable ex;
+ if (e == null || (ex = e.ex) == null)
+ return null;
+ if (e.thrower != Thread.currentThread().getId()) {
+ Class<? extends Throwable> ec = ex.getClass();
try {
- if (!exec())
- return;
- } catch(Throwable rex) {
- setDoneExceptionally(rex);
- return;
+ Constructor<?> noArgCtor = null;
+ Constructor<?>[] cs = ec.getConstructors();// public ctors only
+ for (int i = 0; i < cs.length; ++i) {
+ Constructor<?> c = cs[i];
+ Class<?>[] ps = c.getParameterTypes();
+ if (ps.length == 0)
+ noArgCtor = c;
+ else if (ps.length == 1 && ps[0] == Throwable.class)
+ return (Throwable)(c.newInstance(ex));
+ }
+ if (noArgCtor != null) {
+ Throwable wx = (Throwable)(noArgCtor.newInstance());
+ wx.initCause(ex);
+ return wx;
+ }
+ } catch (Exception ignore) {
}
- setNormalCompletion();
}
+ return ex;
}
/**
- * Calls exec, recording but not rethrowing exception
- * Caller should normally check status before calling
- * @return true if completed normally
+ * Poll stale refs and remove them. Call only while holding lock.
*/
- private boolean tryQuietlyInvoke() {
- try {
- if (!exec())
- return false;
- } catch (Throwable rex) {
- setDoneExceptionally(rex);
- return false;
+ private static void expungeStaleExceptions() {
+ for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
+ if (x instanceof ExceptionNode) {
+ ForkJoinTask<?> key = ((ExceptionNode)x).get();
+ ExceptionNode[] t = exceptionTable;
+ int i = System.identityHashCode(key) & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e == x) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ }
}
- setNormalCompletion();
- return true;
}
/**
- * Cancel, ignoring any exceptions it throws
+ * If lock is available, poll stale refs and remove them.
+ * Called from ForkJoinPool when pools become quiescent.
*/
- final void cancelIgnoringExceptions() {
- try {
- cancel(false);
- } catch(Throwable ignore) {
+ static final void helpExpungeStaleExceptions() {
+ final ReentrantLock lock = exceptionTableLock;
+ if (lock.tryLock()) {
+ try {
+ expungeStaleExceptions();
+ } finally {
+ lock.unlock();
+ }
}
}
/**
- * Main implementation of helpJoin
+ * Throws exception, if any, associated with the given status.
*/
- private int busyJoin(ForkJoinWorkerThread w) {
- int s;
- ForkJoinTask<?> t;
- while ((s = status) >= 0 && (t = w.scanWhileJoining(this)) != null)
- t.quietlyExec();
- return (s >= 0)? awaitDone(w, false) : s; // block if no work
+ private void reportException(int s) {
+ Throwable ex = ((s == CANCELLED) ? new CancellationException() :
+ (s == EXCEPTIONAL) ? getThrowableException() :
+ null);
+ if (ex != null)
+ U.throwException(ex);
}
// public methods
@@ -472,70 +599,111 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
/**
* Arranges to asynchronously execute this task. While it is not
* necessarily enforced, it is a usage error to fork a task more
- * than once unless it has completed and been reinitialized. This
- * method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * than once unless it has completed and been reinitialized.
+ * Subsequent modifications to the state of this task or any data
+ * it operates on are not necessarily consistently observable by
+ * any thread other than the one executing it unless preceded by a
+ * call to {@link #join} or related methods, or a call to {@link
+ * #isDone} returning {@code true}.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @return {@code this}, to simplify usage
*/
- public final void fork() {
- ((ForkJoinWorkerThread)(Thread.currentThread())).pushTask(this);
+ public final ForkJoinTask<V> fork() {
+ ((ForkJoinWorkerThread)Thread.currentThread()).workQueue.push(this);
+ return this;
}
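
Because fork() now returns this, a call site can fork and keep the typed handle in one expression (illustrative, reusing the hypothetical Fib sketch; must run within a pool computation):

    ForkJoinTask<Integer> t = new Fib(25).fork();
    int result = t.join();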
/**
- * Returns the result of the computation when it is ready.
- * This method differs from <code>get</code> in that abnormal
- * completion results in RuntimeExceptions or Errors, not
- * ExecutionExceptions.
+ * Returns the result of the computation when it {@link #isDone is
+ * done}. This method differs from {@link #get()} in that
+ * abnormal completion results in {@code RuntimeException} or
+ * {@code Error}, not {@code ExecutionException}, and that
+ * interrupts of the calling thread do <em>not</em> cause the
+ * method to abruptly return by throwing {@code
+ * InterruptedException}.
*
* @return the computed result
*/
public final V join() {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryExec())
- reportException(awaitDone(w, true));
+ int s;
+ if ((s = doJoin() & DONE_MASK) != NORMAL)
+ reportException(s);
return getRawResult();
}
/**
* Commences performing this task, awaits its completion if
- * necessary, and return its result.
- * @throws Throwable (a RuntimeException, Error, or unchecked
- * exception) if the underlying computation did so.
+ * necessary, and returns its result, or throws an (unchecked)
+ * {@code RuntimeException} or {@code Error} if the underlying
+ * computation did so.
+ *
* @return the computed result
*/
public final V invoke() {
- if (status >= 0 && tryExec())
- return getRawResult();
- else
- return join();
+ int s;
+ if ((s = doInvoke() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
}
/**
- * Forks both tasks, returning when <code>isDone</code> holds for
- * both of them or an exception is encountered. This method may be
- * invoked only from within ForkJoinTask computations. Attempts to
- * invoke in other contexts result in exceptions or errors
- * possibly including ClassCastException.
- * @param t1 one task
- * @param t2 the other task
- * @throws NullPointerException if t1 or t2 are null
- * @throws RuntimeException or Error if either task did so.
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, the
+ * other may be cancelled. However, the execution status of
+ * individual tasks is not guaranteed upon exceptional return. The
+ * status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @param t1 the first task
+ * @param t2 the second task
+ * @throws NullPointerException if any task is null
*/
- public static void invokeAll(ForkJoinTask<?>t1, ForkJoinTask<?> t2) {
+ public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
+ int s1, s2;
t2.fork();
- t1.invoke();
- t2.join();
+ if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
+ t1.reportException(s1);
+ if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
+ t2.reportException(s2);
}
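
A typical call shape for this two-task form, again using the hypothetical Fib sketch:

    Fib left = new Fib(n - 1), right = new Fib(n - 2);
    ForkJoinTask.invokeAll(left, right);  // forks right, invokes left, joins right
    int sum = left.join() + right.join(); // both already done; joins return at once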
/**
- * Forks the given tasks, returning when <code>isDone</code> holds
- * for all of them. If any task encounters an exception, others
- * may be cancelled. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including ClassCastException.
- * @param tasks the array of tasks
- * @throws NullPointerException if tasks or any element are null.
- * @throws RuntimeException or Error if any task did so.
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, others
+ * may be cancelled. However, the execution status of individual
+ * tasks is not guaranteed upon exceptional return. The status of
+ * each task may be obtained using {@link #getException()} and
+ * related methods to check if they have been cancelled, completed
+ * normally or exceptionally, or left unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @param tasks the tasks
+ * @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?>... tasks) {
Throwable ex = null;
@@ -548,46 +716,53 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
else if (i != 0)
t.fork();
- else {
- t.quietlyInvoke();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = tasks[i];
if (t != null) {
if (ex != null)
t.cancel(false);
- else {
- t.quietlyJoin();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
}
}
if (ex != null)
- rethrowException(ex);
+ U.throwException(ex);
}
/**
- * Forks all tasks in the collection, returning when
- * <code>isDone</code> holds for all of them. If any task
- * encounters an exception, others may be cancelled. This method
- * may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts resul!t in
- * exceptions or errors possibly including ClassCastException.
+ * Forks all tasks in the specified collection, returning when
+ * {@code isDone} holds for each task or an (unchecked) exception
+ * is encountered, in which case the exception is rethrown. If
+ * more than one task encounters an exception, then this method
+ * throws any one of these exceptions. If any task encounters an
+ * exception, others may be cancelled. However, the execution
+ * status of individual tasks is not guaranteed upon exceptional
+ * return. The status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @param tasks the collection of tasks
- * @throws NullPointerException if tasks or any element are null.
- * @throws RuntimeException or Error if any task did so.
+ * @return the tasks argument, to simplify usage
+ * @throws NullPointerException if tasks or any element are null
*/
- public static void invokeAll(Collection<? extends ForkJoinTask<?>> tasks) {
- if (!(tasks instanceof List)) {
- invokeAll(tasks.toArray(new ForkJoinTask[tasks.size()]));
- return;
+ public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
+ if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
+ invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
+ return tasks;
}
+ @SuppressWarnings("unchecked")
List<? extends ForkJoinTask<?>> ts =
- (List<? extends ForkJoinTask<?>>)tasks;
+ (List<? extends ForkJoinTask<?>>) tasks;
Throwable ex = null;
int last = ts.size() - 1;
for (int i = last; i >= 0; --i) {
@@ -598,253 +773,326 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
else if (i != 0)
t.fork();
- else {
- t.quietlyInvoke();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = ts.get(i);
if (t != null) {
if (ex != null)
t.cancel(false);
- else {
- t.quietlyJoin();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
}
}
if (ex != null)
- rethrowException(ex);
+ U.throwException(ex);
+ return tasks;
}
/**
- * Returns true if the computation performed by this task has
- * completed (or has been cancelled).
- * @return true if this computation has completed
+ * Attempts to cancel execution of this task. This attempt will
+ * fail if the task has already completed or could not be
+ * cancelled for some other reason. If successful, and this task
+ * has not started when {@code cancel} is called, execution of
+ * this task is suppressed. After this method returns
+ * successfully, unless there is an intervening call to {@link
+ * #reinitialize}, subsequent calls to {@link #isCancelled},
+ * {@link #isDone}, and {@code cancel} will return {@code true}
+ * and calls to {@link #join} and related methods will result in
+ * {@code CancellationException}.
+ *
+ * <p>This method may be overridden in subclasses, but if so, must
+ * still ensure that these properties hold. In particular, the
+ * {@code cancel} method itself must not throw exceptions.
+ *
+ * <p>This method is designed to be invoked by <em>other</em>
+ * tasks. To terminate the current task, you can just return or
+ * throw an unchecked exception from its computation method, or
+ * invoke {@link #completeExceptionally}.
+ *
+ * @param mayInterruptIfRunning this value has no effect in the
+ * default implementation because interrupts are not used to
+ * control cancellation.
+ *
+ * @return {@code true} if this task is now cancelled
*/
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
+ }
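
An illustrative cancellation from outside the task, in the spirit of the note above that cancel is designed for other tasks; shouldStop() is a hypothetical predicate:

    ForkJoinTask<?> t = new Fib(30).fork();
    if (shouldStop())
        t.cancel(false);  // never throws; on success, isCancelled() and isDone() report true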
+
public final boolean isDone() {
return status < 0;
}
- /**
- * Returns true if this task was cancelled.
- * @return true if this task was cancelled
- */
public final boolean isCancelled() {
- return (status & COMPLETION_MASK) == CANCELLED;
+ return (status & DONE_MASK) == CANCELLED;
}
/**
- * Asserts that the results of this task's computation will not be
- * used. If a cancellation occurs before atempting to execute this
- * task, then execution will be suppressed, <code>isCancelled</code>
- * will report true, and <code>join</code> will result in a
- * <code>CancellationException</code> being thrown. Otherwise, when
- * cancellation races with completion, there are no guarantees
- * about whether <code>isCancelled</code> will report true, whether
- * <code>join</code> will return normally or via an exception, or
- * whether these behaviors will remain consistent upon repeated
- * invocation.
- *
- * <p>This method may be overridden in subclasses, but if so, must
- * still ensure that these minimal properties hold. In particular,
- * the cancel method itself must not throw exceptions.
- *
- * <p> This method is designed to be invoked by <em>other</em>
- * tasks. To terminate the current task, you can just return or
- * throw an unchecked exception from its computation method, or
- * invoke <code>completeExceptionally</code>.
- *
- * @param mayInterruptIfRunning this value is ignored in the
- * default implementation because tasks are not in general
- * cancelled via interruption.
+ * Returns {@code true} if this task threw an exception or was cancelled.
*
- * @return true if this task is now cancelled
+ * @return {@code true} if this task threw an exception or was cancelled
*/
- public boolean cancel(boolean mayInterruptIfRunning) {
- setCompletion(CANCELLED);
- return (status & COMPLETION_MASK) == CANCELLED;
+ public final boolean isCompletedAbnormally() {
+ return status < NORMAL;
}
/**
- * Returns true if this task threw an exception or was cancelled
- * @return true if this task threw an exception or was cancelled
+ * Returns {@code true} if this task completed without throwing an
+ * exception and was not cancelled.
+ *
+ * @return {@code true} if this task completed without throwing an
+ * exception and was not cancelled
*/
- public final boolean isCompletedAbnormally() {
- return (status & COMPLETION_MASK) < NORMAL;
+ public final boolean isCompletedNormally() {
+ return (status & DONE_MASK) == NORMAL;
}
/**
* Returns the exception thrown by the base computation, or a
- * CancellationException if cancelled, or null if none or if the
- * method has not yet completed.
- * @return the exception, or null if none
+ * {@code CancellationException} if cancelled, or {@code null} if
+ * none or if the method has not yet completed.
+ *
+ * @return the exception, or {@code null} if none
*/
public final Throwable getException() {
- int s = status & COMPLETION_MASK;
- if (s >= NORMAL)
- return null;
- if (s == CANCELLED)
- return new CancellationException();
- return exceptionMap.get(this);
+ int s = status & DONE_MASK;
+ return ((s >= NORMAL) ? null :
+ (s == CANCELLED) ? new CancellationException() :
+ getThrowableException());
}
/**
* Completes this task abnormally, and if not already aborted or
* cancelled, causes it to throw the given exception upon
- * <code>join</code> and related operations. This method may be used
+ * {@code join} and related operations. This method may be used
* to induce exceptions in asynchronous tasks, or to force
* completion of tasks that would not otherwise complete. Its use
- * in other situations is likely to be wrong. This method is
- * overridable, but overridden versions must invoke <code>super</code>
+ * in other situations is discouraged. This method is
+ * overridable, but overridden versions must invoke {@code super}
* implementation to maintain guarantees.
*
- * @param ex the exception to throw. If this exception is
- * not a RuntimeException or Error, the actual exception thrown
- * will be a RuntimeException with cause ex.
+ * @param ex the exception to throw. If this exception is not a
+ * {@code RuntimeException} or {@code Error}, the actual exception
+ * thrown will be a {@code RuntimeException} with cause {@code ex}.
*/
public void completeExceptionally(Throwable ex) {
- setDoneExceptionally((ex instanceof RuntimeException) ||
- (ex instanceof Error)? ex :
- new RuntimeException(ex));
+ setExceptionalCompletion((ex instanceof RuntimeException) ||
+ (ex instanceof Error) ? ex :
+ new RuntimeException(ex));
}
/**
* Completes this task, and if not already aborted or cancelled,
- * returning a <code>null</code> result upon <code>join</code> and related
- * operations. This method may be used to provide results for
- * asynchronous tasks, or to provide alternative handling for
- * tasks that would not otherwise complete normally. Its use in
- * other situations is likely to be wrong. This method is
- * overridable, but overridden versions must invoke <code>super</code>
- * implementation to maintain guarantees.
+ * returning the given value as the result of subsequent
+ * invocations of {@code join} and related operations. This method
+ * may be used to provide results for asynchronous tasks, or to
+ * provide alternative handling for tasks that would not otherwise
+ * complete normally. Its use in other situations is
+ * discouraged. This method is overridable, but overridden
+     * versions must invoke the {@code super} implementation to maintain
+ * guarantees.
*
- * @param value the result value for this task.
+ * @param value the result value for this task
*/
public void complete(V value) {
try {
setRawResult(value);
- } catch(Throwable rex) {
- setDoneExceptionally(rex);
+ } catch (Throwable rex) {
+ setExceptionalCompletion(rex);
return;
}
- setNormalCompletion();
- }
-
- public final V get() throws InterruptedException, ExecutionException {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, true);
- return reportFutureResult();
- }
-
- public final V get(long timeout, TimeUnit unit)
- throws InterruptedException, ExecutionException, TimeoutException {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, unit.toNanos(timeout));
- return reportTimedFutureResult();
+ setCompletion(NORMAL);
}
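
[Editorial note: a hedged sketch of the async-completion pattern this javadoc describes. The hypothetical task below reports false from exec() and only becomes joinable once some callback invokes complete(value).]

    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical async-style task: exec() does not complete the task;
    // an external event must call complete(v) to make join() return v.
    class AsyncResult<V> extends ForkJoinTask<V> {
        private V result;
        public V getRawResult()          { return result; }
        protected void setRawResult(V v) { result = v; }
        protected boolean exec()         { return false; }
    }
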
/**
- * Possibly executes other tasks until this task is ready, then
- * returns the result of the computation. This method may be more
- * efficient than <code>join</code>, but is only applicable when
- * there are no potemtial dependencies between continuation of the
- * current task and that of any other task that might be executed
- * while helping. (This usually holds for pure divide-and-conquer
- * tasks). This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * resul!t in exceptions or errors possibly including ClassCastException.
+ * Waits if necessary for the computation to complete, and then
+ * retrieves its result.
+ *
* @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
*/
- public final V helpJoin() {
- ForkJoinWorkerThread w = (ForkJoinWorkerThread)(Thread.currentThread());
- if (status < 0 || !w.unpushTask(this) || !tryExec())
- reportException(busyJoin(w));
+ public final V get() throws InterruptedException, ExecutionException {
+ int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
+ doJoin() : externalInterruptibleAwaitDone();
+ Throwable ex;
+ if ((s &= DONE_MASK) == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
return getRawResult();
}
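
[Editorial note: a small usage sketch. A plain (non-worker) thread submits a task via the pool's execute(ForkJoinTask) method and blocks interruptibly on its result; the awaitResult helper is hypothetical.]

    import java.util.concurrent.ExecutionException;
    import scala.concurrent.forkjoin.ForkJoinPool;
    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: block an ordinary thread on a task's outcome.
    static <T> T awaitResult(ForkJoinPool pool, ForkJoinTask<T> task)
            throws InterruptedException, ExecutionException {
        pool.execute(task);  // submit for asynchronous execution
        return task.get();   // cancellation/failure surface as exceptions
    }
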
/**
- * Possibly executes other tasks until this task is ready. This
- * method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts resul!t in
- * exceptions or errors possibly including ClassCastException.
+ * Waits if necessary for at most the given time for the computation
+ * to complete, and then retrieves its result, if available.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ * @throws TimeoutException if the wait timed out
*/
- public final void quietlyHelpJoin() {
- if (status >= 0) {
- ForkJoinWorkerThread w =
- (ForkJoinWorkerThread)(Thread.currentThread());
- if (!w.unpushTask(this) || !tryQuietlyInvoke())
- busyJoin(w);
+ public final V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ // Messy in part because we measure in nanosecs, but wait in millisecs
+ int s; long ns, ms;
+ if ((s = status) >= 0 && (ns = unit.toNanos(timeout)) > 0L) {
+ long deadline = System.nanoTime() + ns;
+ ForkJoinPool p = null;
+ ForkJoinPool.WorkQueue w = null;
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ p = wt.pool;
+ w = wt.workQueue;
+ s = p.helpJoinOnce(w, this); // no retries on failure
+ }
+ boolean canBlock = false;
+ boolean interrupted = false;
+ try {
+ while ((s = status) >= 0) {
+ if (w != null && w.runState < 0)
+ cancelIgnoringExceptions(this);
+ else if (!canBlock) {
+ if (p == null || p.tryCompensate(this, null))
+ canBlock = true;
+ }
+ else {
+ if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
+ U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait(ms);
+ } catch (InterruptedException ie) {
+ if (p == null)
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ if ((s = status) < 0 || interrupted ||
+ (ns = deadline - System.nanoTime()) <= 0L)
+ break;
+ }
+ }
+ } finally {
+ if (p != null && canBlock)
+ p.incrementActiveCount();
+ }
+ if (interrupted)
+ throw new InterruptedException();
+ }
+ if ((s &= DONE_MASK) != NORMAL) {
+ Throwable ex;
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s != EXCEPTIONAL)
+ throw new TimeoutException();
+ if ((ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
}
+ return getRawResult();
}
/**
- * Joins this task, without returning its result or throwing an
+ * Joins this task, without returning its result or throwing its
* exception. This method may be useful when processing
* collections of tasks when some have been cancelled or otherwise
* known to have aborted.
*/
public final void quietlyJoin() {
- if (status >= 0) {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, true);
- }
+ doJoin();
}
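
[Editorial note: a sketch of the batch pattern mentioned above, with a hypothetical joinAllQuietly helper: wait for every task without propagating failures, then inspect each outcome via getException().]

    import java.util.List;
    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: quietly await a batch, then report per-task failures.
    static void joinAllQuietly(List<ForkJoinTask<?>> tasks) {
        for (ForkJoinTask<?> t : tasks)
            t.quietlyJoin();                  // never throws
        for (ForkJoinTask<?> t : tasks) {
            Throwable ex = t.getException();  // null if completed normally
            if (ex != null)
                System.err.println("task failed: " + ex);
        }
    }
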
/**
* Commences performing this task and awaits its completion if
- * necessary, without returning its result or throwing an
- * exception. This method may be useful when processing
- * collections of tasks when some have been cancelled or otherwise
- * known to have aborted.
+ * necessary, without returning its result or throwing its
+ * exception.
*/
public final void quietlyInvoke() {
- if (status >= 0 && !tryQuietlyInvoke())
- quietlyJoin();
+ doInvoke();
}
/**
* Possibly executes tasks until the pool hosting the current task
- * {@link ForkJoinPool#isQuiescent}. This method may be of use in
- * designs in which many tasks are forked, but none are explicitly
- * joined, instead executing them until all are processed.
+ * {@link ForkJoinPool#isQuiescent is quiescent}. This method may
+ * be of use in designs in which many tasks are forked, but none
+ * are explicitly joined, instead executing them until all are
+ * processed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*/
public static void helpQuiesce() {
- ((ForkJoinWorkerThread)(Thread.currentThread())).
- helpQuiescePool();
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ wt.pool.helpQuiescePool(wt.workQueue);
}
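
[Editorial note: the fork-without-join design the javadoc alludes to might look like the following sketch; LeafWork and ProcessAll are hypothetical names, not part of this patch.]

    import scala.concurrent.forkjoin.RecursiveAction;

    // Hypothetical leaf task.
    class LeafWork extends RecursiveAction {
        private final int id;
        LeafWork(int id) { this.id = id; }
        protected void compute() { /* process item id */ }
    }

    // Hypothetical driver: fork many subtasks, join none, and instead
    // help execute tasks until the hosting pool is quiescent.
    class ProcessAll extends RecursiveAction {
        private final int n;
        ProcessAll(int n) { this.n = n; }
        protected void compute() {
            for (int i = 0; i < n; ++i)
                new LeafWork(i).fork();
            helpQuiesce();   // static method inherited from ForkJoinTask
        }
    }
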
/**
* Resets the internal bookkeeping state of this task, allowing a
- * subsequent <code>fork</code>. This method allows repeated reuse of
+ * subsequent {@code fork}. This method allows repeated reuse of
* this task, but only if reuse occurs when this task has either
* never been forked, or has been forked, then completed and all
* outstanding joins of this task have also completed. Effects
- * under any other usage conditions are not guaranteed, and are
- * almost surely wrong. This method may be useful when executing
+ * under any other usage conditions are not guaranteed.
+ * This method may be useful when executing
* pre-constructed trees of subtasks in loops.
+ *
+ * <p>Upon completion of this method, {@code isDone()} reports
+ * {@code false}, and {@code getException()} reports {@code
+ * null}. However, the value returned by {@code getRawResult} is
+ * unaffected. To clear this value, you can invoke {@code
+ * setRawResult(null)}.
*/
public void reinitialize() {
- if ((status & COMPLETION_MASK) == EXCEPTIONAL)
- exceptionMap.remove(this);
- status = 0;
+ if ((status & DONE_MASK) == EXCEPTIONAL)
+ clearExceptionalCompletion();
+ else
+ status = 0;
}
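
[Editorial note: a sketch of the reuse pattern, using the hypothetical ProcessAll task from the earlier sketch. reinitialize() is legal here because each round fully completes before the next invocation.]

    import scala.concurrent.forkjoin.ForkJoinPool;

    // Hypothetical reuse loop for a pre-constructed task tree.
    static void runRounds(ForkJoinPool pool) {
        ProcessAll job = new ProcessAll(1024);
        for (int round = 0; round < 10; ++round) {
            pool.invoke(job);    // runs to completion before returning
            job.reinitialize();  // reset status so the task can run again
        }
    }
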
/**
* Returns the pool hosting the current task execution, or null
- * if this task is executing outside of any pool.
- * @return the pool, or null if none.
+ * if this task is executing outside of any ForkJoinPool.
+ *
+ * @see #inForkJoinPool
+ * @return the pool, or {@code null} if none
*/
public static ForkJoinPool getPool() {
Thread t = Thread.currentThread();
- return ((t instanceof ForkJoinWorkerThread)?
- ((ForkJoinWorkerThread)t).pool : null);
+ return (t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread) t).pool : null;
+ }
+
+ /**
+ * Returns {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation.
+ *
+ * @return {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation,
+ * or {@code false} otherwise
+ */
+ public static boolean inForkJoinPool() {
+ return Thread.currentThread() instanceof ForkJoinWorkerThread;
}
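
[Editorial note: a minimal guard sketch using this predicate; the localBacklog helper and its fallback value are hypothetical.]

    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: only consult worker-only queries inside a pool.
    static int localBacklog() {
        return ForkJoinTask.inForkJoinPool()
            ? ForkJoinTask.getQueuedTaskCount() // worker-thread-only call
            : 0;                                // ordinary thread: no queue
    }
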
/**
@@ -853,13 +1101,19 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* by the current thread, and has not commenced executing in
* another thread. This method may be useful when arranging
* alternative local processing of tasks that could have been, but
- * were not, stolen. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including ClassCastException.
- * @return true if unforked
+ * were not, stolen.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @return {@code true} if unforked
*/
public boolean tryUnfork() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).unpushTask(this);
+ return ((ForkJoinWorkerThread)Thread.currentThread())
+ .workQueue.tryUnpush(this);
}
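
[Editorial note: a sketch of the "take it back if nobody stole it" pattern, reusing the hypothetical LeafWork task from the earlier sketch.]

    import scala.concurrent.forkjoin.RecursiveAction;

    // Hypothetical: fork the right half, run the left inline, then try
    // to reclaim the right half to avoid join overhead when not stolen.
    class Split extends RecursiveAction {
        protected void compute() {
            LeafWork right = new LeafWork(1);
            right.fork();
            new LeafWork(0).invoke();  // left half, executed directly
            if (right.tryUnfork())
                right.invoke();        // reclaimed: run it locally
            else
                right.join();          // stolen: await the thief
        }
    }
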
/**
@@ -867,15 +1121,22 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* forked by the current worker thread but not yet executed. This
* value may be useful for heuristic decisions about whether to
* fork other tasks.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @return the number of tasks
*/
public static int getQueuedTaskCount() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).
- getQueueSize();
+ return ((ForkJoinWorkerThread) Thread.currentThread())
+ .workQueue.queueSize();
}
/**
- * Returns a estimate of how many more locally queued tasks are
+ * Returns an estimate of how many more locally queued tasks are
* held by the current worker thread than there are other worker
* threads that might steal them. This value may be useful for
* heuristic decisions about whether to fork other tasks. In many
@@ -883,23 +1144,74 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* aim to maintain a small constant surplus (for example, 3) of
* tasks, and to process computations locally if this threshold is
* exceeded.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @return the surplus number of tasks, which may be negative
*/
public static int getSurplusQueuedTaskCount() {
- return ((ForkJoinWorkerThread)(Thread.currentThread()))
- .getEstimatedSurplusTaskCount();
+ /*
+ * The aim of this method is to return a cheap heuristic guide
+ * for task partitioning when programmers, frameworks, tools,
+ * or languages have little or no idea about task granularity.
+     * In essence, by offering this method, we ask users only about
+ * tradeoffs in overhead vs expected throughput and its
+ * variance, rather than how finely to partition tasks.
+ *
+ * In a steady state strict (tree-structured) computation,
+ * each thread makes available for stealing enough tasks for
+ * other threads to remain active. Inductively, if all threads
+ * play by the same rules, each thread should make available
+ * only a constant number of tasks.
+ *
+ * The minimum useful constant is just 1. But using a value of
+ * 1 would require immediate replenishment upon each steal to
+ * maintain enough tasks, which is infeasible. Further,
+ * partitionings/granularities of offered tasks should
+ * minimize steal rates, which in general means that threads
+ * nearer the top of computation tree should generate more
+ * than those nearer the bottom. In perfect steady state, each
+ * thread is at approximately the same level of computation
+ * tree. However, producing extra tasks amortizes the
+ * uncertainty of progress and diffusion assumptions.
+ *
+     * So, users will want to use values larger, but not much
+     * larger than 1, both to smooth over transient shortages and
+     * to hedge against uneven progress, as traded off against the
+     * cost of extra task overhead. We leave the user to pick a
+ * threshold value to compare with the results of this call to
+ * guide decisions, but recommend values such as 3.
+ *
+ * When all threads are active, it is on average OK to
+ * estimate surplus strictly locally. In steady-state, if one
+ * thread is maintaining say 2 surplus tasks, then so are
+ * others. So we can just use estimated queue length.
+ * However, this strategy alone leads to serious mis-estimates
+ * in some non-steady-state conditions (ramp-up, ramp-down,
+ * other stalls). We can detect many of these by further
+     * considering the number of "idle" threads, which are known to
+     * have zero queued tasks, and compensating by a factor of
+     * (#idle/#active) threads.
+ */
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ return wt.workQueue.queueSize() - wt.pool.idlePerActive();
}
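
[Editorial note: a sketch of the recommended threshold-of-3 heuristic in a divide-and-conquer action; SumRange and its splitting policy are illustrative, not part of this patch.]

    import scala.concurrent.forkjoin.RecursiveAction;

    // Hypothetical: keep splitting only while the local surplus is small.
    class SumRange extends RecursiveAction {
        final long[] a; final int lo, hi;
        SumRange(long[] a, int lo, int hi) {
            this.a = a; this.lo = lo; this.hi = hi;
        }
        protected void compute() {
            if (hi - lo > 1 && getSurplusQueuedTaskCount() < 3) {
                int mid = (lo + hi) >>> 1;
                invokeAll(new SumRange(a, lo, mid),
                          new SumRange(a, mid, hi));
            } else {
                for (int i = lo; i < hi; ++i)
                    a[i] += 1;  // stand-in for real leaf work
            }
        }
    }
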
// Extension methods
/**
- * Returns the result that would be returned by <code>join</code>,
- * even if this task completed abnormally, or null if this task is
- * not known to have been completed. This method is designed to
- * aid debugging, as well as to support extensions. Its use in any
- * other context is discouraged.
+ * Returns the result that would be returned by {@link #join}, even
+ * if this task completed abnormally, or {@code null} if this task
+ * is not known to have been completed. This method is designed
+ * to aid debugging, as well as to support extensions. Its use in
+ * any other context is discouraged.
*
- * @return the result, or null if not completed.
+ * @return the result, or {@code null} if not completed
*/
public abstract V getRawResult();
@@ -918,42 +1230,52 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* called otherwise. The return value controls whether this task
* is considered to be done normally. It may return false in
* asynchronous actions that require explicit invocations of
- * <code>complete</code> to become joinable. It may throw exceptions
- * to indicate abnormal exit.
- * @return true if completed normally
- * @throws Error or RuntimeException if encountered during computation
+ * {@link #complete} to become joinable. It may also throw an
+ * (unchecked) exception to indicate abnormal exit.
+ *
+ * @return {@code true} if completed normally
*/
protected abstract boolean exec();
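
[Editorial note: to make the extension contract concrete, a minimal hypothetical subclass showing the three members every concrete ForkJoinTask must supply; compare the AdaptedRunnable/AdaptedCallable classes later in this file.]

    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: the smallest useful concrete task.
    class ValueTask extends ForkJoinTask<Integer> {
        private Integer result;
        public Integer getRawResult()          { return result; }
        protected void setRawResult(Integer v) { result = v; }
        protected boolean exec() {
            result = 42;   // the actual computation
            return true;   // completed normally; task becomes joinable
        }
    }
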
/**
- * Returns, but does not unschedule or execute, the task queued by
- * the current thread but not yet executed, if one is
+ * Returns, but does not unschedule or execute, a task queued by
+ * the current thread but not yet executed, if one is immediately
* available. There is no guarantee that this task will actually
- * be polled or executed next. This method is designed primarily
- * to support extensions, and is unlikely to be useful otherwise.
- * This method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * be polled or executed next. Conversely, this method may return
+     * {@code null} even if a task exists but cannot be accessed without
+ * contention with other threads. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*
- * @return the next task, or null if none are available
+ * @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> peekNextLocalTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).peekTask();
+ return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed. This method
* is designed primarily to support extensions, and is unlikely to
- * be useful otherwise. This method may be invoked only from
- * within ForkJoinTask computations. Attempts to invoke in other
- * contexts result in exceptions or errors possibly including
- * ClassCastException.
+ * be useful otherwise.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*
- * @return the next task, or null if none are available
+ * @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollNextLocalTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).pollLocalTask();
+ return ((ForkJoinWorkerThread) Thread.currentThread())
+ .workQueue.nextLocalTask();
}
/**
@@ -961,19 +1283,170 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* queued by the current thread but not yet executed, if one is
* available, or if not available, a task that was forked by some
* other thread, if available. Availability may be transient, so a
- * <code>null</code> result does not necessarily imply quiecence
+ * {@code null} result does not necessarily imply quiescence
* of the pool this task is operating in. This method is designed
* primarily to support extensions, and is unlikely to be useful
- * otherwise. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including
- * ClassCastException.
+ * otherwise.
*
- * @return a task, or null if none are available
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @return a task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).
- pollTask();
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ return wt.pool.nextTaskFor(wt.workQueue);
+ }
+
+ // Mark-bit operations
+
+ /**
+     * Returns {@code true} if this task is marked.
+     *
+     * @return {@code true} if this task is marked
+ * @since 1.8
+ */
+ public final boolean isMarkedForkJoinTask() {
+ return (status & MARKED) != 0;
+ }
+
+ /**
+ * Atomically sets the mark on this task.
+ *
+     * @return {@code true} if this task was previously unmarked
+ * @since 1.8
+ */
+ public final boolean markForkJoinTask() {
+ for (int s;;) {
+ if (((s = status) & MARKED) != 0)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s, s | MARKED))
+ return true;
+ }
+ }
+
+ /**
+ * Atomically clears the mark on this task.
+ *
+     * @return {@code true} if this task was previously marked
+ * @since 1.8
+ */
+ public final boolean unmarkForkJoinTask() {
+ for (int s;;) {
+ if (((s = status) & MARKED) == 0)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s, s & ~MARKED))
+ return true;
+ }
+ }
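
[Editorial note: a sketch of what these 1.8 mark bits might support: tagging tasks handed to a side channel so no task is enqueued twice. The submitOnce helper and the queue-based design are hypothetical.]

    import java.util.Queue;
    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: atomically claim a task for exactly one consumer.
    static boolean submitOnce(ForkJoinTask<?> t,
                              Queue<ForkJoinTask<?>> sink) {
        if (t.markForkJoinTask()) {  // true only for the first caller
            sink.add(t);
            return true;
        }
        return false;                // already claimed elsewhere
    }
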
+
+ /**
+ * Adaptor for Runnables. This implements RunnableFuture
+ * to be compliant with AbstractExecutorService constraints
+ * when used in ForkJoinPool.
+ */
+ static final class AdaptedRunnable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Runnable runnable;
+ T result;
+ AdaptedRunnable(Runnable runnable, T result) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ this.result = result; // OK to set this even before completion
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Runnables without results
+ */
+ static final class AdaptedRunnableAction extends ForkJoinTask<Void>
+ implements RunnableFuture<Void> {
+ final Runnable runnable;
+ AdaptedRunnableAction(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ }
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Callables
+ */
+ static final class AdaptedCallable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Callable<? extends T> callable;
+ T result;
+ AdaptedCallable(Callable<? extends T> callable) {
+ if (callable == null) throw new NullPointerException();
+ this.callable = callable;
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() {
+ try {
+ result = callable.call();
+ return true;
+ } catch (Error err) {
+ throw err;
+ } catch (RuntimeException rex) {
+ throw rex;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 2838392045355241008L;
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * a null result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @return the task
+ */
+ public static ForkJoinTask<?> adapt(Runnable runnable) {
+ return new AdaptedRunnableAction(runnable);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * the given result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @param result the result upon completion
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
+ return new AdaptedRunnable<T>(runnable, result);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code call}
+ * method of the given {@code Callable} as its action, and returns
+ * its result upon {@link #join}, translating any checked exceptions
+ * encountered into {@code RuntimeException}.
+ *
+ * @param callable the callable action
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
+ return new AdaptedCallable<T>(callable);
}
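
[Editorial note: a brief usage sketch for the adapt family; the submitAdapted helper and the anonymous Callable stand in for real work.]

    import java.util.concurrent.Callable;
    import scala.concurrent.forkjoin.ForkJoinPool;
    import scala.concurrent.forkjoin.ForkJoinTask;

    // Hypothetical: obtain a ForkJoinTask handle for plain Callable work.
    static ForkJoinTask<String> submitAdapted(ForkJoinPool pool) {
        ForkJoinTask<String> task = ForkJoinTask.adapt(
            new Callable<String>() {
                public String call() { return "done"; }
            });
        pool.execute(task);  // later, task.join() yields "done"
        return task;
    }
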
// Serialization support
@@ -981,11 +1454,10 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
private static final long serialVersionUID = -7721805057305804111L;
/**
- * Save the state to a stream.
+ * Saves this task to a stream (that is, serializes it).
*
* @serialData the current run status and the exception thrown
- * during execution, or null if none.
- * @param s the stream
+ * during execution, or {@code null} if none
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
@@ -994,70 +1466,57 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
/**
- * Reconstitute the instance from a stream.
- * @param s the stream
+ * Reconstitutes this task from a stream (that is, deserializes it).
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
- status &= ~INTERNAL_SIGNAL_MASK; // clear internal signal counts
- status |= EXTERNAL_SIGNAL; // conservatively set external signal
Object ex = s.readObject();
if (ex != null)
- setDoneExceptionally((Throwable)ex);
+ setExceptionalCompletion((Throwable)ex);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long STATUS;
+ static {
+ exceptionTableLock = new ReentrantLock();
+ exceptionTableRefQueue = new ReferenceQueue<Object>();
+ exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
+ try {
+ U = getUnsafe();
+ STATUS = U.objectFieldOffset
+ (ForkJoinTask.class.getDeclaredField("status"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
}
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
try {
- return Unsafe.getUnsafe();
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
-
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName, Unsafe unsafe)
- throws NoSuchFieldException {
- // do not use _unsafe to avoid NPE
- return unsafe.objectFieldOffset
- (ForkJoinTask.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long statusOffset;
-
- static {
- Unsafe tmpUnsafe = null;
- long tmpStatusOffset = 0;
- try {
- tmpUnsafe = getUnsafe();
- tmpStatusOffset = fieldOffset("status", tmpUnsafe);
- } catch (Throwable e) {
- // Ignore the failure to load sun.misc.Unsafe on Android so
- // that platform can use the actor library without the
- // fork/join scheduler.
- String vmVendor = System.getProperty("java.vm.vendor");
- if (!vmVendor.contains("Android")) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
- _unsafe = tmpUnsafe;
- statusOffset = tmpStatusOffset;
- }
-
}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
index b4d889750c..90a0af5723 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
@@ -1,224 +1,55 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-import java.util.concurrent.locks.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
/**
- * A thread managed by a {@link ForkJoinPool}. This class is
- * subclassable solely for the sake of adding functionality -- there
- * are no overridable methods dealing with scheduling or
- * execution. However, you can override initialization and termination
- * methods surrounding the main task processing loop. If you do
- * create such a subclass, you will also need to supply a custom
- * ForkJoinWorkerThreadFactory to use it in a ForkJoinPool.
+ * A thread managed by a {@link ForkJoinPool}, which executes
+ * {@link ForkJoinTask}s.
+ * This class is subclassable solely for the sake of adding
+ * functionality -- there are no overridable methods dealing with
+ * scheduling or execution. However, you can override initialization
+ * and termination methods surrounding the main task processing loop.
+ * If you do create such a subclass, you will also need to supply a
+ * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it
+ * in a {@code ForkJoinPool}.
*
+ * @since 1.7
+ * @author Doug Lea
*/
public class ForkJoinWorkerThread extends Thread {
/*
- * Algorithm overview:
- *
- * 1. Work-Stealing: Work-stealing queues are special forms of
- * Deques that support only three of the four possible
- * end-operations -- push, pop, and deq (aka steal), and only do
- * so under the constraints that push and pop are called only from
- * the owning thread, while deq may be called from other threads.
- * (If you are unfamiliar with them, you probably want to read
- * Herlihy and Shavit's book "The Art of Multiprocessor
- * programming", chapter 16 describing these in more detail before
- * proceeding.) The main work-stealing queue design is roughly
- * similar to "Dynamic Circular Work-Stealing Deque" by David
- * Chase and Yossi Lev, SPAA 2005
- * (http://research.sun.com/scalable/pubs/index.html). The main
- * difference ultimately stems from gc requirements that we null
- * out taken slots as soon as we can, to maintain as small a
- * footprint as possible even in programs generating huge numbers
- * of tasks. To accomplish this, we shift the CAS arbitrating pop
- * vs deq (steal) from being on the indices ("base" and "sp") to
- * the slots themselves (mainly via method "casSlotNull()"). So,
- * both a successful pop and deq mainly entail CAS'ing a nonnull
- * slot to null. Because we rely on CASes of references, we do
- * not need tag bits on base or sp. They are simple ints as used
- * in any circular array-based queue (see for example ArrayDeque).
- * Updates to the indices must still be ordered in a way that
- * guarantees that (sp - base) > 0 means the queue is empty, but
- * otherwise may err on the side of possibly making the queue
- * appear nonempty when a push, pop, or deq have not fully
- * committed. Note that this means that the deq operation,
- * considered individually, is not wait-free. One thief cannot
- * successfully continue until another in-progress one (or, if
- * previously empty, a push) completes. However, in the
- * aggregate, we ensure at least probablistic non-blockingness. If
- * an attempted steal fails, a thief always chooses a different
- * random victim target to try next. So, in order for one thief to
- * progress, it suffices for any in-progress deq or new push on
- * any empty queue to complete. One reason this works well here is
- * that apparently-nonempty often means soon-to-be-stealable,
- * which gives threads a chance to activate if necessary before
- * stealing (see below).
- *
- * Efficient implementation of this approach currently relies on
- * an uncomfortable amount of "Unsafe" mechanics. To maintain
- * correct orderings, reads and writes of variable base require
- * volatile ordering. Variable sp does not require volatile write
- * but needs cheaper store-ordering on writes. Because they are
- * protected by volatile base reads, reads of the queue array and
- * its slots do not need volatile load semantics, but writes (in
- * push) require store order and CASes (in pop and deq) require
- * (volatile) CAS semantics. Since these combinations aren't
- * supported using ordinary volatiles, the only way to accomplish
- * these effciently is to use direct Unsafe calls. (Using external
- * AtomicIntegers and AtomicReferenceArrays for the indices and
- * array is significantly slower because of memory locality and
- * indirection effects.) Further, performance on most platforms is
- * very sensitive to placement and sizing of the (resizable) queue
- * array. Even though these queues don't usually become all that
- * big, the initial size must be large enough to counteract cache
- * contention effects across multiple queues (especially in the
- * presence of GC cardmarking). Also, to improve thread-locality,
- * queues are currently initialized immediately after the thread
- * gets the initial signal to start processing tasks. However,
- * all queue-related methods except pushTask are written in a way
- * that allows them to instead be lazily allocated and/or disposed
- * of when empty. All together, these low-level implementation
- * choices produce as much as a factor of 4 performance
- * improvement compared to naive implementations, and enable the
- * processing of billions of tasks per second, sometimes at the
- * expense of ugliness.
- *
- * 2. Run control: The primary run control is based on a global
- * counter (activeCount) held by the pool. It uses an algorithm
- * similar to that in Herlihy and Shavit section 17.6 to cause
- * threads to eventually block when all threads declare they are
- * inactive. (See variable "scans".) For this to work, threads
- * must be declared active when executing tasks, and before
- * stealing a task. They must be inactive before blocking on the
- * Pool Barrier (awaiting a new submission or other Pool
- * event). In between, there is some free play which we take
- * advantage of to avoid contention and rapid flickering of the
- * global activeCount: If inactive, we activate only if a victim
- * queue appears to be nonempty (see above). Similarly, a thread
- * tries to inactivate only after a full scan of other threads.
- * The net effect is that contention on activeCount is rarely a
- * measurable performance issue. (There are also a few other cases
- * where we scan for work rather than retry/block upon
- * contention.)
- *
- * 3. Selection control. We maintain policy of always choosing to
- * run local tasks rather than stealing, and always trying to
- * steal tasks before trying to run a new submission. All steals
- * are currently performed in randomly-chosen deq-order. It may be
- * worthwhile to bias these with locality / anti-locality
- * information, but doing this well probably requires more
- * lower-level information from JVMs than currently provided.
- */
-
- /**
- * Capacity of work-stealing queue array upon initialization.
- * Must be a power of two. Initial size must be at least 2, but is
- * padded to minimize cache effects.
- */
- private static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
-
- /**
- * Maximum work-stealing queue array size. Must be less than or
- * equal to 1 << 28 to ensure lack of index wraparound. (This
- * is less than usual bounds, because we need leftshift by 3
- * to be in int range).
- */
- private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 28;
-
- /**
- * The pool this thread works in. Accessed directly by ForkJoinTask
- */
- final ForkJoinPool pool;
-
- /**
- * The work-stealing queue array. Size must be a power of two.
- * Initialized when thread starts, to improve memory locality.
- */
- private ForkJoinTask<?>[] queue;
-
- /**
- * Index (mod queue.length) of next queue slot to push to or pop
- * from. It is written only by owner thread, via ordered store.
- * Both sp and base are allowed to wrap around on overflow, but
- * (sp - base) still estimates size.
- */
- private volatile int sp;
-
- /**
- * Index (mod queue.length) of least valid queue slot, which is
- * always the next position to steal from if nonempty.
- */
- private volatile int base;
-
- /**
- * Activity status. When true, this worker is considered active.
- * Must be false upon construction. It must be true when executing
- * tasks, and BEFORE stealing a task. It must be false before
- * calling pool.sync
- */
- private boolean active;
-
- /**
- * Run state of this worker. Supports simple versions of the usual
- * shutdown/shutdownNow control.
- */
- private volatile int runState;
-
- /**
- * Seed for random number generator for choosing steal victims.
- * Uses Marsaglia xorshift. Must be nonzero upon initialization.
- */
- private int seed;
-
- /**
- * Number of steals, transferred to pool when idle
+ * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
+ * ForkJoinTasks. For explanation, see the internal documentation
+ * of class ForkJoinPool.
*/
- private int stealCount;
- /**
- * Index of this worker in pool array. Set once by pool before
- * running, and accessed directly by pool during cleanup etc
- */
- int poolIndex;
-
- /**
- * The last barrier event waited for. Accessed in pool callback
- * methods, but only by current thread.
- */
- long lastEventCount;
-
- /**
- * True if use local fifo, not default lifo, for local polling
- */
- private boolean locallyFifo;
+ final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics
+ final ForkJoinPool pool; // the pool this thread works in
/**
* Creates a ForkJoinWorkerThread operating in the given pool.
+ *
* @param pool the pool this thread works in
* @throws NullPointerException if pool is null
*/
protected ForkJoinWorkerThread(ForkJoinPool pool) {
- if (pool == null) throw new NullPointerException();
+ super(pool.nextWorkerName());
+ setDaemon(true);
+ Thread.UncaughtExceptionHandler ueh = pool.ueh;
+ if (ueh != null)
+ setUncaughtExceptionHandler(ueh);
this.pool = pool;
- // Note: poolIndex is set by pool during construction
- // Remaining initialization is deferred to onStart
+ pool.registerWorker(this.workQueue = new ForkJoinPool.WorkQueue
+ (pool, this, pool.localMode));
}
- // Public access methods
-
/**
- * Returns the pool hosting this thread
+ * Returns the pool hosting this thread.
+ *
* @return the pool
*/
public ForkJoinPool getPool() {
@@ -231,543 +62,58 @@ public class ForkJoinWorkerThread extends Thread {
* threads (minus one) that have ever been created in the pool.
* This method may be useful for applications that track status or
* collect results per-worker rather than per-task.
- * @return the index number.
+ *
+ * @return the index number
*/
public int getPoolIndex() {
- return poolIndex;
- }
-
- /**
- * Establishes local first-in-first-out scheduling mode for forked
- * tasks that are never joined.
- * @param async if true, use locally FIFO scheduling
- */
- void setAsyncMode(boolean async) {
- locallyFifo = async;
- }
-
- // Runstate management
-
- // Runstate values. Order matters
- private static final int RUNNING = 0;
- private static final int SHUTDOWN = 1;
- private static final int TERMINATING = 2;
- private static final int TERMINATED = 3;
-
- final boolean isShutdown() { return runState >= SHUTDOWN; }
- final boolean isTerminating() { return runState >= TERMINATING; }
- final boolean isTerminated() { return runState == TERMINATED; }
- final boolean shutdown() { return transitionRunStateTo(SHUTDOWN); }
- final boolean shutdownNow() { return transitionRunStateTo(TERMINATING); }
-
- /**
- * Transition to at least the given state. Return true if not
- * already at least given state.
- */
- private boolean transitionRunStateTo(int state) {
- for (;;) {
- int s = runState;
- if (s >= state)
- return false;
- if (_unsafe.compareAndSwapInt(this, runStateOffset, s, state))
- return true;
- }
- }
-
- /**
- * Try to set status to active; fail on contention
- */
- private boolean tryActivate() {
- if (!active) {
- if (!pool.tryIncrementActiveCount())
- return false;
- active = true;
- }
- return true;
- }
-
- /**
- * Try to set status to active; fail on contention
- */
- private boolean tryInactivate() {
- if (active) {
- if (!pool.tryDecrementActiveCount())
- return false;
- active = false;
- }
- return true;
- }
-
- /**
- * Computes next value for random victim probe. Scans don't
- * require a very high quality generator, but also not a crummy
- * one. Marsaglia xor-shift is cheap and works well.
- */
- private static int xorShift(int r) {
- r ^= r << 1;
- r ^= r >>> 3;
- r ^= r << 10;
- return r;
- }
-
- // Lifecycle methods
-
- /**
- * This method is required to be public, but should never be
- * called explicitly. It performs the main run loop to execute
- * ForkJoinTasks.
- */
- public void run() {
- Throwable exception = null;
- try {
- onStart();
- pool.sync(this); // await first pool event
- mainLoop();
- } catch (Throwable ex) {
- exception = ex;
- } finally {
- onTermination(exception);
- }
- }
-
- /**
- * Execute tasks until shut down.
- */
- private void mainLoop() {
- while (!isShutdown()) {
- ForkJoinTask<?> t = pollTask();
- if (t != null || (t = pollSubmission()) != null)
- t.quietlyExec();
- else if (tryInactivate())
- pool.sync(this);
- }
+ return workQueue.poolIndex;
}
/**
* Initializes internal state after construction but before
* processing any tasks. If you override this method, you must
- * invoke super.onStart() at the beginning of the method.
+ * invoke {@code super.onStart()} at the beginning of the method.
* Initialization requires care: Most fields must have legal
* default values, to ensure that attempted accesses from other
* threads work correctly even before this thread starts
* processing tasks.
*/
protected void onStart() {
- // Allocate while starting to improve chances of thread-local
- // isolation
- queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
- // Initial value of seed need not be especially random but
- // should differ across workers and must be nonzero
- int p = poolIndex + 1;
- seed = p + (p << 8) + (p << 16) + (p << 24); // spread bits
}
/**
- * Perform cleanup associated with termination of this worker
+ * Performs cleanup associated with termination of this worker
* thread. If you override this method, you must invoke
- * super.onTermination at the end of the overridden method.
+ * {@code super.onTermination} at the end of the overridden method.
*
* @param exception the exception causing this thread to abort due
- * to an unrecoverable error, or null if completed normally.
+ * to an unrecoverable error, or {@code null} if completed normally
*/
protected void onTermination(Throwable exception) {
- // Execute remaining local tasks unless aborting or terminating
- while (exception == null && !pool.isTerminating() && base != sp) {
- try {
- ForkJoinTask<?> t = popTask();
- if (t != null)
- t.quietlyExec();
- } catch(Throwable ex) {
- exception = ex;
- }
- }
- // Cancel other tasks, transition status, notify pool, and
- // propagate exception to uncaught exception handler
- try {
- do;while (!tryInactivate()); // ensure inactive
- cancelTasks();
- runState = TERMINATED;
- pool.workerTerminated(this);
- } catch (Throwable ex) { // Shouldn't ever happen
- if (exception == null) // but if so, at least rethrown
- exception = ex;
- } finally {
- if (exception != null)
- ForkJoinTask.rethrowException(exception);
- }
}
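
[Editorial note: since the hook contract (super.onStart() first, super.onTermination last) is easy to get backwards, a minimal hypothetical subclass plus the factory needed to install it may help; LoggingWorker and its factory are illustrative only.]

    import scala.concurrent.forkjoin.ForkJoinPool;
    import scala.concurrent.forkjoin.ForkJoinWorkerThread;

    // Hypothetical worker with per-thread lifecycle logging.
    class LoggingWorker extends ForkJoinWorkerThread {
        LoggingWorker(ForkJoinPool pool) { super(pool); }
        protected void onStart() {
            super.onStart();             // must run first
            System.out.println(getName() + " starting");
        }
        protected void onTermination(Throwable ex) {
            System.out.println(getName() + " exiting: " + ex);
            super.onTermination(ex);     // must run last
        }
    }

    // Factory wiring so a pool actually uses the subclass.
    class LoggingWorkerFactory
            implements ForkJoinPool.ForkJoinWorkerThreadFactory {
        public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
            return new LoggingWorker(pool);
        }
    }
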
- // Intrinsics-based support for queue operations.
-
/**
- * Add in store-order the given task at given slot of q to
- * null. Caller must ensure q is nonnull and index is in range.
- */
- private static void setSlot(ForkJoinTask<?>[] q, int i,
- ForkJoinTask<?> t){
- _unsafe.putOrderedObject(q, (i << qShift) + qBase, t);
- }
-
- /**
- * CAS given slot of q to null. Caller must ensure q is nonnull
- * and index is in range.
- */
- private static boolean casSlotNull(ForkJoinTask<?>[] q, int i,
- ForkJoinTask<?> t) {
- return _unsafe.compareAndSwapObject(q, (i << qShift) + qBase, t, null);
- }
-
- /**
- * Sets sp in store-order.
- */
- private void storeSp(int s) {
- _unsafe.putOrderedInt(this, spOffset, s);
- }
-
- // Main queue methods
-
- /**
- * Pushes a task. Called only by current thread.
- * @param t the task. Caller must ensure nonnull
- */
- final void pushTask(ForkJoinTask<?> t) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int s = sp;
- setSlot(q, s & mask, t);
- storeSp(++s);
- if ((s -= base) == 1)
- pool.signalWork();
- else if (s >= mask)
- growQueue();
- }
-
- /**
- * Tries to take a task from the base of the queue, failing if
- * either empty or contended.
- * @return a task, or null if none or contended.
- */
- final ForkJoinTask<?> deqTask() {
- ForkJoinTask<?> t;
- ForkJoinTask<?>[] q;
- int i;
- int b;
- if (sp != (b = base) &&
- (q = queue) != null && // must read q after b
- (t = q[i = (q.length - 1) & b]) != null &&
- casSlotNull(q, i, t)) {
- base = b + 1;
- return t;
- }
- return null;
- }
-
- /**
- * Returns a popped task, or null if empty. Ensures active status
- * if nonnull. Called only by current thread.
- */
- final ForkJoinTask<?> popTask() {
- int s = sp;
- while (s != base) {
- if (tryActivate()) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int i = (s - 1) & mask;
- ForkJoinTask<?> t = q[i];
- if (t == null || !casSlotNull(q, i, t))
- break;
- storeSp(s - 1);
- return t;
- }
- }
- return null;
- }
-
- /**
- * Specialized version of popTask to pop only if
- * topmost element is the given task. Called only
- * by current thread while active.
- * @param t the task. Caller must ensure nonnull
- */
- final boolean unpushTask(ForkJoinTask<?> t) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int s = sp - 1;
- if (casSlotNull(q, s & mask, t)) {
- storeSp(s);
- return true;
- }
- return false;
- }
-
- /**
- * Returns next task.
- */
- final ForkJoinTask<?> peekTask() {
- ForkJoinTask<?>[] q = queue;
- if (q == null)
- return null;
- int mask = q.length - 1;
- int i = locallyFifo? base : (sp - 1);
- return q[i & mask];
- }
-
- /**
- * Doubles queue array size. Transfers elements by emulating
- * steals (deqs) from old array and placing, oldest first, into
- * new array.
- */
- private void growQueue() {
- ForkJoinTask<?>[] oldQ = queue;
- int oldSize = oldQ.length;
- int newSize = oldSize << 1;
- if (newSize > MAXIMUM_QUEUE_CAPACITY)
- throw new RejectedExecutionException("Queue capacity exceeded");
- ForkJoinTask<?>[] newQ = queue = new ForkJoinTask<?>[newSize];
-
- int b = base;
- int bf = b + oldSize;
- int oldMask = oldSize - 1;
- int newMask = newSize - 1;
- do {
- int oldIndex = b & oldMask;
- ForkJoinTask<?> t = oldQ[oldIndex];
- if (t != null && !casSlotNull(oldQ, oldIndex, t))
- t = null;
- setSlot(newQ, b & newMask, t);
- } while (++b != bf);
- pool.signalWork();
- }
-
- /**
- * Tries to steal a task from another worker. Starts at a random
- * index of workers array, and probes workers until finding one
- * with non-empty queue or finding that all are empty. It
- * randomly selects the first n probes. If these are empty, it
- * resorts to a full circular traversal, which is necessary to
- * accurately set active status by caller. Also restarts if pool
- * events occurred since last scan, which forces refresh of
- * workers array, in case barrier was associated with resize.
- *
- * This method must be both fast and quiet -- usually avoiding
- * memory accesses that could disrupt cache sharing etc other than
- * those needed to check for and take tasks. This accounts for,
- * among other things, updating random seed in place without
- * storing it until exit.
- *
- * @return a task, or null if none found
- */
- private ForkJoinTask<?> scan() {
- ForkJoinTask<?> t = null;
- int r = seed; // extract once to keep scan quiet
- ForkJoinWorkerThread[] ws; // refreshed on outer loop
- int mask; // must be power 2 minus 1 and > 0
- outer:do {
- if ((ws = pool.workers) != null && (mask = ws.length - 1) > 0) {
- int idx = r;
- int probes = ~mask; // use random index while negative
- for (;;) {
- r = xorShift(r); // update random seed
- ForkJoinWorkerThread v = ws[mask & idx];
- if (v == null || v.sp == v.base) {
- if (probes <= mask)
- idx = (probes++ < 0)? r : (idx + 1);
- else
- break;
- }
- else if (!tryActivate() || (t = v.deqTask()) == null)
- continue outer; // restart on contention
- else
- break outer;
- }
- }
- } while (pool.hasNewSyncEvent(this)); // retry on pool events
- seed = r;
- return t;
- }
-
- /**
- * gets and removes a local or stolen a task
- * @return a task, if available
- */
- final ForkJoinTask<?> pollTask() {
- ForkJoinTask<?> t = locallyFifo? deqTask() : popTask();
- if (t == null && (t = scan()) != null)
- ++stealCount;
- return t;
- }
-
- /**
- * gets a local task
- * @return a task, if available
- */
- final ForkJoinTask<?> pollLocalTask() {
- return locallyFifo? deqTask() : popTask();
- }
-
- /**
- * Returns a pool submission, if one exists, activating first.
- * @return a submission, if available
- */
- private ForkJoinTask<?> pollSubmission() {
- ForkJoinPool p = pool;
- while (p.hasQueuedSubmissions()) {
- ForkJoinTask<?> t;
- if (tryActivate() && (t = p.pollSubmission()) != null)
- return t;
- }
- return null;
- }
-
- // Methods accessed only by Pool
-
- /**
- * Removes and cancels all tasks in queue. Can be called from any
- * thread.
- */
- final void cancelTasks() {
- ForkJoinTask<?> t;
- while (base != sp && (t = deqTask()) != null)
- t.cancelIgnoringExceptions();
- }
-
- /**
- * Drains tasks to given collection c
- * @return the number of tasks drained
- */
- final int drainTasksTo(Collection<ForkJoinTask<?>> c) {
- int n = 0;
- ForkJoinTask<?> t;
- while (base != sp && (t = deqTask()) != null) {
- c.add(t);
- ++n;
- }
- return n;
- }
-
- /**
- * Get and clear steal count for accumulation by pool. Called
- * only when known to be idle (in pool.sync and termination).
- */
- final int getAndClearStealCount() {
- int sc = stealCount;
- stealCount = 0;
- return sc;
- }
-
- /**
- * Returns true if at least one worker in the given array appears
- * to have at least one queued task.
- * @param ws array of workers
- */
- static boolean hasQueuedTasks(ForkJoinWorkerThread[] ws) {
- if (ws != null) {
- int len = ws.length;
- for (int j = 0; j < 2; ++j) { // need two passes for clean sweep
- for (int i = 0; i < len; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null && w.sp != w.base)
- return true;
- }
- }
- }
- return false;
- }
-
- // Support methods for ForkJoinTask
-
- /**
- * Returns an estimate of the number of tasks in the queue.
- */
- final int getQueueSize() {
- int n = sp - base;
- return n < 0? 0 : n; // suppress momentarily negative values
- }
-
- /**
- * Returns an estimate of the number of tasks, offset by a
- * function of number of idle workers.
- */
- final int getEstimatedSurplusTaskCount() {
- // The halving approximates weighting idle vs non-idle workers
- return (sp - base) - (pool.getIdleThreadCount() >>> 1);
- }
-
- /**
- * Scan, returning early if joinMe done
- */
- final ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
- ForkJoinTask<?> t = pollTask();
- if (t != null && joinMe.status < 0 && sp == base) {
- pushTask(t); // unsteal if done and this task would be stealable
- t = null;
- }
- return t;
- }
-
- /**
- * Runs tasks until pool isQuiescent
+ * This method is required to be public, but should never be
+ * called explicitly. It performs the main run loop to execute
+ * {@link ForkJoinTask}s.
*/
- final void helpQuiescePool() {
- for (;;) {
- ForkJoinTask<?> t = pollTask();
- if (t != null)
- t.quietlyExec();
- else if (tryInactivate() && pool.isQuiescent())
- break;
- }
- do;while (!tryActivate()); // re-activate on exit
- }
-
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ public void run() {
+ Throwable exception = null;
try {
- return Unsafe.getUnsafe();
- } catch (SecurityException se) {
+ onStart();
+ pool.runWorker(workQueue);
+ } catch (Throwable ex) {
+ exception = ex;
+ } finally {
try {
- return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
- }});
- } catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ onTermination(exception);
+ } catch (Throwable ex) {
+ if (exception == null)
+ exception = ex;
+ } finally {
+ pool.deregisterWorker(this, exception);
}
}
}
-
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (ForkJoinWorkerThread.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long baseOffset;
- static final long spOffset;
- static final long runStateOffset;
- static final long qBase;
- static final int qShift;
- static {
- try {
- _unsafe = getUnsafe();
- baseOffset = fieldOffset("base");
- spOffset = fieldOffset("sp");
- runStateOffset = fieldOffset("runState");
- qBase = _unsafe.arrayBaseOffset(ForkJoinTask[].class);
- int s = _unsafe.arrayIndexScale(ForkJoinTask[].class);
- if ((s & (s-1)) != 0)
- throw new Error("data type scale not a power of two");
- qShift = 31 - Integer.numberOfLeadingZeros(s);
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
}
+
diff --git a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
index 3b46c176ff..ceeb9212d5 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
@@ -1,30 +1,38 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.concurrent.*;
-import java.util.concurrent.locks.*;
-import java.util.concurrent.atomic.*;
-import java.util.*;
-import java.io.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+
+import java.util.AbstractQueue;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
/**
- * An unbounded {@linkplain TransferQueue} based on linked nodes.
+ * An unbounded {@link TransferQueue} based on linked nodes.
* This queue orders elements FIFO (first-in-first-out) with respect
* to any given producer. The <em>head</em> of the queue is that
* element that has been on the queue the longest time for some
* producer. The <em>tail</em> of the queue is that element that has
* been on the queue the shortest time for some producer.
*
- * <p>Beware that, unlike in most collections, the {@code size}
- * method is <em>NOT</em> a constant-time operation. Because of the
+ * <p>Beware that, unlike in most collections, the {@code size} method
+ * is <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current number
- * of elements requires a traversal of the elements.
+ * of elements requires a traversal of the elements, and so may report
+ * inaccurate results if this collection is modified during traversal.
+ * Additionally, the bulk operations {@code addAll},
+ * {@code removeAll}, {@code retainAll}, {@code containsAll},
+ * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
+ * to be performed atomically. For example, an iterator operating
+ * concurrently with an {@code addAll} operation might view only some
+ * of the added elements.
*
* <p>This class and its iterator implement all of the
* <em>optional</em> methods of the {@link Collection} and {@link
@@ -44,381 +52,938 @@ import java.lang.reflect.*;
* @since 1.7
* @author Doug Lea
* @param <E> the type of elements held in this collection
- *
*/
public class LinkedTransferQueue<E> extends AbstractQueue<E>
implements TransferQueue<E>, java.io.Serializable {
private static final long serialVersionUID = -3223113410248163686L;
/*
- * This class extends the approach used in FIFO-mode
- * SynchronousQueues. See the internal documentation, as well as
- * the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer,
- * Lea & Scott
- * (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf)
+ * *** Overview of Dual Queues with Slack ***
+ *
+ * Dual Queues, introduced by Scherer and Scott
+ * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
+ * (linked) queues in which nodes may represent either data or
+ * requests. When a thread tries to enqueue a data node, but
+ * encounters a request node, it instead "matches" and removes it;
+ * and vice versa for enqueuing requests. Blocking Dual Queues
+ * arrange that threads enqueuing unmatched requests block until
+ * other threads provide the match. Dual Synchronous Queues (see
+ * Scherer, Lea, & Scott
+ * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
+ * additionally arrange that threads enqueuing unmatched data also
+ * block. Dual Transfer Queues support all of these modes, as
+ * dictated by callers.
+ *
+ * A FIFO dual queue may be implemented using a variation of the
+ * Michael & Scott (M&S) lock-free queue algorithm
+ * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
+ * It maintains two pointer fields, "head", pointing to a
+ * (matched) node that in turn points to the first actual
+ * (unmatched) queue node (or null if empty); and "tail" that
+ * points to the last node on the queue (or again null if
+ * empty). For example, here is a possible queue with four data
+ * elements:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> U -> U -> U -> U
+ *
+ * The M&S queue algorithm is known to be prone to scalability and
+ * overhead limitations when maintaining (via CAS) these head and
+ * tail pointers. This has led to the development of
+ * contention-reducing variants such as elimination arrays (see
+ * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
+ * optimistic back pointers (see Ladan-Mozes & Shavit
+ * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
+ * However, the nature of dual queues enables a simpler tactic for
+ * improving M&S-style implementations when dual-ness is needed.
+ *
+ * In a dual queue, each node must atomically maintain its match
+ * status. While there are other possible variants, we implement
+ * this here as: for a data-mode node, matching entails CASing an
+ * "item" field from a non-null data value to null upon match, and
+ * vice-versa for request nodes, CASing from null to a data
+ * value. (Note that the linearization properties of this style of
+ * queue are easy to verify -- elements are made available by
+ * linking, and unavailable by matching.) Compared to plain M&S
+ * queues, this property of dual queues requires one additional
+ * successful atomic operation per enq/deq pair. But it also
+ * enables lower cost variants of queue maintenance mechanics. (A
+ * variation of this idea applies even for non-dual queues that
+ * support deletion of interior elements, such as
+ * j.u.c.ConcurrentLinkedQueue.)
+ *
+ * Once a node is matched, its match status can never again
+ * change. We may thus arrange that the linked list of them
+ * contain a prefix of zero or more matched nodes, followed by a
+ * suffix of zero or more unmatched nodes. (Note that we allow
+ * both the prefix and suffix to be zero length, which in turn
+ * means that we do not use a dummy header.) If we were not
+ * concerned with either time or space efficiency, we could
+ * correctly perform enqueue and dequeue operations by traversing
+ * from a pointer to the initial node; CASing the item of the
+ * first unmatched node on match and CASing the next field of the
+ * trailing node on appends. (Plus some special-casing when
+ * initially empty). While this would be a terrible idea in
+ * itself, it does have the benefit of not requiring ANY atomic
+ * updates on head/tail fields.
+ *
+ * We introduce here an approach that lies between the extremes of
+ * never versus always updating queue (head and tail) pointers.
+ * This offers a tradeoff between sometimes requiring extra
+ * traversal steps to locate the first and/or last unmatched
+ * nodes, versus the reduced overhead and contention of fewer
+ * updates to queue pointers. For example, a possible snapshot of
+ * a queue is:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> M -> U -> U -> U -> U
+ *
+ * The best value for this "slack" (the targeted maximum distance
+ * between the value of "head" and the first unmatched node, and
+ * similarly for "tail") is an empirical matter. We have found
+ * that using very small constants in the range of 1-3 works best
+ * over a range of platforms. Larger values introduce increasing
+ * costs of cache misses and risks of long traversal chains, while
+ * smaller values increase CAS contention and overhead.
+ *
+ * Dual queues with slack differ from plain M&S dual queues by
+ * virtue of only sometimes updating head or tail pointers when
+ * matching, appending, or even traversing nodes, in order to
+ * maintain a targeted slack. The idea of "sometimes" may be
+ * operationalized in several ways. The simplest is to use a
+ * per-operation counter incremented on each traversal step, and
+ * to try (via CAS) to update the associated queue pointer
+ * whenever the count exceeds a threshold. Another, that requires
+ * more overhead, is to use random number generators to update
+ * with a given probability per traversal step.
+ *
+ * In any strategy along these lines, because CASes updating
+ * fields may fail, the actual slack may exceed targeted
+ * slack. However, they may be retried at any time to maintain
+ * targets. Even when using very small slack values, this
+ * approach works well for dual queues because it allows all
+ * operations up to the point of matching or appending an item
+ * (hence potentially allowing progress by another thread) to be
+ * read-only, thus not introducing any further contention. As
+ * described below, we implement this by performing slack
+ * maintenance retries only after these points.
+ *
+ * As an accompaniment to such techniques, traversal overhead can
+ * be further reduced without increasing contention of head
+ * pointer updates: Threads may sometimes shortcut the "next" link
+ * path from the current "head" node to be closer to the currently
+ * known first unmatched node, and similarly for tail. Again, this
+ * may be triggered using thresholds or randomization.
+ *
+ * These ideas must be further extended to avoid unbounded amounts
+ * of costly-to-reclaim garbage caused by the sequential "next"
+ * links of nodes starting at old forgotten head nodes: As first
+ * described in detail by Boehm
+ * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
+ * delays noticing that any arbitrarily old node has become
+ * garbage, all newer dead nodes will also be unreclaimed.
+ * (Similar issues arise in non-GC environments.) To cope with
+ * this in our implementation, upon CASing to advance the head
+ * pointer, we set the "next" link of the previous head to point
+ * only to itself; thus limiting the length of connected dead lists.
+ * (We also take similar care to wipe out possibly garbage
+ * retaining values held in other Node fields.) However, doing so
+ * adds some further complexity to traversal: If any "next"
+ * pointer links to itself, it indicates that the current thread
+ * has lagged behind a head-update, and so the traversal must
+ * continue from the "head". Traversals trying to find the
+ * current tail starting from "tail" may also encounter
+ * self-links, in which case they also continue at "head".
+ *
+ * It is tempting in a slack-based scheme to not even use CAS for
+ * updates (similarly to Ladan-Mozes & Shavit). However, this
+ * cannot be done for head updates under the above link-forgetting
+ * mechanics because an update may leave head at a detached node.
+ * And while direct writes are possible for tail updates, they
+ * increase the risk of long retraversals, and hence long garbage
+ * chains, which can be much more costly than is worthwhile
+ * considering that the cost difference of performing a CAS vs
+ * write is smaller when they are not triggered on each operation
+ * (especially considering that writes and CASes equally require
+ * additional GC bookkeeping ("write barriers") that are sometimes
+ * more costly than the writes themselves because of contention).
+ *
+ * *** Overview of implementation ***
+ *
+ * We use a threshold-based approach to updates, with a slack
+ * threshold of two -- that is, we update head/tail when the
+ * current pointer appears to be two or more steps away from the
+ * first/last node. The slack value is hard-wired: a path greater
+ * than one is naturally implemented by checking equality of
+ * traversal pointers except when the list has only one element,
+ * in which case we keep slack threshold at one. Avoiding tracking
+ * explicit counts across method calls slightly simplifies an
+ * already-messy implementation. Using randomization would
+ * probably work better if there were a low-quality dirt-cheap
+ * per-thread one available, but even ThreadLocalRandom is too
+ * heavy for these purposes.
+ *
+ * With such a small slack threshold value, it is not worthwhile
+ * to augment this with path short-circuiting (i.e., unsplicing
+ * interior nodes) except in the case of cancellation/removal (see
+ * below).
+ *
+ * We allow both the head and tail fields to be null before any
+ * nodes are enqueued; initializing upon first append. This
+ * simplifies some other logic, as well as providing more
+ * efficient explicit control paths instead of letting JVMs insert
+ * implicit NullPointerExceptions when they are null. While not
+ * currently fully implemented, we also leave open the possibility
+ * of re-nulling these fields when empty (which is complicated to
+ * arrange, for little benefit.)
+ *
+ * All enqueue/dequeue operations are handled by the single method
+ * "xfer" with parameters indicating whether to act as some form
+ * of offer, put, poll, take, or transfer (each possibly with
+ * timeout). The relative complexity of using one monolithic
+ * method is outweighed by the code bulk and maintenance problems
+ * of using separate methods for each case.
*
- * The main extension is to provide different Wait modes for the
- * main "xfer" method that puts or takes items. These don't
- * impact the basic dual-queue logic, but instead control whether
- * or how threads block upon insertion of request or data nodes
- * into the dual queue. It also uses slightly different
- * conventions for tracking whether nodes are off-list or
- * cancelled.
+ * Operation consists of up to three phases. The first is
+ * implemented within method xfer, the second in tryAppend, and
+ * the third in method awaitMatch.
+ *
+ * 1. Try to match an existing node
+ *
+ * Starting at head, skip already-matched nodes until finding
+ * an unmatched node of opposite mode, if one exists; in that
+ * case, match it and return, also if necessary updating head
+ * to one past the matched node (or the node itself if the
+ * list has no other unmatched nodes). If the CAS misses, then
+ * a loop retries advancing head by two steps until either
+ * success or the slack is at most two. By requiring that each
+ * attempt advances head by two (if applicable), we ensure that
+ * the slack does not grow without bound. Traversals also check
+ * if the initial head is now off-list, in which case they
+ * start at the new head.
+ *
+ * If no candidates are found and the call was untimed
+ * poll/offer (argument "how" is NOW), return.
+ *
+ * 2. Try to append a new node (method tryAppend)
+ *
+ * Starting at current tail pointer, find the actual last node
+ * and try to append a new node (or if head was null, establish
+ * the first node). Nodes can be appended only if their
+ * predecessors are either already matched or are of the same
+ * mode. If we detect otherwise, then a new node with opposite
+ * mode must have been appended during traversal, so we must
+ * restart at phase 1. The traversal and update steps are
+ * otherwise similar to phase 1: Retrying upon CAS misses and
+ * checking for staleness. In particular, if a self-link is
+ * encountered, then we can safely jump to a node on the list
+ * by continuing the traversal at current head.
+ *
+ * On successful append, if the call was ASYNC, return.
+ *
+ * 3. Await match or cancellation (method awaitMatch)
+ *
+ * Wait for another thread to match node; instead cancelling if
+ * the current thread was interrupted or the wait timed out. On
+ * multiprocessors, we use front-of-queue spinning: If a node
+ * appears to be the first unmatched node in the queue, it
+ * spins a bit before blocking. In either case, before blocking
+ * it tries to unsplice any nodes between the current "head"
+ * and the first unmatched node.
+ *
+ * Front-of-queue spinning vastly improves performance of
+ * heavily contended queues. And so long as it is relatively
+ * brief and "quiet", spinning does not much impact performance
+ * of less-contended queues. During spins threads check their
+ * interrupt status and generate a thread-local random number
+ * to decide to occasionally perform a Thread.yield. While
+ * yield has underdefined specs, we assume that it might help,
+ * and will not hurt, in limiting impact of spinning on busy
+ * systems. We also use smaller (1/2) spins for nodes that are
+ * not known to be front but whose predecessors have not
+ * blocked -- these "chained" spins avoid artifacts of
+ * front-of-queue rules which otherwise lead to alternating
+ * nodes spinning vs blocking. Further, front threads that
+ * represent phase changes (from data to request node or vice
+ * versa) compared to their predecessors receive additional
+ * chained spins, reflecting longer paths typically required to
+ * unblock threads during phase changes.
+ *
+ *
+ * ** Unlinking removed interior nodes **
+ *
+ * In addition to minimizing garbage retention via self-linking
+ * described above, we also unlink removed interior nodes. These
+ * may arise due to timed out or interrupted waits, or calls to
+ * remove(x) or Iterator.remove. Normally, given a node that was
+ * at one time known to be the predecessor of some node s that is
+ * to be removed, we can unsplice s by CASing the next field of
+ * its predecessor if it still points to s (otherwise s must
+ * already have been removed or is now offlist). But there are two
+ * situations in which we cannot guarantee to make node s
+ * unreachable in this way: (1) If s is the trailing node of list
+ * (i.e., with null next), then it is pinned as the target node
+ * for appends, so can only be removed later after other nodes are
+ * appended. (2) We cannot necessarily unlink s given a
+ * predecessor node that is matched (including the case of being
+ * cancelled): the predecessor may already be unspliced, in which
+ * case some previous reachable node may still point to s.
+ * (For further explanation see Herlihy & Shavit "The Art of
+ * Multiprocessor Programming" chapter 9). However, in both
+ * cases, we can rule out the need for further action if either s
+ * or its predecessor is (or can be made to be) at, or falls off
+ * from, the head of the list.
+ *
+ * Without taking these into account, it would be possible for an
+ * unbounded number of supposedly removed nodes to remain
+ * reachable. Situations leading to such buildup are uncommon but
+ * can occur in practice; for example when a series of short timed
+ * calls to poll repeatedly time out but never otherwise fall off
+ * the list because of an untimed call to take at the front of the
+ * queue.
+ *
+ * When these cases arise, rather than always retraversing the
+ * entire list to find an actual predecessor to unlink (which
+ * won't help for case (1) anyway), we record a conservative
+ * estimate of possible unsplice failures (in "sweepVotes").
+ * We trigger a full sweep when the estimate exceeds a threshold
+ * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
+ * removal failures to tolerate before sweeping through, unlinking
+ * cancelled nodes that were not unlinked upon initial removal.
+ * We perform sweeps by the thread hitting threshold (rather than
+ * background threads or by spreading work to other threads)
+ * because in the main contexts in which removal occurs, the
+ * caller is already timed-out, cancelled, or performing a
+ * potentially O(n) operation (e.g. remove(x)), none of which are
+ * time-critical enough to warrant the overhead that alternatives
+ * would impose on other threads.
+ *
+ * Because the sweepVotes estimate is conservative, and because
+ * nodes become unlinked "naturally" as they fall off the head of
+ * the queue, and because we allow votes to accumulate even while
+ * sweeps are in progress, there are typically significantly fewer
+ * such nodes than estimated. Choice of a threshold value
+ * balances the likelihood of wasted effort and contention, versus
+ * providing a worst-case bound on retention of interior nodes in
+ * quiescent queues. The value defined below was chosen
+ * empirically to balance these under various timeout scenarios.
+ *
+ * Note that we cannot self-link unlinked interior nodes during
+ * sweeps. However, the associated garbage chains terminate when
+ * some successor ultimately falls off the head of the list and is
+ * self-linked.
*/
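The data/request matching this overview describes is observable at the API level: a blocked take() enqueues a request node that a later put() matches in place rather than appending behind it. A minimal usage sketch (the class name DualQueueDemo and the sleep duration are illustrative assumptions):

    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class DualQueueDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q =
                new LinkedTransferQueue<String>();
            Thread consumer = new Thread(new Runnable() {
                public void run() {
                    try {
                        // Enqueues a request node and blocks until matched.
                        System.out.println("got: " + q.take());
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                    }
                }
            });
            consumer.start();
            Thread.sleep(100); // best effort: let the consumer block first
            q.put("hello");    // matches the waiting request node in place
            consumer.join();
        }
    }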
- // Wait modes for xfer method
- static final int NOWAIT = 0;
- static final int TIMEOUT = 1;
- static final int WAIT = 2;
-
- /** The number of CPUs, for spin control */
- static final int NCPUS = Runtime.getRuntime().availableProcessors();
+ /** True if on multiprocessor */
+ private static final boolean MP =
+ Runtime.getRuntime().availableProcessors() > 1;
/**
- * The number of times to spin before blocking in timed waits.
- * The value is empirically derived -- it works well across a
- * variety of processors and OSes. Empirically, the best value
- * seems not to vary with number of CPUs (beyond 2) so is just
- * a constant.
+ * The number of times to spin (with randomly interspersed calls
+ * to Thread.yield) on multiprocessor before blocking when a node
+ * is apparently the first waiter in the queue. See above for
+ * explanation. Must be a power of two. The value is empirically
+ * derived -- it works pretty well across a variety of processors,
+ * numbers of CPUs, and OSes.
*/
- static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;
+ private static final int FRONT_SPINS = 1 << 7;
/**
- * The number of times to spin before blocking in untimed waits.
- * This is greater than timed value because untimed waits spin
- * faster since they don't need to check times on each spin.
+ * The number of times to spin before blocking when a node is
+ * preceded by another node that is apparently spinning. Also
+ * serves as an increment to FRONT_SPINS on phase changes, and as
+ * base average frequency for yielding during spins. Must be a
+ * power of two.
*/
- static final int maxUntimedSpins = maxTimedSpins * 16;
+ private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
/**
- * The number of nanoseconds for which it is faster to spin
- * rather than to use timed park. A rough estimate suffices.
+ * The maximum number of estimated removal failures (sweepVotes)
+ * to tolerate before sweeping through the queue unlinking
+ * cancelled nodes that were not unlinked upon initial
+ * removal. See above for explanation. The value must be at least
+ * two to avoid useless sweeps when removing trailing nodes.
*/
- static final long spinForTimeoutThreshold = 1000L;
+ static final int SWEEP_THRESHOLD = 32;
/**
- * Node class for LinkedTransferQueue. Opportunistically
- * subclasses from AtomicReference to represent item. Uses Object,
- * not E, to allow setting item to "this" after use, to avoid
- * garbage retention. Similarly, setting the next field to this is
- * used as sentinel that node is off list.
+ * Queue nodes. Uses Object, not E, for items to allow forgetting
+ * them after use. Relies heavily on Unsafe mechanics to minimize
+ * unnecessary ordering constraints: Writes that are intrinsically
+ * ordered wrt other accesses or CASes use simple relaxed forms.
*/
- static final class QNode extends AtomicReference<Object> {
- volatile QNode next;
- volatile Thread waiter; // to control park/unpark
- final boolean isData;
- QNode(Object item, boolean isData) {
- super(item);
+ static final class Node {
+ final boolean isData; // false if this is a request node
+ volatile Object item; // initially non-null if isData; CASed to match
+ volatile Node next;
+ volatile Thread waiter; // null until waiting
+
+ // CAS methods for fields
+ final boolean casNext(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
+ }
+
+ final boolean casItem(Object cmp, Object val) {
+ // assert cmp == null || cmp.getClass() != Node.class;
+ return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
+ }
+
+ /**
+ * Constructs a new node. Uses relaxed write because item can
+ * only be seen after publication via casNext.
+ */
+ Node(Object item, boolean isData) {
+ UNSAFE.putObject(this, itemOffset, item); // relaxed write
this.isData = isData;
}
- static final AtomicReferenceFieldUpdater<QNode, QNode>
- nextUpdater = AtomicReferenceFieldUpdater.newUpdater
- (QNode.class, QNode.class, "next");
+ /**
+ * Links node to itself to avoid garbage retention. Called
+ * only after CASing head field, so uses relaxed write.
+ */
+ final void forgetNext() {
+ UNSAFE.putObject(this, nextOffset, this);
+ }
- final boolean casNext(QNode cmp, QNode val) {
- return nextUpdater.compareAndSet(this, cmp, val);
+ /**
+ * Sets item to self and waiter to null, to avoid garbage
+ * retention after matching or cancelling. Uses relaxed writes
+ * because order is already constrained in the only calling
+ * contexts: item is forgotten only after volatile/atomic
+ * mechanics that extract items. Similarly, clearing waiter
+ * follows either CAS or return from park (if ever parked;
+ * else we don't care).
+ */
+ final void forgetContents() {
+ UNSAFE.putObject(this, itemOffset, this);
+ UNSAFE.putObject(this, waiterOffset, null);
}
- final void clearNext() {
- nextUpdater.lazySet(this, this);
+ /**
+ * Returns true if this node has been matched, including the
+ * case of artificial matches due to cancellation.
+ */
+ final boolean isMatched() {
+ Object x = item;
+ return (x == this) || ((x == null) == isData);
}
- }
+ /**
+ * Returns true if this is an unmatched request node.
+ */
+ final boolean isUnmatchedRequest() {
+ return !isData && item == null;
+ }
- /**
- * Padded version of AtomicReference used for head, tail and
- * cleanMe, to alleviate contention across threads CASing one vs
- * the other.
- */
- static final class PaddedAtomicReference<T> extends AtomicReference<T> {
- // enough padding for 64bytes with 4byte refs
- Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
- PaddedAtomicReference(T r) { super(r); }
+ /**
+ * Returns true if a node with the given mode cannot be
+ * appended to this node because this node is unmatched and
+ * has opposite data mode.
+ */
+ final boolean cannotPrecede(boolean haveData) {
+ boolean d = isData;
+ Object x;
+ return d != haveData && (x = item) != this && (x != null) == d;
+ }
+
+ /**
+ * Tries to artificially match a data node -- used by remove.
+ */
+ final boolean tryMatchData() {
+ // assert isData;
+ Object x = item;
+ if (x != null && x != this && casItem(x, null)) {
+ LockSupport.unpark(waiter);
+ return true;
+ }
+ return false;
+ }
+
+ private static final long serialVersionUID = -3375979862319811754L;
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long itemOffset;
+ private static final long nextOffset;
+ private static final long waiterOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = Node.class;
+ itemOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("item"));
+ nextOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("next"));
+ waiterOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("waiter"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
}
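Since isMatched() above packs three states into the single item field, a worked enumeration may help. The following standalone sketch (hypothetical; SELF stands in for the node's self-reference installed by forgetContents) mirrors the predicate:

    public class MatchStateDemo {
        static final Object SELF = new Object(); // stands in for "this"

        static boolean isMatched(Object item, boolean isData) {
            return (item == SELF) || ((item == null) == isData);
        }

        public static void main(String[] args) {
            System.out.println(isMatched("v",  true));  // false: data, unmatched
            System.out.println(isMatched(null, true));  // true:  data was taken
            System.out.println(isMatched(null, false)); // false: request, unmatched
            System.out.println(isMatched("v",  false)); // true:  request was filled
            System.out.println(isMatched(SELF, true));  // true:  cancelled/off-list
        }
    }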
+ /** head of the queue; null until first enqueue */
+ transient volatile Node head;
- /** head of the queue */
- private transient final PaddedAtomicReference<QNode> head;
- /** tail of the queue */
- private transient final PaddedAtomicReference<QNode> tail;
+ /** tail of the queue; null until first append */
+ private transient volatile Node tail;
- /**
- * Reference to a cancelled node that might not yet have been
- * unlinked from queue because it was the last inserted node
- * when it cancelled.
- */
- private transient final PaddedAtomicReference<QNode> cleanMe;
+ /** The number of apparent failures to unsplice removed nodes */
+ private transient volatile int sweepVotes;
- /**
- * Tries to cas nh as new head; if successful, unlink
- * old head's next node to avoid garbage retention.
+ // CAS methods for fields
+ private boolean casTail(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
+ }
+
+ private boolean casHead(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
+ }
+
+ private boolean casSweepVotes(int cmp, int val) {
+ return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
+ }
+
+ /*
+ * Possible values for "how" argument in xfer method.
*/
- private boolean advanceHead(QNode h, QNode nh) {
- if (h == head.get() && head.compareAndSet(h, nh)) {
- h.clearNext(); // forget old next
- return true;
- }
- return false;
+ private static final int NOW = 0; // for untimed poll, tryTransfer
+ private static final int ASYNC = 1; // for offer, put, add
+ private static final int SYNC = 2; // for transfer, take
+ private static final int TIMED = 3; // for timed poll, tryTransfer
+
+ @SuppressWarnings("unchecked")
+ static <E> E cast(Object item) {
+ // assert item == null || item.getClass() != Node.class;
+ return (E) item;
}
/**
- * Puts or takes an item. Used for most queue operations (except
- * poll() and tryTransfer()). See the similar code in
- * SynchronousQueue for detailed explanation.
+ * Implements all queuing methods. See above for explanation.
*
- * @param e the item or if null, signifies that this is a take
- * @param mode the wait mode: NOWAIT, TIMEOUT, WAIT
- * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
- * @return an item, or null on failure
+ * @param e the item or null for take
+ * @param haveData true if this is a put, else a take
+ * @param how NOW, ASYNC, SYNC, or TIMED
+ * @param nanos timeout in nanosecs, used only if mode is TIMED
+ * @return an item if matched, else e
+ * @throws NullPointerException if haveData mode but e is null
*/
- private Object xfer(Object e, int mode, long nanos) {
- boolean isData = (e != null);
- QNode s = null;
- final PaddedAtomicReference<QNode> head = this.head;
- final PaddedAtomicReference<QNode> tail = this.tail;
-
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
-
- if (t != null && (t == h || t.isData == isData)) {
- if (s == null)
- s = new QNode(e, isData);
- QNode last = t.next;
- if (last != null) {
- if (t == tail.get())
- tail.compareAndSet(t, last);
- }
- else if (t.casNext(null, s)) {
- tail.compareAndSet(t, s);
- return awaitFulfill(t, s, e, mode, nanos);
+ private E xfer(E e, boolean haveData, int how, long nanos) {
+ if (haveData && (e == null))
+ throw new NullPointerException();
+ Node s = null; // the node to append, if needed
+
+ retry:
+ for (;;) { // restart on append race
+
+ for (Node h = head, p = h; p != null;) { // find & match first node
+ boolean isData = p.isData;
+ Object item = p.item;
+ if (item != p && (item != null) == isData) { // unmatched
+ if (isData == haveData) // can't match
+ break;
+ if (p.casItem(item, e)) { // match
+ for (Node q = p; q != h;) {
+ Node n = q.next; // update by 2 unless singleton
+ if (head == h && casHead(h, n == null ? q : n)) {
+ h.forgetNext();
+ break;
+ } // advance and retry
+ if ((h = head) == null ||
+ (q = h.next) == null || !q.isMatched())
+ break; // unless slack < 2
+ }
+ LockSupport.unpark(p.waiter);
+ return LinkedTransferQueue.<E>cast(item);
+ }
}
+ Node n = p.next;
+ p = (p != n) ? n : (h = head); // Use head if p offlist
}
- else if (h != null) {
- QNode first = h.next;
- if (t == tail.get() && first != null &&
- advanceHead(h, first)) {
- Object x = first.get();
- if (x != first && first.compareAndSet(x, e)) {
- LockSupport.unpark(first.waiter);
- return isData? e : x;
- }
- }
+ if (how != NOW) { // No matches available
+ if (s == null)
+ s = new Node(e, haveData);
+ Node pred = tryAppend(s, haveData);
+ if (pred == null)
+ continue retry; // lost race vs opposite mode
+ if (how != ASYNC)
+ return awaitMatch(s, pred, e, (how == TIMED), nanos);
}
+ return e; // not waiting
}
}
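The four "how" modes thread through xfer to produce the public API's behavior; in particular, NOW never enqueues while ASYNC always does. A small illustrative program contrasting the two (class name assumed; the method mappings are taken from the public methods later in this diff):

    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class XferModesDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q =
                new LinkedTransferQueue<Integer>();
            // NOW: no waiting consumer, so nothing is enqueued.
            System.out.println(q.tryTransfer(1)); // false
            System.out.println(q.size());         // 0
            // ASYNC: the element is appended regardless of consumers.
            System.out.println(q.offer(2));       // true
            System.out.println(q.poll());         // 2 (an untimed NOW take)
        }
    }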
-
/**
- * Version of xfer for poll() and tryTransfer, which
- * simplifies control paths both here and in xfer.
+ * Tries to append node s as tail.
+ *
+ * @param s the node to append
+ * @param haveData true if appending in data mode
+ * @return null on failure due to losing race with append in
+ * different mode, else s's predecessor, or s itself if no
+ * predecessor
*/
- private Object fulfill(Object e) {
- boolean isData = (e != null);
- final PaddedAtomicReference<QNode> head = this.head;
- final PaddedAtomicReference<QNode> tail = this.tail;
-
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
-
- if (t != null && (t == h || t.isData == isData)) {
- QNode last = t.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last);
- else
- return null;
- }
+ private Node tryAppend(Node s, boolean haveData) {
+ for (Node t = tail, p = t;;) { // move p to last node and append
+ Node n, u; // temps for reads of next & tail
+ if (p == null && (p = head) == null) {
+ if (casHead(null, s))
+ return s; // initialize
}
- else if (h != null) {
- QNode first = h.next;
- if (t == tail.get() &&
- first != null &&
- advanceHead(h, first)) {
- Object x = first.get();
- if (x != first && first.compareAndSet(x, e)) {
- LockSupport.unpark(first.waiter);
- return isData? e : x;
- }
+ else if (p.cannotPrecede(haveData))
+ return null; // lost race vs opposite mode
+ else if ((n = p.next) != null) // not last; keep traversing
+ p = p != t && t != (u = tail) ? (t = u) : // stale tail
+ (p != n) ? n : null; // restart if off list
+ else if (!p.casNext(null, s))
+ p = p.next; // re-read on CAS failure
+ else {
+ if (p != t) { // update if slack now >= 2
+ while ((tail != t || !casTail(t, s)) &&
+ (t = tail) != null &&
+ (s = t.next) != null && // advance and retry
+ (s = s.next) != null && s != t);
}
+ return p;
}
}
}
/**
- * Spins/blocks until node s is fulfilled or caller gives up,
- * depending on wait mode.
+ * Spins/yields/blocks until node s is matched or caller gives up.
*
- * @param pred the predecessor of waiting node
* @param s the waiting node
+ * @param pred the predecessor of s, or s itself if it has no
+ * predecessor, or null if unknown (the null case does not occur
+ * in any current calls but may in possible future extensions)
* @param e the comparison value for checking match
- * @param mode mode
- * @param nanos timeout value
- * @return matched item, or s if cancelled
+ * @param timed if true, wait only until timeout elapses
+ * @param nanos timeout in nanosecs, used only if timed is true
+ * @return matched item, or e if unmatched on interrupt or timeout
*/
- private Object awaitFulfill(QNode pred, QNode s, Object e,
- int mode, long nanos) {
- if (mode == NOWAIT)
- return null;
-
- long lastTime = (mode == TIMEOUT)? System.nanoTime() : 0;
+ private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
+ long lastTime = timed ? System.nanoTime() : 0L;
Thread w = Thread.currentThread();
- int spins = -1; // set to desired spin count below
+ int spins = -1; // initialized after first item and cancel checks
+ ThreadLocalRandom randomYields = null; // bound if needed
+
for (;;) {
- if (w.isInterrupted())
- s.compareAndSet(e, s);
- Object x = s.get();
- if (x != e) { // Node was matched or cancelled
- advanceHead(pred, s); // unlink if head
- if (x == s) { // was cancelled
- clean(pred, s);
- return null;
- }
- else if (x != null) {
- s.set(s); // avoid garbage retention
- return x;
- }
- else
- return e;
+ Object item = s.item;
+ if (item != e) { // matched
+ // assert item != s;
+ s.forgetContents(); // avoid garbage
+ return LinkedTransferQueue.<E>cast(item);
}
- if (mode == TIMEOUT) {
- long now = System.nanoTime();
- nanos -= now - lastTime;
- lastTime = now;
- if (nanos <= 0) {
- s.compareAndSet(e, s); // try to cancel
- continue;
- }
+ if ((w.isInterrupted() || (timed && nanos <= 0)) &&
+ s.casItem(e, s)) { // cancel
+ unsplice(pred, s);
+ return e;
}
- if (spins < 0) {
- QNode h = head.get(); // only spin if at head
- spins = ((h != null && h.next == s) ?
- (mode == TIMEOUT?
- maxTimedSpins : maxUntimedSpins) : 0);
+
+ if (spins < 0) { // establish spins at/near front
+ if ((spins = spinsFor(pred, s.isData)) > 0)
+ randomYields = ThreadLocalRandom.current();
}
- if (spins > 0)
+ else if (spins > 0) { // spin
--spins;
- else if (s.waiter == null)
- s.waiter = w;
- else if (mode != TIMEOUT) {
- LockSupport.park(this);
- s.waiter = null;
- spins = -1;
+ if (randomYields.nextInt(CHAINED_SPINS) == 0)
+ Thread.yield(); // occasionally yield
+ }
+ else if (s.waiter == null) {
+ s.waiter = w; // request unpark then recheck
}
- else if (nanos > spinForTimeoutThreshold) {
- LockSupport.parkNanos(this, nanos);
- s.waiter = null;
- spins = -1;
+ else if (timed) {
+ long now = System.nanoTime();
+ if ((nanos -= now - lastTime) > 0)
+ LockSupport.parkNanos(this, nanos);
+ lastTime = now;
+ }
+ else {
+ LockSupport.park(this);
}
}
}
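In TIMED mode the loop above recomputes nanos on each pass and cancels by self-matching the node once the budget is exhausted, so a timed poll on an empty queue returns null after roughly the timeout. A tiny sketch (class name assumed):

    import java.util.concurrent.TimeUnit;
    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class TimedPollDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> q =
                new LinkedTransferQueue<String>();
            long t0 = System.nanoTime();
            String x = q.poll(50, TimeUnit.MILLISECONDS); // TIMED mode
            long ms = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t0);
            System.out.println(x + " after ~" + ms + "ms"); // null, ~50ms
        }
    }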
/**
- * Returns validated tail for use in cleaning methods.
+ * Returns spin/yield value for a node with given predecessor and
+ * data mode. See above for explanation.
*/
- private QNode getValidatedTail() {
- for (;;) {
- QNode h = head.get();
- QNode first = h.next;
- if (first != null && first.next == first) { // help advance
- advanceHead(h, first);
- continue;
- }
- QNode t = tail.get();
- QNode last = t.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last); // help advance
- else
- return t;
+ private static int spinsFor(Node pred, boolean haveData) {
+ if (MP && pred != null) {
+ if (pred.isData != haveData) // phase change
+ return FRONT_SPINS + CHAINED_SPINS;
+ if (pred.isMatched()) // probably at front
+ return FRONT_SPINS;
+ if (pred.waiter == null) // pred apparently spinning
+ return CHAINED_SPINS;
+ }
+ return 0;
+ }
+
+ /* -------------- Traversal methods -------------- */
+
+ /**
+ * Returns the successor of p, or the head node if p.next has been
+ * linked to self, which will only be true if traversing with a
+ * stale pointer that is now off the list.
+ */
+ final Node succ(Node p) {
+ Node next = p.next;
+ return (p == next) ? head : next;
+ }
+
+ /**
+ * Returns the first unmatched node of the given mode, or null if
+ * none. Used by methods isEmpty, hasWaitingConsumer.
+ */
+ private Node firstOfMode(boolean isData) {
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return (p.isData == isData) ? p : null;
+ }
+ return null;
+ }
+
+ /**
+ * Returns the item in the first unmatched node with isData; or
+ * null if none. Used by peek.
+ */
+ private E firstDataItem() {
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p)
+ return LinkedTransferQueue.<E>cast(item);
}
+ else if (item == null)
+ return null;
}
+ return null;
}
/**
- * Gets rid of cancelled node s with original predecessor pred.
- *
- * @param pred predecessor of cancelled node
- * @param s the cancelled node
+ * Traverses and counts unmatched nodes of the given mode.
+ * Used by methods size and getWaitingConsumerCount.
*/
- private void clean(QNode pred, QNode s) {
- Thread w = s.waiter;
- if (w != null) { // Wake up thread
- s.waiter = null;
- if (w != Thread.currentThread())
- LockSupport.unpark(w);
+ private int countOfMode(boolean data) {
+ int count = 0;
+ for (Node p = head; p != null; ) {
+ if (!p.isMatched()) {
+ if (p.isData != data)
+ return 0;
+ if (++count == Integer.MAX_VALUE) // saturated
+ break;
+ }
+ Node n = p.next;
+ if (n != p)
+ p = n;
+ else {
+ count = 0;
+ p = head;
+ }
}
+ return count;
+ }
- if (pred == null)
- return;
+ final class Itr implements Iterator<E> {
+ private Node nextNode; // next node to return item for
+ private E nextItem; // the corresponding item
+ private Node lastRet; // last returned node, to support remove
+ private Node lastPred; // predecessor to unlink lastRet
- /*
- * At any given time, exactly one node on list cannot be
- * deleted -- the last inserted node. To accommodate this, if
- * we cannot delete s, we save its predecessor as "cleanMe",
- * processing the previously saved version first. At least one
- * of node s or the node previously saved can always be
- * processed, so this always terminates.
+ /**
+ * Moves to next node after prev, or first node if prev null.
*/
- while (pred.next == s) {
- QNode oldpred = reclean(); // First, help get rid of cleanMe
- QNode t = getValidatedTail();
- if (s != t) { // If not tail, try to unsplice
- QNode sn = s.next; // s.next == s means s already off list
- if (sn == s || pred.casNext(s, sn))
+ private void advance(Node prev) {
+ /*
+ * To track and avoid buildup of deleted nodes in the face
+ * of calls to both Queue.remove and Itr.remove, we must
+ * include variants of unsplice and sweep upon each
+ * advance: Upon Itr.remove, we may need to catch up links
+ * from lastPred, and upon other removes, we might need to
+ * skip ahead from stale nodes and unsplice deleted ones
+ * found while advancing.
+ */
+
+ Node r, b; // reset lastPred upon possible deletion of lastRet
+ if ((r = lastRet) != null && !r.isMatched())
+ lastPred = r; // next lastPred is old lastRet
+ else if ((b = lastPred) == null || b.isMatched())
+ lastPred = null; // at start of list
+ else {
+ Node s, n; // help with removal of lastPred.next
+ while ((s = b.next) != null &&
+ s != b && s.isMatched() &&
+ (n = s.next) != null && n != s)
+ b.casNext(s, n);
+ }
+
+ this.lastRet = prev;
+
+ for (Node p = prev, s, n;;) {
+ s = (p == null) ? head : p.next;
+ if (s == null)
+ break;
+ else if (s == p) {
+ p = null;
+ continue;
+ }
+ Object item = s.item;
+ if (s.isData) {
+ if (item != null && item != s) {
+ nextItem = LinkedTransferQueue.<E>cast(item);
+ nextNode = s;
+ return;
+ }
+ }
+ else if (item == null)
+ break;
+ // assert s.isMatched();
+ if (p == null)
+ p = s;
+ else if ((n = s.next) == null)
break;
+ else if (s == n)
+ p = null;
+ else
+ p.casNext(s, n);
}
- else if (oldpred == pred || // Already saved
- (oldpred == null && cleanMe.compareAndSet(null, pred)))
- break; // Postpone cleaning
+ nextNode = null;
+ nextItem = null;
+ }
+
+ Itr() {
+ advance(null);
+ }
+
+ public final boolean hasNext() {
+ return nextNode != null;
+ }
+
+ public final E next() {
+ Node p = nextNode;
+ if (p == null) throw new NoSuchElementException();
+ E e = nextItem;
+ advance(p);
+ return e;
+ }
+
+ public final void remove() {
+ final Node lastRet = this.lastRet;
+ if (lastRet == null)
+ throw new IllegalStateException();
+ this.lastRet = null;
+ if (lastRet.tryMatchData())
+ unsplice(lastPred, lastRet);
}
}
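Itr.advance skips matched nodes and restarts from head on self-links, which yields the weakly consistent behavior promised by the iterator() javadoc below: no ConcurrentModificationException under concurrent updates. A small sketch (class name assumed):

    import java.util.Iterator;
    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class WeakIterDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<Integer> q =
                new LinkedTransferQueue<Integer>();
            for (int i = 0; i < 4; i++)
                q.offer(i);
            Iterator<Integer> it = q.iterator();
            q.offer(99); // modification mid-traversal: no CME is thrown
            while (it.hasNext())
                System.out.println(it.next()); // 0..3, and possibly 99
        }
    }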
+ /* -------------- Removal methods -------------- */
+
/**
- * Tries to unsplice the cancelled node held in cleanMe that was
- * previously uncleanable because it was at tail.
+ * Unsplices (now or later) the given deleted/cancelled node with
+ * the given predecessor.
*
- * @return current cleanMe node (or null)
+ * @param pred a node that was at one time known to be the
+ * predecessor of s, or null or s itself if s is/was at head
+ * @param s the node to be unspliced
*/
- private QNode reclean() {
+ final void unsplice(Node pred, Node s) {
+ s.forgetContents(); // forget unneeded fields
/*
- * cleanMe is, or at one time was, predecessor of cancelled
- * node s that was the tail so could not be unspliced. If s
- * is no longer the tail, try to unsplice if necessary and
- * make cleanMe slot available. This differs from similar
- * code in clean() because we must check that pred still
- * points to a cancelled node that must be unspliced -- if
- * not, we can (must) clear cleanMe without unsplicing.
- * This can loop only due to contention on casNext or
- * clearing cleanMe.
+ * See above for rationale. Briefly: if pred still points to
+ * s, try to unlink s. If s cannot be unlinked, because it is the
+ * trailing node or pred might be unlinked, and neither pred
+ * nor s are head or offlist, add to sweepVotes, and if enough
+ * votes have accumulated, sweep.
*/
- QNode pred;
- while ((pred = cleanMe.get()) != null) {
- QNode t = getValidatedTail();
- QNode s = pred.next;
- if (s != t) {
- QNode sn;
- if (s == null || s == pred || s.get() != s ||
- (sn = s.next) == s || pred.casNext(s, sn))
- cleanMe.compareAndSet(pred, null);
+ if (pred != null && pred != s && pred.next == s) {
+ Node n = s.next;
+ if (n == null ||
+ (n != s && pred.casNext(s, n) && pred.isMatched())) {
+ for (;;) { // check if at, or could be, head
+ Node h = head;
+ if (h == pred || h == s || h == null)
+ return; // at head or list empty
+ if (!h.isMatched())
+ break;
+ Node hn = h.next;
+ if (hn == null)
+ return; // now empty
+ if (hn != h && casHead(h, hn))
+ h.forgetNext(); // advance head
+ }
+ if (pred.next != pred && s.next != s) { // recheck if offlist
+ for (;;) { // sweep now if enough votes
+ int v = sweepVotes;
+ if (v < SWEEP_THRESHOLD) {
+ if (casSweepVotes(v, v + 1))
+ break;
+ }
+ else if (casSweepVotes(v, 0)) {
+ sweep();
+ break;
+ }
+ }
+ }
}
- else // s is still tail; cannot clean
+ }
+ }
+
+ /**
+ * Unlinks matched (typically cancelled) nodes encountered in a
+ * traversal from head.
+ */
+ private void sweep() {
+ for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
+ if (!s.isMatched())
+ // Unmatched nodes are never self-linked
+ p = s;
+ else if ((n = s.next) == null) // trailing node is pinned
break;
+ else if (s == n) // stale
+ // No need to also check for p == s, since that implies s == n
+ p = head;
+ else
+ p.casNext(s, n);
}
- return pred;
}
/**
+ * Main implementation of remove(Object)
+ */
+ private boolean findAndRemove(Object e) {
+ if (e != null) {
+ for (Node pred = null, p = head; p != null; ) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && e.equals(item) &&
+ p.tryMatchData()) {
+ unsplice(pred, p);
+ return true;
+ }
+ }
+ else if (item == null)
+ break;
+ pred = p;
+ if ((p = p.next) == pred) { // stale
+ pred = null;
+ p = head;
+ }
+ }
+ }
+ return false;
+ }
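findAndRemove composes the two removal pieces above: tryMatchData to claim the node, then unsplice to unlink it (or cast a sweep vote). A short usage sketch (class name assumed):

    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class RemoveDemo {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q =
                new LinkedTransferQueue<String>();
            q.offer("a");
            q.offer("b");
            System.out.println(q.remove("a")); // true: matched then unspliced
            System.out.println(q.size());      // 1: only "b" remains
            System.out.println(q.remove("a")); // false: no longer present
        }
    }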
+
+
+ /**
* Creates an initially empty {@code LinkedTransferQueue}.
*/
public LinkedTransferQueue() {
- QNode dummy = new QNode(null, false);
- head = new PaddedAtomicReference<QNode>(dummy);
- tail = new PaddedAtomicReference<QNode>(dummy);
- cleanMe = new PaddedAtomicReference<QNode>(null);
}
/**
@@ -435,252 +1000,200 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
addAll(c);
}
- public void put(E e) throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (Thread.interrupted()) throw new InterruptedException();
- xfer(e, NOWAIT, 0);
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void put(E e) {
+ xfer(e, true, ASYNC, 0);
}
- public boolean offer(E e, long timeout, TimeUnit unit)
- throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (Thread.interrupted()) throw new InterruptedException();
- xfer(e, NOWAIT, 0);
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block or
+ * return {@code false}.
+ *
+ * @return {@code true} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
+ * BlockingQueue.offer})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offer(E e, long timeout, TimeUnit unit) {
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Queue#offer})
+ * @throws NullPointerException if the specified element is null
+ */
public boolean offer(E e) {
- if (e == null) throw new NullPointerException();
- xfer(e, NOWAIT, 0);
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never throw
+ * {@link IllegalStateException} or return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Collection#add})
+ * @throws NullPointerException if the specified element is null
+ */
public boolean add(E e) {
- if (e == null) throw new NullPointerException();
- xfer(e, NOWAIT, 0);
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean tryTransfer(E e) {
+ return xfer(e, true, NOW, 0) == null;
+ }
+
+ /**
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
public void transfer(E e) throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (xfer(e, WAIT, 0) == null) {
- Thread.interrupted();
+ if (xfer(e, true, SYNC, 0) != null) {
+ Thread.interrupted(); // failure possible only due to interrupt
throw new InterruptedException();
}
}
+ /**
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
public boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null)
+ if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
return true;
if (!Thread.interrupted())
return false;
throw new InterruptedException();
}
- public boolean tryTransfer(E e) {
- if (e == null) throw new NullPointerException();
- return fulfill(e) != null;
- }
-
public E take() throws InterruptedException {
- Object e = xfer(null, WAIT, 0);
+ E e = xfer(null, false, SYNC, 0);
if (e != null)
- return (E)e;
+ return e;
Thread.interrupted();
throw new InterruptedException();
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
- Object e = xfer(null, TIMEOUT, unit.toNanos(timeout));
+ E e = xfer(null, false, TIMED, unit.toNanos(timeout));
if (e != null || !Thread.interrupted())
- return (E)e;
+ return e;
throw new InterruptedException();
}
public E poll() {
- return (E)fulfill(null);
+ return xfer(null, false, NOW, 0);
}
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
public int drainTo(Collection<? super E> c) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
- E e;
- while ( (e = poll()) != null) {
+ for (E e; (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
- E e;
- while (n < maxElements && (e = poll()) != null) {
+ for (E e; n < maxElements && (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
- // Traversal-based methods
-
/**
- * Returns head after performing any outstanding helping steps.
+ * Returns an iterator over the elements in this queue in proper sequence.
+ * The elements will be returned in order from first (head) to last (tail).
+ *
+ * <p>The returned iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and guarantees to traverse
+ * elements as they existed upon construction of the iterator, and
+ * may (but is not guaranteed to) reflect any modifications
+ * subsequent to construction.
+ *
+ * @return an iterator over the elements in this queue in proper sequence
*/
- private QNode traversalHead() {
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
- if (h != null && t != null) {
- QNode last = t.next;
- QNode first = h.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last);
- else if (first != null) {
- Object x = first.get();
- if (x == first)
- advanceHead(h, first);
- else
- return h;
- }
- else
- return h;
- }
- }
- reclean();
- }
- }
-
-
public Iterator<E> iterator() {
return new Itr();
}
- /**
- * Iterators. Basic strategy is to traverse list, treating
- * non-data (i.e., request) nodes as terminating list.
- * Once a valid data node is found, the item is cached
- * so that the next call to next() will return it even
- * if subsequently removed.
- */
- class Itr implements Iterator<E> {
- QNode next; // node to return next
- QNode pnext; // predecessor of next
- QNode snext; // successor of next
- QNode curr; // last returned node, for remove()
- QNode pcurr; // predecessor of curr, for remove()
- E nextItem; // Cache of next item, once commited to in next
-
- Itr() {
- findNext();
- }
-
- /**
- * Ensures next points to next valid node, or null if none.
- */
- void findNext() {
- for (;;) {
- QNode pred = pnext;
- QNode q = next;
- if (pred == null || pred == q) {
- pred = traversalHead();
- q = pred.next;
- }
- if (q == null || !q.isData) {
- next = null;
- return;
- }
- Object x = q.get();
- QNode s = q.next;
- if (x != null && q != x && q != s) {
- nextItem = (E)x;
- snext = s;
- pnext = pred;
- next = q;
- return;
- }
- pnext = q;
- next = s;
- }
- }
-
- public boolean hasNext() {
- return next != null;
- }
-
- public E next() {
- if (next == null) throw new NoSuchElementException();
- pcurr = pnext;
- curr = next;
- pnext = next;
- next = snext;
- E x = nextItem;
- findNext();
- return x;
- }
-
- public void remove() {
- QNode p = curr;
- if (p == null)
- throw new IllegalStateException();
- Object x = p.get();
- if (x != null && x != p && p.compareAndSet(x, p))
- clean(pcurr, p);
- }
- }
-
public E peek() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return null;
- Object x = p.get();
- if (p != x) {
- if (!p.isData)
- return null;
- if (x != null)
- return (E)x;
- }
- }
+ return firstDataItem();
}
+ /**
+ * Returns {@code true} if this queue contains no elements.
+ *
+ * @return {@code true} if this queue contains no elements
+ */
public boolean isEmpty() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return true;
- Object x = p.get();
- if (p != x) {
- if (!p.isData)
- return true;
- if (x != null)
- return false;
- }
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return !p.isData;
}
+ return true;
}
public boolean hasWaitingConsumer() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return false;
- Object x = p.get();
- if (p != x)
- return !p.isData;
- }
+ return firstOfMode(false) != null;
}
/**
@@ -696,58 +1209,64 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @return the number of elements in this queue
*/
public int size() {
- int count = 0;
- QNode h = traversalHead();
- for (QNode p = h.next; p != null && p.isData; p = p.next) {
- Object x = p.get();
- if (x != null && x != p) {
- if (++count == Integer.MAX_VALUE) // saturated
- break;
- }
- }
- return count;
+ return countOfMode(true);
}
public int getWaitingConsumerCount() {
- int count = 0;
- QNode h = traversalHead();
- for (QNode p = h.next; p != null && !p.isData; p = p.next) {
- if (p.get() == null) {
- if (++count == Integer.MAX_VALUE)
- break;
- }
- }
- return count;
+ return countOfMode(false);
}
- public int remainingCapacity() {
- return Integer.MAX_VALUE;
+ /**
+ * Removes a single instance of the specified element from this queue,
+ * if it is present. More formally, removes an element {@code e} such
+ * that {@code o.equals(e)}, if this queue contains one or more such
+ * elements.
+ * Returns {@code true} if this queue contained the specified element
+ * (or equivalently, if this queue changed as a result of the call).
+ *
+ * @param o element to be removed from this queue, if present
+ * @return {@code true} if this queue changed as a result of the call
+ */
+ public boolean remove(Object o) {
+ return findAndRemove(o);
}
- public boolean remove(Object o) {
- if (o == null)
- return false;
- for (;;) {
- QNode pred = traversalHead();
- for (;;) {
- QNode q = pred.next;
- if (q == null || !q.isData)
- return false;
- if (q == pred) // restart
- break;
- Object x = q.get();
- if (x != null && x != q && o.equals(x) &&
- q.compareAndSet(x, q)) {
- clean(pred, q);
+ /**
+ * Returns {@code true} if this queue contains the specified element.
+ * More formally, returns {@code true} if and only if this queue contains
+ * at least one element {@code e} such that {@code o.equals(e)}.
+ *
+ * @param o object to be checked for containment in this queue
+ * @return {@code true} if this queue contains the specified element
+ */
+ public boolean contains(Object o) {
+ if (o == null) return false;
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && o.equals(item))
return true;
- }
- pred = q;
}
+ else if (item == null)
+ break;
}
+ return false;
+ }
+
+ /**
+ * Always returns {@code Integer.MAX_VALUE} because a
+ * {@code LinkedTransferQueue} is not capacity constrained.
+ *
+ * @return {@code Integer.MAX_VALUE} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
+ * BlockingQueue.remainingCapacity})
+ */
+ public int remainingCapacity() {
+ return Integer.MAX_VALUE;
}
/**
- * Save the state to a stream (that is, serialize it).
+ * Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
@@ -763,16 +1282,17 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
}
/**
- * Reconstitute the Queue instance from a stream (that is,
- * deserialize it).
+ * Reconstitutes the Queue instance from a stream (that is,
+ * deserializes it).
+ *
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
- resetHeadAndTail();
for (;;) {
- E item = (E)s.readObject();
+ @SuppressWarnings("unchecked")
+ E item = (E) s.readObject();
if (item == null)
break;
else
@@ -780,61 +1300,53 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
}
}
+ // Unsafe mechanics
- // Support for resetting head/tail while deserializing
- private void resetHeadAndTail() {
- QNode dummy = new QNode(null, false);
- _unsafe.putObjectVolatile(this, headOffset,
- new PaddedAtomicReference<QNode>(dummy));
- _unsafe.putObjectVolatile(this, tailOffset,
- new PaddedAtomicReference<QNode>(dummy));
- _unsafe.putObjectVolatile(this, cleanMeOffset,
- new PaddedAtomicReference<QNode>(null));
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long headOffset;
+ private static final long tailOffset;
+ private static final long sweepVotesOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = LinkedTransferQueue.class;
+ headOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("head"));
+ tailOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("tail"));
+ sweepVotesOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("sweepVotes"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
}
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ static sun.misc.Unsafe getUnsafe() {
try {
- return Unsafe.getUnsafe();
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (LinkedTransferQueue.class.getDeclaredField(fieldName));
- }
-
- private static final Unsafe _unsafe;
- private static final long headOffset;
- private static final long tailOffset;
- private static final long cleanMeOffset;
- static {
- try {
- _unsafe = getUnsafe();
- headOffset = fieldOffset("head");
- tailOffset = fieldOffset("tail");
- cleanMeOffset = fieldOffset("cleanMe");
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
-
}
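
For reference, a minimal sketch (not part of the patch) of how the reworked public accessors behave, assuming scala.concurrent.forkjoin.LinkedTransferQueue is on the classpath; the demo class name is hypothetical:

import scala.concurrent.forkjoin.LinkedTransferQueue;

public class QueueDemo {
    public static void main(String[] args) {
        LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
        q.offer("a");
        q.offer("b");
        System.out.println(q.size());               // 2, via countOfMode(true)
        System.out.println(q.contains("a"));        // true
        System.out.println(q.remove("a"));          // true, via findAndRemove
        System.out.println(q.remainingCapacity());  // Integer.MAX_VALUE: unbounded
        System.out.println(q.hasWaitingConsumer()); // false: firstOfMode(false) finds no blocked taker
    }
}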
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
index 2d36f7eb33..1e7cdd952d 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
@@ -1,64 +1,73 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
/**
- * Recursive resultless ForkJoinTasks. This class establishes
- * conventions to parameterize resultless actions as <tt>Void</tt>
- * ForkJoinTasks. Because <tt>null</tt> is the only valid value of
- * <tt>Void</tt>, methods such as join always return <tt>null</tt>
- * upon completion.
+ * A recursive resultless {@link ForkJoinTask}. This class
+ * establishes conventions to parameterize resultless actions as
+ * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
+ * only valid value of type {@code Void}, methods such as {@code join}
+ * always return {@code null} upon completion.
*
- * <p><b>Sample Usages.</b> Here is a sketch of a ForkJoin sort that
- * sorts a given <tt>long[]</tt> array:
+ * <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
+ * sort that sorts a given {@code long[]} array:
*
- * <pre>
- * class SortTask extends RecursiveAction {
- * final long[] array; final int lo; final int hi;
+ * <pre> {@code
+ * static class SortTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
* SortTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
+ * SortTask(long[] array) { this(array, 0, array.length); }
* protected void compute() {
- * if (hi - lo &lt; THRESHOLD)
- * sequentiallySort(array, lo, hi);
+ * if (hi - lo < THRESHOLD)
+ * sortSequentially(lo, hi);
* else {
- * int mid = (lo + hi) &gt;&gt;&gt; 1;
+ * int mid = (lo + hi) >>> 1;
* invokeAll(new SortTask(array, lo, mid),
* new SortTask(array, mid, hi));
- * merge(array, lo, hi);
+ * merge(lo, mid, hi);
* }
* }
- * }
- * </pre>
+ * // implementation details follow:
+ * final static int THRESHOLD = 1000;
+ * void sortSequentially(int lo, int hi) {
+ * Arrays.sort(array, lo, hi);
+ * }
+ * void merge(int lo, int mid, int hi) {
+ * long[] buf = Arrays.copyOfRange(array, lo, mid);
+ * for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ * array[j] = (k == hi || buf[i] < array[k]) ?
+ * buf[i++] : array[k++];
+ * }
+ * }}</pre>
*
- * You could then sort anArray by creating <tt>new SortTask(anArray, 0,
- * anArray.length-1) </tt> and invoking it in a ForkJoinPool.
- * As a more concrete simple example, the following task increments
- * each element of an array:
- * <pre>
+ * You could then sort {@code anArray} by creating {@code new
+ * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
+ * concrete simple example, the following task increments each element
+ * of an array:
+ * <pre> {@code
* class IncrementTask extends RecursiveAction {
- * final long[] array; final int lo; final int hi;
+ * final long[] array; final int lo, hi;
* IncrementTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
* protected void compute() {
- * if (hi - lo &lt; THRESHOLD) {
- * for (int i = lo; i &lt; hi; ++i)
+ * if (hi - lo < THRESHOLD) {
+ * for (int i = lo; i < hi; ++i)
* array[i]++;
* }
* else {
- * int mid = (lo + hi) &gt;&gt;&gt; 1;
+ * int mid = (lo + hi) >>> 1;
* invokeAll(new IncrementTask(array, lo, mid),
* new IncrementTask(array, mid, hi));
* }
* }
- * }
- * </pre>
- *
+ * }}</pre>
*
* <p>The following example illustrates some refinements and idioms
* that may lead to better performance: RecursiveActions need not be
@@ -66,33 +75,33 @@ package scala.concurrent.forkjoin;
* divide-and-conquer approach. Here is a class that sums the squares
* of each element of a double array, by subdividing out only the
* right-hand-sides of repeated divisions by two, and keeping track of
- * them with a chain of <tt>next</tt> references. It uses a dynamic
- * threshold based on method <tt>surplus</tt>, but counterbalances
- * potential excess partitioning by directly performing leaf actions
- * on unstolen tasks rather than further subdividing.
+ * them with a chain of {@code next} references. It uses a dynamic
+ * threshold based on method {@code getSurplusQueuedTaskCount}, but
+ * counterbalances potential excess partitioning by directly
+ * performing leaf actions on unstolen tasks rather than further
+ * subdividing.
*
- * <pre>
+ * <pre> {@code
* double sumOfSquares(ForkJoinPool pool, double[] array) {
* int n = array.length;
- * int seqSize = 1 + n / (8 * pool.getParallelism());
- * Applyer a = new Applyer(array, 0, n, seqSize, null);
+ * Applyer a = new Applyer(array, 0, n, null);
* pool.invoke(a);
* return a.result;
* }
*
* class Applyer extends RecursiveAction {
* final double[] array;
- * final int lo, hi, seqSize;
+ * final int lo, hi;
* double result;
* Applyer next; // keeps track of right-hand-side tasks
- * Applyer(double[] array, int lo, int hi, int seqSize, Applyer next) {
+ * Applyer(double[] array, int lo, int hi, Applyer next) {
* this.array = array; this.lo = lo; this.hi = hi;
- * this.seqSize = seqSize; this.next = next;
+ * this.next = next;
* }
*
- * double atLeaf(int l, int r) {
+ * double atLeaf(int l, int h) {
* double sum = 0;
- * for (int i = l; i &lt; h; ++i) // perform leftmost base step
+ * for (int i = l; i < h; ++i) // perform leftmost base step
* sum += array[i] * array[i];
* return sum;
* }
@@ -101,10 +110,9 @@ package scala.concurrent.forkjoin;
* int l = lo;
* int h = hi;
* Applyer right = null;
- * while (h - l &gt; 1 &amp;&amp;
- * ForkJoinWorkerThread.getEstimatedSurplusTaskCount() &lt;= 3) {
- * int mid = (l + h) &gt;&gt;&gt; 1;
- * right = new Applyer(array, mid, h, seqSize, right);
+ * while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ * int mid = (l + h) >>> 1;
+ * right = new Applyer(array, mid, h, right);
* right.fork();
* h = mid;
* }
@@ -113,17 +121,20 @@ package scala.concurrent.forkjoin;
* if (right.tryUnfork()) // directly calculate if not stolen
* sum += right.atLeaf(right.lo, right.hi);
* else {
- * right.helpJoin();
+ * right.join();
* sum += right.result;
* }
* right = right.next;
* }
* result = sum;
* }
- * }
- * </pre>
+ * }}</pre>
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public abstract class RecursiveAction extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
@@ -131,7 +142,9 @@ public abstract class RecursiveAction extends ForkJoinTask<Void> {
protected abstract void compute();
/**
- * Always returns null
+ * Always returns {@code null}.
+ *
+ * @return {@code null} always
*/
public final Void getRawResult() { return null; }
@@ -141,7 +154,7 @@ public abstract class RecursiveAction extends ForkJoinTask<Void> {
protected final void setRawResult(Void mustBeNull) { }
/**
- * Implements execution conventions for RecursiveActions
+ * Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
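
As the Javadoc above notes, a RecursiveAction is run by submitting it to a ForkJoinPool. A minimal sketch (not part of the patch) driving the IncrementTask example; the demo class and the THRESHOLD value are assumptions for illustration:

import scala.concurrent.forkjoin.ForkJoinPool;
import scala.concurrent.forkjoin.RecursiveAction;

public class IncrementDemo {
    static final int THRESHOLD = 1000;

    static class IncrementTask extends RecursiveAction {
        final long[] array; final int lo, hi;
        IncrementTask(long[] array, int lo, int hi) {
            this.array = array; this.lo = lo; this.hi = hi;
        }
        protected void compute() {
            if (hi - lo < THRESHOLD) {
                for (int i = lo; i < hi; ++i)   // small enough: do it directly
                    array[i]++;
            } else {
                int mid = (lo + hi) >>> 1;      // otherwise split in half
                invokeAll(new IncrementTask(array, lo, mid),
                          new IncrementTask(array, mid, hi));
            }
        }
    }

    public static void main(String[] args) {
        long[] data = new long[10000];
        new ForkJoinPool().invoke(new IncrementTask(data, 0, data.length));
        System.out.println(data[0]); // 1
    }
}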
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
index a526f75597..d1e1547143 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
@@ -1,29 +1,29 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
/**
- * Recursive result-bearing ForkJoinTasks.
- * <p> For a classic example, here is a task computing Fibonacci numbers:
+ * A recursive result-bearing {@link ForkJoinTask}.
*
- * <pre>
- * class Fibonacci extends RecursiveTask&lt;Integer&gt; {
+ * <p>For a classic example, here is a task computing Fibonacci numbers:
+ *
+ * <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
* final int n;
- * Fibonnaci(int n) { this.n = n; }
+ * Fibonacci(int n) { this.n = n; }
* Integer compute() {
- * if (n &lt;= 1)
+ * if (n <= 1)
* return n;
* Fibonacci f1 = new Fibonacci(n - 1);
* f1.fork();
* Fibonacci f2 = new Fibonacci(n - 2);
* return f2.compute() + f1.join();
* }
- * }
- * </pre>
+ * }}</pre>
*
* However, besides being a dumb way to compute Fibonacci functions
* (there is a simple fast linear algorithm that you'd use in
@@ -33,17 +33,14 @@ package scala.concurrent.forkjoin;
* minimum granularity size (for example 10 here) for which you always
* sequentially solve rather than subdividing.
*
+ * @since 1.7
+ * @author Doug Lea
*/
public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
+ private static final long serialVersionUID = 5232453952276485270L;
/**
- * Empty constructor for use by subclasses.
- */
- protected RecursiveTask() {
- }
-
- /**
- * The result returned by compute method.
+ * The result of the computation.
*/
V result;
@@ -61,7 +58,7 @@ public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
}
/**
- * Implements execution conventions for RecursiveTask
+ * Implements execution conventions for RecursiveTask.
*/
protected final boolean exec() {
result = compute();
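
A minimal sketch (not part of the patch) that invokes the Fibonacci task from the Javadoc above in a ForkJoinPool; the demo class name is an assumption:

import scala.concurrent.forkjoin.ForkJoinPool;
import scala.concurrent.forkjoin.RecursiveTask;

public class FibDemo {
    static class Fibonacci extends RecursiveTask<Integer> {
        final int n;
        Fibonacci(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= 1)
                return n;
            Fibonacci f1 = new Fibonacci(n - 1);
            f1.fork();                        // compute n-1 asynchronously
            Fibonacci f2 = new Fibonacci(n - 2);
            return f2.compute() + f1.join();  // compute n-2 directly, then join
        }
    }

    public static void main(String[] args) {
        System.out.println(new ForkJoinPool().invoke(new Fibonacci(10))); // 55
    }
}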
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java b/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
index 34e2e37f37..19237c9092 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
@@ -1,49 +1,53 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
+
+import java.util.Random;
/**
- * A random number generator with the same properties as class {@link
- * Random} but isolated to the current Thread. Like the global
- * generator used by the {@link java.lang.Math} class, a
- * ThreadLocalRandom is initialized with an internally generated seed
- * that may not otherwise be modified. When applicable, use of
- * ThreadLocalRandom rather than shared Random objects in concurrent
- * programs will typically encounter much less overhead and
- * contention. ThreadLocalRandoms are particularly appropriate when
- * multiple tasks (for example, each a {@link ForkJoinTask}), use
- * random numbers in parallel in thread pools.
+ * A random number generator isolated to the current thread. Like the
+ * global {@link java.util.Random} generator used by the {@link
+ * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
+ * with an internally generated seed that may not otherwise be
+ * modified. When applicable, use of {@code ThreadLocalRandom} rather
+ * than shared {@code Random} objects in concurrent programs will
+ * typically encounter much less overhead and contention. Use of
+ * {@code ThreadLocalRandom} is particularly appropriate when multiple
+ * tasks (for example, each a {@link ForkJoinTask}) use random numbers
+ * in parallel in thread pools.
*
* <p>Usages of this class should typically be of the form:
- * <code>ThreadLocalRandom.current().nextX(...)</code> (where
- * <code>X</code> is <code>Int</code>, <code>Long</code>, etc).
+ * {@code ThreadLocalRandom.current().nextX(...)} (where
+ * {@code X} is {@code Int}, {@code Long}, etc).
* When all usages are of this form, it is never possible to
- * accidently share ThreadLocalRandoms across multiple threads.
+ * accidentally share a {@code ThreadLocalRandom} across multiple threads.

*
* <p>This class also provides additional commonly used bounded random
* generation methods.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
- private final static long multiplier = 0x5DEECE66DL;
- private final static long addend = 0xBL;
- private final static long mask = (1L << 48) - 1;
+ private static final long multiplier = 0x5DEECE66DL;
+ private static final long addend = 0xBL;
+ private static final long mask = (1L << 48) - 1;
/**
- * The random seed. We can't use super.seed
+ * The random seed. We can't use super.seed.
*/
private long rnd;
/**
- * Initialization flag to permit the first and only allowed call
- * to setSeed (inside Random constructor) to succeed. We can't
- * allow others since it would cause setting seed in one part of a
- * program to unintentionally impact other usages by the thread.
+ * Initialization flag to permit calls to setSeed to succeed only
+ * while executing the Random constructor. We can't allow others
+ * since it would cause setting seed in one part of a program to
+ * unintentionally impact other usages by the thread.
*/
boolean initialized;
@@ -65,40 +69,42 @@ public class ThreadLocalRandom extends Random {
/**
* Constructor called only by localRandom.initialValue.
- * We rely on the fact that the superclass no-arg constructor
- * invokes setSeed exactly once to initialize.
*/
ThreadLocalRandom() {
super();
+ initialized = true;
}
/**
- * Returns the current Thread's ThreadLocalRandom
- * @return the current Thread's ThreadLocalRandom
+ * Returns the current thread's {@code ThreadLocalRandom}.
+ *
+ * @return the current thread's {@code ThreadLocalRandom}
*/
public static ThreadLocalRandom current() {
return localRandom.get();
}
/**
- * Throws UnsupportedOperationException. Setting seeds in this
- * generator is unsupported.
+ * Throws {@code UnsupportedOperationException}. Setting seeds in
+ * this generator is not supported.
+ *
* @throws UnsupportedOperationException always
*/
public void setSeed(long seed) {
if (initialized)
throw new UnsupportedOperationException();
- initialized = true;
rnd = (seed ^ multiplier) & mask;
}
protected int next(int bits) {
- return (int)((rnd = (rnd * multiplier + addend) & mask) >>> (48-bits));
+ rnd = (rnd * multiplier + addend) & mask;
+ return (int) (rnd >>> (48-bits));
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @throws IllegalArgumentException if least greater than or equal
@@ -113,7 +119,8 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed value
- * between 0 (inclusive) and the specified value (exclusive)
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
@@ -131,17 +138,18 @@ public class ThreadLocalRandom extends Random {
while (n >= Integer.MAX_VALUE) {
int bits = next(2);
long half = n >>> 1;
- long nextn = ((bits & 2) == 0)? half : n - half;
+ long nextn = ((bits & 2) == 0) ? half : n - half;
if ((bits & 1) == 0)
offset += n - nextn;
n = nextn;
}
- return offset + nextInt((int)n);
+ return offset + nextInt((int) n);
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
@@ -156,7 +164,8 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed {@code double} value
- * between 0 (inclusive) and the specified value (exclusive)
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
@@ -171,6 +180,7 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
@@ -183,4 +193,5 @@ public class ThreadLocalRandom extends Random {
return nextDouble() * (bound - least) + least;
}
+ private static final long serialVersionUID = -5851777807851030925L;
}
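
A minimal sketch (not part of the patch) of the ThreadLocalRandom.current().nextX(...) usage pattern recommended above; the demo class name is an assumption:

import scala.concurrent.forkjoin.ThreadLocalRandom;

public class RandomDemo {
    public static void main(String[] args) {
        ThreadLocalRandom rng = ThreadLocalRandom.current(); // per-thread instance
        int die = rng.nextInt(1, 7);                 // uniform in [1, 7)
        double weight = rng.nextDouble(0.0, 100.0);  // uniform in [0.0, 100.0)
        System.out.println(die + " " + weight);
        // rng.setSeed(42L) would throw UnsupportedOperationException:
        // the seed cannot be reset after construction.
    }
}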
diff --git a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
index 9c7b2289c4..7d149c7ae5 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
@@ -1,7 +1,7 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
@@ -11,21 +11,23 @@ import java.util.concurrent.*;
* A {@link BlockingQueue} in which producers may wait for consumers
* to receive elements. A {@code TransferQueue} may be useful for
* example in message passing applications in which producers
- * sometimes (using method {@code transfer}) await receipt of
- * elements by consumers invoking {@code take} or {@code poll},
- * while at other times enqueue elements (via method {@code put})
- * without waiting for receipt. Non-blocking and time-out versions of
- * {@code tryTransfer} are also available. A TransferQueue may also
- * be queried via {@code hasWaitingConsumer} whether there are any
- * threads waiting for items, which is a converse analogy to a
- * {@code peek} operation.
+ * sometimes (using method {@link #transfer}) await receipt of
+ * elements by consumers invoking {@code take} or {@code poll}, while
+ * at other times enqueue elements (via method {@code put}) without
+ * waiting for receipt.
+ * {@linkplain #tryTransfer(Object) Non-blocking} and
+ * {@linkplain #tryTransfer(Object,long,TimeUnit) time-out} versions of
+ * {@code tryTransfer} are also available.
+ * A {@code TransferQueue} may also be queried, via {@link
+ * #hasWaitingConsumer}, whether there are any threads waiting for
+ * items, which is a converse analogy to a {@code peek} operation.
*
- * <p>Like any {@code BlockingQueue}, a {@code TransferQueue} may be
- * capacity bounded. If so, an attempted {@code transfer} operation
- * may initially block waiting for available space, and/or
- * subsequently block waiting for reception by a consumer. Note that
- * in a queue with zero capacity, such as {@link SynchronousQueue},
- * {@code put} and {@code transfer} are effectively synonymous.
+ * <p>Like other blocking queues, a {@code TransferQueue} may be
+ * capacity bounded. If so, an attempted transfer operation may
+ * initially block waiting for available space, and/or subsequently
+ * block waiting for reception by a consumer. Note that in a queue
+ * with zero capacity, such as {@link SynchronousQueue}, {@code put}
+ * and {@code transfer} are effectively synonymous.
*
* <p>This interface is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
@@ -37,9 +39,12 @@ import java.util.concurrent.*;
*/
public interface TransferQueue<E> extends BlockingQueue<E> {
/**
- * Transfers the specified element if there exists a consumer
- * already waiting to receive it, otherwise returning {@code false}
- * without enqueuing the element.
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
*
* @param e the element to transfer
* @return {@code true} if the element was transferred, else
@@ -53,13 +58,16 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
boolean tryTransfer(E e);
/**
- * Inserts the specified element into this queue, waiting if
- * necessary for space to become available and the element to be
- * dequeued by a consumer invoking {@code take} or {@code poll}.
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer.
*
* @param e the element to transfer
* @throws InterruptedException if interrupted while waiting,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
@@ -69,10 +77,15 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
void transfer(E e) throws InterruptedException;
/**
- * Inserts the specified element into this queue, waiting up to
- * the specified wait time if necessary for space to become
- * available and the element to be dequeued by a consumer invoking
- * {@code take} or {@code poll}.
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
*
* @param e the element to transfer
* @param timeout how long to wait before giving up, in units of
@@ -81,9 +94,9 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
* {@code timeout} parameter
* @return {@code true} if successful, or {@code false} if
* the specified waiting time elapses before completion,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws InterruptedException if interrupted while waiting,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
@@ -95,7 +108,8 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Returns {@code true} if there is at least one consumer waiting
- * to dequeue an element via {@code take} or {@code poll}.
+ * to receive an element via {@link #take} or
+ * timed {@link #poll(long,TimeUnit) poll}.
* The return value represents a momentary state of affairs.
*
* @return {@code true} if there is at least one waiting consumer
@@ -104,15 +118,16 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Returns an estimate of the number of consumers waiting to
- * dequeue elements via {@code take} or {@code poll}. The return
- * value is an approximation of a momentary state of affairs, that
- * may be inaccurate if consumers have completed or given up
- * waiting. The value may be useful for monitoring and heuristics,
- * but not for synchronization control. Implementations of this
+ * receive elements via {@link #take} or timed
+ * {@link #poll(long,TimeUnit) poll}. The return value is an
+ * approximation of a momentary state of affairs that may be
+ * inaccurate if consumers have completed or given up waiting.
+ * The value may be useful for monitoring and heuristics, but
+ * not for synchronization control. Implementations of this
* method are likely to be noticeably slower than those for
* {@link #hasWaitingConsumer}.
*
- * @return the number of consumers waiting to dequeue elements
+ * @return the number of consumers waiting to receive elements
*/
int getWaitingConsumerCount();
}
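
A minimal sketch (not part of the patch) of the handoff semantics documented above, using LinkedTransferQueue as the TransferQueue implementation; the class and thread names are assumptions:

import scala.concurrent.forkjoin.LinkedTransferQueue;
import scala.concurrent.forkjoin.TransferQueue;

public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        final TransferQueue<String> q = new LinkedTransferQueue<String>();

        System.out.println(q.tryTransfer("lost")); // false: no consumer is waiting

        Thread consumer = new Thread(new Runnable() {
            public void run() {
                try {
                    System.out.println("got " + q.take());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });
        consumer.start();

        q.transfer("handoff"); // blocks until the consumer receives the element
        consumer.join();
    }
}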
diff --git a/src/forkjoin/scala/concurrent/forkjoin/package-info.java b/src/forkjoin/scala/concurrent/forkjoin/package-info.java
index b8fa0fad02..3561b9b44a 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/package-info.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/package-info.java
@@ -1,7 +1,7 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
@@ -15,7 +15,7 @@
* Threads. However, when applicable, they typically provide
* significantly greater performance on multiprocessor platforms.
*
- * <p> Candidates for fork/join processing mainly include those that
+ * <p>Candidates for fork/join processing mainly include those that
* can be expressed using parallel divide-and-conquer techniques: To
* solve a problem, break it in two (or more) parts, and then solve
* those parts in parallel, continuing on in this way until the
@@ -24,6 +24,5 @@
* available to other threads (normally one per CPU), that help
* complete the tasks. In general, the most efficient ForkJoinTasks
* are those that directly implement this algorithmic design pattern.
- *
*/
package scala.concurrent.forkjoin;
diff --git a/src/library/scala/Enumeration.scala b/src/library/scala/Enumeration.scala
index 3d85f2f52f..80571943e5 100644
--- a/src/library/scala/Enumeration.scala
+++ b/src/library/scala/Enumeration.scala
@@ -55,7 +55,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
thisenum =>
def this() = this(0)
-
+
@deprecated("Names should be specified individually or discovered via reflection", "2.10.0")
def this(initial: Int, names: String*) = {
this(initial)
@@ -201,7 +201,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
case _ => false
}
override def hashCode: Int = id.##
-
+
/** Create a ValueSet which contains this value and another one */
def + (v: Value) = ValueSet(this, v)
}
@@ -266,7 +266,7 @@ abstract class Enumeration (initial: Int) extends Serializable {
* new array of longs */
def toBitMask: Array[Long] = nnIds.toBitMask
}
-
+
/** A factory object for value sets */
object ValueSet {
import generic.CanBuildFrom
diff --git a/src/library/scala/Function0.scala b/src/library/scala/Function0.scala
index 508ef25e81..dceed26439 100644
--- a/src/library/scala/Function0.scala
+++ b/src/library/scala/Function0.scala
@@ -12,12 +12,12 @@ package scala
/** A function of 0 parameters.
- *
+ *
* In the following example, the definition of javaVersion is a
* shorthand for the anonymous class definition anonfun0:
*
* {{{
- * object Main extends App {
+ * object Main extends App {
* val javaVersion = () => sys.props("java.version")
*
* val anonfun0 = new Function0[String] {
@@ -31,13 +31,13 @@ package scala
* be suggested by the existence of [[scala.PartialFunction]]. The only
* distinction between `Function1` and `PartialFunction` is that the
* latter can specify inputs which it will not handle.
-
+
*/
trait Function0[@specialized +R] extends AnyRef { self =>
/** Apply the body of this function to the arguments.
* @return the result of function application.
*/
def apply(): R
-
+
override def toString() = "<function0>"
}
diff --git a/src/library/scala/Function1.scala b/src/library/scala/Function1.scala
index 06936e54cb..8995ef912b 100644
--- a/src/library/scala/Function1.scala
+++ b/src/library/scala/Function1.scala
@@ -11,12 +11,12 @@ package scala
/** A function of 1 parameter.
- *
+ *
* In the following example, the definition of succ is a
* shorthand for the anonymous class definition anonfun1:
*
* {{{
- * object Main extends App {
+ * object Main extends App {
* val succ = (x: Int) => x + 1
* val anonfun1 = new Function1[Int, Int] {
* def apply(x: Int): Int = x + 1
@@ -29,7 +29,7 @@ package scala
* be suggested by the existence of [[scala.PartialFunction]]. The only
* distinction between `Function1` and `PartialFunction` is that the
* latter can specify inputs which it will not handle.
-
+
*/
@annotation.implicitNotFound(msg = "No implicit view available from ${T1} => ${R}.")
trait Function1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double, scala.AnyRef) -T1, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double, scala.AnyRef) +R] extends AnyRef { self =>
@@ -37,7 +37,7 @@ trait Function1[@specialized(scala.Int, scala.Long, scala.Float, scala.Double, s
* @return the result of function application.
*/
def apply(v1: T1): R
-
+
/** Composes two instances of Function1 in a new Function1, with this function applied last.
*
* @tparam A the type to which function `g` can be applied
diff --git a/src/library/scala/Function2.scala b/src/library/scala/Function2.scala
index 1812f042e0..cacb96ef5d 100644
--- a/src/library/scala/Function2.scala
+++ b/src/library/scala/Function2.scala
@@ -11,12 +11,12 @@ package scala
/** A function of 2 parameters.
- *
+ *
* In the following example, the definition of max is a
* shorthand for the anonymous class definition anonfun2:
*
* {{{
- * object Main extends App {
+ * object Main extends App {
* val max = (x: Int, y: Int) => if (x < y) y else x
*
* val anonfun2 = new Function2[Int, Int, Int] {
@@ -30,7 +30,7 @@ package scala
* be suggested by the existence of [[scala.PartialFunction]]. The only
* distinction between `Function1` and `PartialFunction` is that the
* latter can specify inputs which it will not handle.
-
+
*/
trait Function2[@specialized(scala.Int, scala.Long, scala.Double) -T1, @specialized(scala.Int, scala.Long, scala.Double) -T2, @specialized(scala.Unit, scala.Boolean, scala.Int, scala.Float, scala.Long, scala.Double) +R] extends AnyRef { self =>
/** Apply the body of this function to the arguments.
diff --git a/src/library/scala/PartialFunction.scala b/src/library/scala/PartialFunction.scala
index 70caff0221..3c5d6d0d23 100644
--- a/src/library/scala/PartialFunction.scala
+++ b/src/library/scala/PartialFunction.scala
@@ -26,18 +26,18 @@ package scala
*
* {{{
* val sample = 1 to 10
- * val isEven: PartialFunction[Int, String] = {
- * case x if x % 2 == 0 => x+" is even"
+ * val isEven: PartialFunction[Int, String] = {
+ * case x if x % 2 == 0 => x+" is even"
* }
*
* // the method collect can use isDefinedAt to select which members to collect
* val evenNumbers = sample collect isEven
*
- * val isOdd: PartialFunction[Int, String] = {
- * case x if x % 2 == 1 => x+" is odd"
+ * val isOdd: PartialFunction[Int, String] = {
+ * case x if x % 2 == 1 => x+" is odd"
* }
*
- * // the method orElse allows chaining another partial function to handle
+ * // the method orElse allows chaining another partial function to handle
* // input outside the declared domain
* val numbers = sample map (isEven orElse isOdd)
* }}}
diff --git a/src/library/scala/Product1.scala b/src/library/scala/Product1.scala
index 0106ad34ee..ab8b0a4505 100644
--- a/src/library/scala/Product1.scala
+++ b/src/library/scala/Product1.scala
@@ -23,7 +23,7 @@ trait Product1[@specialized(Int, Long, Double) +T1] extends Product {
*/
override def productArity = 1
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product1[@specialized(Int, Long, Double) +T1] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case _ => throw new IndexOutOfBoundsException(n.toString())
}
diff --git a/src/library/scala/Product10.scala b/src/library/scala/Product10.scala
index ca53b580c0..536fb2fed9 100644
--- a/src/library/scala/Product10.scala
+++ b/src/library/scala/Product10.scala
@@ -23,7 +23,7 @@ trait Product10[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10] extends Produ
*/
override def productArity = 10
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product10[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10] extends Produ
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product11.scala b/src/library/scala/Product11.scala
index 3d5942f3fa..7d49eccc5e 100644
--- a/src/library/scala/Product11.scala
+++ b/src/library/scala/Product11.scala
@@ -23,7 +23,7 @@ trait Product11[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11] extends
*/
override def productArity = 11
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product11[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11] extends
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product12.scala b/src/library/scala/Product12.scala
index 803193793c..0e9c4a01a2 100644
--- a/src/library/scala/Product12.scala
+++ b/src/library/scala/Product12.scala
@@ -23,7 +23,7 @@ trait Product12[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12] e
*/
override def productArity = 12
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product12[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12] e
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product13.scala b/src/library/scala/Product13.scala
index 0c1d889624..a0629201d0 100644
--- a/src/library/scala/Product13.scala
+++ b/src/library/scala/Product13.scala
@@ -23,7 +23,7 @@ trait Product13[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 13
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product13[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product14.scala b/src/library/scala/Product14.scala
index 0222309a0a..32dda81c3e 100644
--- a/src/library/scala/Product14.scala
+++ b/src/library/scala/Product14.scala
@@ -23,7 +23,7 @@ trait Product14[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 14
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product14[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product15.scala b/src/library/scala/Product15.scala
index 41be7ec504..57851f9870 100644
--- a/src/library/scala/Product15.scala
+++ b/src/library/scala/Product15.scala
@@ -23,7 +23,7 @@ trait Product15[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 15
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product15[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product16.scala b/src/library/scala/Product16.scala
index accee3f965..75076f3b3c 100644
--- a/src/library/scala/Product16.scala
+++ b/src/library/scala/Product16.scala
@@ -23,7 +23,7 @@ trait Product16[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 16
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product16[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product17.scala b/src/library/scala/Product17.scala
index da80ae9a6b..9ee6072ffe 100644
--- a/src/library/scala/Product17.scala
+++ b/src/library/scala/Product17.scala
@@ -23,7 +23,7 @@ trait Product17[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 17
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product17[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product18.scala b/src/library/scala/Product18.scala
index ea25647762..25d0839af1 100644
--- a/src/library/scala/Product18.scala
+++ b/src/library/scala/Product18.scala
@@ -23,7 +23,7 @@ trait Product18[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 18
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product18[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product19.scala b/src/library/scala/Product19.scala
index 5d4347c1a8..5464de7264 100644
--- a/src/library/scala/Product19.scala
+++ b/src/library/scala/Product19.scala
@@ -23,7 +23,7 @@ trait Product19[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 19
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product19[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product2.scala b/src/library/scala/Product2.scala
index 4e6c70f463..8097245926 100644
--- a/src/library/scala/Product2.scala
+++ b/src/library/scala/Product2.scala
@@ -23,7 +23,7 @@ trait Product2[@specialized(Int, Long, Double) +T1, @specialized(Int, Long, Doub
*/
override def productArity = 2
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product2[@specialized(Int, Long, Double) +T1, @specialized(Int, Long, Doub
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case _ => throw new IndexOutOfBoundsException(n.toString())
diff --git a/src/library/scala/Product20.scala b/src/library/scala/Product20.scala
index f23a0dee3a..b094e09aca 100644
--- a/src/library/scala/Product20.scala
+++ b/src/library/scala/Product20.scala
@@ -23,7 +23,7 @@ trait Product20[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 20
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product20[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product21.scala b/src/library/scala/Product21.scala
index 4a4fe0697f..fa06cfb438 100644
--- a/src/library/scala/Product21.scala
+++ b/src/library/scala/Product21.scala
@@ -23,7 +23,7 @@ trait Product21[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 21
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product21[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product22.scala b/src/library/scala/Product22.scala
index 7ee01b85ae..46038bf1a2 100644
--- a/src/library/scala/Product22.scala
+++ b/src/library/scala/Product22.scala
@@ -23,7 +23,7 @@ trait Product22[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
override def productArity = 22
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product22[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product3.scala b/src/library/scala/Product3.scala
index 23563c9e23..3a4cd8fc5e 100644
--- a/src/library/scala/Product3.scala
+++ b/src/library/scala/Product3.scala
@@ -23,7 +23,7 @@ trait Product3[+T1, +T2, +T3] extends Product {
*/
override def productArity = 3
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product3[+T1, +T2, +T3] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product4.scala b/src/library/scala/Product4.scala
index 4abaa9051b..a4d47457fa 100644
--- a/src/library/scala/Product4.scala
+++ b/src/library/scala/Product4.scala
@@ -23,7 +23,7 @@ trait Product4[+T1, +T2, +T3, +T4] extends Product {
*/
override def productArity = 4
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product4[+T1, +T2, +T3, +T4] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product5.scala b/src/library/scala/Product5.scala
index 9aa4af58b7..9f25e70af0 100644
--- a/src/library/scala/Product5.scala
+++ b/src/library/scala/Product5.scala
@@ -23,7 +23,7 @@ trait Product5[+T1, +T2, +T3, +T4, +T5] extends Product {
*/
override def productArity = 5
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product5[+T1, +T2, +T3, +T4, +T5] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product6.scala b/src/library/scala/Product6.scala
index 2ca1d7c31e..87fd318c68 100644
--- a/src/library/scala/Product6.scala
+++ b/src/library/scala/Product6.scala
@@ -23,7 +23,7 @@ trait Product6[+T1, +T2, +T3, +T4, +T5, +T6] extends Product {
*/
override def productArity = 6
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product6[+T1, +T2, +T3, +T4, +T5, +T6] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product7.scala b/src/library/scala/Product7.scala
index b7af2d3e32..d074503315 100644
--- a/src/library/scala/Product7.scala
+++ b/src/library/scala/Product7.scala
@@ -23,7 +23,7 @@ trait Product7[+T1, +T2, +T3, +T4, +T5, +T6, +T7] extends Product {
*/
override def productArity = 7
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product7[+T1, +T2, +T3, +T4, +T5, +T6, +T7] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product8.scala b/src/library/scala/Product8.scala
index 17b5e48512..bd6150c235 100644
--- a/src/library/scala/Product8.scala
+++ b/src/library/scala/Product8.scala
@@ -23,7 +23,7 @@ trait Product8[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8] extends Product {
*/
override def productArity = 8
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product8[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Product9.scala b/src/library/scala/Product9.scala
index 784e9a7029..1f042944cc 100644
--- a/src/library/scala/Product9.scala
+++ b/src/library/scala/Product9.scala
@@ -23,7 +23,7 @@ trait Product9[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9] extends Product {
*/
override def productArity = 9
-
+
/** Returns the n-th projection of this product if 0 < n <= productArity,
* otherwise throws an `IndexOutOfBoundsException`.
*
@@ -33,7 +33,7 @@ trait Product9[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9] extends Product {
*/
@throws(classOf[IndexOutOfBoundsException])
- override def productElement(n: Int) = n match {
+ override def productElement(n: Int) = n match {
case 0 => _1
case 1 => _2
case 2 => _3
diff --git a/src/library/scala/Specializable.scala b/src/library/scala/Specializable.scala
index 811a735110..67126b3069 100644
--- a/src/library/scala/Specializable.scala
+++ b/src/library/scala/Specializable.scala
@@ -16,7 +16,7 @@ trait Specializable extends SpecializableCompanion
object Specializable {
// No type parameter in @specialized annotation.
trait SpecializedGroup { }
-
+
// Smuggle a list of types by way of a tuple upon which Group is parameterized.
class Group[T >: Null](value: T) extends SpecializedGroup { }
diff --git a/src/library/scala/StringContext.scala b/src/library/scala/StringContext.scala
index 6116547aa2..8ca312afc5 100644
--- a/src/library/scala/StringContext.scala
+++ b/src/library/scala/StringContext.scala
@@ -13,7 +13,7 @@ import collection.mutable.ArrayBuffer
/** A class to support string interpolation.
* This class supports string interpolation as outlined in Scala SIP-11.
* It needs to be fully documented once the SIP is accepted.
- *
+ *
* @param parts The parts that make up the interpolated string,
* without the expressions that get inserted by interpolation.
*/
@@ -26,13 +26,13 @@ case class StringContext(parts: String*) {
* @param `args` The arguments to be checked.
* @throws An `IllegalArgumentException` if this is not the case.
*/
- def checkLengths(args: Any*): Unit =
+ def checkLengths(args: Any*): Unit =
if (parts.length != args.length + 1)
throw new IllegalArgumentException("wrong number of arguments for interpolated string")
/** The simple string interpolator.
- *
+ *
* It inserts its arguments between corresponding parts of the string context.
* It also treats standard escape sequences as defined in the Scala specification.
* @param `args` The arguments to be inserted into the resulting string.
@@ -55,21 +55,21 @@ case class StringContext(parts: String*) {
}
/** The formatted string interpolator.
- *
+ *
* It inserts its arguments between corresponding parts of the string context.
* It also treats standard escape sequences as defined in the Scala specification.
* Finally, if an interpolated expression is followed by a `parts` string
* that starts with a formatting specifier, the expression is formatted according to that
* specifier. All specifiers allowed in Java format strings are handled, and in the same
* way they are treated in Java.
- *
+ *
* @param `args` The arguments to be inserted into the resulting string.
* @throws An `IllegalArgumentException`
* if the number of `parts` in the enclosing `StringContext` does not exceed
* the number of arguments `arg` by exactly 1.
* @throws A `StringContext.InvalidEscapeException` if a `parts` string contains a backslash (`\`) character
* that does not start a valid escape sequence.
- *
+ *
* Note: The `f` method works by assembling a format string from all the `parts` strings and using
* `java.lang.String.format` to format all arguments with that format string. The format string is
* obtained by concatenating all `parts` strings, and performing two transformations:
@@ -125,14 +125,14 @@ object StringContext {
* @param idx The index of the offending backslash character in `str`.
*/
class InvalidEscapeException(str: String, idx: Int)
- extends IllegalArgumentException("invalid escape character at index "+idx+" in \""+str+"\"")
+ extends IllegalArgumentException("invalid escape character at index "+idx+" in \""+str+"\"")
/** Expands standard Scala escape sequences in a string.
* Escape sequences are:
* control: `\b`, `\t`, `\n`, `\f`, `\r`
* escape: `\\`, `\"`, `\'`
* octal: `\d` `\dd` `\ddd` where `d` is an octal digit between `0` and `7`.
- *
+ *
* @param A string that may contain escape sequences
* @return The string with all escape sequences expanded.
*/
diff --git a/src/library/scala/Tuple1.scala b/src/library/scala/Tuple1.scala
index 02fdd0cba5..6d31d35e51 100644
--- a/src/library/scala/Tuple1.scala
+++ b/src/library/scala/Tuple1.scala
@@ -19,5 +19,5 @@ case class Tuple1[@specialized(Int, Long, Double) +T1](_1: T1)
extends Product1[T1]
{
override def toString() = "(" + _1 + ")"
-
+
}
diff --git a/src/library/scala/Tuple10.scala b/src/library/scala/Tuple10.scala
index ba2a02a8b2..10d554d467 100644
--- a/src/library/scala/Tuple10.scala
+++ b/src/library/scala/Tuple10.scala
@@ -28,5 +28,5 @@ case class Tuple10[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10](_1: T1, _2
extends Product10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + ")"
-
+
}
diff --git a/src/library/scala/Tuple11.scala b/src/library/scala/Tuple11.scala
index 7f51d172d4..2065e4f017 100644
--- a/src/library/scala/Tuple11.scala
+++ b/src/library/scala/Tuple11.scala
@@ -29,5 +29,5 @@ case class Tuple11[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11](_1:
extends Product11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 + ")"
-
+
}
diff --git a/src/library/scala/Tuple12.scala b/src/library/scala/Tuple12.scala
index 4bbc6a0eab..a463986752 100644
--- a/src/library/scala/Tuple12.scala
+++ b/src/library/scala/Tuple12.scala
@@ -31,5 +31,5 @@ case class Tuple12[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 +
"," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + ")"
-
+
}
diff --git a/src/library/scala/Tuple13.scala b/src/library/scala/Tuple13.scala
index 77bd59bf2e..2bee0d69ad 100644
--- a/src/library/scala/Tuple13.scala
+++ b/src/library/scala/Tuple13.scala
@@ -32,5 +32,5 @@ case class Tuple13[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 +
"," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + ")"
-
+
}
diff --git a/src/library/scala/Tuple14.scala b/src/library/scala/Tuple14.scala
index bf7a4ce016..60f7c51e64 100644
--- a/src/library/scala/Tuple14.scala
+++ b/src/library/scala/Tuple14.scala
@@ -33,5 +33,5 @@ case class Tuple14[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 +
"," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + ")"
-
+
}
diff --git a/src/library/scala/Tuple15.scala b/src/library/scala/Tuple15.scala
index 582c359bc6..fc8e30580b 100644
--- a/src/library/scala/Tuple15.scala
+++ b/src/library/scala/Tuple15.scala
@@ -34,5 +34,5 @@ case class Tuple15[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 +
"," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + ")"
-
+
}
diff --git a/src/library/scala/Tuple16.scala b/src/library/scala/Tuple16.scala
index a1e9a790ff..80181f6648 100644
--- a/src/library/scala/Tuple16.scala
+++ b/src/library/scala/Tuple16.scala
@@ -35,5 +35,5 @@ case class Tuple16[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 +
"," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + ")"
-
+
}
diff --git a/src/library/scala/Tuple17.scala b/src/library/scala/Tuple17.scala
index f531766c18..6236122be2 100644
--- a/src/library/scala/Tuple17.scala
+++ b/src/library/scala/Tuple17.scala
@@ -36,5 +36,5 @@ case class Tuple17[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 +
"," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + ")"
-
+
}
diff --git a/src/library/scala/Tuple18.scala b/src/library/scala/Tuple18.scala
index a96db25e4b..dd6a819ac5 100644
--- a/src/library/scala/Tuple18.scala
+++ b/src/library/scala/Tuple18.scala
@@ -37,5 +37,5 @@ case class Tuple18[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 +
"," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + ")"
-
+
}
diff --git a/src/library/scala/Tuple19.scala b/src/library/scala/Tuple19.scala
index 718280d68a..65f0fd22cf 100644
--- a/src/library/scala/Tuple19.scala
+++ b/src/library/scala/Tuple19.scala
@@ -38,5 +38,5 @@ case class Tuple19[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 +
"," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + ")"
-
+
}
diff --git a/src/library/scala/Tuple2.scala b/src/library/scala/Tuple2.scala
index b1befca4fa..684d2266e8 100644
--- a/src/library/scala/Tuple2.scala
+++ b/src/library/scala/Tuple2.scala
@@ -23,7 +23,7 @@ case class Tuple2[@specialized(Int, Long, Double, Char, Boolean, AnyRef) +T1, @s
extends Product2[T1, T2]
{
override def toString() = "(" + _1 + "," + _2 + ")"
-
+
/** Swaps the elements of this `Tuple`.
* @return a new Tuple where the first element is the second element of this Tuple and the
* second element is the first element of this Tuple.
diff --git a/src/library/scala/Tuple20.scala b/src/library/scala/Tuple20.scala
index 4a44c0bb89..cf3626909d 100644
--- a/src/library/scala/Tuple20.scala
+++ b/src/library/scala/Tuple20.scala
@@ -39,5 +39,5 @@ case class Tuple20[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 +
"," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + "," + _20 + ")"
-
+
}
diff --git a/src/library/scala/Tuple21.scala b/src/library/scala/Tuple21.scala
index 580a169e39..78b9c585c6 100644
--- a/src/library/scala/Tuple21.scala
+++ b/src/library/scala/Tuple21.scala
@@ -40,5 +40,5 @@ case class Tuple21[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 +
"," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + "," + _20 + "," + _21 + ")"
-
+
}
diff --git a/src/library/scala/Tuple22.scala b/src/library/scala/Tuple22.scala
index fd3392ddea..0993dfbbc3 100644
--- a/src/library/scala/Tuple22.scala
+++ b/src/library/scala/Tuple22.scala
@@ -41,5 +41,5 @@ case class Tuple22[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 +
"," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + "," + _20 + "," + _21 + "," + _22 + ")"
-
+
}
diff --git a/src/library/scala/Tuple3.scala b/src/library/scala/Tuple3.scala
index 0d5399308b..dfa0c962a2 100644
--- a/src/library/scala/Tuple3.scala
+++ b/src/library/scala/Tuple3.scala
@@ -24,7 +24,7 @@ case class Tuple3[+T1, +T2, +T3](_1: T1, _2: T2, _3: T3)
extends Product3[T1, T2, T3]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + ")"
-
+
@deprecated("Use `zipped` instead.", "2.9.0")
def zip[Repr1, El1, El2, El3, To](implicit w1: T1 => TLike[El1, Repr1],
diff --git a/src/library/scala/Tuple4.scala b/src/library/scala/Tuple4.scala
index a859078bcf..a919072c88 100644
--- a/src/library/scala/Tuple4.scala
+++ b/src/library/scala/Tuple4.scala
@@ -22,5 +22,5 @@ case class Tuple4[+T1, +T2, +T3, +T4](_1: T1, _2: T2, _3: T3, _4: T4)
extends Product4[T1, T2, T3, T4]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + ")"
-
+
}
diff --git a/src/library/scala/Tuple5.scala b/src/library/scala/Tuple5.scala
index 1edfb673ee..6a94f48ab4 100644
--- a/src/library/scala/Tuple5.scala
+++ b/src/library/scala/Tuple5.scala
@@ -23,5 +23,5 @@ case class Tuple5[+T1, +T2, +T3, +T4, +T5](_1: T1, _2: T2, _3: T3, _4: T4, _5: T
extends Product5[T1, T2, T3, T4, T5]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + ")"
-
+
}
diff --git a/src/library/scala/Tuple6.scala b/src/library/scala/Tuple6.scala
index 5b74937e58..34f8224627 100644
--- a/src/library/scala/Tuple6.scala
+++ b/src/library/scala/Tuple6.scala
@@ -24,5 +24,5 @@ case class Tuple6[+T1, +T2, +T3, +T4, +T5, +T6](_1: T1, _2: T2, _3: T3, _4: T4,
extends Product6[T1, T2, T3, T4, T5, T6]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + ")"
-
+
}
diff --git a/src/library/scala/Tuple7.scala b/src/library/scala/Tuple7.scala
index a7f572e9f0..6fc3477ba2 100644
--- a/src/library/scala/Tuple7.scala
+++ b/src/library/scala/Tuple7.scala
@@ -25,5 +25,5 @@ case class Tuple7[+T1, +T2, +T3, +T4, +T5, +T6, +T7](_1: T1, _2: T2, _3: T3, _4:
extends Product7[T1, T2, T3, T4, T5, T6, T7]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + ")"
-
+
}
diff --git a/src/library/scala/Tuple8.scala b/src/library/scala/Tuple8.scala
index 9bb427d689..1e21b684fc 100644
--- a/src/library/scala/Tuple8.scala
+++ b/src/library/scala/Tuple8.scala
@@ -26,5 +26,5 @@ case class Tuple8[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8](_1: T1, _2: T2, _3: T3
extends Product8[T1, T2, T3, T4, T5, T6, T7, T8]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + ")"
-
+
}
diff --git a/src/library/scala/Tuple9.scala b/src/library/scala/Tuple9.scala
index 4d50539e0c..453cea31a1 100644
--- a/src/library/scala/Tuple9.scala
+++ b/src/library/scala/Tuple9.scala
@@ -27,5 +27,5 @@ case class Tuple9[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9](_1: T1, _2: T2, _
extends Product9[T1, T2, T3, T4, T5, T6, T7, T8, T9]
{
override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + ")"
-
+
}
diff --git a/src/library/scala/annotation/elidable.scala b/src/library/scala/annotation/elidable.scala
index 8dc180d7ab..880b645daa 100644
--- a/src/library/scala/annotation/elidable.scala
+++ b/src/library/scala/annotation/elidable.scala
@@ -10,22 +10,53 @@ package scala.annotation
import java.util.logging.Level
-/** An annotation for methods for which invocations might
- * be removed in the generated code.
+/** An annotation for methods whose bodies may be excluded
+ * from compiler-generated bytecode.
*
* Behavior is influenced by passing `-Xelide-below <arg>` to `scalac`.
- * Methods marked elidable will be omitted from generated code if the
- * priority given the annotation is lower than to the command line argument.
- * Examples:
- * {{{
- * import annotation.elidable._
+ * Calls to methods marked elidable (as well as the method body) will
+ *  be omitted from generated code if the priority given in the annotation
+ * is lower than that given on the command line.
*
- * @elidable(WARNING) def foo = log("foo")
- * @elidable(FINE) def bar = log("bar")
+ * @elidable(123) // annotation priority
+ * scalac -Xelide-below 456 // command line priority
*
- * scalac -Xelide-below=1000
- * }}}
- * @since 2.8
+ * The method call will be replaced with an expression which depends on
+ * the type of the elided expression. In decreasing order of precedence:
+ *
+ * Unit ()
+ * Boolean false
+ * T <: AnyVal 0
+ * T >: Null null
+ * T >: Nothing Predef.???
+ *
+ * Complete example:
+ {{{
+ import annotation._, elidable._
+ object Test extends App {
+ def expensiveComputation(): Int = { Thread.sleep(1000) ; 172 }
+
+ @elidable(WARNING) def warning(msg: String) = println(msg)
+ @elidable(FINE) def debug(msg: String) = println(msg)
+ @elidable(FINE) def computedValue = expensiveComputation()
+
+ warning("Warning! Danger! Warning!")
+ debug("Debug! Danger! Debug!")
+ println("I computed a value: " + computedValue)
+ }
+ % scalac example.scala && scala Test
+ Warning! Danger! Warning!
+ Debug! Danger! Debug!
+ I computed a value: 172
+
+ // INFO lies between WARNING and FINE
+ % scalac -Xelide-below INFO example.scala && scala Test
+ Warning! Danger! Warning!
+ I computed a value: 0
+ }}}
+ *
+ * @author Paul Phillips
+ * @since 2.8
*/
final class elidable(final val level: Int) extends annotation.StaticAnnotation {}
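
A consequence of the replacement table above, shown as a small sketch (file
name and object are hypothetical): an elided method with a reference result
type yields `null`, while a value type yields `0`.

    import annotation._, elidable._

    object Replacements {
      @elidable(FINE) def label: String = "computed"  // T >: Null   => null when elided
      @elidable(FINE) def count: Int    = 42          // T <: AnyVal => 0 when elided
    }
    // % scalac -Xelide-below INFO Replacements.scala
    // Replacements.label == null, Replacements.count == 0
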
diff --git a/src/library/scala/collection/GenTraversableLike.scala b/src/library/scala/collection/GenTraversableLike.scala
index c837775cf9..1dcc0bdac7 100644
--- a/src/library/scala/collection/GenTraversableLike.scala
+++ b/src/library/scala/collection/GenTraversableLike.scala
@@ -318,7 +318,7 @@ trait GenTraversableLike[+A, +Repr] extends GenTraversableOnce[A] with Paralleli
* $orderDependent
*
* @param from the lowest index to include from this $coll.
- * @param until the highest index to EXCLUDE from this $coll.
+ * @param until the lowest index to EXCLUDE from this $coll.
* @return a $coll containing the elements greater than or equal to
* index `from` extending up to (but not including) index `until`
* of this $coll.
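
The corrected wording matters in practice: `from` is the lowest index kept,
`until` the lowest index dropped. A minimal illustration, mirroring the
example added to List.scala further down:

    val letters = List('a', 'b', 'c', 'd', 'e')
    letters.slice(1, 3)   // List('b', 'c') -- index 3 is excluded
    letters.slice(3, 3)   // List() -- empty whenever from >= until
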
diff --git a/src/library/scala/collection/SeqLike.scala b/src/library/scala/collection/SeqLike.scala
index 02298ef096..b51a37cf9e 100644
--- a/src/library/scala/collection/SeqLike.scala
+++ b/src/library/scala/collection/SeqLike.scala
@@ -151,7 +151,7 @@ trait SeqLike[+A, +Repr] extends IterableLike[A, Repr] with GenSeqLike[A, Repr]
def next(): Repr = {
if (!hasNext)
Iterator.empty.next
-
+
val forcedElms = new mutable.ArrayBuffer[A](elms.size) ++= elms
val result = (self.newBuilder ++= forcedElms).result
var i = idxs.length - 2
diff --git a/src/library/scala/collection/generic/MutableSortedSetFactory.scala b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
index b235379575..cbbedc0231 100644
--- a/src/library/scala/collection/generic/MutableSortedSetFactory.scala
+++ b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
@@ -11,12 +11,12 @@ package generic
import scala.collection.mutable.{ Builder, GrowingBuilder }
-/**
+/**
* @define Coll mutable.SortedSet
* @define coll mutable sorted
*
* @author Lucien Pereira
- *
+ *
*/
abstract class MutableSortedSetFactory[CC[A] <: mutable.SortedSet[A] with SortedSetLike[A, CC[A]] with mutable.Set[A] with mutable.SetLike[A, CC[A]]] extends SortedSetFactory[CC] {
@@ -26,7 +26,7 @@ abstract class MutableSortedSetFactory[CC[A] <: mutable.SortedSet[A] with Sorted
 * is evaluated, elems is cloned (which is O(n)).
 *
 * Fortunately, GrowingBuilder comes to the rescue.
- *
+ *
*/
override def newBuilder[A](implicit ord: Ordering[A]): Builder[A, CC[A]] = new GrowingBuilder[A, CC[A]](empty)
diff --git a/src/library/scala/collection/immutable/BitSet.scala b/src/library/scala/collection/immutable/BitSet.scala
index abccd91f9c..870d5534dc 100644
--- a/src/library/scala/collection/immutable/BitSet.scala
+++ b/src/library/scala/collection/immutable/BitSet.scala
@@ -131,7 +131,7 @@ object BitSet extends BitSetFactory[BitSet] {
* the capacity of two long values). The constructor wraps an existing
* bit mask without copying, thus exposing a mutable part of the internal
* implementation. Care needs to be taken not to modify the exposed
- * array.
+ * array.
*/
class BitSetN(val elems: Array[Long]) extends BitSet {
protected def nwords = elems.length
diff --git a/src/library/scala/collection/immutable/List.scala b/src/library/scala/collection/immutable/List.scala
index 5f3f9b717f..381fcf3117 100644
--- a/src/library/scala/collection/immutable/List.scala
+++ b/src/library/scala/collection/immutable/List.scala
@@ -205,6 +205,16 @@ sealed abstract class List[+A] extends AbstractSeq[A]
these
}
+ /**
+ * @example {{{
+ * // Given a list
+ * val letters = List('a','b','c','d','e')
+ *
+ * // `slice` returns all elements beginning at index `from` and afterwards,
+ *  //  up until index `until` (excluding index `until`).
+ * letters.slice(1,3) // Returns List('b','c')
+ * }}}
+ */
override def slice(from: Int, until: Int): List[A] = {
val lo = math.max(from, 0)
if (until <= lo || isEmpty) Nil
@@ -316,13 +326,13 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
override def head : B = hd
override def tail : List[B] = tl
override def isEmpty: Boolean = false
-
+
private def writeObject(out: ObjectOutputStream) {
out.writeObject(ListSerializeStart) // needed to differentiate with the legacy `::` serialization
out.writeObject(this.hd)
out.writeObject(this.tl)
}
-
+
private def readObject(in: ObjectInputStream) {
val obj = in.readObject()
if (obj == ListSerializeStart) {
@@ -330,7 +340,7 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
this.tl = in.readObject().asInstanceOf[List[B]]
} else oldReadObject(in, obj)
}
-
+
/* The oldReadObject method exists here for compatibility reasons.
* :: objects used to be serialized by serializing all the elements to
* the output stream directly, but this was broken (see SI-5374).
@@ -349,13 +359,13 @@ final case class ::[B](private var hd: B, private[scala] var tl: List[B]) extend
current = list
}
}
-
+
private def oldWriteObject(out: ObjectOutputStream) {
var xs: List[B] = this
while (!xs.isEmpty) { out.writeObject(xs.head); xs = xs.tail }
out.writeObject(ListSerializeEnd)
}
-
+
}
/** $factoryInfo
diff --git a/src/library/scala/collection/immutable/Range.scala b/src/library/scala/collection/immutable/Range.scala
index 7537558f0b..b72d83f896 100644
--- a/src/library/scala/collection/immutable/Range.scala
+++ b/src/library/scala/collection/immutable/Range.scala
@@ -77,9 +77,9 @@ extends collection.AbstractSeq[Int]
}
final val lastElement = start + (numRangeElements - 1) * step
final val terminalElement = start + numRangeElements * step
-
+
override def last = if (isEmpty) Nil.last else lastElement
-
+
protected def copy(start: Int, end: Int, step: Int): Range = new Range(start, end, step)
/** Create a new range with the `start` and `end` values of this range and
@@ -93,14 +93,14 @@ extends collection.AbstractSeq[Int]
override def size = length
override def length = if (numRangeElements < 0) fail() else numRangeElements
-
+
private def description = "%d %s %d by %s".format(start, if (isInclusive) "to" else "until", end, step)
private def fail() = throw new IllegalArgumentException(description + ": seqs cannot contain more than Int.MaxValue elements.")
private def validateMaxLength() {
if (numRangeElements < 0)
fail()
}
-
+
def validateRangeBoundaries(f: Int => Any): Boolean = {
validateMaxLength()
@@ -121,7 +121,7 @@ extends collection.AbstractSeq[Int]
if (idx < 0 || idx >= numRangeElements) throw new IndexOutOfBoundsException(idx.toString)
else start + (step * idx)
}
-
+
@inline final override def foreach[@specialized(Unit) U](f: Int => U) {
if (validateRangeBoundaries(f)) {
var i = start
@@ -309,7 +309,7 @@ object Range {
// number of full-sized jumps.
val hasStub = isInclusive || (gap % step != 0)
val result: Long = jumps + ( if (hasStub) 1 else 0 )
-
+
if (result > scala.Int.MaxValue) -1
else result.toInt
}
@@ -405,4 +405,3 @@ object Range {
// super.foreach(f)
}
}
- \ No newline at end of file
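
A worked instance of the element-count logic in the hunk above, where
`gap = end - start` and `jumps = gap / step`:

    Range(1, 10, 3).length   // gap = 9,  jumps = 3, gap % step == 0, exclusive => 3 (1, 4, 7)
    Range(1, 11, 3).length   // gap = 10, jumps = 3, stub for the remainder     => 4 (1, 4, 7, 10)
    (1 to 10 by 3).length    // inclusive always has the stub                   => 4 (1, 4, 7, 10)
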
diff --git a/src/library/scala/collection/mutable/AVLTree.scala b/src/library/scala/collection/mutable/AVLTree.scala
index ba2af8f120..9aea25f330 100644
--- a/src/library/scala/collection/mutable/AVLTree.scala
+++ b/src/library/scala/collection/mutable/AVLTree.scala
@@ -12,9 +12,9 @@ package mutable
/**
* An immutable AVL Tree implementation used by mutable.TreeSet
- *
+ *
* @author Lucien Pereira
- *
+ *
*/
private[mutable] sealed trait AVLTree[+A] extends Serializable {
def balance: Int
@@ -28,28 +28,28 @@ private[mutable] sealed trait AVLTree[+A] extends Serializable {
/**
* Returns a new tree containing the given element.
 * Throws an IllegalArgumentException if the element is already present.
- *
+ *
*/
def insert[B >: A](value: B, ordering: Ordering[B]): AVLTree[B] = Node(value, Leaf, Leaf)
/**
 * Return a new tree which does not contain the given element.
- *
+ *
*/
def remove[B >: A](value: B, ordering: Ordering[B]): AVLTree[A] =
throw new NoSuchElementException(String.valueOf(value))
-
+
/**
* Return a tuple containing the smallest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
def removeMin[B >: A]: (B, AVLTree[B]) = sys.error("Should not happen.")
-
+
/**
* Return a tuple containing the biggest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
def removeMax[B >: A]: (B, AVLTree[B]) = sys.error("Should not happen.")
@@ -90,7 +90,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Returns a new tree containing the given element.
 * Throws an IllegalArgumentException if the element is already present.
- *
+ *
*/
override def insert[B >: A](value: B, ordering: Ordering[B]) = {
val ord = ordering.compare(value, data)
@@ -104,7 +104,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
 * Return a new tree which does not contain the given element.
- *
+ *
*/
override def remove[B >: A](value: B, ordering: Ordering[B]): AVLTree[A] = {
val ord = ordering.compare(value, data)
@@ -130,7 +130,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Return a tuple containing the smallest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
override def removeMin[B >: A]: (B, AVLTree[B]) = {
if (Leaf == left)
@@ -144,7 +144,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
/**
* Return a tuple containing the biggest element of the provided tree
* and a new tree from which this element has been extracted.
- *
+ *
*/
override def removeMax[B >: A]: (B, AVLTree[B]) = {
if (Leaf == right)
@@ -154,7 +154,7 @@ private case class Node[A](val data: A, val left: AVLTree[A], val right: AVLTree
(max, Node(data, left, newRight).rebalance)
}
}
-
+
override def rebalance[B >: A] = {
if (-2 == balance) {
if (1 == left.balance)
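
The truncated `rebalance` hunk above picks a rotation from the node's balance
factor and its child's. A standalone sketch of that standard AVL decision
(illustrative only, not this file's code), assuming
`balance = height(right) - height(left)` as the comparisons suggest:

    def rotationFor(balance: Int, childBalance: Int): String =
      if (balance == -2)        // left-heavy
        if (childBalance == 1) "left-right double rotation" else "single right rotation"
      else if (balance == 2)    // right-heavy, the mirror image
        if (childBalance == -1) "right-left double rotation" else "single left rotation"
      else "no rotation needed"
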
diff --git a/src/library/scala/collection/mutable/Ctrie.scala b/src/library/scala/collection/mutable/Ctrie.scala
index 699b96b87c..cbec118aa9 100644
--- a/src/library/scala/collection/mutable/Ctrie.scala
+++ b/src/library/scala/collection/mutable/Ctrie.scala
@@ -22,29 +22,29 @@ import annotation.switch
private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends INodeBase[K, V](g) {
import INodeBase._
-
+
WRITE(bn)
-
+
def this(g: Gen) = this(null, g)
-
+
@inline final def WRITE(nval: MainNode[K, V]) = INodeBase.updater.set(this, nval)
-
+
@inline final def CAS(old: MainNode[K, V], n: MainNode[K, V]) = INodeBase.updater.compareAndSet(this, old, n)
-
+
final def gcasRead(ct: Ctrie[K, V]): MainNode[K, V] = GCAS_READ(ct)
-
+
@inline final def GCAS_READ(ct: Ctrie[K, V]): MainNode[K, V] = {
val m = /*READ*/mainnode
val prevval = /*READ*/m.prev
if (prevval eq null) m
else GCAS_Complete(m, ct)
}
-
+
@tailrec private def GCAS_Complete(m: MainNode[K, V], ct: Ctrie[K, V]): MainNode[K, V] = if (m eq null) null else {
// complete the GCAS
val prev = /*READ*/m.prev
val ctr = ct.readRoot(true)
-
+
prev match {
case null =>
m
@@ -71,7 +71,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
-
+
@inline final def GCAS(old: MainNode[K, V], n: MainNode[K, V], ct: Ctrie[K, V]): Boolean = {
n.WRITE_PREV(old)
if (CAS(old, n)) {
@@ -79,27 +79,27 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
/*READ*/n.prev eq null
} else false
}
-
+
@inline private def inode(cn: MainNode[K, V]) = {
val nin = new INode[K, V](gen)
nin.WRITE(cn)
nin
}
-
+
final def copyToGen(ngen: Gen, ct: Ctrie[K, V]) = {
val nin = new INode[K, V](ngen)
val main = GCAS_READ(ct)
nin.WRITE(main)
nin
}
-
+
/** Inserts a key value pair, overwriting the old pair if the keys match.
- *
+ *
* @return true if successful, false otherwise
*/
@tailrec final def rec_insert(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Boolean = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multiway node
val idx = (hc >>> lev) & 0x1f
@@ -137,7 +137,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
GCAS(ln, nn, ct)
}
}
-
+
/** Inserts a new key value pair, given that a specific condition is met.
*
* @param cond null - don't care if the key was there; KEY_ABSENT - key wasn't there; KEY_PRESENT - key was there; other value `v` - key must be bound to `v`
@@ -145,7 +145,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
*/
@tailrec final def rec_insertif(k: K, v: V, hc: Int, cond: AnyRef, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multiway node
val idx = (hc >>> lev) & 0x1f
@@ -228,14 +228,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
-
+
/** Looks up the value associated with the key.
- *
+ *
* @return null if no value has been found, RESTART if the operation wasn't successful, or any other value otherwise
*/
@tailrec final def rec_lookup(k: K, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): AnyRef = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multinode
val idx = (hc >>> lev) & 0x1f
@@ -270,15 +270,15 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
ln.get(k).asInstanceOf[Option[AnyRef]].orNull
}
}
-
+
   /** Removes the entry associated with the given key.
- *
+ *
    * @param v if null, will remove the key regardless of the value; otherwise removes only if binding contains that exact key and value
* @return null if not successful, an Option[V] indicating the previous value otherwise
*/
final def rec_remove(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] =>
val idx = (hc >>> lev) & 0x1f
@@ -289,7 +289,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
val pos = Integer.bitCount(bmp & (flag - 1))
val sub = cn.array(pos)
val res = sub match {
- case in: INode[K, V] =>
+ case in: INode[K, V] =>
if (startgen eq in.gen) in.rec_remove(k, v, hc, lev + 5, this, startgen, ct)
else {
if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_remove(k, v, hc, lev, parent, startgen, ct)
@@ -301,7 +301,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
if (GCAS(cn, ncn, ct)) Some(sn.v) else null
} else None
}
-
+
if (res == None || (res eq null)) res
else {
@tailrec def cleanParent(nonlive: AnyRef) {
@@ -325,13 +325,13 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case _ => // parent is no longer a cnode, we're done
}
}
-
+
if (parent ne null) { // never tomb at root
val n = GCAS_READ(ct)
if (n.isInstanceOf[TNode[_, _]])
cleanParent(n)
}
-
+
res
}
}
@@ -351,7 +351,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
-
+
private def clean(nd: INode[K, V], ct: Ctrie[K, V], lev: Int) {
val m = nd.GCAS_READ(ct)
m match {
@@ -359,14 +359,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case _ =>
}
}
-
+
final def isNullInode(ct: Ctrie[K, V]) = GCAS_READ(ct) eq null
-
+
final def cachedSize(ct: Ctrie[K, V]): Int = {
val m = GCAS_READ(ct)
m.cachedSize(ct)
}
-
+
/* this is a quiescent method! */
def string(lev: Int) = "%sINode -> %s".format(" " * lev, mainnode match {
case null => "<null>"
@@ -375,14 +375,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case ln: LNode[_, _] => ln.string(lev)
case x => "<elem: %s>".format(x)
})
-
+
}
private[mutable] object INode {
val KEY_PRESENT = new AnyRef
val KEY_ABSENT = new AnyRef
-
+
def newRootNode[K, V] = {
val gen = new Gen
val cn = new CNode[K, V](0, new Array(0), gen)
@@ -393,11 +393,11 @@ private[mutable] object INode {
private[mutable] final class FailedNode[K, V](p: MainNode[K, V]) extends MainNode[K, V] {
WRITE_PREV(p)
-
+
def string(lev: Int) = throw new UnsupportedOperationException
-
+
def cachedSize(ct: AnyRef): Int = throw new UnsupportedOperationException
-
+
override def toString = "FailedNode(%s)".format(p)
}
@@ -449,7 +449,7 @@ extends MainNode[K, V] {
private[collection] final class CNode[K, V](final val bitmap: Int, final val array: Array[BasicNode], final val gen: Gen)
extends CNodeBase[K, V] {
-
+
// this should only be called from within read-only snapshots
final def cachedSize(ct: AnyRef) = {
val currsz = READ_SIZE()
@@ -460,7 +460,7 @@ extends CNodeBase[K, V] {
READ_SIZE()
}
}
-
+
// lends itself towards being parallelizable by choosing
// a random starting offset in the array
// => if there are concurrent size computations, they start
@@ -480,7 +480,7 @@ extends CNodeBase[K, V] {
}
sz
}
-
+
final def updatedAt(pos: Int, nn: BasicNode, gen: Gen) = {
val len = array.length
val narr = new Array[BasicNode](len)
@@ -488,7 +488,7 @@ extends CNodeBase[K, V] {
narr(pos) = nn
new CNode[K, V](bitmap, narr, gen)
}
-
+
final def removedAt(pos: Int, flag: Int, gen: Gen) = {
val arr = array
val len = arr.length
@@ -497,7 +497,7 @@ extends CNodeBase[K, V] {
Array.copy(arr, pos + 1, narr, pos, len - pos - 1)
new CNode[K, V](bitmap ^ flag, narr, gen)
}
-
+
final def insertedAt(pos: Int, flag: Int, nn: BasicNode, gen: Gen) = {
val len = array.length
val bmp = bitmap
@@ -507,7 +507,7 @@ extends CNodeBase[K, V] {
Array.copy(array, pos, narr, pos + 1, len - pos)
new CNode[K, V](bmp | flag, narr, gen)
}
-
+
/** Returns a copy of this cnode such that all the i-nodes below it are copied
* to the specified generation `ngen`.
*/
@@ -525,17 +525,17 @@ extends CNodeBase[K, V] {
}
new CNode[K, V](bitmap, narr, ngen)
}
-
+
private def resurrect(inode: INode[K, V], inodemain: AnyRef): BasicNode = inodemain match {
case tn: TNode[_, _] => tn.copyUntombed
case _ => inode
}
-
+
final def toContracted(lev: Int): MainNode[K, V] = if (array.length == 1 && lev > 0) array(0) match {
case sn: SNode[K, V] => sn.copyTombed
case _ => this
} else this
-
+
// - if the branching factor is 1 for this CNode, and the child
// is a tombed SNode, returns its tombed version
// - otherwise, if there is at least one non-null node below,
@@ -559,12 +559,12 @@ extends CNodeBase[K, V] {
}
i += 1
}
-
+
new CNode[K, V](bmp, tmparray, gen).toContracted(lev)
}
-
+
private[mutable] def string(lev: Int): String = "CNode %x\n%s".format(bitmap, array.map(_.string(lev + 1)).mkString("\n"))
-
+
   /* quiescently consistent - don't call concurrently with anything involving a GCAS!! */
protected def collectElems: Seq[(K, V)] = array flatMap {
case sn: SNode[K, V] => Some(sn.kvPair)
@@ -574,12 +574,12 @@ extends CNodeBase[K, V] {
case cn: CNode[K, V] => cn.collectElems
}
}
-
+
protected def collectLocalElems: Seq[String] = array flatMap {
case sn: SNode[K, V] => Some(sn.kvPair._2.toString)
case in: INode[K, V] => Some(in.toString.drop(14) + "(" + in.gen + ")")
}
-
+
override def toString = {
val elems = collectLocalElems
"CNode(sz: %d; %s)".format(elems.size, elems.sorted.mkString(", "))
@@ -588,7 +588,7 @@ extends CNodeBase[K, V] {
private[mutable] object CNode {
-
+
def dual[K, V](x: SNode[K, V], xhc: Int, y: SNode[K, V], yhc: Int, lev: Int, gen: Gen): MainNode[K, V] = if (lev < 35) {
val xidx = (xhc >>> lev) & 0x1f
val yidx = (yhc >>> lev) & 0x1f
@@ -604,7 +604,7 @@ private[mutable] object CNode {
} else {
new LNode(x.k, x.v, y.k, y.v)
}
-
+
}
@@ -620,9 +620,9 @@ private[mutable] case class RDCSS_Descriptor[K, V](old: INode[K, V], expectedmai
* lock-free snapshots which are used to implement linearizable lock-free size,
* iterator and clear operations. The cost of evaluating the (lazy) snapshot is
* distributed across subsequent updates, thus making snapshot evaluation horizontally scalable.
- *
+ *
* For details, see: http://lampwww.epfl.ch/~prokopec/ctries-snapshot.pdf
- *
+ *
* @author Aleksandar Prokopec
* @since 2.10
*/
@@ -634,17 +634,17 @@ extends ConcurrentMap[K, V]
with Serializable
{
import Ctrie.computeHash
-
+
private var rootupdater = rtupd
@volatile var root = r
-
+
def this() = this(
INode.newRootNode,
AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
)
-
+
/* internal methods */
-
+
private def writeObject(out: java.io.ObjectOutputStream) {
val it = iterator
while (it.hasNext) {
@@ -654,11 +654,11 @@ extends ConcurrentMap[K, V]
}
out.writeObject(CtrieSerializationEnd)
}
-
+
private def readObject(in: java.io.ObjectInputStream) {
root = INode.newRootNode
rootupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
-
+
var obj: AnyRef = null
do {
obj = in.readObject()
@@ -669,11 +669,11 @@ extends ConcurrentMap[K, V]
}
} while (obj != CtrieSerializationEnd)
}
-
+
@inline final def CAS_ROOT(ov: AnyRef, nv: AnyRef) = rootupdater.compareAndSet(this, ov, nv)
-
+
final def readRoot(abort: Boolean = false): INode[K, V] = RDCSS_READ_ROOT(abort)
-
+
@inline final def RDCSS_READ_ROOT(abort: Boolean = false): INode[K, V] = {
val r = /*READ*/root
r match {
@@ -681,7 +681,7 @@ extends ConcurrentMap[K, V]
case desc: RDCSS_Descriptor[K, V] => RDCSS_Complete(abort)
}
}
-
+
@tailrec private def RDCSS_Complete(abort: Boolean): INode[K, V] = {
val v = /*READ*/root
v match {
@@ -705,7 +705,7 @@ extends ConcurrentMap[K, V]
}
}
}
-
+
private def RDCSS_ROOT(ov: INode[K, V], expectedmain: MainNode[K, V], nv: INode[K, V]): Boolean = {
val desc = RDCSS_Descriptor(ov, expectedmain, nv)
if (CAS_ROOT(ov, desc)) {
@@ -713,27 +713,27 @@ extends ConcurrentMap[K, V]
/*READ*/desc.committed
} else false
}
-
+
@tailrec private def inserthc(k: K, hc: Int, v: V) {
val r = RDCSS_READ_ROOT()
if (!r.rec_insert(k, v, hc, 0, null, r.gen, this)) inserthc(k, hc, v)
}
-
+
@tailrec private def insertifhc(k: K, hc: Int, v: V, cond: AnyRef): Option[V] = {
val r = RDCSS_READ_ROOT()
-
+
val ret = r.rec_insertif(k, v, hc, cond, 0, null, r.gen, this)
if (ret eq null) insertifhc(k, hc, v, cond)
else ret
}
-
+
@tailrec private def lookuphc(k: K, hc: Int): AnyRef = {
val r = RDCSS_READ_ROOT()
val res = r.rec_lookup(k, hc, 0, null, r.gen, this)
if (res eq INodeBase.RESTART) lookuphc(k, hc)
else res
}
-
+
/* slower:
//@tailrec
private def lookuphc(k: K, hc: Int): AnyRef = {
@@ -746,31 +746,31 @@ extends ConcurrentMap[K, V]
}
}
*/
-
+
@tailrec private def removehc(k: K, v: V, hc: Int): Option[V] = {
val r = RDCSS_READ_ROOT()
val res = r.rec_remove(k, v, hc, 0, null, r.gen, this)
if (res ne null) res
else removehc(k, v, hc)
}
-
+
def string = RDCSS_READ_ROOT().string(0)
-
+
/* public methods */
-
+
override def seq = this
-
+
override def par = new ParCtrie(this)
-
+
override def empty: Ctrie[K, V] = new Ctrie[K, V]
-
+
final def isReadOnly = rootupdater eq null
-
+
final def nonReadOnly = rootupdater ne null
-
+
/** Returns a snapshot of this Ctrie.
* This operation is lock-free and linearizable.
- *
+ *
* The snapshot is lazily updated - the first time some branch
    * in the snapshot or this Ctrie is accessed, it is rewritten.
* This means that the work of rebuilding both the snapshot and this
@@ -783,17 +783,17 @@ extends ConcurrentMap[K, V]
if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r.copyToGen(new Gen, this), rootupdater)
else snapshot()
}
-
+
/** Returns a read-only snapshot of this Ctrie.
* This operation is lock-free and linearizable.
- *
+ *
* The snapshot is lazily updated - the first time some branch
    * of this Ctrie is accessed, it is rewritten. The work of creating
* the snapshot is thus distributed across subsequent updates
* and accesses on this Ctrie by all threads.
* Note that the snapshot itself is never rewritten unlike when calling
* the `snapshot` method, but the obtained snapshot cannot be modified.
- *
+ *
* This method is used by other methods such as `size` and `iterator`.
*/
@tailrec final def readOnlySnapshot(): collection.Map[K, V] = {
@@ -802,106 +802,106 @@ extends ConcurrentMap[K, V]
if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r, null)
else readOnlySnapshot()
}
-
+
@tailrec final override def clear() {
val r = RDCSS_READ_ROOT()
if (!RDCSS_ROOT(r, r.gcasRead(this), INode.newRootNode[K, V])) clear()
}
-
+
final def lookup(k: K): V = {
val hc = computeHash(k)
lookuphc(k, hc).asInstanceOf[V]
}
-
+
final override def apply(k: K): V = {
val hc = computeHash(k)
val res = lookuphc(k, hc)
if (res eq null) throw new NoSuchElementException
else res.asInstanceOf[V]
}
-
+
final def get(k: K): Option[V] = {
val hc = computeHash(k)
Option(lookuphc(k, hc)).asInstanceOf[Option[V]]
}
-
+
override def put(key: K, value: V): Option[V] = {
val hc = computeHash(key)
insertifhc(key, hc, value, null)
}
-
+
final override def update(k: K, v: V) {
val hc = computeHash(k)
inserthc(k, hc, v)
}
-
+
final def +=(kv: (K, V)) = {
update(kv._1, kv._2)
this
}
-
+
final override def remove(k: K): Option[V] = {
val hc = computeHash(k)
removehc(k, null.asInstanceOf[V], hc)
}
-
+
final def -=(k: K) = {
remove(k)
this
}
-
+
def putIfAbsent(k: K, v: V): Option[V] = {
val hc = computeHash(k)
insertifhc(k, hc, v, INode.KEY_ABSENT)
}
-
+
def remove(k: K, v: V): Boolean = {
val hc = computeHash(k)
removehc(k, v, hc).nonEmpty
}
-
+
def replace(k: K, oldvalue: V, newvalue: V): Boolean = {
val hc = computeHash(k)
insertifhc(k, hc, newvalue, oldvalue.asInstanceOf[AnyRef]).nonEmpty
}
-
+
def replace(k: K, v: V): Option[V] = {
val hc = computeHash(k)
insertifhc(k, hc, v, INode.KEY_PRESENT)
}
-
+
def iterator: Iterator[(K, V)] =
if (nonReadOnly) readOnlySnapshot().iterator
else new CtrieIterator(0, this)
-
+
private def cachedSize() = {
val r = RDCSS_READ_ROOT()
r.cachedSize(this)
}
-
+
override def size: Int =
if (nonReadOnly) readOnlySnapshot().size
else cachedSize()
-
+
override def stringPrefix = "Ctrie"
-
+
}
object Ctrie extends MutableMapFactory[Ctrie] {
val inodeupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[INodeBase[_, _]], classOf[MainNode[_, _]], "mainnode")
-
+
implicit def canBuildFrom[K, V]: CanBuildFrom[Coll, (K, V), Ctrie[K, V]] = new MapCanBuildFrom[K, V]
-
+
def empty[K, V]: Ctrie[K, V] = new Ctrie[K, V]
-
+
@inline final def computeHash[K](k: K): Int = {
var hcode = k.hashCode
hcode = hcode * 0x9e3775cd
hcode = java.lang.Integer.reverseBytes(hcode)
hcode * 0x9e3775cd
}
-
+
}
@@ -911,11 +911,11 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
var depth = -1
var subiter: Iterator[(K, V)] = null
var current: KVNode[K, V] = null
-
+
if (mustInit) initialize()
-
+
def hasNext = (current ne null) || (subiter ne null)
-
+
def next() = if (hasNext) {
var r: (K, V) = null
if (subiter ne null) {
@@ -927,7 +927,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
}
r
} else Iterator.empty.next()
-
+
private def readin(in: INode[K, V]) = in.gcasRead(ct) match {
case cn: CNode[K, V] =>
depth += 1
@@ -942,19 +942,19 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
case null =>
current = null
}
-
+
@inline private def checkSubiter() = if (!subiter.hasNext) {
subiter = null
advance()
}
-
+
@inline private def initialize() {
assert(ct.isReadOnly)
-
+
val r = ct.RDCSS_READ_ROOT()
readin(r)
}
-
+
def advance(): Unit = if (depth >= 0) {
val npos = stackpos(depth) + 1
if (npos < stack(depth).length) {
@@ -970,19 +970,19 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
advance()
}
} else current = null
-
+
protected def newIterator(_lev: Int, _ct: Ctrie[K, V], _mustInit: Boolean) = new CtrieIterator[K, V](_lev, _ct, _mustInit)
-
+
protected def dupTo(it: CtrieIterator[K, V]) = {
it.level = this.level
it.ct = this.ct
it.depth = this.depth
it.current = this.current
-
+
// these need a deep copy
Array.copy(this.stack, 0, it.stack, 0, 7)
Array.copy(this.stackpos, 0, it.stackpos, 0, 7)
-
+
// this one needs to be evaluated
if (this.subiter == null) it.subiter = null
else {
@@ -991,7 +991,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
it.subiter = lst.iterator
}
}
-
+
/** Returns a sequence of iterators over subsets of this iterator.
* It's used to ease the implementation of splitters for a parallel version of the Ctrie.
*/
@@ -1026,7 +1026,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
this.level += 1
Seq(this)
}
-
+
def printDebug {
println("ctrie iterator")
println(stackpos.mkString(","))
@@ -1034,7 +1034,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
println("curr.: " + current)
println(stack.mkString("\n"))
}
-
+
}
@@ -1048,20 +1048,20 @@ private[mutable] case object CtrieSerializationEnd
private[mutable] object Debug {
import collection._
-
+
lazy val logbuffer = new java.util.concurrent.ConcurrentLinkedQueue[AnyRef]
-
+
def log(s: AnyRef) = logbuffer.add(s)
-
+
def flush() {
for (s <- JavaConversions.asScalaIterator(logbuffer.iterator())) Console.out.println(s.toString)
logbuffer.clear()
}
-
+
def clear() {
logbuffer.clear()
}
-
+
}
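
A minimal usage sketch of the snapshot semantics documented above: snapshots
are lock-free and linearizable, and updates made after the snapshot are not
visible through it.

    import scala.collection.mutable.Ctrie

    val ct = new Ctrie[Int, String]
    ct.put(1, "one")

    val ro = ct.readOnlySnapshot()   // constant-time; rebuilt lazily as branches are read
    ct.put(2, "two")                 // goes to the live trie only

    assert(ro.get(1) == Some("one"))
    assert(ro.get(2) == None)
    assert(ct.get(2) == Some("two"))
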
diff --git a/src/library/scala/collection/mutable/FlatHashTable.scala b/src/library/scala/collection/mutable/FlatHashTable.scala
index f3fb6738eb..ee6d4d1d22 100644
--- a/src/library/scala/collection/mutable/FlatHashTable.scala
+++ b/src/library/scala/collection/mutable/FlatHashTable.scala
@@ -43,19 +43,19 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
/** The array keeping track of number of elements in 32 element blocks.
*/
@transient protected var sizemap: Array[Int] = null
-
+
@transient var seedvalue: Int = tableSizeSeed
-
+
import HashTable.powerOfTwo
-
+
protected def capacity(expectedSize: Int) = if (expectedSize == 0) 1 else powerOfTwo(expectedSize)
-
+
private def initialCapacity = capacity(initialSize)
-
+
protected def randomSeed = seedGenerator.get.nextInt()
-
+
protected def tableSizeSeed = Integer.bitCount(table.length - 1)
-
+
/**
* Initializes the collection from the input stream. `f` will be called for each element
* read from the input stream in the order determined by the stream. This is useful for
@@ -65,22 +65,22 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
*/
private[collection] def init(in: java.io.ObjectInputStream, f: A => Unit) {
in.defaultReadObject
-
+
_loadFactor = in.readInt()
assert(_loadFactor > 0)
-
+
val size = in.readInt()
tableSize = 0
assert(size >= 0)
-
+
table = new Array(capacity(sizeForThreshold(size, _loadFactor)))
threshold = newThreshold(_loadFactor, table.size)
-
+
seedvalue = in.readInt()
-
+
val smDefined = in.readBoolean()
if (smDefined) sizeMapInit(table.length) else sizemap = null
-
+
var index = 0
while (index < size) {
val elem = in.readObject().asInstanceOf[A]
@@ -295,12 +295,12 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
protected final def index(hcode: Int) = {
// version 1 (no longer used - did not work with parallel hash tables)
// improve(hcode) & (table.length - 1)
-
+
// version 2 (allows for parallel hash table construction)
val improved = improve(hcode, seedvalue)
val ones = table.length - 1
(improved >>> (32 - java.lang.Integer.bitCount(ones))) & ones
-
+
// version 3 (solves SI-5293 in most cases, but such a case would still arise for parallel hash tables)
// val hc = improve(hcode)
// val bbp = blockbitpos
@@ -345,17 +345,17 @@ trait FlatHashTable[A] extends FlatHashTable.HashUtils[A] {
private[collection] object FlatHashTable {
-
+
   /** Creates a specific seed to improve the hashcode of a hash table instance
* and ensure that iteration order vulnerabilities are not 'felt' in other
* hash tables.
- *
+ *
* See SI-5293.
*/
final def seedGenerator = new ThreadLocal[util.Random] {
override def initialValue = new util.Random
}
-
+
/** The load factor for the hash table; must be < 500 (0.5)
*/
def defaultLoadFactor: Int = 450
@@ -396,11 +396,11 @@ private[collection] object FlatHashTable {
//h = h ^ (h >>> 14)
//h = h + (h << 4)
//h ^ (h >>> 10)
-
+
var i = hcode * 0x9e3775cd
i = java.lang.Integer.reverseBytes(i)
val improved = i * 0x9e3775cd
-
+
// for the remainder, see SI-5293
// to ensure that different bits are used for different hash tables, we have to rotate based on the seed
val rotation = seed % 32
diff --git a/src/library/scala/collection/mutable/HashTable.scala b/src/library/scala/collection/mutable/HashTable.scala
index 5b3e07b826..cc0aed6963 100644
--- a/src/library/scala/collection/mutable/HashTable.scala
+++ b/src/library/scala/collection/mutable/HashTable.scala
@@ -53,9 +53,9 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
@transient protected var sizemap: Array[Int] = null
@transient var seedvalue: Int = tableSizeSeed
-
+
protected def tableSizeSeed = Integer.bitCount(table.length - 1)
-
+
protected def initialSize: Int = HashTable.initialSize
private def lastPopulatedIndex = {
@@ -80,9 +80,9 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
val size = in.readInt()
tableSize = 0
assert(size >= 0)
-
+
seedvalue = in.readInt()
-
+
val smDefined = in.readBoolean()
table = new Array(capacity(sizeForThreshold(_loadFactor, size)))
@@ -429,7 +429,7 @@ private[collection] object HashTable {
// h = h ^ (h >>> 14)
// h = h + (h << 4)
// h ^ (h >>> 10)
-
+
// the rest of the computation is due to SI-5293
val rotation = seed % 32
val rotated = (i >>> rotation) | (i << (32 - rotation))
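
The scrambling steps above appear in both hash table traits. A standalone
sketch (the packaging as one function is illustrative; the constants and
steps are copied from the hunks):

    // Multiply, byte-swap, multiply spreads entropy into the low bits used
    // for indexing; the per-table seed rotation (SI-5293) keeps two tables
    // from sharing an iteration order.
    def improve(hcode: Int, seed: Int): Int = {
      var i = hcode * 0x9e3775cd
      i = java.lang.Integer.reverseBytes(i)
      val improved = i * 0x9e3775cd
      val rotation = seed % 32
      (improved >>> rotation) | (improved << (32 - rotation))
    }
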
diff --git a/src/library/scala/collection/mutable/ListBuffer.scala b/src/library/scala/collection/mutable/ListBuffer.scala
index 53c876ec08..037f3b2939 100644
--- a/src/library/scala/collection/mutable/ListBuffer.scala
+++ b/src/library/scala/collection/mutable/ListBuffer.scala
@@ -62,22 +62,22 @@ final class ListBuffer[A]
private var len = 0
protected def underlying: immutable.Seq[A] = start
-
+
private def writeObject(out: ObjectOutputStream) {
// write start
var xs: List[A] = start
while (!xs.isEmpty) { out.writeObject(xs.head); xs = xs.tail }
out.writeObject(ListSerializeEnd)
-
+
// no need to write last0
-
+
// write if exported
out.writeBoolean(exported)
-
+
// write the length
out.writeInt(len)
}
-
+
private def readObject(in: ObjectInputStream) {
// read start, set last0 appropriately
var elem: A = in.readObject.asInstanceOf[A]
@@ -97,14 +97,14 @@ final class ListBuffer[A]
last0 = current
start
}
-
+
// read if exported
exported = in.readBoolean()
-
+
// read the length
len = in.readInt()
}
-
+
/** The current length of the buffer.
*
* This operation takes constant time.
diff --git a/src/library/scala/collection/mutable/SortedSet.scala b/src/library/scala/collection/mutable/SortedSet.scala
index d87fc0b4a2..f41a51d3ef 100644
--- a/src/library/scala/collection/mutable/SortedSet.scala
+++ b/src/library/scala/collection/mutable/SortedSet.scala
@@ -13,12 +13,12 @@ import generic._
/**
* Base trait for mutable sorted set.
- *
+ *
* @define Coll mutable.SortedSet
* @define coll mutable sorted set
*
* @author Lucien Pereira
- *
+ *
*/
trait SortedSet[A] extends collection.SortedSet[A] with collection.SortedSetLike[A,SortedSet[A]]
with mutable.Set[A] with mutable.SetLike[A, SortedSet[A]] {
@@ -39,11 +39,11 @@ trait SortedSet[A] extends collection.SortedSet[A] with collection.SortedSetLike
* Standard `CanBuildFrom` instance for sorted sets.
*
* @author Lucien Pereira
- *
+ *
*/
object SortedSet extends MutableSortedSetFactory[SortedSet] {
implicit def canBuildFrom[A](implicit ord: Ordering[A]): CanBuildFrom[Coll, A, SortedSet[A]] = new SortedSetCanBuildFrom[A]
-
+
def empty[A](implicit ord: Ordering[A]): SortedSet[A] = TreeSet.empty[A]
-
+
}
diff --git a/src/library/scala/collection/mutable/TreeSet.scala b/src/library/scala/collection/mutable/TreeSet.scala
index e0f1c3adfe..02ee811193 100644
--- a/src/library/scala/collection/mutable/TreeSet.scala
+++ b/src/library/scala/collection/mutable/TreeSet.scala
@@ -11,14 +11,14 @@ package mutable
import generic._
-/**
+/**
* @define Coll mutable.TreeSet
* @define coll mutable tree set
* @factoryInfo
* Companion object of TreeSet providing factory related utilities.
- *
+ *
* @author Lucien Pereira
- *
+ *
*/
object TreeSet extends MutableSortedSetFactory[TreeSet] {
/**
@@ -32,7 +32,7 @@ object TreeSet extends MutableSortedSetFactory[TreeSet] {
* A mutable SortedSet using an immutable AVL Tree as underlying data structure.
*
* @author Lucien Pereira
- *
+ *
*/
class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with SetLike[A, TreeSet[A]]
with SortedSetLike[A, TreeSet[A]] with Set[A] with Serializable {
@@ -67,7 +67,7 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
   * Cardinality stores the set size; unfortunately a
   * set view (given by rangeImpl)
   * cannot take advantage of this optimisation
- *
+ *
*/
override def size: Int = base.map(_ => super.size).getOrElse(cardinality)
@@ -101,7 +101,7 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
* Thanks to the immutable nature of the
* underlying AVL Tree, we can share it with
   * the clone, so cloning takes O(1) time.
- *
+ *
*/
override def clone: TreeSet[A] = {
val clone = new TreeSet[A](base, from, until)
@@ -119,5 +119,5 @@ class TreeSet[A](implicit val ordering: Ordering[A]) extends SortedSet[A] with S
override def iterator: Iterator[A] = resolve.avl.iterator
.dropWhile(e => !isLeftAcceptable(from, ordering)(e))
.takeWhile(e => isRightAcceptable(until, ordering)(e))
-
+
}
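
The O(1) `clone` documented above relies on structural sharing: both sets
keep the same immutable AVL tree until one of them diverges. A short sketch:

    import scala.collection.mutable.TreeSet

    val s = TreeSet(1, 2, 3)
    val c = s.clone        // O(1): shares the immutable AVL tree
    c += 4
    assert(!s.contains(4)) // the original is unaffected
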
diff --git a/src/library/scala/collection/parallel/Combiner.scala b/src/library/scala/collection/parallel/Combiner.scala
index 69e3271d39..6afe901258 100644
--- a/src/library/scala/collection/parallel/Combiner.scala
+++ b/src/library/scala/collection/parallel/Combiner.scala
@@ -74,10 +74,10 @@ trait Combiner[-Elem, +To] extends Builder[Elem, To] with Sizing with Parallel {
* @return the parallel builder containing both the elements of this and the `other` builder
*/
def combine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]): Combiner[N, NewTo]
-
+
/** Returns `true` if this combiner has a thread-safe `+=` and is meant to be shared
* across several threads constructing the collection.
- *
+ *
* By default, this method returns `false`.
*/
def canBeShared: Boolean = false
diff --git a/src/library/scala/collection/parallel/ParIterableLike.scala b/src/library/scala/collection/parallel/ParIterableLike.scala
index cffd3bfbcf..5e6bf8c1a3 100644
--- a/src/library/scala/collection/parallel/ParIterableLike.scala
+++ b/src/library/scala/collection/parallel/ParIterableLike.scala
@@ -176,7 +176,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
def hasDefiniteSize = true
def nonEmpty = size != 0
-
+
/** Creates a new parallel iterator used to traverse the elements of this parallel collection.
* This iterator is more specific than the iterator of the returned by `iterator`, and augmented
* with additional accessor and transformer methods.
@@ -246,7 +246,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
trait SignallingOps[PI <: DelegatedSignalling] {
def assign(cntx: Signalling): PI
}
-
+
/* convenience task operations wrapper */
protected implicit def task2ops[R, Tp](tsk: SSCTask[R, Tp]) = new TaskOps[R, Tp] {
def mapResult[R1](mapping: R => R1): ResultMapping[R, Tp, R1] = new ResultMapping[R, Tp, R1](tsk) {
@@ -274,7 +274,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
it
}
}
-
+
protected implicit def builder2ops[Elem, To](cb: Builder[Elem, To]) = new BuilderOps[Elem, To] {
def ifIs[Cmb](isbody: Cmb => Unit) = new Otherwise[Cmb] {
def otherwise(notbody: => Unit)(implicit m: ClassManifest[Cmb]) {
@@ -284,12 +284,12 @@ self: ParIterableLike[T, Repr, Sequential] =>
def isCombiner = cb.isInstanceOf[Combiner[_, _]]
def asCombiner = cb.asInstanceOf[Combiner[Elem, To]]
}
-
+
protected[this] def bf2seq[S, That](bf: CanBuildFrom[Repr, S, That]) = new CanBuildFrom[Sequential, S, That] {
def apply(from: Sequential) = bf.apply(from.par.asInstanceOf[Repr]) // !!! we only use this on `this.seq`, and know that `this.seq.par.getClass == this.getClass`
def apply() = bf.apply()
}
-
+
protected[this] def sequentially[S, That <: Parallel](b: Sequential => Parallelizable[S, That]) = b(seq).par.asInstanceOf[Repr]
def mkString(start: String, sep: String, end: String): String = seq.mkString(start, sep, end)
@@ -299,7 +299,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
def mkString: String = seq.mkString("")
override def toString = seq.mkString(stringPrefix + "(", ", ", ")")
-
+
def canEqual(other: Any) = true
/** Reduces the elements of this sequence using the specified associative binary operator.
@@ -336,7 +336,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* the elements if the collection is nonempty, and `None` otherwise.
*/
def reduceOption[U >: T](op: (U, U) => U): Option[U] = if (isEmpty) None else Some(reduce(op))
-
+
/** Folds the elements of this sequence using the specified associative binary operator.
* The order in which the elements are reduced is unspecified and may be nondeterministic.
*
@@ -387,11 +387,11 @@ self: ParIterableLike[T, Repr, Sequential] =>
def aggregate[S](z: S)(seqop: (S, T) => S, combop: (S, S) => S): S = {
tasksupport.executeAndWaitResult(new Aggregate(z, seqop, combop, splitter))
}
-
+
def foldLeft[S](z: S)(op: (S, T) => S): S = seq.foldLeft(z)(op)
-
+
def foldRight[S](z: S)(op: (T, S) => S): S = seq.foldRight(z)(op)
-
+
def reduceLeft[U >: T](op: (U, T) => U): U = seq.reduceLeft(op)
def reduceRight[U >: T](op: (T, U) => U): U = seq.reduceRight(op)
@@ -440,7 +440,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
reduce((x, y) => if (cmp.lteq(f(x), f(y))) x else y)
}
-
+
def map[S, That](f: T => S)(implicit bf: CanBuildFrom[Repr, S, That]): That = if (bf(repr).isCombiner) {
tasksupport.executeAndWaitResult(new Map[S, That](f, combinerFactory(() => bf(repr).asCombiner), splitter) mapResult { _.resultWithTaskSupport })
} else setTaskSupport(seq.map(f)(bf2seq(bf)), tasksupport)
@@ -498,11 +498,11 @@ self: ParIterableLike[T, Repr, Sequential] =>
def find(pred: T => Boolean): Option[T] = {
tasksupport.executeAndWaitResult(new Find(pred, splitter assign new DefaultSignalling with VolatileAbort))
}
-
+
/** Creates a combiner factory. Each combiner factory instance is used
* once per invocation of a parallel transformer method for a single
* collection.
- *
+ *
* The default combiner factory creates a new combiner every time it
* is requested, unless the combiner is thread-safe as indicated by its
* `canBeShared` method. In this case, the method returns a factory which
@@ -522,7 +522,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
def doesShareCombiners = false
}
}
-
+
protected[this] def combinerFactory[S, That](cbf: () => Combiner[S, That]) = {
val combiner = cbf()
combiner.combinerTaskSupport = tasksupport
@@ -535,7 +535,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
def doesShareCombiners = false
}
}
-
+
def filter(pred: T => Boolean): Repr = {
tasksupport.executeAndWaitResult(new Filter(pred, combinerFactory, splitter) mapResult { _.resultWithTaskSupport })
}
@@ -906,9 +906,9 @@ self: ParIterableLike[T, Repr, Sequential] =>
}
override def requiresStrictSplitters = inner.requiresStrictSplitters
}
-
+
protected trait Transformer[R, Tp] extends Accessor[R, Tp]
-
+
protected[this] class Foreach[S](op: T => S, protected[this] val pit: IterableSplitter[T])
extends Accessor[Unit, Foreach[S]] {
@volatile var result: Unit = ()
@@ -925,7 +925,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
override def merge(that: Count) = result = result + that.result
// override def toString = "CountTask(" + pittxt + ")"
}
-
+
protected[this] class Reduce[U >: T](op: (U, U) => U, protected[this] val pit: IterableSplitter[T])
extends Accessor[Option[U], Reduce[U]] {
@volatile var result: Option[U] = None
@@ -1334,7 +1334,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
} else result = that.result
override def requiresStrictSplitters = true
}
-
+
protected[this] class FromScanTree[U >: T, That]
(tree: ScanTree[U], z: U, op: (U, U) => U, cbf: CombinerFactory[U, That])
extends StrictSplitterCheckTask[Combiner[U, That], FromScanTree[U, That]] {
@@ -1410,13 +1410,13 @@ self: ParIterableLike[T, Repr, Sequential] =>
def rightmost = this
def print(depth: Int) = println((" " * depth) + this)
}
-
+
/* alias methods */
-
+
def /:[S](z: S)(op: (S, T) => S): S = foldLeft(z)(op);
-
+
def :\[S](z: S)(op: (T, S) => S): S = foldRight(z)(op);
-
+
/* debug information */
private[parallel] def debugInformation = "Parallel collection: " + this.getClass
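
A small example of the `aggregate` signature from the hunk above: `seqop`
folds elements within a partition, `combop` merges the partial results, and
the split points are unspecified.

    val words = List("a", "bb", "ccc").par
    val totalLength = words.aggregate(0)(_ + _.length, _ + _)
    assert(totalLength == 6)
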
diff --git a/src/library/scala/collection/parallel/ParSeqLike.scala b/src/library/scala/collection/parallel/ParSeqLike.scala
index 3d498ab41b..9f28a286ca 100644
--- a/src/library/scala/collection/parallel/ParSeqLike.scala
+++ b/src/library/scala/collection/parallel/ParSeqLike.scala
@@ -185,9 +185,9 @@ self =>
} otherwise seq.sameElements(that)
/** Tests whether this $coll ends with the given parallel sequence.
- *
+ *
* $abortsignalling
- *
+ *
* @tparam S the type of the elements of `that` sequence
* @param that the sequence to test
* @return `true` if this $coll has `that` as a suffix, `false` otherwise
diff --git a/src/library/scala/collection/parallel/RemainsIterator.scala b/src/library/scala/collection/parallel/RemainsIterator.scala
index 8ed4583419..c5910ff2c8 100644
--- a/src/library/scala/collection/parallel/RemainsIterator.scala
+++ b/src/library/scala/collection/parallel/RemainsIterator.scala
@@ -28,7 +28,7 @@ private[collection] trait RemainsIterator[+T] extends Iterator[T] {
* This method doesn't change the state of the iterator.
*/
def remaining: Int
-
+
/** For most collections, this is a cheap operation.
* Exceptions can override this method.
*/
@@ -386,22 +386,22 @@ extends AugmentedIterableIterator[T]
with DelegatedSignalling
{
self =>
-
+
var signalDelegate: Signalling = IdleSignalling
-
+
/** Creates a copy of this iterator. */
def dup: IterableSplitter[T]
def split: Seq[IterableSplitter[T]]
-
+
def splitWithSignalling: Seq[IterableSplitter[T]] = {
val pits = split
pits foreach { _.signalDelegate = signalDelegate }
pits
}
-
+
def shouldSplitFurther[S](coll: ParIterable[S], parallelismLevel: Int) = remaining > thresholdFromSize(coll.size, parallelismLevel)
-
+
/** The number of elements this iterator has yet to traverse. This method
* doesn't change the state of the iterator.
*
@@ -554,13 +554,13 @@ self =>
pits foreach { _.signalDelegate = signalDelegate }
pits
}
-
+
def psplitWithSignalling(sizes: Int*): Seq[SeqSplitter[T]] = {
val pits = psplit(sizes: _*)
pits foreach { _.signalDelegate = signalDelegate }
pits
}
-
+
/** The number of elements this iterator has yet to traverse. This method
* doesn't change the state of the iterator. Unlike the version of this method in the supertrait,
* method `remaining` in `ParSeqLike.this.ParIterator` must return an exact number
diff --git a/src/library/scala/collection/parallel/Tasks.scala b/src/library/scala/collection/parallel/Tasks.scala
index e32ac443ae..a7f2c586a7 100644
--- a/src/library/scala/collection/parallel/Tasks.scala
+++ b/src/library/scala/collection/parallel/Tasks.scala
@@ -168,12 +168,12 @@ trait AdaptiveWorkStealingTasks extends Tasks {
def internal() = {
var last = spawnSubtasks()
-
+
last.body.tryLeaf(None)
last.release()
body.result = last.body.result
body.throwable = last.body.throwable
-
+
while (last.next != null) {
// val lastresult = Option(last.body.result)
val beforelast = last
@@ -190,7 +190,7 @@ trait AdaptiveWorkStealingTasks extends Tasks {
body.tryMerge(last.body.repr)
}
}
-
+
def spawnSubtasks() = {
var last: TaskImpl[R, Tp] = null
var head: TaskImpl[R, Tp] = this
@@ -234,7 +234,7 @@ trait ThreadPoolTasks extends Tasks {
// utb: var future: Future[_] = null
@volatile var owned = false
@volatile var completed = false
-
+
def start() = synchronized {
// debuglog("Starting " + body)
// utb: future = executor.submit(this)
@@ -323,7 +323,7 @@ trait ThreadPoolTasks extends Tasks {
// debuglog("-----------> Executing with wait: " + task)
t.start()
-
+
t.sync()
t.body.forwardThrowable
t.body.result
diff --git a/src/library/scala/collection/parallel/mutable/ParCtrie.scala b/src/library/scala/collection/parallel/mutable/ParCtrie.scala
index 178424decc..470972adad 100644
--- a/src/library/scala/collection/parallel/mutable/ParCtrie.scala
+++ b/src/library/scala/collection/parallel/mutable/ParCtrie.scala
@@ -26,11 +26,11 @@ import scala.collection.mutable.CtrieIterator
/** Parallel Ctrie collection.
- *
+ *
* It has its bulk operations parallelized, but uses the snapshot operation
* to create the splitter. This means that parallel bulk operations can be
* called concurrently with the modifications.
- *
+ *
* @author Aleksandar Prokopec
* @since 2.10
*/
@@ -41,41 +41,40 @@ extends ParMap[K, V]
with ParCtrieCombiner[K, V]
with Serializable
{
-
def this() = this(new Ctrie)
-
+
override def mapCompanion: GenericParMapCompanion[ParCtrie] = ParCtrie
-
+
override def empty: ParCtrie[K, V] = ParCtrie.empty
-
+
protected[this] override def newCombiner = ParCtrie.newCombiner
-
+
override def seq = ctrie
-
+
def splitter = new ParCtrieSplitter(0, ctrie.readOnlySnapshot().asInstanceOf[Ctrie[K, V]], true)
-
+
override def clear() = ctrie.clear()
-
+
def result = this
-
+
def get(key: K): Option[V] = ctrie.get(key)
-
+
def put(key: K, value: V): Option[V] = ctrie.put(key, value)
-
+
def update(key: K, value: V): Unit = ctrie.update(key, value)
-
+
def remove(key: K): Option[V] = ctrie.remove(key)
-
+
def +=(kv: (K, V)): this.type = {
ctrie.+=(kv)
this
}
-
+
def -=(key: K): this.type = {
ctrie.-=(key)
this
}
-
+
override def size = {
val in = ctrie.readRoot()
val r = in.gcasRead(ctrie)
@@ -87,11 +86,11 @@ extends ParMap[K, V]
cn.cachedSize(ctrie)
}
}
-
+
override def stringPrefix = "ParCtrie"
-
+
/* tasks */
-
+
/** Computes Ctrie size in parallel. */
class Size(offset: Int, howmany: Int, array: Array[BasicNode]) extends Task[Int, Size] {
var result = -1
@@ -115,7 +114,7 @@ extends ParMap[K, V]
def shouldSplitFurther = howmany > 1
override def merge(that: Size) = result = result + that.result
}
-
+
}
@@ -126,63 +125,63 @@ extends CtrieIterator[K, V](lev, ct, mustInit)
// only evaluated if `remaining` is invoked (which is not used by most tasks)
lazy val totalsize = ct.par.size
var iterated = 0
-
+
protected override def newIterator(_lev: Int, _ct: Ctrie[K, V], _mustInit: Boolean) = new ParCtrieSplitter[K, V](_lev, _ct, _mustInit)
-
+
override def shouldSplitFurther[S](coll: collection.parallel.ParIterable[S], parallelismLevel: Int) = {
val maxsplits = 3 + Integer.highestOneBit(parallelismLevel)
level < maxsplits
}
-
+
def dup = {
val it = newIterator(0, ct, false)
dupTo(it)
it.iterated = this.iterated
it
}
-
+
override def next() = {
iterated += 1
super.next()
}
-
+
def split: Seq[IterableSplitter[(K, V)]] = subdivide().asInstanceOf[Seq[IterableSplitter[(K, V)]]]
-
+
override def isRemainingCheap = false
-
+
def remaining: Int = totalsize - iterated
}
/** Only used within the `ParCtrie`. */
private[mutable] trait ParCtrieCombiner[K, V] extends Combiner[(K, V), ParCtrie[K, V]] {
-
+
def combine[N <: (K, V), NewTo >: ParCtrie[K, V]](other: Combiner[N, NewTo]): Combiner[N, NewTo] = if (this eq other) this else {
throw new UnsupportedOperationException("This shouldn't have been called in the first place.")
-
+
val thiz = this.asInstanceOf[ParCtrie[K, V]]
val that = other.asInstanceOf[ParCtrie[K, V]]
val result = new ParCtrie[K, V]
-
+
result ++= thiz.iterator
result ++= that.iterator
-
+
result
}
-
+
override def canBeShared = true
-
+
}
-
+
object ParCtrie extends ParMapFactory[ParCtrie] {
-
+
def empty[K, V]: ParCtrie[K, V] = new ParCtrie[K, V]
-
+
def newCombiner[K, V]: Combiner[(K, V), ParCtrie[K, V]] = new ParCtrie[K, V]
-
+
implicit def canBuildFrom[K, V]: CanCombineFrom[Coll, (K, V), ParCtrie[K, V]] = new CanCombineFromMap[K, V]
-
+
}
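As the scaladoc above notes, `ParCtrie` builds its splitter from `ctrie.readOnlySnapshot()`, so bulk operations traverse an atomic snapshot while writers proceed concurrently. A hedged usage sketch (class and package as introduced by this patch):

    import scala.collection.parallel.mutable.ParCtrie

    object ParCtrieDemo extends App {
      val m = new ParCtrie[Int, Int]()
      for (i <- 0 until 1000) m.put(i, i)
      // The sum runs over a stable snapshot: a concurrent m.put or m.remove
      // from another thread cannot disturb this traversal.
      println(m.map({ case (_, v) => v }).sum)
    }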
diff --git a/src/library/scala/collection/parallel/mutable/ParHashSet.scala b/src/library/scala/collection/parallel/mutable/ParHashSet.scala
index 811fc8bfe7..e0a2ab03df 100644
--- a/src/library/scala/collection/parallel/mutable/ParHashSet.scala
+++ b/src/library/scala/collection/parallel/mutable/ParHashSet.scala
@@ -121,7 +121,7 @@ with collection.mutable.FlatHashTable.HashUtils[T] {
private var mask = ParHashSetCombiner.discriminantmask
private var nonmasklen = ParHashSetCombiner.nonmasklength
private var seedvalue = 27
-
+
def +=(elem: T) = {
sz += 1
val hc = improve(elemHashCode(elem), seedvalue)
diff --git a/src/library/scala/collection/parallel/package.scala b/src/library/scala/collection/parallel/package.scala
index 8f49b80c93..943e0208c7 100644
--- a/src/library/scala/collection/parallel/package.scala
+++ b/src/library/scala/collection/parallel/package.scala
@@ -122,7 +122,7 @@ package parallel {
}
/* classes */
-
+
trait CombinerFactory[U, Repr] {
/** Provides a combiner used to construct a collection. */
def apply(): Combiner[U, Repr]
@@ -134,7 +134,7 @@ package parallel {
*/
def doesShareCombiners: Boolean
}
-
+
/** Composite throwable - thrown when multiple exceptions are thrown at the same time. */
final case class CompositeThrowable(
val throwables: Set[Throwable]
@@ -209,18 +209,18 @@ package parallel {
//self: EnvironmentPassingCombiner[Elem, To] =>
protected var buckets: Array[UnrolledBuffer[Buck]] @uncheckedVariance = new Array[UnrolledBuffer[Buck]](bucketnumber)
protected var sz: Int = 0
-
+
def size = sz
-
+
def clear() = {
buckets = new Array[UnrolledBuffer[Buck]](bucketnumber)
sz = 0
}
-
+
def beforeCombine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]) {}
-
+
def afterCombine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]) {}
-
+
def combine[N <: Elem, NewTo >: To](other: Combiner[N, NewTo]): Combiner[N, NewTo] = {
if (this eq other) this
else other match {
diff --git a/src/library/scala/concurrent/Channel.scala b/src/library/scala/concurrent/Channel.scala
index e79f76430f..f6d6341151 100644
--- a/src/library/scala/concurrent/Channel.scala
+++ b/src/library/scala/concurrent/Channel.scala
@@ -23,7 +23,7 @@ class Channel[A] {
private var written = new LinkedList[A] // FIFO buffer, realized through
private var lastWritten = written // aliasing of a linked list
private var nreaders = 0
-
+
/**
* @param x ...
*/
@@ -33,7 +33,7 @@ class Channel[A] {
lastWritten = lastWritten.next
if (nreaders > 0) notify()
}
-
+
def read: A = synchronized {
while (written.next == null) {
try {
@@ -46,5 +46,5 @@ class Channel[A] {
written = written.next
x
}
-
+
}
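`Channel` is a small blocking FIFO built on `synchronized`/`wait`/`notify`, as the hunks above show: `read` blocks until `write` appends a node and notifies. A minimal producer/consumer sketch:

    import scala.concurrent.Channel

    object ChannelDemo extends App {
      val ch = new Channel[Int]
      val consumer = new Thread {
        override def run() = println("got: " + ch.read) // blocks until a value arrives
      }
      consumer.start()
      ch.write(42) // enqueues the value and wakes the waiting reader
      consumer.join()
    }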
diff --git a/src/library/scala/concurrent/ConcurrentPackageObject.scala b/src/library/scala/concurrent/ConcurrentPackageObject.scala
index 6aacd53de2..ae17c7e032 100644
--- a/src/library/scala/concurrent/ConcurrentPackageObject.scala
+++ b/src/library/scala/concurrent/ConcurrentPackageObject.scala
@@ -18,16 +18,16 @@ abstract class ConcurrentPackageObject {
*/
lazy val executionContext =
new impl.ExecutionContextImpl(java.util.concurrent.Executors.newCachedThreadPool())
-
+
/** A global service for scheduling tasks for execution.
*/
// lazy val scheduler =
// new default.SchedulerImpl
-
+
val handledFutureException: PartialFunction[Throwable, Throwable] = {
case t: Throwable if isFutureThrowable(t) => t
}
-
+
// TODO rename appropriately and make public
private[concurrent] def isFutureThrowable(t: Throwable) = t match {
case e: Error => false
@@ -35,7 +35,7 @@ abstract class ConcurrentPackageObject {
case i: InterruptedException => false
case _ => true
}
-
+
private[concurrent] def resolve[T](source: Try[T]): Try[T] = source match {
case Failure(t: scala.runtime.NonLocalReturnControl[_]) => Success(t.value.asInstanceOf[T])
case Failure(t: scala.util.control.ControlThrowable) => Failure(new ExecutionException("Boxed ControlThrowable", t))
@@ -46,24 +46,24 @@ abstract class ConcurrentPackageObject {
private[concurrent] def resolver[T] =
resolverFunction.asInstanceOf[PartialFunction[Throwable, Try[T]]]
-
+
/* concurrency constructs */
-
+
def future[T](body: =>T)(implicit execCtx: ExecutionContext = executionContext): Future[T] =
execCtx future body
-
+
def promise[T]()(implicit execCtx: ExecutionContext = executionContext): Promise[T] =
execCtx promise
-
+
/** Wraps a block of code into an awaitable object. */
def body2awaitable[T](body: =>T) = new Awaitable[T] {
def await(atMost: Duration)(implicit cb: CanAwait) = body
}
-
+
/** Used to block on a piece of code which potentially blocks.
- *
+ *
* @param body A piece of code which contains potentially blocking or long running calls.
- *
+ *
* Calling this method may throw the following exceptions:
* - CancellationException - if the computation was cancelled
* - InterruptedException - in the case that a wait within the blockable object was interrupted
@@ -71,11 +71,11 @@ abstract class ConcurrentPackageObject {
*/
def blocking[T](atMost: Duration)(body: =>T)(implicit execCtx: ExecutionContext): T =
executionContext.blocking(atMost)(body)
-
+
/** Blocks on an awaitable object.
- *
+ *
* @param awaitable An object with a `block` method which runs potentially blocking or long running calls.
- *
+ *
* Calling this method may throw the following exceptions:
* - CancellationException - if the computation was cancelled
* - InterruptedException - in the case that a wait within the blockable object was interrupted
@@ -83,7 +83,7 @@ abstract class ConcurrentPackageObject {
*/
def blocking[T](awaitable: Awaitable[T], atMost: Duration)(implicit execCtx: ExecutionContext = executionContext): T =
executionContext.blocking(awaitable, atMost)
-
+
@inline implicit final def int2durationops(x: Int): DurationOps = new DurationOps(x)
}
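The package object's `future`, `promise` and `blocking` entry points all default to the package-level `executionContext`. A hedged sketch of the intended call pattern at this stage of the patch (`Duration` is assumed to live at `scala.util.Duration` here, as elsewhere in this commit):

    import scala.concurrent._
    import scala.util.Duration // assumed location in this snapshot

    object BlockingDemo extends App {
      val f = future { 21 * 2 }                            // runs on the default executionContext
      val r = blocking(f, Duration.fromNanos(1000000000L)) // await with a ~1s budget
      println(r)
    }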
diff --git a/src/library/scala/concurrent/DelayedLazyVal.scala b/src/library/scala/concurrent/DelayedLazyVal.scala
index 0b7f54a27a..a17153bad5 100644
--- a/src/library/scala/concurrent/DelayedLazyVal.scala
+++ b/src/library/scala/concurrent/DelayedLazyVal.scala
@@ -26,23 +26,23 @@ package scala.concurrent
class DelayedLazyVal[T](f: () => T, body: => Unit) {
@volatile private[this] var _isDone = false
private[this] lazy val complete = f()
-
+
/** Whether the computation is complete.
*
* @return true if the computation is complete.
*/
def isDone = _isDone
-
+
/** The current result of f(), or the final result if complete.
*
* @return the current value
*/
def apply(): T = if (isDone) complete else f()
-
+
// TODO replace with scala.concurrent.future { ... }
ops.future {
body
_isDone = true
}
-
+
}
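`DelayedLazyVal` keeps returning fresh results of `f()` while `body` runs in the background (via `ops.future`, per the TODO above), then pins the final value once `_isDone` flips. A small usage sketch:

    import scala.concurrent.DelayedLazyVal

    object DelayedDemo extends App {
      val buf = new java.util.concurrent.ConcurrentLinkedQueue[Int]
      val progress = new DelayedLazyVal(() => buf.size, {
        for (i <- 1 to 100) buf.add(i) // the slow computation, run asynchronously
      })
      while (!progress.isDone) println("seen so far: " + progress())
      println("final: " + progress()) // now the cached `complete` value
    }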
diff --git a/src/library/scala/concurrent/ExecutionContext.scala b/src/library/scala/concurrent/ExecutionContext.scala
index 99cd264ac5..eb1b3355c0 100644
--- a/src/library/scala/concurrent/ExecutionContext.scala
+++ b/src/library/scala/concurrent/ExecutionContext.scala
@@ -21,41 +21,41 @@ import collection._
trait ExecutionContext {
-
+
protected implicit object CanAwaitEvidence extends CanAwait
-
+
def execute(runnable: Runnable): Unit
-
+
def execute[U](body: () => U): Unit
-
+
def promise[T]: Promise[T]
-
+
def future[T](body: Callable[T]): Future[T] = future(body.call())
-
+
def future[T](body: => T): Future[T]
-
+
def blocking[T](atMost: Duration)(body: =>T): T
-
+
def blocking[T](awaitable: Awaitable[T], atMost: Duration): T
-
+
def reportFailure(t: Throwable): Unit
-
+
/* implementations follow */
-
+
private implicit val executionContext = this
-
+
def keptPromise[T](result: T): Promise[T] = {
val p = promise[T]
p success result
}
-
+
def brokenPromise[T](t: Throwable): Promise[T] = {
val p = promise[T]
p failure t
}
-
+
/** TODO some docs
- *
+ *
*/
def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]]): Future[Coll[T]] = {
import nondeterministic._
@@ -63,13 +63,13 @@ trait ExecutionContext {
val counter = new AtomicInteger(1) // how else could we do this?
val p: Promise[Coll[T]] = promise[Coll[T]] // we need an implicit execctx in the signature
var idx = 0
-
+
def tryFinish() = if (counter.decrementAndGet() == 0) {
val builder = cbf(futures)
builder ++= buffer
p success builder.result
}
-
+
for (f <- futures) {
val currentIndex = idx
buffer += null.asInstanceOf[T]
@@ -83,46 +83,46 @@ trait ExecutionContext {
}
idx += 1
}
-
+
tryFinish()
-
+
p.future
}
-
+
/** TODO some docs
- *
+ *
*/
def any[T](futures: Traversable[Future[T]]): Future[T] = {
val p = promise[T]
val completeFirst: Try[T] => Unit = elem => p tryComplete elem
-
+
futures foreach (_ onComplete completeFirst)
-
+
p.future
}
-
+
/** TODO some docs
- *
+ *
*/
def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean): Future[Option[T]] = {
if (futures.isEmpty) Promise.kept[Option[T]](None).future
else {
val result = promise[Option[T]]
val count = new AtomicInteger(futures.size)
- val search: Try[T] => Unit = {
+ val search: Try[T] => Unit = {
v => v match {
case Success(r) => if (predicate(r)) result trySuccess Some(r)
case _ =>
}
if (count.decrementAndGet() == 0) result trySuccess None
}
-
+
futures.foreach(_ onComplete search)
result.future
}
}
-
+
}
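`all`, `any` and `find` above implement their combinator logic once in the context, with the `Future` companion (later in this diff) forwarding to them. A hedged sketch of `any`, with the package-level default made implicit (this API is mid-refactor in this commit):

    import scala.concurrent._

    object AnyDemo extends App {
      implicit val ec: ExecutionContext = executionContext
      val fast = future { 1 }
      val slow = future { Thread.sleep(100); 2 }
      // Completes with whichever input finishes first: nondeterministic by design.
      Future.any(List(fast, slow)) onSuccess { case v => println("first: " + v) }
      Thread.sleep(200)
    }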
diff --git a/src/library/scala/concurrent/Future.scala b/src/library/scala/concurrent/Future.scala
index 73f76bbbfb..eb54b61db0 100644
--- a/src/library/scala/concurrent/Future.scala
+++ b/src/library/scala/concurrent/Future.scala
@@ -28,9 +28,9 @@ import scala.collection.generic.CanBuildFrom
/** The trait that represents futures.
- *
+ *
* Asynchronous computations that yield futures are created with the `future` call:
- *
+ *
* {{{
* val s = "Hello"
* val f: Future[String] = future {
@@ -40,9 +40,9 @@ import scala.collection.generic.CanBuildFrom
* case msg => println(msg)
* }
* }}}
- *
+ *
* @author Philipp Haller, Heather Miller, Aleksandar Prokopec, Viktor Klang
- *
+ *
* @define multipleCallbacks
* Multiple callbacks may be registered; there is no guarantee that they will be
* executed in a particular order.
@@ -54,18 +54,18 @@ import scala.collection.generic.CanBuildFrom
* - `Error` - errors are not contained within futures
* - `InterruptedException` - not contained within futures
* - all `scala.util.control.ControlThrowable` except `NonLocalReturnControl` - not contained within futures
- *
+ *
 * Instead, the future is completed with an ExecutionException with one of the exceptions above
* as the cause.
* If a future is failed with a `scala.runtime.NonLocalReturnControl`,
 * it is completed with the value from that throwable instead.
- *
+ *
* @define nonDeterministic
* Note: using this method yields nondeterministic dataflow programs.
- *
+ *
* @define forComprehensionExamples
* Example:
- *
+ *
* {{{
* val f = future { 5 }
* val g = future { 3 }
@@ -74,116 +74,116 @@ import scala.collection.generic.CanBuildFrom
* y: Int <- g // returns Future(5)
* } yield x + y
* }}}
- *
+ *
* is translated to:
- *
+ *
* {{{
* f flatMap { (x: Int) => g map { (y: Int) => x + y } }
* }}}
*/
trait Future[+T] extends Awaitable[T] {
self =>
-
+
/* Callbacks */
-
+
/** When this future is completed successfully (i.e. with a value),
* apply the provided partial function to the value if the partial function
* is defined at that value.
- *
+ *
* If the future has already been completed with a value,
* this will either be applied immediately or be scheduled asynchronously.
- *
+ *
* $multipleCallbacks
*/
def onSuccess[U](pf: PartialFunction[T, U]): this.type = onComplete {
case Failure(t) => // do nothing
case Success(v) => if (pf isDefinedAt v) pf(v) else { /*do nothing*/ }
}
-
+
/** When this future is completed with a failure (i.e. with a throwable),
* apply the provided callback to the throwable.
- *
+ *
* $caughtThrowables
- *
+ *
* If the future has already been completed with a failure,
* this will either be applied immediately or be scheduled asynchronously.
- *
+ *
* Will not be called in case that the future is completed with a value.
- *
+ *
* $multipleCallbacks
*/
def onFailure[U](callback: PartialFunction[Throwable, U]): this.type = onComplete {
case Failure(t) => if (isFutureThrowable(t) && callback.isDefinedAt(t)) callback(t) else { /*do nothing*/ }
case Success(v) => // do nothing
}
-
+
/** When this future is completed, either through an exception, a timeout, or a value,
* apply the provided function.
- *
+ *
* If the future has already been completed,
* this will either be applied immediately or be scheduled asynchronously.
- *
+ *
* $multipleCallbacks
*/
def onComplete[U](func: Try[T] => U): this.type
-
-
+
+
/* Miscellaneous */
-
+
/** Creates a new promise.
*/
def newPromise[S]: Promise[S]
-
-
+
+
/* Projections */
-
+
/** Returns a failed projection of this future.
- *
+ *
* The failed projection is a future holding a value of type `Throwable`.
- *
+ *
* It is completed with a value which is the throwable of the original future
* in case the original future is failed.
- *
+ *
* It is failed with a `NoSuchElementException` if the original future is completed successfully.
- *
+ *
* Blocking on this future returns a value if the original future is completed with an exception
* and throws a corresponding exception if the original future fails.
*/
def failed: Future[Throwable] = {
- def noSuchElem(v: T) =
+ def noSuchElem(v: T) =
new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + v)
-
+
val p = newPromise[Throwable]
-
+
onComplete {
case Failure(t) => p success t
case Success(v) => p failure noSuchElem(v)
}
-
+
p.future
}
-
-
+
+
/* Monadic operations */
-
+
/** Asynchronously processes the value in the future once the value becomes available.
- *
+ *
* Will not be called if the future fails.
*/
def foreach[U](f: T => U): Unit = onComplete {
case Success(r) => f(r)
case Failure(_) => // do nothing
}
-
+
/** Creates a new future by applying a function to the successful result of
* this future. If this future is completed with an exception then the new
* future will also contain this exception.
- *
+ *
 * $forComprehensionExamples
*/
def map[S](f: T => S): Future[S] = {
val p = newPromise[S]
-
+
onComplete {
case Failure(t) => p failure t
case Success(v) =>
@@ -192,23 +192,23 @@ self =>
case t => p complete resolver(t)
}
}
-
+
p.future
}
-
+
/** Creates a new future by applying a function to the successful result of
* this future, and returns the result of the function as the new future.
* If this future is completed with an exception then the new future will
* also contain this exception.
- *
+ *
 * $forComprehensionExamples
*/
def flatMap[S](f: T => Future[S]): Future[S] = {
val p = newPromise[S]
-
+
onComplete {
case Failure(t) => p failure t
- case Success(v) =>
+ case Success(v) =>
try {
f(v) onComplete {
case Failure(t) => p failure t
@@ -218,15 +218,15 @@ self =>
case t: Throwable => p complete resolver(t)
}
}
-
+
p.future
}
-
+
/** Creates a new future by filtering the value of the current future with a predicate.
- *
+ *
* If the current future contains a value which satisfies the predicate, the new future will also hold that value.
* Otherwise, the resulting future will fail with a `NoSuchElementException`.
- *
+ *
* If the current future fails or times out, the resulting future also fails or times out, respectively.
*
* Example:
@@ -240,7 +240,7 @@ self =>
*/
def filter(pred: T => Boolean): Future[T] = {
val p = newPromise[T]
-
+
onComplete {
case Failure(t) => p failure t
case Success(v) =>
@@ -251,12 +251,12 @@ self =>
case t: Throwable => p complete resolver(t)
}
}
-
+
p.future
}
-
+
/** Creates a new future by mapping the value of the current future if the given partial function is defined at that value.
- *
+ *
* If the current future contains a value for which the partial function is defined, the new future will also hold that value.
* Otherwise, the resulting future will fail with a `NoSuchElementException`.
*
@@ -277,7 +277,7 @@ self =>
*/
def collect[S](pf: PartialFunction[T, S]): Future[S] = {
val p = newPromise[S]
-
+
onComplete {
case Failure(t) => p failure t
case Success(v) =>
@@ -288,16 +288,16 @@ self =>
case t: Throwable => p complete resolver(t)
}
}
-
+
p.future
}
-
+
/** Creates a new future that will handle any matching throwable that this
* future might contain. If there is no match, or if this future contains
* a valid result then the new future will contain the same.
- *
+ *
* Example:
- *
+ *
* {{{
* future (6 / 0) recover { case e: ArithmeticException ⇒ 0 } // result: 0
* future (6 / 0) recover { case e: NotFoundException ⇒ 0 } // result: exception
@@ -306,25 +306,25 @@ self =>
*/
def recover[U >: T](pf: PartialFunction[Throwable, U]): Future[U] = {
val p = newPromise[U]
-
+
onComplete {
case Failure(t) if pf isDefinedAt t =>
try { p success pf(t) }
catch { case t: Throwable => p complete resolver(t) }
case otherwise => p complete otherwise
}
-
+
p.future
}
-
+
/** Creates a new future that will handle any matching throwable that this
* future might contain by assigning it a value of another future.
- *
+ *
* If there is no match, or if this future contains
* a valid result then the new future will contain the same result.
- *
+ *
* Example:
- *
+ *
* {{{
* val f = future { Int.MaxValue }
* future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue
@@ -332,7 +332,7 @@ self =>
*/
def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
val p = newPromise[U]
-
+
onComplete {
case Failure(t) if pf isDefinedAt t =>
try {
@@ -342,13 +342,13 @@ self =>
}
case otherwise => p complete otherwise
}
-
+
p.future
}
-
+
/** Zips the values of `this` and `that` future, and creates
* a new future holding the tuple of their results.
- *
+ *
* If `this` future fails, the resulting future is failed
* with the throwable stored in `this`.
* Otherwise, if `that` future fails, the resulting future is failed
@@ -356,27 +356,27 @@ self =>
*/
def zip[U](that: Future[U]): Future[(T, U)] = {
val p = newPromise[(T, U)]
-
+
this onComplete {
case Failure(t) => p failure t
case Success(r) => that onSuccess {
case r2 => p success ((r, r2))
}
}
-
+
that onFailure {
case f => p failure f
}
-
+
p.future
}
-
+
/** Creates a new future which holds the result of this future if it was completed successfully, or, if not,
* the result of the `that` future if `that` is completed successfully.
* If both futures are failed, the resulting future holds the throwable object of the first future.
- *
+ *
* Using this method will not cause concurrent programs to become nondeterministic.
- *
+ *
* Example:
* {{{
* val f = future { sys.error("failed") }
@@ -387,7 +387,7 @@ self =>
*/
def fallbackTo[U >: T](that: Future[U]): Future[U] = {
val p = newPromise[U]
-
+
onComplete {
case Failure(t) => that onComplete {
case Failure(_) => p failure t
@@ -395,23 +395,23 @@ self =>
}
case Success(v) => p success v
}
-
+
p.future
}
-
+
/** Applies the side-effecting function to the result of this future, and returns
* a new future with the result of this future.
- *
+ *
* This method allows one to enforce that the callbacks are executed in a
* specified order.
- *
+ *
* Note that if one of the chained `andThen` callbacks throws
* an exception, that exception is not propagated to the subsequent `andThen`
* callbacks. Instead, the subsequent `andThen` callbacks are given the original
* value of this future.
- *
+ *
* The following example prints out `5`:
- *
+ *
* {{{
* val f = future { 5 }
* f andThen {
@@ -424,21 +424,21 @@ self =>
*/
def andThen[U](pf: PartialFunction[Try[T], U]): Future[T] = {
val p = newPromise[T]
-
+
onComplete {
case r =>
try if (pf isDefinedAt r) pf(r)
finally p complete r
}
-
+
p.future
}
-
+
/** Creates a new future which holds the result of either this future or `that` future, depending on
* which future was completed first.
- *
+ *
* $nonDeterministic
- *
+ *
* Example:
* {{{
* val f = future { sys.error("failed") }
@@ -449,42 +449,42 @@ self =>
*/
def either[U >: T](that: Future[U]): Future[U] = {
val p = self.newPromise[U]
-
+
val completePromise: PartialFunction[Try[U], _] = {
case Failure(t) => p tryFailure t
case Success(v) => p trySuccess v
}
-
+
self onComplete completePromise
that onComplete completePromise
-
+
p.future
}
-
+
}
/** TODO some docs
- *
+ *
* @define nonDeterministic
* Note: using this method yields nondeterministic dataflow programs.
*/
object Future {
-
+
// TODO make more modular by encoding all other helper methods within the execution context
/** TODO some docs
*/
def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]], ec: ExecutionContext): Future[Coll[T]] =
ec.all[T, Coll](futures)
-
+
// move this to future companion object
@inline def apply[T](body: =>T)(implicit executor: ExecutionContext): Future[T] = executor.future(body)
def any[T](futures: Traversable[Future[T]])(implicit ec: ExecutionContext): Future[T] = ec.any(futures)
def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean)(implicit ec: ExecutionContext): Future[Option[T]] = ec.find(futures)(predicate)
-
+
}
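The combinators documented above compose by allocating a fresh promise, registering an `onComplete` on the source, and returning `p.future`. A short sketch exercising `map`, `recover` and `zip` together:

    import scala.concurrent._

    object FutureOpsDemo extends App {
      implicit val ec: ExecutionContext = executionContext
      val risky = future { 6 / 0 }
      val safe  = risky recover { case _: ArithmeticException => 0 } // as in the scaladoc example
      val pair  = future { "x = " } zip (safe map (_ + 1))
      pair onSuccess { case (s, n) => println(s + n) }               // prints "x = 1"
      Thread.sleep(200)
    }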
diff --git a/src/library/scala/concurrent/JavaConversions.scala b/src/library/scala/concurrent/JavaConversions.scala
index bac9d4f558..127a0e0055 100644
--- a/src/library/scala/concurrent/JavaConversions.scala
+++ b/src/library/scala/concurrent/JavaConversions.scala
@@ -48,9 +48,9 @@ object JavaConversions {
// do nothing
}
}
-
+
implicit def asExecutionContext(exec: ExecutorService): ExecutionContext = null // TODO
-
+
implicit def asExecutionContext(exec: Executor): ExecutionContext = null // TODO
-
+
}
diff --git a/src/library/scala/concurrent/Promise.scala b/src/library/scala/concurrent/Promise.scala
index f26deb77ab..4404e90971 100644
--- a/src/library/scala/concurrent/Promise.scala
+++ b/src/library/scala/concurrent/Promise.scala
@@ -24,36 +24,36 @@ import scala.util.{ Try, Success, Failure }
* If the throwable used to fail this promise is an error, a control exception
* or an interrupted exception, it will be wrapped as a cause within an
* `ExecutionException` which will fail the promise.
- *
+ *
* @define nonDeterministic
* Note: Using this method may result in non-deterministic concurrent programs.
*/
trait Promise[T] {
-
+
import nondeterministic._
-
+
/** Future containing the value of this promise.
*/
def future: Future[T]
-
+
/** Completes the promise with either an exception or a value.
- *
+ *
* @param result Either the value or the exception to complete the promise with.
- *
+ *
* $promiseCompletion
*/
def complete(result:Try[T]): this.type = if (tryComplete(result)) this else throwCompleted
-
+
/** Tries to complete the promise with either a value or the exception.
- *
+ *
* $nonDeterministic
- *
+ *
 * @return `false` if the promise has already been completed, `true` otherwise.
*/
def tryComplete(result: Try[T]): Boolean
-
+
/** Completes this promise with the specified future, once that future is completed.
- *
+ *
* @return This promise
*/
final def completeWith(other: Future[T]): this.type = {
@@ -62,64 +62,64 @@ trait Promise[T] {
}
this
}
-
+
/** Completes the promise with a value.
- *
+ *
* @param value The value to complete the promise with.
- *
+ *
* $promiseCompletion
*/
def success(v: T): this.type = if (trySuccess(v)) this else throwCompleted
-
+
/** Tries to complete the promise with a value.
- *
+ *
* $nonDeterministic
- *
+ *
 * @return `false` if the promise has already been completed, `true` otherwise.
*/
def trySuccess(value: T): Boolean = tryComplete(Success(value))
-
+
/** Completes the promise with an exception.
- *
+ *
* @param t The throwable to complete the promise with.
- *
+ *
* $allowedThrowables
- *
+ *
* $promiseCompletion
*/
def failure(t: Throwable): this.type = if (tryFailure(t)) this else throwCompleted
-
+
/** Tries to complete the promise with an exception.
- *
+ *
* $nonDeterministic
- *
+ *
 * @return `false` if the promise has already been completed, `true` otherwise.
*/
def tryFailure(t: Throwable): Boolean = tryComplete(Failure(t))
-
+
/** Wraps a `Throwable` in an `ExecutionException` if necessary. TODO replace with `resolver` from scala.concurrent
- *
+ *
* $allowedThrowables
*/
protected def wrap(t: Throwable): Throwable = t match {
case t: Throwable if isFutureThrowable(t) => t
case _ => new ExecutionException(t)
}
-
+
private def throwCompleted = throw new IllegalStateException("Promise already completed.")
-
+
}
object Promise {
-
+
def kept[T](result: T)(implicit execctx: ExecutionContext): Promise[T] =
execctx keptPromise result
-
- def broken[T](t: Throwable)(implicit execctx: ExecutionContext): Promise[T] =
+
+ def broken[T](t: Throwable)(implicit execctx: ExecutionContext): Promise[T] =
execctx brokenPromise t
-
+
}
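A promise is the write side of a future: `success`/`failure` throw on double completion, while the `try*` variants report it. A minimal sketch:

    import scala.concurrent._

    object PromiseDemo extends App {
      implicit val ec: ExecutionContext = executionContext
      val p = promise[Int]()
      p.future onSuccess { case v => println("completed with " + v) }
      p success 42             // completes the future; a second `success` would throw
      println(p trySuccess 43) // false: already completed
      Thread.sleep(100)
    }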
diff --git a/src/library/scala/concurrent/Task.scala b/src/library/scala/concurrent/Task.scala
index d6f86bac31..eb3efbb422 100644
--- a/src/library/scala/concurrent/Task.scala
+++ b/src/library/scala/concurrent/Task.scala
@@ -3,11 +3,11 @@ package scala.concurrent
trait Task[+T] {
-
+
def start(): Unit
-
+
def future: Future[T]
-
+
}
diff --git a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
index af0eb66292..7b44d02612 100644
--- a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
+++ b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
@@ -19,7 +19,7 @@ import scala.collection.mutable.Stack
class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionContext {
import ExecutionContextImpl._
-
+
def execute(runnable: Runnable): Unit = executorService match {
// case fj: ForkJoinPool =>
// TODO fork if more applicable
@@ -27,16 +27,16 @@ class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionCo
case _ =>
executorService execute runnable
}
-
+
def execute[U](body: () => U): Unit = execute(new Runnable {
def run() = body()
})
-
+
def promise[T]: Promise[T] = new Promise.DefaultPromise[T]()(this)
-
+
def future[T](body: =>T): Future[T] = {
val p = promise[T]
-
+
dispatchFuture {
() =>
p complete {
@@ -47,39 +47,39 @@ class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionCo
}
}
}
-
+
p.future
}
-
+
def blocking[T](atMost: Duration)(body: =>T): T = blocking(body2awaitable(body), atMost)
-
+
def blocking[T](awaitable: Awaitable[T], atMost: Duration): T = {
currentExecutionContext.get match {
case null => awaitable.await(atMost)(null) // outside - TODO - fix timeout case
case x => x.blockingCall(awaitable) // inside an execution context thread
}
}
-
+
def reportFailure(t: Throwable) = t match {
case e: Error => throw e // rethrow serious errors
case t => t.printStackTrace()
}
-
+
/** Only callable from the tasks running on the same execution context. */
private def blockingCall[T](body: Awaitable[T]): T = {
releaseStack()
-
+
// TODO see what to do with timeout
body.await(Duration.fromNanos(0))(CanAwaitEvidence)
}
-
+
// an optimization for batching futures
// TODO we should replace this with a public queue,
// so that it can be stolen from
// OR: a push to the local task queue should be so cheap that this is
// not even needed, but stealing is still possible
private val _taskStack = new ThreadLocal[Stack[() => Unit]]()
-
+
private def releaseStack(): Unit =
_taskStack.get match {
case stack if (stack ne null) && stack.nonEmpty =>
@@ -92,7 +92,7 @@ class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionCo
case _ =>
_taskStack.remove()
}
-
+
private[impl] def dispatchFuture(task: () => Unit, force: Boolean = false): Unit =
_taskStack.get match {
case stack if (stack ne null) && !force => stack push task
@@ -119,16 +119,16 @@ class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionCo
}
)
}
-
+
}
object ExecutionContextImpl {
-
+
private[concurrent] def currentExecutionContext: ThreadLocal[ExecutionContextImpl] = new ThreadLocal[ExecutionContextImpl] {
override protected def initialValue = null
}
-
+
}
diff --git a/src/library/scala/concurrent/impl/Future.scala b/src/library/scala/concurrent/impl/Future.scala
index 24d0258cc8..9466761d4d 100644
--- a/src/library/scala/concurrent/impl/Future.scala
+++ b/src/library/scala/concurrent/impl/Future.scala
@@ -13,35 +13,35 @@ import scala.util.{ Try, Success, Failure }
//import scala.util.continuations._
trait Future[+T] extends scala.concurrent.Future[T] with Awaitable[T] {
-
+
implicit def executor: ExecutionContextImpl
-
+
/** For use only within a Future.flow block or another compatible Delimited Continuations reset block.
- *
+ *
* Returns the result of this Future without blocking, by suspending execution and storing it as a
* continuation until the result is available.
*/
//def apply(): T @cps[Future[Any]] = shift(this flatMap (_: T => Future[Any]))
-
+
/** Tests whether this Future has been completed.
*/
final def isCompleted: Boolean = value.isDefined
-
+
/** The contained value of this Future. Before this Future is completed
 * the value will be None. After completion the value will be Some(Success(t))
 * if it contains a valid result, or Some(Failure(error)) if it contains
* an exception.
*/
def value: Option[Try[T]]
-
+
def onComplete[U](func: Try[T] => U): this.type
-
+
/** Creates a new Future[A] which is completed with this Future's result if
* that conforms to A's erased type or a ClassCastException otherwise.
*/
final def mapTo[T](implicit m: Manifest[T]) = {
val p = executor.promise[T]
-
+
onComplete {
case f @ Failure(t) => p complete f.asInstanceOf[Try[T]]
case Success(v) =>
@@ -51,7 +51,7 @@ trait Future[+T] extends scala.concurrent.Future[T] with Awaitable[T] {
case e: ClassCastException ⇒ Failure(e)
})
}
-
+
p.future
}
@@ -65,7 +65,7 @@ trait Future[+T] extends scala.concurrent.Future[T] with Awaitable[T] {
def flatMap[B](f: A => Future[B]) = self filter p flatMap f
def withFilter(q: A => Boolean): FutureWithFilter[A] = new FutureWithFilter[A](self, x ⇒ p(x) && q(x))
}
-
+
}
object Future {
diff --git a/src/library/scala/concurrent/impl/Promise.scala b/src/library/scala/concurrent/impl/Promise.scala
index 7ef76e1501..585f71f3cf 100644
--- a/src/library/scala/concurrent/impl/Promise.scala
+++ b/src/library/scala/concurrent/impl/Promise.scala
@@ -23,11 +23,11 @@ import scala.annotation.tailrec
trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
-
+
def future = this
-
+
def newPromise[S]: Promise[S] = executor promise
-
+
// TODO refine answer and return types here from Any to type parameters
// then move this up in the hierarchy
/*
@@ -40,7 +40,7 @@ trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
cont: (Future[T] => Future[Any]) =>
val p = executor.promise[Any]
val thisPromise = this
-
+
thisPromise completeWith other
thisPromise onComplete { v =>
try {
@@ -49,12 +49,12 @@ trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
case e => p complete resolver(e)
}
}
-
+
p.future
}
*/
// TODO finish this once we introduce something like dataflow streams
-
+
/*
final def <<(stream: PromiseStreamOut[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] => Future[Any]) =>
val fr = executor.promise[Any]
@@ -70,40 +70,40 @@ trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
fr
}
*/
-
+
}
object Promise {
def dur2long(dur: Duration): Long = if (dur.isFinite) dur.toNanos else Long.MaxValue
-
+
def EmptyPending[T](): FState[T] = emptyPendingValue.asInstanceOf[FState[T]]
-
+
/** Represents the internal state.
*/
sealed trait FState[+T] { def value: Option[Try[T]] }
-
+
case class Pending[T](listeners: List[Try[T] => Any] = Nil) extends FState[T] {
def value: Option[Try[T]] = None
}
-
+
case class Success[T](value: Option[util.Success[T]] = None) extends FState[T] {
def result: T = value.get.get
}
-
+
case class Failure[T](value: Option[util.Failure[T]] = None) extends FState[T] {
def exception: Throwable = value.get.exception
}
-
+
private val emptyPendingValue = Pending[Nothing](Nil)
-
+
/** Default promise implementation.
*/
class DefaultPromise[T](implicit val executor: ExecutionContextImpl) extends AbstractPromise with Promise[T] {
self =>
-
+
updater.set(this, Promise.EmptyPending())
-
+
protected final def tryAwait(atMost: Duration): Boolean = {
@tailrec
def awaitUnsafe(waitTimeNanos: Long): Boolean = {
@@ -118,36 +118,36 @@ object Promise {
} catch {
case e: InterruptedException =>
}
-
+
awaitUnsafe(waitTimeNanos - (System.nanoTime() - start))
} else
value.isDefined
}
-
+
executor.blocking(concurrent.body2awaitable(awaitUnsafe(dur2long(atMost))), Duration.fromNanos(0))
}
-
+
private def ready(atMost: Duration)(implicit permit: CanAwait): this.type =
if (value.isDefined || tryAwait(atMost)) this
else throw new TimeoutException("Futures timed out after [" + atMost.toMillis + "] milliseconds")
-
+
def await(atMost: Duration)(implicit permit: CanAwait): T =
ready(atMost).value.get match {
case util.Failure(e) => throw e
case util.Success(r) => r
}
-
+
def value: Option[Try[T]] = getState.value
-
+
@inline
private[this] final def updater = AbstractPromise.updater.asInstanceOf[AtomicReferenceFieldUpdater[AbstractPromise, FState[T]]]
-
+
@inline
protected final def updateState(oldState: FState[T], newState: FState[T]): Boolean = updater.compareAndSet(this, oldState, newState)
-
+
@inline
protected final def getState: FState[T] = updater.get(this)
-
+
def tryComplete(value: Try[T]): Boolean = {
val callbacks: List[Try[T] => Any] = {
try {
@@ -165,7 +165,7 @@ object Promise {
synchronized { notifyAll() } // notify any blockers from `tryAwait`
}
}
-
+
callbacks match {
case null => false
case cs if cs.isEmpty => true
@@ -176,7 +176,7 @@ object Promise {
true
}
}
-
+
def onComplete[U](func: Try[T] => U): this.type = {
@tailrec // Returns whether the future has already been completed or not
def tryAddCallback(): Boolean = {
@@ -188,17 +188,17 @@ object Promise {
if (updateState(pt, pt.copy(listeners = func :: pt.listeners))) false else tryAddCallback()
}
}
-
+
if (tryAddCallback()) {
val result = value.get
executor dispatchFuture {
() => notifyCompleted(func, result)
}
}
-
+
this
}
-
+
private final def notifyCompleted(func: Try[T] => Any, result: Try[T]) {
try {
func(result)
@@ -207,16 +207,16 @@ object Promise {
}
}
}
-
+
/** An already completed Future is given its result at creation.
- *
+ *
* Useful in Future-composition when a value to contribute is already available.
*/
final class KeptPromise[T](suppliedValue: Try[T])(implicit val executor: ExecutionContextImpl) extends Promise[T] {
val value = Some(resolve(suppliedValue))
-
+
def tryComplete(value: Try[T]): Boolean = false
-
+
def onComplete[U](func: Try[T] => U): this.type = {
val completedAs = value.get
executor dispatchFuture {
@@ -224,15 +224,15 @@ object Promise {
}
this
}
-
+
private def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this
-
+
def await(atMost: Duration)(implicit permit: CanAwait): T = value.get match {
case util.Failure(e) => throw e
case util.Success(r) => r
}
}
-
+
}
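`DefaultPromise` above serializes completion through a single compare-and-swap on its `FState` field: the pending listener list is swapped out atomically, and losers of the race retry. A standalone sketch of the same lock-free pattern, using `AtomicReference` rather than the field updater purely for illustration:

    import java.util.concurrent.atomic.AtomicReference
    import scala.annotation.tailrec

    // A toy write-once cell with the CAS retry loop of DefaultPromise.tryComplete.
    final class OneShot[T] {
      private val state = new AtomicReference[Option[T]](None)
      @tailrec final def tryComplete(v: T): Boolean = state.get match {
        case Some(_) => false // already completed, like the Success/Failure states
        case empty   => if (state.compareAndSet(empty, Some(v))) true else tryComplete(v)
      }
      def value: Option[T] = state.get
    }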
diff --git a/src/library/scala/concurrent/package.scala b/src/library/scala/concurrent/package.scala
index 6a98fd50c2..7cc48c09b2 100644
--- a/src/library/scala/concurrent/package.scala
+++ b/src/library/scala/concurrent/package.scala
@@ -25,31 +25,31 @@ package concurrent {
catch { case _ => }
awaitable
}
-
+
def result[T](atMost: Duration)(awaitable: Awaitable[T])(implicit execCtx: ExecutionContext = executionContext): T = {
blocking(awaitable, atMost)
}
}
-
+
/** Importing this object allows using some concurrency primitives
* on futures and promises that can yield nondeterministic programs.
- *
+ *
* While program determinism is broken when using these primitives,
* some programs cannot be written without them (e.g. multiple client threads
* cannot send requests to a server thread through regular promises and futures).
*/
object nondeterministic { }
-
+
/** A timeout exception.
- *
+ *
* Futures are failed with a timeout exception when their timeout expires.
- *
+ *
* Each timeout exception contains an origin future which originally timed out.
*/
class FutureTimeoutException(origin: Future[_], message: String) extends TimeoutException(message) {
def this(origin: Future[_]) = this(origin, "Future timed out.")
}
-
+
final class DurationOps private[concurrent] (x: Int) {
// TODO ADD OTHERS
def ns = util.Duration.fromNanos(x)
diff --git a/src/library/scala/reflect/ReflectionUtils.scala b/src/library/scala/reflect/ReflectionUtils.scala
index dfadfb4976..510f0819c6 100644
--- a/src/library/scala/reflect/ReflectionUtils.scala
+++ b/src/library/scala/reflect/ReflectionUtils.scala
@@ -29,13 +29,13 @@ object ReflectionUtils {
def singletonInstance(className: String, cl: ClassLoader = getClass.getClassLoader): AnyRef = {
val name = if (className endsWith "$") className else className + "$"
- val clazz = java.lang.Class.forName(name, true, cl)
+ val clazz = java.lang.Class.forName(name, true, cl)
val singleton = clazz getField "MODULE$" get null
singleton
}
// Retrieves the MODULE$ field for the given class name.
- def singletonInstanceOpt(className: String, cl: ClassLoader = getClass.getClassLoader): Option[AnyRef] =
+ def singletonInstanceOpt(className: String, cl: ClassLoader = getClass.getClassLoader): Option[AnyRef] =
try Some(singletonInstance(className, cl))
catch { case _: ClassNotFoundException => None }
}
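`singletonInstance` relies on the Scala object encoding: `object Foo` compiles to a class `Foo$` with a static `MODULE$` field holding the unique instance. The same lookup done by hand:

    object ModuleLookupDemo extends App {
      val clazz = java.lang.Class.forName("scala.None$")
      val singleton = clazz getField "MODULE$" get null
      println(singleton eq None) // true: reflection returns the singleton itself
    }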
diff --git a/src/library/scala/reflect/api/Mirror.scala b/src/library/scala/reflect/api/Mirror.scala
index 448dca752c..cea9e1a37d 100644
--- a/src/library/scala/reflect/api/Mirror.scala
+++ b/src/library/scala/reflect/api/Mirror.scala
@@ -13,11 +13,11 @@ trait Mirror extends Universe with RuntimeTypes with TreeBuildUtil {
* to do: throws anything else?
*/
def symbolForName(name: String): Symbol
-
+
/** Return a reference to the companion object of the given class symbol.
*/
def companionInstance(clazz: Symbol): AnyRef
-
+
/** The Scala class symbol corresponding to the runtime class of the given instance.
* @param instance The instance
* @return The class Symbol for the instance
diff --git a/src/library/scala/reflect/api/Modifier.scala b/src/library/scala/reflect/api/Modifier.scala
index c0123ed955..cbfe91e59b 100644
--- a/src/library/scala/reflect/api/Modifier.scala
+++ b/src/library/scala/reflect/api/Modifier.scala
@@ -69,7 +69,7 @@ object Modifier extends immutable.Set[Modifier] {
val parameter = SymbolModifier("parameter")
val preSuper = SymbolModifier("preSuper")
val static = SymbolModifier("static")
-
+
val sourceModifiers: Set[SourceModifier] = SourceModifier.all.toSet
val symbolModifiers: Set[SymbolModifier] = SymbolModifier.all.toSet
val allModifiers: Set[Modifier] = sourceModifiers ++ symbolModifiers
diff --git a/src/library/scala/reflect/api/Names.scala b/src/library/scala/reflect/api/Names.scala
index 3a00f21c8c..c72774dfc7 100755
--- a/src/library/scala/reflect/api/Names.scala
+++ b/src/library/scala/reflect/api/Names.scala
@@ -6,7 +6,7 @@ package api
* The same string can be a name in both universes.
* Two names are equal if they represent the same string and they are
* members of the same universe.
- *
+ *
 * Names are interned. That is, for two names `name1` and `name2`,
* `name1 == name2` implies `name1 eq name2`.
*/
@@ -42,7 +42,7 @@ trait Names {
* Example: `foo_$plus$eq` becomes `foo_+=`
*/
def encoded: String
-
+
/** The decoded name, still represented as a name.
*/
def decodedName: Name
diff --git a/src/library/scala/reflect/api/Symbols.scala b/src/library/scala/reflect/api/Symbols.scala
index 15d754b5b4..44dc2ce1c2 100755
--- a/src/library/scala/reflect/api/Symbols.scala
+++ b/src/library/scala/reflect/api/Symbols.scala
@@ -18,7 +18,7 @@ trait Symbols { self: Universe =>
/** A list of annotations attached to this Symbol.
*/
def annotations: List[self.AnnotationInfo]
-
+
/** Whether this symbol carries an annotation for which the given
* symbol is its typeSymbol.
*/
@@ -99,7 +99,7 @@ trait Symbols { self: Universe =>
* method, or `NoSymbol` if none exists.
*/
def enclosingMethod: Symbol
-
+
/** If this symbol is a package class, this symbol; otherwise the next enclosing
* package class, or `NoSymbol` if none exists.
*/
@@ -170,7 +170,7 @@ trait Symbols { self: Universe =>
* `C`. Then `C.asType` is the type `C[T]`, but `C.asTypeConstructor` is `C`.
*/
def asTypeConstructor: Type // needed by LiftCode
-
+
/** If this symbol is a class, the type `C.this`, otherwise `NoPrefix`.
*/
def thisPrefix: Type
@@ -181,10 +181,10 @@ trait Symbols { self: Universe =>
def selfType: Type
/** A fresh symbol with given name `name`, position `pos` and flags `flags` that has
- * the current symbol as its owner.
+ * the current symbol as its owner.
*/
def newNestedSymbol(name: Name, pos: Position, flags: Long): Symbol // needed by LiftCode
-
+
/** Low-level operation to set the symbol's flags
* @return the symbol itself
*/
diff --git a/src/library/scala/reflect/api/TreePrinters.scala b/src/library/scala/reflect/api/TreePrinters.scala
index 19bfd09b81..21b55e9c0e 100644
--- a/src/library/scala/reflect/api/TreePrinters.scala
+++ b/src/library/scala/reflect/api/TreePrinters.scala
@@ -55,7 +55,7 @@ trait TreePrinters { self: Universe =>
print(")")
if (typesPrinted)
print(".setType(", tree.tpe, ")")
- case list: List[_] =>
+ case list: List[_] =>
print("List(")
val it = list.iterator
while (it.hasNext) {
@@ -64,16 +64,16 @@ trait TreePrinters { self: Universe =>
}
print(")")
case mods: Modifiers =>
- val parts = collection.mutable.ListBuffer[String]()
+ val parts = collection.mutable.ListBuffer[String]()
parts += "Set(" + mods.modifiers.map(_.sourceString).mkString(", ") + ")"
parts += "newTypeName(\"" + mods.privateWithin.toString + "\")"
parts += "List(" + mods.annotations.map{showRaw}.mkString(", ") + ")"
-
+
var keep = 3
if (keep == 3 && mods.annotations.isEmpty) keep -= 1
if (keep == 2 && mods.privateWithin == EmptyTypeName) keep -= 1
if (keep == 1 && mods.modifiers.isEmpty) keep -= 1
-
+
print("Modifiers(", parts.take(keep).mkString(", "), ")")
case name: Name =>
if (name.isTermName) print("newTermName(\"") else print("newTypeName(\"")
diff --git a/src/library/scala/reflect/api/Trees.scala b/src/library/scala/reflect/api/Trees.scala
index 9d96f30afb..181ce85dac 100644
--- a/src/library/scala/reflect/api/Trees.scala
+++ b/src/library/scala/reflect/api/Trees.scala
@@ -491,6 +491,10 @@ trait Trees { self: Universe =>
if (argss.isEmpty) Apply(superRef, Nil)
else (superRef /: argss) (Apply)
}
+ /** 0-1 argument list new, based on a type.
+ */
+ def New(tpe: Type, args: Tree*): Tree =
+ New(TypeTree(tpe), List(args.toList))
/** Type annotation, eliminated by explicit outer */
case class Typed(expr: Tree, tpt: Tree)
@@ -680,12 +684,75 @@ trait Trees { self: Universe =>
case t =>
sys.error("Not a DefDef: " + t + "/" + t.getClass)
}
+ def copyValDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tpt: Tree = null,
+ rhs: Tree = null
+ ): ValDef = tree match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tpt eq null) tpt0 else tpt,
+ if (rhs eq null) rhs0 else rhs
+ )
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def copyClassDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tparams: List[TypeDef] = null,
+ impl: Template = null
+ ): ClassDef = tree match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tparams eq null) tparams0 else tparams,
+ if (impl eq null) impl0 else impl
+ )
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+
def deriveDefDef(ddef: Tree)(applyToRhs: Tree => Tree): DefDef = ddef match {
case DefDef(mods0, name0, tparams0, vparamss0, tpt0, rhs0) =>
treeCopy.DefDef(ddef, mods0, name0, tparams0, vparamss0, tpt0, applyToRhs(rhs0))
case t =>
sys.error("Not a DefDef: " + t + "/" + t.getClass)
}
+ def deriveValDef(vdef: Tree)(applyToRhs: Tree => Tree): ValDef = vdef match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(vdef, mods0, name0, tpt0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def deriveTemplate(templ: Tree)(applyToBody: List[Tree] => List[Tree]): Template = templ match {
+ case Template(parents0, self0, body0) =>
+ treeCopy.Template(templ, parents0, self0, applyToBody(body0))
+ case t =>
+ sys.error("Not a Template: " + t + "/" + t.getClass)
+ }
+ def deriveClassDef(cdef: Tree)(applyToImpl: Template => Template): ClassDef = cdef match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(cdef, mods0, name0, tparams0, applyToImpl(impl0))
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+ def deriveCaseDef(cdef: Tree)(applyToBody: Tree => Tree): CaseDef = cdef match {
+ case CaseDef(pat0, guard0, body0) =>
+ treeCopy.CaseDef(cdef, pat0, guard0, applyToBody(body0))
+ case t =>
+ sys.error("Not a CaseDef: " + t + "/" + t.getClass)
+ }
+ def deriveLabelDef(ldef: Tree)(applyToRhs: Tree => Tree): LabelDef = ldef match {
+ case LabelDef(name0, params0, rhs0) =>
+ treeCopy.LabelDef(ldef, name0, params0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a LabelDef: " + t + "/" + t.getClass)
+ }
class Traverser {
protected var currentOwner: Symbol = definitions.RootClass
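Each `copyX`/`deriveX` helper added above follows one pattern: match the expected node, keep every field, and rebuild via `treeCopy` with one field replaced or transformed. A hedged sketch of using `deriveDefDef` from compiler code (the surrounding setup is illustrative, not part of this patch):

    import scala.tools.nsc.Global

    object DeriveDemo {
      // Rewrite only a method's rhs, preserving mods, name, tparams,
      // vparamss and result type via treeCopy.DefDef.
      def wrapRhs(global: Global)(tree: global.Tree): global.Tree = {
        import global._
        tree match {
          case dd: DefDef => deriveDefDef(dd)(rhs => Block(Nil, rhs))
          case t          => t
        }
      }
    }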
diff --git a/src/library/scala/reflect/api/Types.scala b/src/library/scala/reflect/api/Types.scala
index 8a91956320..cc8e85b9c8 100755
--- a/src/library/scala/reflect/api/Types.scala
+++ b/src/library/scala/reflect/api/Types.scala
@@ -140,7 +140,7 @@ trait Types { self: Universe =>
* If this is not a singleton type, returns this type itself.
*
* Example:
- *
+ *
* class Outer { class C ; val x: C }
* val o: Outer
* <o.x.type>.widen = o.C
diff --git a/src/library/scala/reflect/macro/Context.scala b/src/library/scala/reflect/macro/Context.scala
index ebbd4735e5..2fd9bb6484 100644
--- a/src/library/scala/reflect/macro/Context.scala
+++ b/src/library/scala/reflect/macro/Context.scala
@@ -2,11 +2,11 @@ package scala.reflect
package macro
trait Context extends api.Universe {
-
+
/** Mark a variable as captured; i.e. force boxing in a *Ref type.
*/
def captureVariable(vble: Symbol): Unit
-
+
/** Mark given identifier as a reference to a captured variable itself
* suppressing dereferencing with the `elem` field.
*/
diff --git a/src/library/scala/runtime/NonLocalReturnControl.scala b/src/library/scala/runtime/NonLocalReturnControl.scala
index 8be2745086..216e3e664b 100644
--- a/src/library/scala/runtime/NonLocalReturnControl.scala
+++ b/src/library/scala/runtime/NonLocalReturnControl.scala
@@ -6,12 +6,10 @@
** |/ **
\* */
-
-
package scala.runtime
import scala.util.control.ControlThrowable
-class NonLocalReturnControl[T](val key: AnyRef, val value: T) extends ControlThrowable {
+class NonLocalReturnControl[@specialized T](val key: AnyRef, val value: T) extends ControlThrowable {
final override def fillInStackTrace(): Throwable = this
}
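Adding `@specialized` here avoids boxing the carried `value` for primitive returns, and `fillInStackTrace` is overridden because the exception is pure control flow, never diagnostic. A sketch of the pattern the compiler emits for a non-local `return` from a closure:

    import scala.runtime.NonLocalReturnControl

    object NonLocalDemo extends App {
      // Roughly what `def firstEven(xs: List[Int]): Int = { xs.foreach(x => if (x % 2 == 0) return x); -1 }`
      // desugars to: the inner return throws, the enclosing method catches by key.
      def firstEven(xs: List[Int]): Int = {
        val key = new AnyRef
        try {
          xs.foreach(x => if (x % 2 == 0) throw new NonLocalReturnControl(key, x))
          -1
        } catch {
          case e: NonLocalReturnControl[_] if e.key eq key => e.value.asInstanceOf[Int]
        }
      }
      println(firstEven(List(1, 3, 4, 5))) // 4
    }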
diff --git a/src/library/scala/specialized.scala b/src/library/scala/specialized.scala
index b24474f35d..b876869afb 100644
--- a/src/library/scala/specialized.scala
+++ b/src/library/scala/specialized.scala
@@ -25,7 +25,7 @@ import Specializable._
* @since 2.8
*/
// class tspecialized[T](group: Group[T]) extends annotation.StaticAnnotation {
-
+
class specialized(group: SpecializedGroup) extends annotation.StaticAnnotation {
def this(types: Specializable*) = this(new Group(types.toList))
def this() = this(Everything)
diff --git a/src/library/scala/sys/process/BasicIO.scala b/src/library/scala/sys/process/BasicIO.scala
index edc60a1bb5..77e36f6196 100644
--- a/src/library/scala/sys/process/BasicIO.scala
+++ b/src/library/scala/sys/process/BasicIO.scala
@@ -97,7 +97,7 @@ object BasicIO {
*
* @param withIn True if the process input should be attached to stdin.
 * @param buffer A `StringBuffer` which will receive the process's normal
- * output.
+ * output.
* @param log An optional `ProcessLogger` to which the output should be
* sent. If `None`, output will be sent to stderr.
* @return A `ProcessIO` with the characteristics above.
diff --git a/src/library/scala/util/Properties.scala b/src/library/scala/util/Properties.scala
index 22de5544a8..a62d74b1f6 100644
--- a/src/library/scala/util/Properties.scala
+++ b/src/library/scala/util/Properties.scala
@@ -142,7 +142,7 @@ private[scala] trait PropertiesTrait {
*/
def isWin = osName startsWith "Windows"
def isMac = javaVendor startsWith "Apple"
-
+
// This is looking for javac, tools.jar, etc.
// Tries JDK_HOME first, then the more common but likely jre JAVA_HOME,
// and finally the system property based javaHome.
diff --git a/src/library/scala/util/Try.scala b/src/library/scala/util/Try.scala
index a05a75e0b7..c9bde81317 100644
--- a/src/library/scala/util/Try.scala
+++ b/src/library/scala/util/Try.scala
@@ -15,7 +15,7 @@ import collection.Seq
/**
- * The `Try` type represents a computation that may either result in an exception,
+ * The `Try` type represents a computation that may either result in an exception,
 * or return a success value. It's analogous to the `Either` type.
*/
sealed abstract class Try[+T] {
@@ -55,9 +55,9 @@ sealed abstract class Try[+T] {
def map[U](f: T => U): Try[U]
def collect[U](pf: PartialFunction[T, U]): Try[U]
-
+
def exists(p: T => Boolean): Boolean
-
+
/**
* Converts this to a `Failure` if the predicate is not satisfied.
*/
@@ -77,14 +77,14 @@ sealed abstract class Try[+T] {
* Calls the exceptionHandler with the exception if this is a `Failure`. This is like map for the exception.
*/
def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U]
-
+
/**
* Returns `None` if this is a `Failure` or a `Some` containing the value if this is a `Success`.
*/
def toOption = if (isSuccess) Some(get) else None
def toSeq = if (isSuccess) Seq(get) else Seq()
-
+
/**
* Returns the given function applied to the value from this Success or returns this if this is a `Failure`.
* Alias for `flatMap`.
@@ -92,11 +92,11 @@ sealed abstract class Try[+T] {
def andThen[U](f: T => Try[U]): Try[U] = flatMap(f)
/**
- * Transforms a nested `Try`, i.e., a `Try` of type `Try[Try[T]]`,
+ * Transforms a nested `Try`, i.e., a `Try` of type `Try[Try[T]]`,
* into an un-nested `Try`, i.e., a `Try` of type `Try[T]`.
*/
def flatten[U](implicit ev: T <:< Try[U]): Try[U]
-
+
def failed: Try[Throwable]
}
@@ -109,7 +109,7 @@ final case class Failure[+T](val exception: Throwable) extends Try[T] {
if (rescueException.isDefinedAt(exception)) rescueException(exception) else this
} catch {
case e2 => Failure(e2)
- }
+ }
}
def get: T = throw exception
def flatMap[U](f: T => Try[U]): Try[U] = Failure[U](exception)
@@ -118,7 +118,7 @@ final case class Failure[+T](val exception: Throwable) extends Try[T] {
def map[U](f: T => U): Try[U] = Failure[U](exception)
def collect[U](pf: PartialFunction[T, U]): Try[U] = Failure[U](exception)
def filter(p: T => Boolean): Try[T] = this
- def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] =
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] =
if (rescueException.isDefinedAt(exception)) {
Try(rescueException(exception))
} else {
@@ -134,10 +134,10 @@ final case class Success[+T](r: T) extends Try[T] {
def isSuccess = true
def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U] = Success(r)
def get = r
- def flatMap[U](f: T => Try[U]): Try[U] =
- try f(r)
- catch {
- case e => Failure(e)
+ def flatMap[U](f: T => Try[U]): Try[U] =
+ try f(r)
+ catch {
+ case e => Failure(e)
}
def flatten[U](implicit ev: T <:< Try[U]): Try[U] = r
def foreach[U](f: T => U): Unit = f(r)
@@ -145,7 +145,7 @@ final case class Success[+T](r: T) extends Try[T] {
def collect[U](pf: PartialFunction[T, U]): Try[U] =
if (pf isDefinedAt r) Success(pf(r))
else Failure[U](new NoSuchElementException("Partial function not defined at " + r))
- def filter(p: T => Boolean): Try[T] =
+ def filter(p: T => Boolean): Try[T] =
if (p(r)) this
else Failure(new NoSuchElementException("Predicate does not hold for " + r))
def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] = this
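`collect` on a `Success`, sketched:

    Success(2) collect { case n if n % 2 == 0 => n * n }  // Success(4)
    Success(3) collect { case n if n % 2 == 0 => n * n }  // Failure(NoSuchElementException)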
@@ -155,11 +155,11 @@ final case class Success[+T](r: T) extends Try[T] {
object Try {
-
+
def apply[T](r: => T): Try[T] = {
try { Success(r) } catch {
case e => Failure(e)
}
}
-
+
}
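Note that the catch here is a bare `case e =>`, so this version of `Try.apply` captures any `Throwable`. A small end-to-end sketch combining `apply` with `filter` (the helper and its bounds are hypothetical):

    def parsePort(s: String): Try[Int] =
      Try(s.toInt) filter (p => 0 <= p && p <= 65535)

    parsePort("8080")   // Success(8080)
    parsePort("70000")  // Failure(NoSuchElementException): predicate failed
    parsePort("abc")    // Failure(NumberFormatException): toInt threw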
diff --git a/src/library/scala/util/parsing/combinator/Parsers.scala b/src/library/scala/util/parsing/combinator/Parsers.scala
index 4004a01ad9..27e9112fce 100644
--- a/src/library/scala/util/parsing/combinator/Parsers.scala
+++ b/src/library/scala/util/parsing/combinator/Parsers.scala
@@ -487,7 +487,7 @@ trait Parsers {
}
/** Changes the error message produced by a parser.
- *
+ *
 * This doesn't change the behavior of a parser on either
 * success or failure, just on error. The semantics are
 * slightly different from those obtained by doing `| error(msg)`,
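The combinator under this comment is elided by the hunk; assuming it is `withErrorMessage` (the name is an assumption, inferred from the doc text), a sketch of its intended use:

    import scala.util.parsing.combinator.RegexParsers

    object Digits extends RegexParsers {
      // commit turns a Failure into an Error once "(" has matched;
      // withErrorMessage then replaces the Error's message, while
      // plain Failures keep their original message
      val pair = "(" ~> commit("""\d+""".r) <~ ")" withErrorMessage "expected digits inside parentheses"
    }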
diff --git a/src/manual/scala/tools/docutil/EmitManPage.scala b/src/manual/scala/tools/docutil/EmitManPage.scala
index 3e0b02a415..39d68dbc18 100644
--- a/src/manual/scala/tools/docutil/EmitManPage.scala
+++ b/src/manual/scala/tools/docutil/EmitManPage.scala
@@ -165,7 +165,7 @@ object EmitManPage {
def main(args: Array[String]) = args match{
case Array(classname) => emitManPage(classname)
- case Array(classname, file, _*) => emitManPage(classname, new java.io.FileOutputStream(file))
+ case Array(classname, file, _*) => emitManPage(classname, new java.io.FileOutputStream(file))
case _ => sys.exit(1)
}
diff --git a/src/partest/scala/tools/partest/CompilerTest.scala b/src/partest/scala/tools/partest/CompilerTest.scala
index dd06c051a4..1cb09b433a 100644
--- a/src/partest/scala/tools/partest/CompilerTest.scala
+++ b/src/partest/scala/tools/partest/CompilerTest.scala
@@ -19,7 +19,7 @@ abstract class CompilerTest extends DirectTest {
lazy val global: Global = newCompiler()
lazy val units = compilationUnits(global)(sources: _ *)
-
+
override def extraSettings = "-usejavacp -d " + testOutput.path
def sources: List[String] = List(code)
diff --git a/src/partest/scala/tools/partest/DirectTest.scala b/src/partest/scala/tools/partest/DirectTest.scala
index 74f511aa4e..07444f8d4b 100644
--- a/src/partest/scala/tools/partest/DirectTest.scala
+++ b/src/partest/scala/tools/partest/DirectTest.scala
@@ -63,7 +63,7 @@ abstract class DirectTest extends App {
global.reporter.reset()
f(new global.Run)
}
-
+
// compile the code, optionally first adding to the settings
def compile(args: String*) = compileString(newCompiler(args: _*))(code)
diff --git a/src/partest/scala/tools/partest/nest/PathSettings.scala b/src/partest/scala/tools/partest/nest/PathSettings.scala
index e0a2f65b80..ac04c64c33 100644
--- a/src/partest/scala/tools/partest/nest/PathSettings.scala
+++ b/src/partest/scala/tools/partest/nest/PathSettings.scala
@@ -43,9 +43,11 @@ object PathSettings {
// Directory <root>/test/files/codelib
lazy val srcCodeLibDir = Directory(srcDir / "codelib")
- lazy val srcCodeLib: File = findJar(srcCodeLibDir, "code") getOrElse {
- sys.error("No code.jar found in %s".format(srcCodeLibDir))
- }
+ lazy val srcCodeLib: File = (
+ findJar(srcCodeLibDir, "code")
+ orElse findJar(Directory(testRoot / "files" / "codelib"), "code") // work with --srcpath pending
+ getOrElse sys.error("No code.jar found in %s".format(srcCodeLibDir))
+ )
// Directory <root>/build
lazy val buildDir: Directory = {
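The change above replaces a single jar lookup with an `Option` fallback chain; the same pattern in isolation, with hypothetical helpers:

    def findPrimary: Option[String]  = None
    def findFallback: Option[String] = Some("/tmp/code.jar")

    val jar: String = (
      findPrimary
      orElse findFallback
      getOrElse sys.error("No code.jar found")
    )
    // jar == "/tmp/code.jar"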
diff --git a/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala b/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
index 84f28af7ce..1a4b3456b8 100644
--- a/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
+++ b/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
@@ -68,7 +68,7 @@ class ByteCode(val bytes : Array[Byte], val pos : Int, val length : Int) {
val chunk: Array[Byte] = new Array[Byte](length)
System.arraycopy(bytes, pos, chunk, 0, length)
val str = new String(io.Codec.fromUTF8(bytes, pos, length))
-
+
StringBytesPair(str, chunk)
}