Diffstat (limited to 'src')
-rw-r--r--  src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala | 4
-rw-r--r--  src/actors/scala/actors/scheduler/ForkJoinScheduler.scala | 9
-rw-r--r--  src/build/genprod.scala | 2
-rw-r--r--  src/build/maven/continuations-plugin-pom.xml | 12
-rw-r--r--  src/build/maven/jline-pom.xml | 12
-rw-r--r--  src/build/maven/maven-deploy.xml | 113
-rw-r--r--  src/build/maven/scala-compiler-pom.xml | 12
-rw-r--r--  src/build/maven/scala-dbc-pom.xml | 12
-rw-r--r--  src/build/maven/scala-library-pom.xml | 12
-rw-r--r--  src/build/maven/scala-partest-pom.xml | 12
-rw-r--r--  src/build/maven/scala-swing-pom.xml | 12
-rw-r--r--  src/build/maven/scalap-pom.xml | 12
-rw-r--r--  src/build/pack.xml | 35
-rw-r--r--  src/compiler/scala/reflect/internal/Definitions.scala | 11
-rw-r--r--  src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala | 50
-rw-r--r--  src/compiler/scala/reflect/internal/NameManglers.scala | 15
-rw-r--r--  src/compiler/scala/reflect/internal/StdNames.scala | 7
-rw-r--r--  src/compiler/scala/reflect/internal/SymbolTable.scala | 15
-rw-r--r--  src/compiler/scala/reflect/internal/Symbols.scala | 31
-rw-r--r--  src/compiler/scala/reflect/internal/TreeGen.scala | 30
-rw-r--r--  src/compiler/scala/reflect/internal/Trees.scala | 27
-rw-r--r--  src/compiler/scala/reflect/internal/Types.scala | 142
-rw-r--r--  src/compiler/scala/reflect/internal/pickling/UnPickler.scala | 11
-rw-r--r--  src/compiler/scala/reflect/internal/util/Collections.scala | 10
-rw-r--r--  src/compiler/scala/tools/ant/templates/tool-unix.tmpl | 4
-rw-r--r--  src/compiler/scala/tools/nsc/Global.scala | 93
-rw-r--r--  src/compiler/scala/tools/nsc/SubComponent.scala | 3
-rw-r--r--  src/compiler/scala/tools/nsc/ast/Reifiers.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/ast/TreeDSL.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/ast/TreeGen.scala | 111
-rw-r--r--  src/compiler/scala/tools/nsc/ast/Trees.scala | 28
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/Parsers.scala | 17
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/TreeBuilder.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/GenICode.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/Members.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/analysis/DataFlowAnalysis.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/analysis/TypeFlowAnalysis.scala | 639
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala | 44
-rw-r--r--  src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/Inliners.scala | 317
-rw-r--r--  src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala | 21
-rw-r--r--  src/compiler/scala/tools/nsc/interactive/Global.scala | 1
-rw-r--r--  src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala | 15
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/ILoop.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/IMain.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/Imports.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala | 11
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/Power.scala | 25
-rw-r--r--  src/compiler/scala/tools/nsc/interpreter/ReplVals.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/javac/JavaParsers.scala | 10
-rw-r--r--  src/compiler/scala/tools/nsc/settings/MutableSettings.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala | 18
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/AddInterfaces.scala | 18
-rw-r--r--  src/compiler/scala/tools/nsc/transform/CleanUp.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Constructors.scala | 43
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Erasure.scala | 71
-rw-r--r--  src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala | 16
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Flatten.scala | 34
-rw-r--r--  src/compiler/scala/tools/nsc/transform/LambdaLift.scala | 31
-rw-r--r--  src/compiler/scala/tools/nsc/transform/LazyVals.scala | 45
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Mixin.scala | 93
-rw-r--r--  src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala | 341
-rw-r--r--  src/compiler/scala/tools/nsc/transform/TailCalls.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/transform/UnCurry.scala | 418
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala | 36
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Contexts.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Duplicators.scala | 32
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Infer.scala | 16
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Macros.scala | 77
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Namers.scala | 17
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala | 14
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala | 337
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/RefChecks.scala | 54
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Typers.scala | 95
-rw-r--r--  src/compiler/scala/tools/nsc/util/ProxyReport.scala | 2
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala | 55
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala | 22
-rw-r--r--  src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala | 40
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java | 3829
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java | 1749
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java | 756
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java | 1590
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java | 113
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java | 31
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java | 81
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java | 85
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/package-info.java | 5
-rw-r--r--  src/library/scala/Tuple2.scala | 2
-rw-r--r--  src/library/scala/annotation/elidable.scala | 55
-rw-r--r--  src/library/scala/collection/GenTraversableLike.scala | 2
-rw-r--r--  src/library/scala/collection/immutable/List.scala | 10
-rw-r--r--  src/library/scala/collection/immutable/RedBlack.scala | 7
-rw-r--r--  src/library/scala/collection/immutable/RedBlackTree.scala | 485
-rw-r--r--  src/library/scala/collection/immutable/TreeMap.scala | 101
-rw-r--r--  src/library/scala/collection/immutable/TreeSet.scala | 91
-rw-r--r--  src/library/scala/collection/mutable/HashTable.scala | 32
-rw-r--r--  src/library/scala/collection/parallel/mutable/ParHashMap.scala | 10
-rw-r--r--  src/library/scala/concurrent/Awaitable.scala | 24
-rw-r--r--  src/library/scala/concurrent/Channel.scala | 5
-rw-r--r--  src/library/scala/concurrent/ConcurrentPackageObject.scala | 103
-rw-r--r--  src/library/scala/concurrent/DelayedLazyVal.scala | 11
-rw-r--r--  src/library/scala/concurrent/ExecutionContext.scala | 132
-rw-r--r--  src/library/scala/concurrent/Future.scala | 492
-rw-r--r--  src/library/scala/concurrent/FutureTaskRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/JavaConversions.scala | 7
-rw-r--r--  src/library/scala/concurrent/ManagedBlocker.scala | 1
-rw-r--r--  src/library/scala/concurrent/Promise.scala | 132
-rw-r--r--  src/library/scala/concurrent/Scheduler.scala | 54
-rw-r--r--  src/library/scala/concurrent/Task.scala | 13
-rw-r--r--  src/library/scala/concurrent/TaskRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/TaskRunners.scala | 1
-rw-r--r--  src/library/scala/concurrent/ThreadPoolRunner.scala | 1
-rw-r--r--  src/library/scala/concurrent/default/SchedulerImpl.scala.disabled | 44
-rw-r--r--  src/library/scala/concurrent/default/TaskImpl.scala.disabled | 313
-rw-r--r--  src/library/scala/concurrent/impl/AbstractPromise.java | 21
-rw-r--r--  src/library/scala/concurrent/impl/ExecutionContextImpl.scala | 134
-rw-r--r--  src/library/scala/concurrent/impl/Future.scala | 89
-rw-r--r--  src/library/scala/concurrent/impl/Promise.scala | 252
-rw-r--r--  src/library/scala/concurrent/ops.scala | 1
-rw-r--r--  src/library/scala/concurrent/package.scala | 57
-rw-r--r--  src/library/scala/concurrent/package.scala.disabled | 108
-rw-r--r--  src/library/scala/package.scala | 6
-rw-r--r--  src/library/scala/reflect/api/Trees.scala | 101
-rw-r--r--  src/library/scala/runtime/NonLocalReturnControl.scala | 4
-rw-r--r--  src/library/scala/sys/process/BasicIO.scala | 128
-rw-r--r--  src/library/scala/sys/process/Process.scala | 67
-rw-r--r--  src/library/scala/sys/process/ProcessBuilder.scala | 306
-rw-r--r--  src/library/scala/sys/process/ProcessIO.scala | 49
-rw-r--r--  src/library/scala/sys/process/ProcessLogger.scala | 26
-rw-r--r--  src/library/scala/sys/process/package.scala | 212
-rw-r--r--  src/library/scala/util/Duration.scala | 485
-rw-r--r--  src/library/scala/util/Timeout.scala | 33
-rw-r--r--  src/library/scala/util/Try.scala | 165
-rw-r--r--  src/partest/scala/tools/partest/nest/PathSettings.scala | 8
144 files changed, 11272 insertions(+), 5407 deletions(-)
diff --git a/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala b/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
index 257fe92a91..15ce60566a 100644
--- a/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
+++ b/src/actors/scala/actors/scheduler/DrainableForkJoinPool.scala
@@ -4,9 +4,9 @@ package scheduler
import java.util.Collection
import scala.concurrent.forkjoin.{ForkJoinPool, ForkJoinTask}
-private class DrainableForkJoinPool extends ForkJoinPool {
+private class DrainableForkJoinPool(parallelism: Int, maxPoolSize: Int) extends ForkJoinPool(parallelism, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true) {
- override def drainTasksTo(c: Collection[ForkJoinTask[_]]): Int =
+ override def drainTasksTo(c: Collection[ _ >: ForkJoinTask[_]]): Int =
super.drainTasksTo(c)
}
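The updated jsr166y ForkJoinPool fixes its configuration at construction time, which is why the mutating setters disappear from ForkJoinScheduler below. A minimal sketch of driving the new constructor (the parallelism value here is an arbitrary example):

    import scala.concurrent.forkjoin.ForkJoinPool

    // parallelism, worker-thread factory, uncaught-exception handler, async (FIFO) mode
    val pool = new ForkJoinPool(4, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true)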
diff --git a/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala b/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
index ba0f88c668..ce67ffd037 100644
--- a/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
+++ b/src/actors/scala/actors/scheduler/ForkJoinScheduler.scala
@@ -38,13 +38,8 @@ class ForkJoinScheduler(val initCoreSize: Int, val maxSize: Int, daemon: Boolean
}
private def makeNewPool(): DrainableForkJoinPool = {
- val p = new DrainableForkJoinPool()
- // enable locally FIFO scheduling mode
- p.setAsyncMode(true)
- p.setParallelism(initCoreSize)
- p.setMaximumPoolSize(maxSize)
+ val p = new DrainableForkJoinPool(initCoreSize, maxSize)
Debug.info(this+": parallelism "+p.getParallelism())
- Debug.info(this+": max pool size "+p.getMaximumPoolSize())
p
}
@@ -144,7 +139,7 @@ class ForkJoinScheduler(val initCoreSize: Int, val maxSize: Int, daemon: Boolean
ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker {
def block = blocker.block()
def isReleasable() = blocker.isReleasable
- }, true)
+ })
}
/** Suspends the scheduler. All threads that were in use by the
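The updated fork/join code also drops the two-argument managedBlock(blocker, maintainParallelism) overload, hence the call-site change above. A sketch of a blocker under the single-argument API:

    import scala.concurrent.forkjoin.ForkJoinPool

    ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker {
      def block(): Boolean = { Thread.sleep(10); true } // true: blocking is complete
      def isReleasable(): Boolean = false               // cannot release without blocking
    })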
diff --git a/src/build/genprod.scala b/src/build/genprod.scala
index a43b5e02c7..cce00321df 100644
--- a/src/build/genprod.scala
+++ b/src/build/genprod.scala
@@ -277,7 +277,7 @@ object TupleOne extends Tuple(1)
object TupleTwo extends Tuple(2)
{
override def imports = Tuple.zipImports
- override def covariantSpecs = "@specialized(Int, Long, Double) "
+ override def covariantSpecs = "@specialized(Int, Long, Double, Char, Boolean, AnyRef) "
override def moreMethods = """
/** Swaps the elements of this `Tuple`.
* @return a new Tuple where the first element is the second element of this Tuple and the
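Widening Tuple2's specialization list makes the compiler emit dedicated unboxed variants (Tuple2$mcII$sp for (Int, Int), and so on) for many more element combinations. A standalone analogue of what the generated source declares:

    // Pair[Int, Int] and friends get their own classes with primitive
    // fields, so construction and field access involve no boxing.
    case class Pair[@specialized(Int, Long, Double, Char, Boolean, AnyRef) +A,
                    @specialized(Int, Long, Double, Char, Boolean, AnyRef) +B](_1: A, _2: B)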
diff --git a/src/build/maven/continuations-plugin-pom.xml b/src/build/maven/continuations-plugin-pom.xml
index 0277b899ed..aca519b87e 100644
--- a/src/build/maven/continuations-plugin-pom.xml
+++ b/src/build/maven/continuations-plugin-pom.xml
@@ -6,6 +6,8 @@
<artifactId>continuations</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scala Continuations Plugin</name>
+ <description>Delimited continuations compilation for Scala</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2010</inceptionYear>
<organization>
@@ -48,4 +50,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/jline-pom.xml b/src/build/maven/jline-pom.xml
index 0c96c1374e..4752deb5e0 100644
--- a/src/build/maven/jline-pom.xml
+++ b/src/build/maven/jline-pom.xml
@@ -6,6 +6,8 @@
<artifactId>jline</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>jline</name>
+ <description>Like readline, but better</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2011</inceptionYear>
<organization>
@@ -54,4 +56,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/maven-deploy.xml b/src/build/maven/maven-deploy.xml
index 679f45ab54..2e490163e0 100644
--- a/src/build/maven/maven-deploy.xml
+++ b/src/build/maven/maven-deploy.xml
@@ -27,6 +27,7 @@
<!-- Add our maven ant tasks -->
<path id="maven-ant-tasks.classpath" path="maven-ant-tasks-2.1.1.jar" />
<typedef resource="org/apache/maven/artifact/ant/antlib.xml" uri="urn:maven-artifact-ant" classpathref="maven-ant-tasks.classpath" />
+
<!-- simplify fixing pom versions -->
<macrodef name="make-pom">
<attribute name="name" />
@@ -57,13 +58,6 @@
<artifact:pom id="plugin-@{name}.pom" file="plugins/@{name}/@{name}-pom-fixed.xml" />
</sequential>
</macrodef>
- <!-- Simply attaching documentation -->
- <macrodef name="attach-doc">
- <attribute name="name" />
- <sequential>
- <artifact:attach type="jar" file="@{name}/@{name}-docs.jar" classifier="javadoc" />
- </sequential>
- </macrodef>
</target>
<!-- macros for local deployment -->
<target name="deploy.local.init" depends="init.maven">
@@ -79,6 +73,7 @@
<artifact:pom refid="@{name}.pom" />
<artifact:localRepository path="@{repository}" id="${repository.credentials.id}" />
<artifact:attach type="jar" file="@{name}/@{name}-src.jar" classifier="sources" />
+ <artifact:attach type="jar" file="@{name}/@{name}-docs.jar" classifier="javadoc" />
<extra-attachments />
</artifact:install>
</sequential>
@@ -94,6 +89,8 @@
<make-pom-plugin name="@{name}" version="@{version}" />
<artifact:install file="plugins/@{name}/@{name}.jar">
<artifact:pom refid="plugin-@{name}.pom" />
+ <artifact:attach type="jar" file="plugins/@{name}/@{name}-src.jar" classifier="sources" />
+ <artifact:attach type="jar" file="plugins/@{name}/@{name}-docs.jar" classifier="javadoc" />
<artifact:localRepository path="@{repository}" id="${repository.credentials.id}" />
<extra-attachments />
</artifact:install>
@@ -106,24 +103,14 @@
<attribute name="repository" />
<attribute name="version" />
<sequential>
- <deploy-local name="scala-library" version="@{version}" repository="@{repository}">
- <extra-attachments>
- <artifact:attach type="jar" file="scala-library/scala-library-docs.jar" classifier="javadoc" />
- </extra-attachments>
- </deploy-local>
- <deploy-local name="jline" version="@{version}" repository="@{repository}"/>
+ <deploy-local name="scala-library" version="@{version}" repository="@{repository}" />
<deploy-local name="scala-compiler" version="@{version}" repository="@{repository}" />
+ <deploy-local-plugin name="continuations" version="@{version}" repository="@{repository}"/>
<deploy-local name="scala-dbc" version="@{version}" repository="@{repository}" />
<deploy-local name="scala-swing" version="@{version}" repository="@{repository}"/>
<deploy-local name="scalap" version="@{version}" repository="@{repository}"/>
<deploy-local name="scala-partest" version="@{version}" repository="@{repository}"/>
- <deploy-local-plugin name="continuations" version="@{version}" repository="@{repository}"/>
- <!-- scala swing api is included in main library api
- <extra-attachments>
- <artifact:attach type="jar" file="scala-swing/scala-swing-docs.jar" classifier="javadoc" />
- </extra-attachments>
- </deploy-local>
- -->
+ <deploy-local name="jline" version="@{version}" repository="@{repository}"/>
</sequential>
</macrodef>
</target>
@@ -142,13 +129,13 @@
<artifact:pom refid="@{name}.pom" />
<artifact:remoteRepository url="@{repository}" id="${repository.credentials.id}" />
<artifact:attach type="jar" file="@{name}/@{name}-src.jar" classifier="sources" />
+ <artifact:attach type="jar" file="@{name}/@{name}-docs.jar" classifier="javadoc" />
<extra-attachments />
</artifact:deploy>
</sequential>
</macrodef>
-
- <!-- Deploy compiler plugins -->
+ <!-- Deploy compiler plugins -->
<macrodef name="deploy-remote-plugin">
<attribute name="name" />
<attribute name="version" />
@@ -158,6 +145,8 @@
<make-pom-plugin name="@{name}" version="@{version}" />
<artifact:deploy file="plugins/@{name}/@{name}.jar" settingsFile="${settings.file}">
<artifact:pom refid="plugin-@{name}.pom" />
+ <artifact:attach type="jar" file="plugins/@{name}/@{name}-src.jar" classifier="sources" />
+ <artifact:attach type="jar" file="plugins/@{name}/@{name}-docs.jar" classifier="javadoc" />
<artifact:remoteRepository url="@{repository}" id="${repository.credentials.id}" />
<extra-attachments />
</artifact:deploy>
@@ -181,12 +170,72 @@
<deploy-remote name="scalap" version="@{version}" repository="@{repository}"/>
<deploy-remote name="scala-partest" version="@{version}" repository="@{repository}"/>
<deploy-remote-plugin name="continuations" version="@{version}" repository="@{repository}"/>
- <!-- scala swing api is included in main library api
- <extra-attachments>
- <artifact:attach type="jar" file="scala-swing/scala-swing-docs.jar" classifier="javadoc" />
- </extra-attachments>
- </deploy-remote>
- -->
+ </sequential>
+ </macrodef>
+
+ <!-- PGP Signed deployment -->
+ <macrodef name="deploy-remote-signed-single">
+ <attribute name="pom" />
+ <attribute name="repository" />
+ <attribute name="jar" />
+ <attribute name="srcjar" />
+ <attribute name="docjar" />
+ <sequential>
+ <artifact:mvn>
+ <arg value="org.apache.maven.plugins:maven-gpg-plugin:1.3:sign-and-deploy-file" />
+ <arg value="-Durl=@{repository}" />
+ <arg value="-DrepositoryId=${repository.credentials.id}" />
+ <arg value="-DpomFile=@{pom}" />
+ <arg value="-Dfile=@{jar}" />
+ <arg value="-Dsources=@{srcjar}" />
+ <arg value="-Djavadoc=@{docjar}" />
+ <arg value="-Pgpg" />
+ <arg value="-Dgpg.useagent=true" />
+ </artifact:mvn>
+ </sequential>
+ </macrodef>
+ <macrodef name="deploy-remote-signed">
+ <attribute name="name" />
+ <attribute name="repository" />
+ <attribute name="version" />
+ <element name="extra-attachments" optional="yes" />
+ <sequential>
+ <make-pom name="@{name}" version="@{version}" />
+ <deploy-remote-signed-single
+ pom="@{name}/@{name}-pom-fixed.xml"
+ repository="@{repository}"
+ jar="@{name}/@{name}.jar"
+ srcjar="@{name}/@{name}-src.jar"
+ docjar="@{name}/@{name}-docs.jar" />
+ </sequential>
+ </macrodef>
+ <macrodef name="deploy-remote-plugin-signed">
+ <attribute name="name" />
+ <attribute name="repository" />
+ <attribute name="version" />
+ <element name="extra-attachments" optional="yes" />
+ <sequential>
+ <make-pom-plugin name="@{name}" version="@{version}" />
+ <deploy-remote-signed-single
+ pom="plugins/@{name}/@{name}-pom-fixed.xml"
+ repository="@{repository}"
+ jar="plugins/@{name}/@{name}.jar"
+ srcjar="plugins/@{name}/@{name}-src.jar"
+ docjar="plugins/@{name}/@{name}-docs.jar" />
+ </sequential>
+ </macrodef>
+ <macrodef name="deploy-remote-signed-all">
+ <attribute name="repository" />
+ <attribute name="version" />
+ <sequential>
+ <deploy-remote-plugin-signed name="continuations" version="@{version}" repository="@{repository}"/>
+ <deploy-remote-signed name="scala-library" version="@{version}" repository="@{repository}"/>
+ <deploy-remote-signed name="jline" version="@{version}" repository="@{repository}"/>
+ <deploy-remote-signed name="scala-compiler" version="@{version}" repository="@{repository}" />
+ <deploy-remote-signed name="scala-dbc" version="@{version}" repository="@{repository}" />
+ <deploy-remote-signed name="scala-swing" version="@{version}" repository="@{repository}"/>
+ <deploy-remote-signed name="scalap" version="@{version}" repository="@{repository}"/>
+ <deploy-remote-signed name="scala-partest" version="@{version}" repository="@{repository}"/>
</sequential>
</macrodef>
</target>
@@ -201,6 +250,14 @@
</target>
<!-- Remote Targets -->
+ <target name="deploy.signed.snapshot" depends="deploy.remote.init" description="Deploys the bundled files as a snapshot into the desired remote Maven repository">
+ <deploy-remote-signed-all version="${maven.snapshot.version.number}" repository="${remote.snapshot.repository}" />
+ </target>
+
+ <target name="deploy.signed.release" depends="deploy.remote.init" description="Deploys the bundled files as a release into the desired remote Maven repository">
+ <deploy-remote-signed-all version="${version.number}" repository="${remote.release.repository}" />
+ </target>
+
<target name="deploy.snapshot" depends="deploy.remote.init" description="Deploys the bundled files as a snapshot into the desired remote Maven repository">
<deploy-remote-all version="${maven.snapshot.version.number}" repository="${remote.snapshot.repository}" />
</target>
diff --git a/src/build/maven/scala-compiler-pom.xml b/src/build/maven/scala-compiler-pom.xml
index 520c5fd5fd..f9bcb6719d 100644
--- a/src/build/maven/scala-compiler-pom.xml
+++ b/src/build/maven/scala-compiler-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scala-compiler</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scala Compiler</name>
+ <description>Compiler for the Scala Programming Language</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -54,4 +56,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/scala-dbc-pom.xml b/src/build/maven/scala-dbc-pom.xml
index 6c1fa4529c..23092d10ad 100644
--- a/src/build/maven/scala-dbc-pom.xml
+++ b/src/build/maven/scala-dbc-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scala-dbc</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scala Database Connectivity</name>
+ <description>Connectivity for your DBs</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -47,4 +49,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/scala-library-pom.xml b/src/build/maven/scala-library-pom.xml
index 836ff4766a..8e0abd4937 100644
--- a/src/build/maven/scala-library-pom.xml
+++ b/src/build/maven/scala-library-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scala-library</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scala Library</name>
+ <description>Standard library for the Scala Programming Language</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -40,4 +42,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/scala-partest-pom.xml b/src/build/maven/scala-partest-pom.xml
index 3dc330672a..f18ca46c50 100644
--- a/src/build/maven/scala-partest-pom.xml
+++ b/src/build/maven/scala-partest-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scala-partest</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Parallel Test Framework</name>
+ <description>testing framework for the Scala compiler.</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -48,4 +50,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/scala-swing-pom.xml b/src/build/maven/scala-swing-pom.xml
index 06b799c7b7..a03bc07ab0 100644
--- a/src/build/maven/scala-swing-pom.xml
+++ b/src/build/maven/scala-swing-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scala-swing</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scala Swing library</name>
+ <description>Swing for Scala</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -47,4 +49,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/maven/scalap-pom.xml b/src/build/maven/scalap-pom.xml
index 3326e2d350..d7f867d4a1 100644
--- a/src/build/maven/scalap-pom.xml
+++ b/src/build/maven/scalap-pom.xml
@@ -6,6 +6,8 @@
<artifactId>scalap</artifactId>
<packaging>jar</packaging>
<version>@VERSION@</version>
+ <name>Scalap</name>
+ <description>bytecode analysis tool</description>
<url>http://www.scala-lang.org/</url>
<inceptionYear>2002</inceptionYear>
<organization>
@@ -48,4 +50,14 @@
<uniqueVersion>false</uniqueVersion>
</snapshotRepository>
</distributionManagement>
+ <developers>
+ <developer>
+ <id>lamp</id>
+ <name>EPFL LAMP</name>
+ </developer>
+ <developer>
+ <id>Typesafe</id>
+ <name>Typesafe, Inc.</name>
+ </developer>
+ </developers>
</project>
diff --git a/src/build/pack.xml b/src/build/pack.xml
index d022ac3f05..90aec8e25b 100644
--- a/src/build/pack.xml
+++ b/src/build/pack.xml
@@ -251,23 +251,50 @@ MAIN DISTRIBUTION SBAZ
</target>
<target name="pack-maven.srcs" depends="pack-maven.libs">
+ <!-- Add missing src jars. -->
<jar destfile="${dists.dir}/maven/${version.number}/jline/jline-src.jar"
basedir="${src.dir}/jline/src/main/java">
<include name="**/*"/>
</jar>
+
+
+ <!-- Continuations plugin -->
+ <jar destfile="${dists.dir}/maven/${version.number}/plugins/continuations/continuations-src.jar"
+ basedir="${src.dir}/continuations/plugin">
+ <include name="**/*"/>
+ </jar>
</target>
<target name="pack-maven.docs" depends="pack-maven.libs, pack-maven.plugins">
+ <jar destfile="${dists.dir}/maven/${version.number}/jline/jline-docs.jar"
+ basedir="${build-docs.dir}/jline">
+ <include name="**/*"/>
+ </jar>
<jar destfile="${dists.dir}/maven/${version.number}/scala-library/scala-library-docs.jar"
basedir="${build-docs.dir}/library">
<include name="**/*"/>
</jar>
- <!-- scala-swing api is included in main library api
- <jar destfile="${dists.dir}/maven/${version.number}/scala-swing/scala-swing-docs.jar"
- basedir="${build-docs.dir}/swing">
+ <jar destfile="${dists.dir}/maven/${version.number}/scala-compiler/scala-compiler-docs.jar"
+ basedir="${build-docs.dir}/compiler">
+ <include name="**/*"/>
+ </jar>
+ <jar destfile="${dists.dir}/maven/${version.number}/scalap/scalap-docs.jar"
+ basedir="${build-docs.dir}/scalap">
+ <include name="**/*"/>
+ </jar>
+ <jar destfile="${dists.dir}/maven/${version.number}/scala-partest/scala-partest-docs.jar"
+ basedir="${build-docs.dir}/scala-partest">
+ <include name="**/*"/>
+ </jar>
+ <jar destfile="${dists.dir}/maven/${version.number}/plugins/continuations/continuations-docs.jar"
+ basedir="${build-docs.dir}/continuations-plugin">
<include name="**/*"/>
</jar>
- -->
+ <!-- TODO - Scala swing, dbc should maybe have their own jar, but creating it is SLOW. -->
+ <copy tofile="${dists.dir}/maven/${version.number}/scala-swing/scala-swing-docs.jar"
+ file="${dists.dir}/maven/${version.number}/scala-library/scala-library-docs.jar"/>
+ <copy tofile="${dists.dir}/maven/${version.number}/scala-dbc/scala-dbc-docs.jar"
+ file="${dists.dir}/maven/${version.number}/scala-library/scala-library-docs.jar"/>
</target>
<target name="pack-maven.latest.unix" depends="pack-maven.docs" unless="os.win">
diff --git a/src/compiler/scala/reflect/internal/Definitions.scala b/src/compiler/scala/reflect/internal/Definitions.scala
index 485955c5c4..e87f7d65b0 100644
--- a/src/compiler/scala/reflect/internal/Definitions.scala
+++ b/src/compiler/scala/reflect/internal/Definitions.scala
@@ -70,8 +70,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
tpnme.Float -> FLOAT_TAG,
tpnme.Double -> DOUBLE_TAG,
tpnme.Boolean -> BOOL_TAG,
- tpnme.Unit -> VOID_TAG,
- tpnme.Object -> OBJECT_TAG
+ tpnme.Unit -> VOID_TAG
)
private def classesMap[T](f: Name => T) = symbolsMap(ScalaValueClassesNoUnit, f)
@@ -80,7 +79,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
private def boxedName(name: Name) = sn.Boxed(name.toTypeName)
- lazy val abbrvTag = symbolsMap(ObjectClass :: ScalaValueClasses, nameToTag)
+ lazy val abbrvTag = symbolsMap(ScalaValueClasses, nameToTag) withDefaultValue OBJECT_TAG
lazy val numericWeight = symbolsMapFilt(ScalaValueClasses, nameToWeight.keySet, nameToWeight)
lazy val boxedModule = classesMap(x => getModule(boxedName(x)))
lazy val boxedClass = classesMap(x => getClass(boxedName(x)))
@@ -213,7 +212,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
// Note: this is not the type alias AnyRef, it's a companion-like
// object used by the @specialize annotation.
- def AnyRefModule = getMember(ScalaPackageClass, nme.AnyRef)
+ lazy val AnyRefModule = getMember(ScalaPackageClass, nme.AnyRef)
@deprecated("Use AnyRefModule", "2.10.0")
def Predef_AnyRef = AnyRefModule
@@ -275,6 +274,7 @@ trait Definitions extends reflect.api.StandardDefinitions {
def Predef_identity = getMember(PredefModule, nme.identity)
def Predef_conforms = getMember(PredefModule, nme.conforms)
def Predef_wrapRefArray = getMember(PredefModule, nme.wrapRefArray)
+ def Predef_??? = getMember(PredefModule, nme.???)
/** Is `sym` a member of Predef with the given name?
* Note: DON't replace this by sym == Predef_conforms/etc, as Predef_conforms is a `def`
@@ -815,6 +815,9 @@ trait Definitions extends reflect.api.StandardDefinitions {
try getModule(fullname.toTermName)
catch { case _: MissingRequirementError => NoSymbol }
+ def termMember(owner: Symbol, name: String): Symbol = owner.info.member(newTermName(name))
+ def typeMember(owner: Symbol, name: String): Symbol = owner.info.member(newTypeName(name))
+
def getMember(owner: Symbol, name: Name): Symbol = {
if (owner == NoSymbol) NoSymbol
else owner.info.nonPrivateMember(name) match {
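The abbrvTag change above replaces the explicit ObjectClass entry with a map-wide fallback: withDefaultValue makes a lookup of any class outside the map yield OBJECT_TAG without creating an entry. A minimal analogue with made-up values:

    val tag = Map("Int" -> 'I', "Double" -> 'D') withDefaultValue 'L'
    tag("Int")     // 'I'
    tag("String")  // 'L' -- the default; no entry is added to the map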
diff --git a/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala b/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala
new file mode 100644
index 0000000000..47f794681c
--- /dev/null
+++ b/src/compiler/scala/reflect/internal/ExistentialsAndSkolems.scala
@@ -0,0 +1,50 @@
+/* NSC -- new scala compiler
+ * Copyright 2005-2011 LAMP/EPFL
+ * @author Martin Odersky
+ */
+
+package scala.reflect
+package internal
+
+import scala.collection.{ mutable, immutable }
+import util._
+
+/** The name of this trait defines the eventual intent better than
+ * it does the initial contents.
+ */
+trait ExistentialsAndSkolems {
+ self: SymbolTable =>
+
+ /** Map a list of type parameter symbols to skolemized symbols, which
+ * can be deskolemized to the original type parameter. (A skolem is a
+ * representation of a bound variable when viewed inside its scope.)
+ * !!!Adriaan: this does not work for hk types.
+ */
+ def deriveFreshSkolems(tparams: List[Symbol]): List[Symbol] = {
+ class Deskolemizer extends LazyType {
+ override val typeParams = tparams
+ val typeSkolems = typeParams map (_.newTypeSkolem setInfo this)
+ override def complete(sym: Symbol) {
+ // The info of a skolem is the skolemized info of the
+ // actual type parameter of the skolem
+ sym setInfo sym.deSkolemize.info.substSym(typeParams, typeSkolems)
+ }
+ }
+ (new Deskolemizer).typeSkolems
+ }
+
+ /** Convert to corresponding type parameters all skolems of method
+ * parameters which appear in `tparams`.
+ */
+ def deskolemizeTypeParams(tparams: List[Symbol])(tp: Type): Type = {
+ class DeSkolemizeMap extends TypeMap {
+ def apply(tp: Type): Type = tp match {
+ case TypeRef(pre, sym, args) if sym.isTypeSkolem && (tparams contains sym.deSkolemize) =>
+ mapOver(typeRef(NoPrefix, sym.deSkolemize, args))
+ case _ =>
+ mapOver(tp)
+ }
+ }
+ new DeSkolemizeMap mapOver tp
+ }
+}
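For orientation, the idea the new trait's comments refer to, as a source-level sketch:

    class Fruit
    object SkolemDemo {
      // Inside f's body, T behaves as a fresh abstract type -- a skolem --
      // about which nothing is known except its bound T <: Fruit.
      // deriveFreshSkolems manufactures such skolem symbols for a whole
      // type-parameter list at once.
      def f[T <: Fruit](x: T): T = x
    }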
diff --git a/src/compiler/scala/reflect/internal/NameManglers.scala b/src/compiler/scala/reflect/internal/NameManglers.scala
index 97a74c2383..e43a0906a8 100644
--- a/src/compiler/scala/reflect/internal/NameManglers.scala
+++ b/src/compiler/scala/reflect/internal/NameManglers.scala
@@ -76,12 +76,14 @@ trait NameManglers {
val PROTECTED_PREFIX = "protected$"
val PROTECTED_SET_PREFIX = PROTECTED_PREFIX + "set"
val SINGLETON_SUFFIX = ".type"
- val SPECIALIZED_SUFFIX_STRING = "$sp"
val SUPER_PREFIX_STRING = "super$"
val TRAIT_SETTER_SEPARATOR_STRING = "$_setter_$"
+ val SETTER_SUFFIX: TermName = encode("_=")
- val SETTER_SUFFIX: TermName = encode("_=")
- val SPECIALIZED_SUFFIX_NAME: TermName = SPECIALIZED_SUFFIX_STRING
+ @deprecated("Use SPECIALIZED_SUFFIX", "2.10.0")
+ def SPECIALIZED_SUFFIX_STRING = SPECIALIZED_SUFFIX.toString
+ @deprecated("Use SPECIALIZED_SUFFIX", "2.10.0")
+ def SPECIALIZED_SUFFIX_NAME: TermName = SPECIALIZED_SUFFIX.toTermName
def isConstructorName(name: Name) = name == CONSTRUCTOR || name == MIXIN_CONSTRUCTOR
def isExceptionResultName(name: Name) = name startsWith EXCEPTION_RESULT_PREFIX
@@ -90,6 +92,7 @@ trait NameManglers {
def isLocalName(name: Name) = name endsWith LOCAL_SUFFIX_STRING
def isLoopHeaderLabel(name: Name) = (name startsWith WHILE_PREFIX) || (name startsWith DO_WHILE_PREFIX)
def isProtectedAccessorName(name: Name) = name startsWith PROTECTED_PREFIX
+ def isSuperAccessorName(name: Name) = name startsWith SUPER_PREFIX_STRING
def isReplWrapperName(name: Name) = name containsName INTERPRETER_IMPORT_WRAPPER
def isSetterName(name: Name) = name endsWith SETTER_SUFFIX
def isTraitSetterName(name: Name) = isSetterName(name) && (name containsName TRAIT_SETTER_SEPARATOR_STRING)
@@ -120,7 +123,7 @@ trait NameManglers {
}
def unspecializedName(name: Name): Name = (
- if (name endsWith SPECIALIZED_SUFFIX_NAME)
+ if (name endsWith SPECIALIZED_SUFFIX)
name.subName(0, name.lastIndexOf('m') - 1)
else name
)
@@ -140,8 +143,8 @@ trait NameManglers {
* and another one belonging to the enclosing class, on Double.
*/
def splitSpecializedName(name: Name): (Name, String, String) =
- if (name endsWith SPECIALIZED_SUFFIX_NAME) {
- val name1 = name dropRight SPECIALIZED_SUFFIX_NAME.length
+ if (name endsWith SPECIALIZED_SUFFIX) {
+ val name1 = name dropRight SPECIALIZED_SUFFIX.length
val idxC = name1 lastIndexOf 'c'
val idxM = name1 lastIndexOf 'm'
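Per the doc comment above, a hypothetical probe of the split (assuming a SymbolTable instance `global` in scope):

    import global._
    nme.splitSpecializedName(newTermName("foo$mIcD$sp"))
    // recovers the base name "foo" plus the two suffix groups:
    //   "I" (after 'm'): the method's own type parameter, specialized on Int
    //   "D" (after 'c'): the enclosing class's type parameter, on Double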
diff --git a/src/compiler/scala/reflect/internal/StdNames.scala b/src/compiler/scala/reflect/internal/StdNames.scala
index 1f67bbc0ac..bcd3fc8b14 100644
--- a/src/compiler/scala/reflect/internal/StdNames.scala
+++ b/src/compiler/scala/reflect/internal/StdNames.scala
@@ -94,11 +94,13 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val EMPTY: NameType = ""
val ANON_FUN_NAME: NameType = "$anonfun"
+ val ANON_CLASS_NAME: NameType = "$anon"
val EMPTY_PACKAGE_NAME: NameType = "<empty>"
val IMPORT: NameType = "<import>"
val MODULE_VAR_SUFFIX: NameType = "$module"
val ROOT: NameType = "<root>"
val PACKAGE: NameType = "package"
+ val SPECIALIZED_SUFFIX: NameType = "$sp"
// value types (and AnyRef) are all used as terms as well
// as (at least) arguments to the @specialize annotation.
@@ -259,6 +261,8 @@ trait StdNames extends NameManglers { self: SymbolTable =>
case _ => newTermName("x$" + i)
}
+ val ??? = encode("???")
+
val wrapRefArray: NameType = "wrapRefArray"
val wrapByteArray: NameType = "wrapByteArray"
val wrapShortArray: NameType = "wrapShortArray"
@@ -330,6 +334,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val freeValue : NameType = "freeValue"
val genericArrayOps: NameType = "genericArrayOps"
val get: NameType = "get"
+ val getOrElse: NameType = "getOrElse"
val hasNext: NameType = "hasNext"
val hashCode_ : NameType = if (forMSIL) "GetHashCode" else "hashCode"
val hash_ : NameType = "hash"
@@ -342,6 +347,7 @@ trait StdNames extends NameManglers { self: SymbolTable =>
val isInstanceOf_ : NameType = "isInstanceOf"
val isInstanceOf_Ob : NameType = "$isInstanceOf"
val java: NameType = "java"
+ val key: NameType = "key"
val lang: NameType = "lang"
val length: NameType = "length"
val lengthCompare: NameType = "lengthCompare"
@@ -438,7 +444,6 @@ trait StdNames extends NameManglers { self: SymbolTable =>
protected implicit def createNameType(name: String): TypeName = newTypeNameCached(name)
val REFINE_CLASS_NAME: NameType = "<refinement>"
- val ANON_CLASS_NAME: NameType = "$anon"
}
/** For fully qualified type names.
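The new nme.??? entry gives the compiler a name for Predef's not-implemented stub (see Predef_??? in the Definitions change above), whose definition is simply:

    // Throws rather than returns, so its result type can be Nothing --
    // which is what lets mkZero (in the TreeGen change below) use it
    // as the "zero" of type Nothing.
    def ??? : Nothing = throw new NotImplementedError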
diff --git a/src/compiler/scala/reflect/internal/SymbolTable.scala b/src/compiler/scala/reflect/internal/SymbolTable.scala
index 1973a97279..4bcf522a8f 100644
--- a/src/compiler/scala/reflect/internal/SymbolTable.scala
+++ b/src/compiler/scala/reflect/internal/SymbolTable.scala
@@ -15,6 +15,7 @@ abstract class SymbolTable extends api.Universe
with Symbols
with Types
with Kinds
+ with ExistentialsAndSkolems
with Scopes
with Definitions
with Constants
@@ -77,16 +78,18 @@ abstract class SymbolTable extends api.Universe
type RunId = Int
final val NoRunId = 0
+ private var phStack: List[Phase] = Nil
private var ph: Phase = NoPhase
private var per = NoPeriod
+ final def atPhaseStack: List[Phase] = phStack
final def phase: Phase = ph
final def phase_=(p: Phase) {
//System.out.println("setting phase to " + p)
- assert((p ne null) && p != NoPhase)
+ assert((p ne null) && p != NoPhase, p)
ph = p
- per = (currentRunId << 8) + p.id
+ per = period(currentRunId, p.id)
}
/** The current compiler run identifier. */
@@ -111,14 +114,18 @@ abstract class SymbolTable extends api.Universe
final def phaseOf(period: Period): Phase = phaseWithId(phaseId(period))
final def period(rid: RunId, pid: Phase#Id): Period =
- (currentRunId << 8) + pid
+ (rid << 8) + pid
/** Perform given operation at given phase. */
@inline final def atPhase[T](ph: Phase)(op: => T): T = {
val current = phase
phase = ph
+ phStack ::= ph
try op
- finally phase = current
+ finally {
+ phase = current
+ phStack = phStack.tail
+ }
}
/** Since when it is to be "at" a phase is inherently ambiguous,
* a couple unambiguously named methods.
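The period fix matters because a Period packs a run id and a phase id into one Int, and the old body read currentRunId instead of its rid argument. The packing scheme as a standalone sketch:

    type Period = Int
    def period(rid: Int, pid: Int): Period = (rid << 8) + pid  // low 8 bits: phase id
    def phaseId(p: Period): Int = p & 0xFF
    def runId(p: Period): Int   = p >>> 8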
diff --git a/src/compiler/scala/reflect/internal/Symbols.scala b/src/compiler/scala/reflect/internal/Symbols.scala
index 25d6192e74..e997405c77 100644
--- a/src/compiler/scala/reflect/internal/Symbols.scala
+++ b/src/compiler/scala/reflect/internal/Symbols.scala
@@ -1024,8 +1024,11 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
/** Modifies this symbol's info in place. */
def modifyInfo(f: Type => Type): this.type = setInfo(f(info))
/** Substitute second list of symbols for first in current info. */
- def substInfo(syms0: List[Symbol], syms1: List[Symbol]) = modifyInfo(_.substSym(syms0, syms1))
- def setInfoOwnerAdjusted(info: Type): this.type = setInfo(info atOwner this)
+ def substInfo(syms0: List[Symbol], syms1: List[Symbol]): this.type =
+ if (syms0.isEmpty) this
+ else modifyInfo(_.substSym(syms0, syms1))
+
+ def setInfoOwnerAdjusted(info: Type): this.type = setInfo(info atOwner this)
/** Set the info and enter this symbol into the owner's scope. */
def setInfoAndEnter(info: Type): this.type = {
@@ -1381,15 +1384,25 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
cloneSymbol(owner)
/** A clone of this symbol, but with given owner. */
- final def cloneSymbol(owner: Symbol): Symbol = cloneSymbol(owner, this.rawflags)
- final def cloneSymbol(owner: Symbol, newFlags: Long): Symbol = {
- val newSym = cloneSymbolImpl(owner, newFlags)
- ( newSym
+ final def cloneSymbol(newOwner: Symbol): Symbol =
+ cloneSymbol(newOwner, this.rawflags)
+ final def cloneSymbol(newOwner: Symbol, newFlags: Long): Symbol =
+ cloneSymbol(newOwner, newFlags, nme.NO_NAME)
+ final def cloneSymbol(newOwner: Symbol, newFlags: Long, newName: Name): Symbol = {
+ val clone = cloneSymbolImpl(newOwner, newFlags)
+ ( clone
setPrivateWithin privateWithin
- setInfo (info cloneInfo newSym)
+ setInfo (this.info cloneInfo clone)
setAnnotations this.annotations
)
+ if (clone.thisSym != clone)
+ clone.typeOfThis = (clone.typeOfThis cloneInfo clone)
+ if (newName != nme.NO_NAME)
+ clone.name = newName
+
+ clone
}
+
/** Internal method to clone a symbol's implementation with the given flags and no info. */
def cloneSymbolImpl(owner: Symbol, newFlags: Long): Symbol
def cloneSymbolImpl(owner: Symbol): Symbol = cloneSymbolImpl(owner, 0L)
@@ -2325,7 +2338,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
*/
def existentialBound: Type = abort("unexpected type: "+this.getClass+ " "+debugLocationString)
- override def name: TypeName = super.name.asInstanceOf[TypeName]
+ override def name: TypeName = super.name.toTypeName
final override def isType = true
override def isNonClassType = true
override def isAbstractType = {
@@ -2702,6 +2715,8 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
*/
def cloneSymbolsAndModify(syms: List[Symbol], infoFn: Type => Type): List[Symbol] =
cloneSymbols(syms) map (_ modifyInfo infoFn)
+ def cloneSymbolsAtOwnerAndModify(syms: List[Symbol], owner: Symbol, infoFn: Type => Type): List[Symbol] =
+ cloneSymbolsAtOwner(syms, owner) map (_ modifyInfo infoFn)
/** Functions which perform the standard clone/substituting on the given symbols and type,
* then call the creator function with the new symbols and type as arguments.
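The three-argument cloneSymbol turns clone-and-rename into a single call, with nme.NO_NAME as the "keep the old name" sentinel. Hypothetical use, where `sym` and `newOwner` are placeholders:

    val renamed = sym.cloneSymbol(newOwner, sym.rawflags, newTermName("sym$copy"))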
diff --git a/src/compiler/scala/reflect/internal/TreeGen.scala b/src/compiler/scala/reflect/internal/TreeGen.scala
index e537c6b83f..8c2a067d4d 100644
--- a/src/compiler/scala/reflect/internal/TreeGen.scala
+++ b/src/compiler/scala/reflect/internal/TreeGen.scala
@@ -250,20 +250,22 @@ abstract class TreeGen {
* var x: T = _
* which is appropriate to the given Type.
*/
- def mkZero(tp: Type): Tree = {
- val tree = tp.typeSymbol match {
- case UnitClass => Literal(Constant())
- case BooleanClass => Literal(Constant(false))
- case FloatClass => Literal(Constant(0.0f))
- case DoubleClass => Literal(Constant(0.0d))
- case ByteClass => Literal(Constant(0.toByte))
- case ShortClass => Literal(Constant(0.toShort))
- case IntClass => Literal(Constant(0))
- case LongClass => Literal(Constant(0L))
- case CharClass => Literal(Constant(0.toChar))
- case _ => Literal(Constant(null))
- }
- tree setType tp
+ def mkZero(tp: Type): Tree = tp.typeSymbol match {
+ case NothingClass => mkMethodCall(Predef_???, Nil) setType NothingClass.tpe
+ case _ => Literal(mkConstantZero(tp)) setType tp
+ }
+
+ def mkConstantZero(tp: Type): Constant = tp.typeSymbol match {
+ case UnitClass => Constant(())
+ case BooleanClass => Constant(false)
+ case FloatClass => Constant(0.0f)
+ case DoubleClass => Constant(0.0d)
+ case ByteClass => Constant(0.toByte)
+ case ShortClass => Constant(0.toShort)
+ case IntClass => Constant(0)
+ case LongClass => Constant(0L)
+ case CharClass => Constant(0.toChar)
+ case _ => Constant(null)
}
def mkZeroContravariantAfterTyper(tp: Type): Tree = {
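A plain-Scala analogue of what mkZero now produces per requested type; the Nothing case is the new behaviour: since no zero value of that type exists, the tree calls Predef.???, which throws.

    def zeroOf(tpe: String): Any = tpe match {
      case "Unit"    => ()
      case "Boolean" => false
      case "Int"     => 0
      case "Long"    => 0L
      case "Nothing" => ???   // throws NotImplementedError
      case _         => null  // any reference type
    }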
diff --git a/src/compiler/scala/reflect/internal/Trees.scala b/src/compiler/scala/reflect/internal/Trees.scala
index 958d04732b..3782b24c02 100644
--- a/src/compiler/scala/reflect/internal/Trees.scala
+++ b/src/compiler/scala/reflect/internal/Trees.scala
@@ -224,22 +224,24 @@ trait Trees extends api.Trees { self: SymbolTable =>
LabelDef(sym.name.toTermName, params map Ident, rhs) setSymbol sym
}
-
/** casedef shorthand */
def CaseDef(pat: Tree, body: Tree): CaseDef = CaseDef(pat, EmptyTree, body)
def Bind(sym: Symbol, body: Tree): Bind =
Bind(sym.name, body) setSymbol sym
- /** 0-1 argument list new, based on a symbol.
- */
- def New(sym: Symbol, args: Tree*): Tree =
- if (args.isEmpty) New(TypeTree(sym.tpe))
- else New(TypeTree(sym.tpe), List(args.toList))
+ def Try(body: Tree, cases: (Tree, Tree)*): Try =
+ Try(body, cases.toList map { case (pat, rhs) => CaseDef(pat, EmptyTree, rhs) }, EmptyTree)
+
+ def Throw(tpe: Type, args: Tree*): Throw =
+ Throw(New(tpe, args: _*))
def Apply(sym: Symbol, args: Tree*): Tree =
Apply(Ident(sym), args.toList)
+ def New(sym: Symbol, args: Tree*): Tree =
+ New(sym.tpe, args: _*)
+
def Super(sym: Symbol, mix: TypeName): Tree = Super(This(sym), mix)
/** Block factory that flattens directly nested blocks.
@@ -271,7 +273,18 @@ trait Trees extends api.Trees { self: SymbolTable =>
override def traverse(t: Tree) {
if (t != EmptyTree && t.pos == NoPosition) {
t.setPos(pos)
- super.traverse(t) // TODO: bug? shouldn't the traverse be outside of the if?
+ super.traverse(t) // TODO: bug? shouldn't the traverse be outside of the if?
+ // @PP: it's pruning whenever it encounters a node with a
+ // position, which I interpret to mean that (in the author's
+ // mind at least) either the children of a positioned node will
+ // already be positioned, or the children of a positioned node
+ // do not merit positioning.
+ //
+ // Whatever the author's rationale, it does seem like a bad idea
+ // to press on through a positioned node to find unpositioned
+ // children beneath it and then to assign whatever happens to
+ // be in `pos` to such nodes. There are supposed to be some
+ // position invariants which I can't imagine surviving that.
}
}
}
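The pair-based Try factory expands each (pattern, result) pair into a guard-free CaseDef and builds no finalizer; in source terms it corresponds to a plain try/catch:

    // Tree level:   Try(body, (pat, handler))
    // Source level: try body catch { case pat => handler }
    def safeDiv(a: Int, b: Int): Int =
      try a / b catch { case _: ArithmeticException => 0 }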
diff --git a/src/compiler/scala/reflect/internal/Types.scala b/src/compiler/scala/reflect/internal/Types.scala
index adf9df185a..9f5f7ffe61 100644
--- a/src/compiler/scala/reflect/internal/Types.scala
+++ b/src/compiler/scala/reflect/internal/Types.scala
@@ -680,7 +680,7 @@ trait Types extends api.Types { self: SymbolTable =>
* symbol.
*/
def substSym(from: List[Symbol], to: List[Symbol]): Type =
- if (from eq to) this
+ if ((from eq to) || from.isEmpty) this
else new SubstSymMap(from, to) apply this
/** Substitute all occurrences of `ThisType(from)` in this type by `to`.
@@ -893,7 +893,7 @@ trait Types extends api.Types { self: SymbolTable =>
def toLongString = {
val str = toString
if (str == "type") widen.toString
- else if (str endsWith ".type") str + " (with underlying type " + widen + ")"
+ else if ((str endsWith ".type") && !typeSymbol.isModuleClass) str + " (with underlying type " + widen + ")"
else str
}
@@ -3903,7 +3903,6 @@ trait Types extends api.Types { self: SymbolTable =>
*/
def rawToExistential = new TypeMap {
private var expanded = immutable.Set[Symbol]()
- private var generated = immutable.Set[Type]()
def apply(tp: Type): Type = tp match {
case TypeRef(pre, sym, List()) if isRawIfWithoutArgs(sym) =>
if (expanded contains sym) AnyRefClass.tpe
@@ -3914,10 +3913,6 @@ trait Types extends api.Types { self: SymbolTable =>
} finally {
expanded -= sym
}
- case ExistentialType(_, _) if !(generated contains tp) => // to avoid infinite expansions. todo: not sure whether this is needed
- val result = mapOver(tp)
- generated += result
- result
case _ =>
mapOver(tp)
}
@@ -4319,83 +4314,83 @@ trait Types extends api.Types { self: SymbolTable =>
else mapOver(tp)
}
- class InstantiateDependentMap(params: List[Symbol], actuals: List[Type]) extends TypeMap with KeepOnlyTypeConstraints {
- private val actualsIndexed = actuals.toIndexedSeq
+ class InstantiateDependentMap(params: List[Symbol], actuals0: List[Type]) extends TypeMap with KeepOnlyTypeConstraints {
+ private val actuals = actuals0.toIndexedSeq
+ private val existentials = new Array[Symbol](actuals.size)
+ def existentialsNeeded: List[Symbol] = existentials.filter(_ ne null).toList
- object ParamWithActual {
- def unapply(sym: Symbol): Option[Type] = {
- val pid = params indexOf sym
- if(pid != -1) Some(actualsIndexed(pid)) else None
- }
+ private object StableArg {
+ def unapply(param: Symbol) = Arg unapply param map actuals filter (tp =>
+ tp.isStable && (tp.typeSymbol != NothingClass)
+ )
+ }
+ private object Arg {
+ def unapply(param: Symbol) = Some(params indexOf param) filter (_ >= 0)
}
- def apply(tp: Type): Type =
- mapOver(tp) match {
- case SingleType(NoPrefix, ParamWithActual(arg)) if arg.isStable => arg // unsound to replace args by unstable actual #3873
- // (soundly) expand type alias selections on implicit arguments, see depmet_implicit_oopsla* test cases -- typically, `param.isImplicit`
- case tp1@TypeRef(SingleType(NoPrefix, ParamWithActual(arg)), sym, targs) =>
- val res = typeRef(arg, sym, targs)
- if(res.typeSymbolDirect isAliasType) res.dealias
- else tp1
- case tp1 => tp1 // don't return the original `tp`, which may be different from `tp1`, due to dropping annotations
- }
-
- def existentialsNeeded: List[Symbol] = existSyms.filter(_ ne null).toList
-
- private val existSyms: Array[Symbol] = new Array(actualsIndexed.size)
- private def haveExistential(i: Int) = {assert((i >= 0) && (i <= actualsIndexed.size)); existSyms(i) ne null}
+ def apply(tp: Type): Type = mapOver(tp) match {
+ // unsound to replace args by unstable actual #3873
+ case SingleType(NoPrefix, StableArg(arg)) => arg
+ // (soundly) expand type alias selections on implicit arguments,
+ // see depmet_implicit_oopsla* test cases -- typically, `param.isImplicit`
+ case tp1 @ TypeRef(SingleType(NoPrefix, Arg(pid)), sym, targs) =>
+ val arg = actuals(pid)
+ val res = typeRef(arg, sym, targs)
+ if (res.typeSymbolDirect.isAliasType) res.dealias else tp1
+ // don't return the original `tp`, which may be different from `tp1`,
+ // due to dropping annotations
+ case tp1 => tp1
+ }
/* Return the type symbol for referencing a parameter inside the existential quantifier.
* (Only needed if the actual is unstable.)
*/
- def existSymFor(actualIdx: Int) =
- if (haveExistential(actualIdx)) existSyms(actualIdx)
- else {
- val oldSym = params(actualIdx)
- val symowner = oldSym.owner
- val bound = singletonBounds(actualsIndexed(actualIdx))
-
- val sym = symowner.newExistential(newTypeName(oldSym.name + ".type"), oldSym.pos)
- sym.setInfo(bound)
- sym.setFlag(oldSym.flags)
-
- existSyms(actualIdx) = sym
- sym
+ private def existentialFor(pid: Int) = {
+ if (existentials(pid) eq null) {
+ val param = params(pid)
+ existentials(pid) = (
+ param.owner.newExistential(newTypeName(param.name + ".type"), param.pos, param.flags)
+ setInfo singletonBounds(actuals(pid))
+ )
}
+ existentials(pid)
+ }
//AM propagate more info to annotations -- this seems a bit ad-hoc... (based on code by spoon)
override def mapOver(arg: Tree, giveup: ()=>Nothing): Tree = {
+ // TODO: this should be simplified; in the stable case, one can
+ // probably just use an Ident to the tree.symbol.
+ //
+ // @PP: That leads to failure here, where stuff no longer has type
+ // 'String @Annot("stuff")' but 'String @Annot(x)'.
+ //
+ // def m(x: String): String @Annot(x) = x
+ // val stuff = m("stuff")
+ //
+ // (TODO cont.) Why an existential in the non-stable case?
+ //
+ // @PP: In the following:
+ //
+ // def m = { val x = "three" ; val y: String @Annot(x) = x; y }
+ //
+ // m is typed as 'String @Annot(x) forSome { val x: String }'.
+ //
+ // Both examples are from run/constrained-types.scala.
object treeTrans extends Transformer {
- override def transform(tree: Tree): Tree = {
- tree match {
- case RefParamAt(pid) =>
- // TODO: this should be simplified; in the stable case, one can probably
- // just use an Ident to the tree.symbol. Why an existential in the non-stable case?
- val actual = actualsIndexed(pid)
- if (actual.isStable && actual.typeSymbol != NothingClass) {
- gen.mkAttributedQualifier(actualsIndexed(pid), tree.symbol)
- } else {
- val sym = existSymFor(pid)
- (Ident(sym.name)
- copyAttrs tree
- setType typeRef(NoPrefix, sym, Nil))
- }
- case _ => super.transform(tree)
- }
- }
- object RefParamAt {
- def unapply(tree: Tree): Option[Int] = tree match {
- case Ident(_) => Some(params indexOf tree.symbol) filterNot (_ == -1)
- case _ => None
- }
+ override def transform(tree: Tree): Tree = tree.symbol match {
+ case StableArg(actual) =>
+ gen.mkAttributedQualifier(actual, tree.symbol)
+ case Arg(pid) =>
+ val sym = existentialFor(pid)
+ Ident(sym) copyAttrs tree setType typeRef(NoPrefix, sym, Nil)
+ case _ =>
+ super.transform(tree)
}
}
-
- treeTrans.transform(arg)
+ treeTrans transform arg
}
}
-
object StripAnnotationsMap extends TypeMap {
def apply(tp: Type): Type = tp match {
case AnnotatedType(_, atp, _) =>
@@ -5381,9 +5376,9 @@ trait Types extends api.Types { self: SymbolTable =>
val params2 = mt2.params
val res2 = mt2.resultType
(sameLength(params1, params2) &&
+ mt1.isImplicit == mt2.isImplicit &&
matchingParams(params1, params2, mt1.isJava, mt2.isJava) &&
- (res1 <:< res2.substSym(params2, params1)) &&
- mt1.isImplicit == mt2.isImplicit)
+ (res1 <:< res2.substSym(params2, params1)))
// TODO: if mt1.params.isEmpty, consider NullaryMethodType?
case _ =>
false
@@ -5503,9 +5498,9 @@ trait Types extends api.Types { self: SymbolTable =>
tp2 match {
case mt2 @ MethodType(params2, res2) =>
// sameLength(params1, params2) was used directly as pre-screening optimization (now done by matchesQuantified -- is that ok, performancewise?)
- matchesQuantified(params1, params2, res1, res2) &&
+ mt1.isImplicit == mt2.isImplicit &&
matchingParams(params1, params2, mt1.isJava, mt2.isJava) &&
- mt1.isImplicit == mt2.isImplicit
+ matchesQuantified(params1, params2, res1, res2)
case NullaryMethodType(res2) =>
if (params1.isEmpty) matchesType(res1, res2, alwaysMatchSimple)
else matchesType(tp1, res2, alwaysMatchSimple)
@@ -5532,7 +5527,10 @@ trait Types extends api.Types { self: SymbolTable =>
case PolyType(tparams1, res1) =>
tp2 match {
case PolyType(tparams2, res2) =>
- matchesQuantified(tparams1, tparams2, res1, res2)
+ if ((tparams1 corresponds tparams2)(_ eq _))
+ matchesType(res1, res2, alwaysMatchSimple)
+ else
+ matchesQuantified(tparams1, tparams2, res1, res2)
case ExistentialType(_, res2) =>
alwaysMatchSimple && matchesType(tp1, res2, true)
case _ =>
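The rewritten InstantiateDependentMap leans on extractors built from Option combinators; a standalone analogue of the Arg/StableArg pairing (the "stable" test here is a stand-in):

    val params  = List("width", "height")
    val actuals = List("3", "h")   // "h" standing in for an unstable actual

    object Arg {
      def unapply(p: String): Option[Int] = Some(params indexOf p) filter (_ >= 0)
    }
    object StableArg {
      // find the index, look the actual up, keep it only if "stable"
      def unapply(p: String): Option[String] =
        Arg unapply p map actuals filter (_ forall (_.isDigit))
    }

    "width" match {
      case StableArg(a) => println("stable actual: " + a)   // prints: stable actual: 3
      case Arg(i)       => println("param #" + i + " is unstable")
      case _            => println("not a parameter")
    }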
diff --git a/src/compiler/scala/reflect/internal/pickling/UnPickler.scala b/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
index b21b33e138..9f93108420 100644
--- a/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
+++ b/src/compiler/scala/reflect/internal/pickling/UnPickler.scala
@@ -862,13 +862,12 @@ abstract class UnPickler /*extends reflect.generic.UnPickler*/ {
override def complete(sym: Symbol) = try {
super.complete(sym)
var alias = at(j, readSymbol)
- if (alias.isOverloaded) {
- atPhase(picklerPhase) {
- alias = alias suchThat (alt => sym.tpe =:= sym.owner.thisType.memberType(alt))
- }
- }
+ if (alias.isOverloaded)
+ alias = atPhase(picklerPhase)((alias suchThat (alt => sym.tpe =:= sym.owner.thisType.memberType(alt))))
+
sym.asInstanceOf[TermSymbol].setAlias(alias)
- } catch {
+ }
+ catch {
case e: MissingRequirementError => throw toTypeError(e)
}
}
diff --git a/src/compiler/scala/reflect/internal/util/Collections.scala b/src/compiler/scala/reflect/internal/util/Collections.scala
index 94672097c4..e3fb1a9cad 100644
--- a/src/compiler/scala/reflect/internal/util/Collections.scala
+++ b/src/compiler/scala/reflect/internal/util/Collections.scala
@@ -65,6 +65,16 @@ trait Collections {
lb.toList
}
+ final def foreachWithIndex[A, B](xs: List[A])(f: (A, Int) => Unit) {
+ var index = 0
+ var ys = xs
+ while (!ys.isEmpty) {
+ f(ys.head, index)
+ ys = ys.tail
+ index += 1
+ }
+ }
+
final def mapWithIndex[A, B](xs: List[A])(f: (A, Int) => B): List[B] = {
val lb = new ListBuffer[B]
var index = 0
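Usage of the new foreachWithIndex (assuming the Collections trait is in scope); it behaves like xs.zipWithIndex.foreach without allocating the intermediate list of pairs:

    foreachWithIndex(List("a", "b", "c")) { (x, i) =>
      println(i + ": " + x)   // 0: a, 1: b, 2: c
    }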
diff --git a/src/compiler/scala/tools/ant/templates/tool-unix.tmpl b/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
index 7e51930fa4..599936f6f8 100644
--- a/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
+++ b/src/compiler/scala/tools/ant/templates/tool-unix.tmpl
@@ -128,9 +128,11 @@ if [[ -z "$cygwin$mingw" ]]; then
usebootcp="true"
fi
+# If using the boot classpath, also pass an empty classpath
+# to java to suppress "." from materializing.
classpathArgs () {
if [[ -n $usebootcp ]]; then
- echo "-Xbootclasspath/a:$TOOL_CLASSPATH"
+ echo "-Xbootclasspath/a:$TOOL_CLASSPATH -classpath \"\""
else
echo "-classpath $TOOL_CLASSPATH"
fi
diff --git a/src/compiler/scala/tools/nsc/Global.scala b/src/compiler/scala/tools/nsc/Global.scala
index 4493188b31..ff8d86873c 100644
--- a/src/compiler/scala/tools/nsc/Global.scala
+++ b/src/compiler/scala/tools/nsc/Global.scala
@@ -193,10 +193,6 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
if (settings.debug.value)
body
}
- @inline final override def debuglog(msg: => String) {
- if (settings.debug.value && (settings.log containsPhase globalPhase))
- inform("[log " + phase + "] " + msg)
- }
// Warnings issued only under -Ydebug. For messages which should reach
// developer ears, but are not adequately actionable by users.
@inline final override def debugwarn(msg: => String) {
@@ -213,10 +209,28 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
def informTime(msg: String, start: Long) = informProgress(elapsedMessage(msg, start))
def logError(msg: String, t: Throwable): Unit = ()
+
+ private def atPhaseStackMessage = atPhaseStack match {
+ case Nil => ""
+ case ps => ps.reverseMap("->" + _).mkString("(", " ", ")")
+ }
+ private def shouldLogAtThisPhase = (
+ (settings.log.isSetByUser)
+ && ((settings.log containsPhase globalPhase) || (settings.log containsPhase phase))
+ )
+
+ def logAfterEveryPhase[T](msg: String)(op: => T) {
+ log("Running operation '%s' after every phase.\n".format(msg) + describeAfterEveryPhase(op))
+ }
// Over 200 closure objects are eliminated by inlining this.
@inline final def log(msg: => AnyRef): Unit =
- if (settings.log containsPhase globalPhase)
- inform("[log " + phase + "] " + msg)
+ if (shouldLogAtThisPhase)
+ inform("[log %s%s] %s".format(globalPhase, atPhaseStackMessage, msg))
+
+ @inline final override def debuglog(msg: => String) {
+ if (settings.debug.value)
+ log(msg)
+ }
def logThrowable(t: Throwable): Unit = globalError(throwableAsString(t))
def throwableAsString(t: Throwable): String =
@@ -754,6 +768,51 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
line1 :: line2 :: descs mkString
}
+ /** Returns List of (phase, value) pairs, including only those
+ * where the value compares unequal to the previous phase's value.
+ */
+ def afterEachPhase[T](op: => T): List[(Phase, T)] = {
+ phaseDescriptors.map(_.ownPhase).foldLeft(List[(Phase, T)]()) { (res, ph) =>
+ val value = afterPhase(ph)(op)
+ if (res.nonEmpty && res.head._2 == value) res
+ else ((ph, value)) :: res
+ } reverse
+ }
+
+ /** Returns List of ChangeAfterPhase objects, encapsulating those
+ * phase transitions where the result of the operation gave a different
+ * list than it had when run during the previous phase.
+ */
+ def changesAfterEachPhase[T](op: => List[T]): List[ChangeAfterPhase[T]] = {
+ val ops = ((NoPhase, Nil)) :: afterEachPhase(op)
+
+ ops sliding 2 map {
+ case (_, before) :: (ph, after) :: Nil =>
+ val lost = before filterNot (after contains _)
+ val gained = after filterNot (before contains _)
+ ChangeAfterPhase(ph, lost, gained)
+ case _ => ???
+ } toList
+ }
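A hedged sketch of driving these helpers (`clazz` is an assumed ClassSymbol, not part of this patch):

    // (phase, value) pairs are reported only where the value changed
    for ((ph, tp) <- afterEachPhase(clazz.tpe.toString))
      println("[after " + ph.name + "] " + tp)
    // members lost/gained at each phase transition, via ChangeAfterPhase.toString
    changesAfterEachPhase(clazz.info.decls.toList map (_.name)) foreach println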
+ private def numberedPhase(ph: Phase) = "%2d/%s".format(ph.id, ph.name)
+
+ case class ChangeAfterPhase[+T](ph: Phase, lost: List[T], gained: List[T]) {
+ private def mkStr(what: String, xs: List[_]) = (
+ if (xs.isEmpty) ""
+ else xs.mkString(what + " after " + numberedPhase(ph) + " {\n ", "\n ", "\n}\n")
+ )
+ override def toString = mkStr("Lost", lost) + mkStr("Gained", gained)
+ }
+
+ def describeAfterEachPhase[T](op: => T): List[String] =
+ afterEachPhase(op) map { case (ph, t) => "[after %-15s] %s".format(numberedPhase(ph), t) }
+
+ def describeAfterEveryPhase[T](op: => T): String =
+ describeAfterEachPhase(op) map (" " + _ + "\n") mkString
+
+ def printAfterEachPhase[T](op: => T): Unit =
+ describeAfterEachPhase(op) foreach (m => println(" " + m))
+
// ----------- Runs ---------------------------------------
private var curRun: Run = null
@@ -808,9 +867,27 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
def currentUnit: CompilationUnit = if (currentRun eq null) NoCompilationUnit else currentRun.currentUnit
def currentSource: SourceFile = if (currentUnit.exists) currentUnit.source else lastSeenSourceFile
+ // TODO - trim these to the absolute minimum.
+ @inline final def afterErasure[T](op: => T): T = afterPhase(currentRun.erasurePhase)(op)
+ @inline final def afterExplicitOuter[T](op: => T): T = afterPhase(currentRun.explicitouterPhase)(op)
+ @inline final def afterFlatten[T](op: => T): T = afterPhase(currentRun.flattenPhase)(op)
+ @inline final def afterIcode[T](op: => T): T = afterPhase(currentRun.icodePhase)(op)
+ @inline final def afterMixin[T](op: => T): T = afterPhase(currentRun.mixinPhase)(op)
+ @inline final def afterPickler[T](op: => T): T = afterPhase(currentRun.picklerPhase)(op)
+ @inline final def afterRefchecks[T](op: => T): T = afterPhase(currentRun.refchecksPhase)(op)
+ @inline final def afterSpecialize[T](op: => T): T = afterPhase(currentRun.specializePhase)(op)
@inline final def afterTyper[T](op: => T): T = afterPhase(currentRun.typerPhase)(op)
+ @inline final def afterUncurry[T](op: => T): T = afterPhase(currentRun.uncurryPhase)(op)
@inline final def beforeErasure[T](op: => T): T = beforePhase(currentRun.erasurePhase)(op)
- @inline final def afterErasure[T](op: => T): T = afterPhase(currentRun.erasurePhase)(op)
+ @inline final def beforeExplicitOuter[T](op: => T): T = beforePhase(currentRun.explicitouterPhase)(op)
+ @inline final def beforeFlatten[T](op: => T): T = beforePhase(currentRun.flattenPhase)(op)
+ @inline final def beforeIcode[T](op: => T): T = beforePhase(currentRun.icodePhase)(op)
+ @inline final def beforeMixin[T](op: => T): T = beforePhase(currentRun.mixinPhase)(op)
+ @inline final def beforePickler[T](op: => T): T = beforePhase(currentRun.picklerPhase)(op)
+ @inline final def beforeRefchecks[T](op: => T): T = beforePhase(currentRun.refchecksPhase)(op)
+ @inline final def beforeSpecialize[T](op: => T): T = beforePhase(currentRun.specializePhase)(op)
+ @inline final def beforeTyper[T](op: => T): T = beforePhase(currentRun.typerPhase)(op)
+ @inline final def beforeUncurry[T](op: => T): T = beforePhase(currentRun.uncurryPhase)(op)
/** Don't want to introduce new errors trying to report errors,
* so swallow exceptions.
@@ -1027,9 +1104,11 @@ class Global(var currentSettings: Settings, var reporter: Reporter) extends Symb
val refchecksPhase = phaseNamed("refchecks")
val uncurryPhase = phaseNamed("uncurry")
// tailcalls, specialize
+ val specializePhase = phaseNamed("specialize")
val explicitouterPhase = phaseNamed("explicitouter")
val erasurePhase = phaseNamed("erasure")
// lazyvals, lambdalift, constructors
+ val lambdaLiftPhase = phaseNamed("lambdalift")
val flattenPhase = phaseNamed("flatten")
val mixinPhase = phaseNamed("mixin")
val cleanupPhase = phaseNamed("cleanup")
diff --git a/src/compiler/scala/tools/nsc/SubComponent.scala b/src/compiler/scala/tools/nsc/SubComponent.scala
index cd9fef117f..7e832a56b0 100644
--- a/src/compiler/scala/tools/nsc/SubComponent.scala
+++ b/src/compiler/scala/tools/nsc/SubComponent.scala
@@ -46,6 +46,9 @@ abstract class SubComponent {
private var ownPhaseCache: WeakReference[Phase] = new WeakReference(null)
private var ownPhaseRunId = global.NoRunId
+
+ @inline final def atOwnPhase[T](op: => T) = global.atPhase(ownPhase)(op)
+ @inline final def afterOwnPhase[T](op: => T) = global.afterPhase(ownPhase)(op)
/** The phase corresponding to this subcomponent in the current compiler run */
def ownPhase: Phase = {
diff --git a/src/compiler/scala/tools/nsc/ast/Reifiers.scala b/src/compiler/scala/tools/nsc/ast/Reifiers.scala
index 21e075950f..7ece8bbd31 100644
--- a/src/compiler/scala/tools/nsc/ast/Reifiers.scala
+++ b/src/compiler/scala/tools/nsc/ast/Reifiers.scala
@@ -580,7 +580,7 @@ trait Reifiers { self: Global =>
ann.assocs map { case (nme, arg) => AssignOrNamedArg(Ident(nme), toScalaAnnotation(arg)) }
}
- New(TypeTree(ann.atp), List(args))
+ New(ann.atp, args: _*)
}
}
diff --git a/src/compiler/scala/tools/nsc/ast/TreeDSL.scala b/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
index 2cfd21ecc8..0d19b781e2 100644
--- a/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
+++ b/src/compiler/scala/tools/nsc/ast/TreeDSL.scala
@@ -253,13 +253,11 @@ trait TreeDSL {
}
/** Top level accessible. */
- def MATCHERROR(arg: Tree) = Throw(New(TypeTree(MatchErrorClass.tpe), List(List(arg))))
- /** !!! should generalize null guard from match error here. */
- def THROW(sym: Symbol): Throw = Throw(New(TypeTree(sym.tpe), List(Nil)))
- def THROW(sym: Symbol, msg: Tree): Throw = Throw(New(TypeTree(sym.tpe), List(List(msg.TOSTRING()))))
+ def MATCHERROR(arg: Tree) = Throw(MatchErrorClass.tpe, arg)
+ def THROW(sym: Symbol, msg: Tree): Throw = Throw(sym.tpe, msg.TOSTRING())
def NEW(tpt: Tree, args: Tree*): Tree = New(tpt, List(args.toList))
- def NEW(sym: Symbol, args: Tree*): Tree = New(sym, args: _*)
+ def NEW(sym: Symbol, args: Tree*): Tree = New(sym.tpe, args: _*)
def DEF(name: Name, tp: Type): DefTreeStart = DEF(name) withType tp
def DEF(name: Name): DefTreeStart = new DefTreeStart(name)
diff --git a/src/compiler/scala/tools/nsc/ast/TreeGen.scala b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
index 265d017653..a94154e0ff 100644
--- a/src/compiler/scala/tools/nsc/ast/TreeGen.scala
+++ b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
@@ -13,7 +13,7 @@ import symtab.SymbolTable
/** XXX to resolve: TreeGen only assumes global is a SymbolTable, but
* TreeDSL at the moment expects a Global. Can we get by with SymbolTable?
*/
-abstract class TreeGen extends reflect.internal.TreeGen {
+abstract class TreeGen extends reflect.internal.TreeGen with TreeDSL {
val global: Global
import global._
@@ -51,13 +51,12 @@ abstract class TreeGen extends reflect.internal.TreeGen {
}
// wrap the given expression in a SoftReference so it can be gc-ed
- def mkSoftRef(expr: Tree): Tree = atPos(expr.pos) {
- New(SoftReferenceClass, expr)
- }
+ def mkSoftRef(expr: Tree): Tree = atPos(expr.pos)(New(SoftReferenceClass.tpe, expr))
+
// annotate the expression with @unchecked
def mkUnchecked(expr: Tree): Tree = atPos(expr.pos) {
// This can't be "Annotated(New(UncheckedClass), expr)" because annotations
- // are very pick about things and it crashes the compiler with "unexpected new".
+ // are very picky about things and it crashes the compiler with "unexpected new".
Annotated(New(scalaDot(UncheckedClass.name), List(Nil)), expr)
}
// if it's a Match, mark the selector unchecked; otherwise nothing.
@@ -66,18 +65,81 @@ abstract class TreeGen extends reflect.internal.TreeGen {
case _ => tree
}
- def withDefaultCase(matchExpr: Tree, defaultAction: Tree/*scrutinee*/ => Tree): Tree = matchExpr match {
- case Match(scrutinee, cases) =>
- if (cases exists treeInfo.isDefaultCase) matchExpr
- else {
- val defaultCase = CaseDef(Ident(nme.WILDCARD), EmptyTree, defaultAction(scrutinee))
- Match(scrutinee, cases :+ defaultCase)
+    // must be kept in sync with the codegen in PatMatVirtualiser
+ object VirtualCaseDef {
+ def unapply(b: Block): Option[(Assign, Tree, Tree)] = b match {
+ case Block(List(assign@Assign(keepGoingLhs, falseLit), matchRes), zero) => Some((assign, matchRes, zero)) // TODO: check tree annotation
+ case _ => None
+ }
+ }
+
+    // TODO: would be so much nicer if we knew during match-translation (i.e., type checking)
+ // whether we should emit missingCase-style apply (and isDefinedAt), instead of transforming trees post-factum
+ class MatchMatcher {
+ def caseMatch(orig: Tree, selector: Tree, cases: List[CaseDef], wrap: Tree => Tree): Tree = unknownTree(orig)
+ def caseVirtualizedMatch(orig: Tree, _match: Tree, targs: List[Tree], scrut: Tree, matcher: Tree): Tree = unknownTree(orig)
+ def caseVirtualizedMatchOpt(orig: Tree, zero: ValDef, x: ValDef, matchRes: ValDef, keepGoing: ValDef, stats: List[Tree], epilogue: Tree, wrap: Tree => Tree): Tree = unknownTree(orig)
+
+ def apply(matchExpr: Tree): Tree = (matchExpr: @unchecked) match {
+ // old-style match or virtpatmat switch
+ case Match(selector, cases) => // println("simple match: "+ (selector, cases) + "for:\n"+ matchExpr )
+ caseMatch(matchExpr, selector, cases, identity)
+ // old-style match or virtpatmat switch
+ case Block((vd: ValDef) :: Nil, orig@Match(selector, cases)) => // println("block match: "+ (selector, cases, vd) + "for:\n"+ matchExpr )
+ caseMatch(matchExpr, selector, cases, m => copyBlock(matchExpr, List(vd), m))
+ // virtpatmat
+ case Apply(Apply(TypeApply(Select(tgt, nme.runOrElse), targs), List(scrut)), List(matcher)) if opt.virtPatmat => // println("virt match: "+ (tgt, targs, scrut, matcher) + "for:\n"+ matchExpr )
+ caseVirtualizedMatch(matchExpr, tgt, targs, scrut, matcher)
+ // optimized version of virtpatmat
+ case Block((zero: ValDef) :: (x: ValDef) :: (matchRes: ValDef) :: (keepGoing: ValDef) :: stats, epilogue) if opt.virtPatmat => // TODO: check tree annotation // println("virtopt match: "+ (zero, x, matchRes, keepGoing, stats) + "for:\n"+ matchExpr )
+ caseVirtualizedMatchOpt(matchExpr, zero, x, matchRes, keepGoing, stats, epilogue, identity)
+ // optimized version of virtpatmat
+ case Block(outerStats, orig@Block((zero: ValDef) :: (x: ValDef) :: (matchRes: ValDef) :: (keepGoing: ValDef) :: stats, epilogue)) if opt.virtPatmat => // TODO: check tree annotation // println("virt opt block match: "+ (zero, x, matchRes, keepGoing, stats, outerStats) + "for:\n"+ matchExpr )
+ caseVirtualizedMatchOpt(matchExpr, zero, x, matchRes, keepGoing, stats, epilogue, m => copyBlock(matchExpr, outerStats, m))
+ case other =>
+ unknownTree(other)
+ }
+
+ def unknownTree(t: Tree): Tree = throw new MatchError(t)
+ def copyBlock(orig: Tree, stats: List[Tree], expr: Tree): Block = Block(stats, expr)
+
+ def dropSyntheticCatchAll(cases: List[CaseDef]): List[CaseDef] =
+ if (!opt.virtPatmat) cases
+ else cases filter {
+ case CaseDef(pat, EmptyTree, Throw(Apply(Select(New(exTpt), nme.CONSTRUCTOR), _))) if (treeInfo.isWildcardArg(pat) && (exTpt.tpe.typeSymbol eq MatchErrorClass)) => false
+ case CaseDef(pat, guard, body) => true
+ }
+ }
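A hedged sketch of extending MatchMatcher (illustrative only; the collector and its uses are assumptions):

    // hypothetical: harvest the cases of old-style matches without rewriting anything
    object caseCollector extends MatchMatcher {
      val collected = collection.mutable.ListBuffer[CaseDef]()
      override def caseMatch(orig: Tree, selector: Tree, cases: List[CaseDef], wrap: Tree => Tree): Tree = {
        collected ++= cases
        orig // leave the tree unchanged
      }
      override def unknownTree(t: Tree): Tree = t // tolerate shapes we don't handle
    }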
+
+ def withDefaultCase(matchExpr: Tree, defaultAction: Tree/*scrutinee*/ => Tree): Tree = {
+ object withDefaultTransformer extends MatchMatcher {
+ override def caseMatch(orig: Tree, selector: Tree, cases: List[CaseDef], wrap: Tree => Tree): Tree = {
+ val casesNoSynthCatchAll = dropSyntheticCatchAll(cases)
+ if (casesNoSynthCatchAll exists treeInfo.isDefaultCase) orig
+ else {
+ val defaultCase = CaseDef(Ident(nme.WILDCARD), EmptyTree, defaultAction(selector.duplicate))
+ wrap(Match(selector, casesNoSynthCatchAll :+ defaultCase))
+ }
+ }
+ override def caseVirtualizedMatch(orig: Tree, _match: Tree, targs: List[Tree], scrut: Tree, matcher: Tree): Tree = { import CODE._
+ ((matcher APPLY (scrut)) DOT nme.getOrElse) APPLY (defaultAction(scrut.duplicate)) // TODO: pass targs
}
- case _ =>
- matchExpr
- // [Martin] Adriaan: please fill in virtpatmat transformation here
+ override def caseVirtualizedMatchOpt(orig: Tree, zero: ValDef, x: ValDef, matchRes: ValDef, keepGoing: ValDef, stats: List[Tree], epilogue: Tree, wrap: Tree => Tree): Tree = { import CODE._
+ wrap(Block(
+ zero ::
+ x ::
+ matchRes ::
+ keepGoing ::
+ stats,
+ // replace `if (keepGoing) throw new MatchError(...) else matchRes` by `if (keepGoing) ${defaultAction(`x`)} else matchRes`
+ (IF (REF(keepGoing.symbol)) THEN defaultAction(x.rhs.duplicate) ELSE REF(matchRes.symbol))
+ ))
+ }
+ }
+ withDefaultTransformer(matchExpr)
}
+
def mkCached(cvar: Symbol, expr: Tree): Tree = {
val cvarRef = mkUnattributedRef(cvar)
Block(
@@ -118,10 +180,11 @@ abstract class TreeGen extends reflect.internal.TreeGen {
def mkModuleAccessDef(accessor: Symbol, msym: Symbol) =
DefDef(accessor, Select(This(msym.owner), msym))
- def newModule(accessor: Symbol, tpe: Type) =
- New(TypeTree(tpe),
- List(for (pt <- tpe.typeSymbol.primaryConstructor.info.paramTypes)
- yield This(accessor.owner.enclClass)))
+ def newModule(accessor: Symbol, tpe: Type) = {
+ val ps = tpe.typeSymbol.primaryConstructor.info.paramTypes
+ if (ps.isEmpty) New(tpe)
+ else New(tpe, This(accessor.owner.enclClass))
+ }
// def m: T;
def mkModuleAccessDcl(accessor: Symbol) =
@@ -156,6 +219,18 @@ abstract class TreeGen extends reflect.internal.TreeGen {
def mkSynchronized(monitor: Tree, body: Tree): Tree =
Apply(Select(monitor, Object_synchronized), List(body))
+ def mkAppliedTypeForCase(clazz: Symbol): Tree = {
+ val numParams = clazz.typeParams.size
+ if (clazz.typeParams.isEmpty) Ident(clazz)
+ else AppliedTypeTree(Ident(clazz), 1 to numParams map (_ => Bind(tpnme.WILDCARD, EmptyTree)) toList)
+ }
+ def mkBindForCase(patVar: Symbol, clazz: Symbol, targs: List[Type]): Tree = {
+ Bind(patVar, Typed(Ident(nme.WILDCARD),
+ if (targs.isEmpty) mkAppliedTypeForCase(clazz)
+ else AppliedTypeTree(Ident(clazz), targs map TypeTree)
+ ))
+ }
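Roughly, the shapes produced (a hedged reading, using List as the example class):

    // mkAppliedTypeForCase(ListClass)                  ~>  List[_]              (wildcard-bound type args)
    // mkBindForCase(x, ListClass, Nil)                 ~>  (x @ (_: List[_]))
    // mkBindForCase(x, ListClass, List(IntClass.tpe))  ~>  (x @ (_: List[Int]))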
+
def wildcardStar(tree: Tree) =
atPos(tree.pos) { Typed(tree, Ident(tpnme.WILDCARD_STAR)) }
diff --git a/src/compiler/scala/tools/nsc/ast/Trees.scala b/src/compiler/scala/tools/nsc/ast/Trees.scala
index 855b55bb5e..ad87889145 100644
--- a/src/compiler/scala/tools/nsc/ast/Trees.scala
+++ b/src/compiler/scala/tools/nsc/ast/Trees.scala
@@ -79,16 +79,16 @@ trait Trees extends reflect.internal.Trees { self: Global =>
val (edefs, rest) = body span treeInfo.isEarlyDef
val (evdefs, etdefs) = edefs partition treeInfo.isEarlyValDef
val gvdefs = evdefs map {
- case vdef @ ValDef(mods, name, tpt, rhs) =>
- treeCopy.ValDef(
- vdef.duplicate, mods, name,
- atPos(focusPos(vdef.pos)) { TypeTree() setOriginal tpt setPos focusPos(tpt.pos) }, // atPos in case
- EmptyTree)
- }
- val lvdefs = evdefs map {
- case vdef @ ValDef(mods, name, tpt, rhs) =>
- treeCopy.ValDef(vdef, Modifiers(PRESUPER), name, tpt, rhs)
+ case vdef @ ValDef(_, _, tpt, _) => copyValDef(vdef)(
+ // !!! I know "atPos in case" wasn't intentionally planted to
+ // add an air of mystery to this file, but it is the sort of
+ // comment which only its author could love.
+ tpt = atPos(focusPos(vdef.pos))(TypeTree() setOriginal tpt setPos focusPos(tpt.pos)), // atPos in case
+ rhs = EmptyTree
+ )
}
+ val lvdefs = evdefs collect { case vdef: ValDef => copyValDef(vdef)(mods = Modifiers(PRESUPER)) }
+
val constrs = {
if (constrMods hasFlag TRAIT) {
if (body forall treeInfo.isInterfaceMember) List()
@@ -108,13 +108,11 @@ trait Trees extends reflect.internal.Trees { self: Global =>
DefDef(constrMods, nme.CONSTRUCTOR, List(), vparamss1, TypeTree(), Block(lvdefs ::: List(superCall), Literal(Constant())))))
}
}
- // println("typed template, gvdefs = "+gvdefs+", parents = "+parents+", constrs = "+constrs)
constrs foreach (ensureNonOverlapping(_, parents ::: gvdefs))
- // vparamss2 are used as field definitions for the class. remove defaults
- val vparamss2 = vparamss map (vps => vps map { vd =>
- treeCopy.ValDef(vd, vd.mods &~ DEFAULTPARAM, vd.name, vd.tpt, EmptyTree)
- })
- Template(parents, self, gvdefs ::: vparamss2.flatten ::: constrs ::: etdefs ::: rest)
+ // Field definitions for the class - remove defaults.
+ val fieldDefs = vparamss.flatten map (vd => copyValDef(vd)(mods = vd.mods &~ DEFAULTPARAM, rhs = EmptyTree))
+
+ Template(parents, self, gvdefs ::: fieldDefs ::: constrs ::: etdefs ::: rest)
}
/** Construct class definition with given class symbol, value parameters,
diff --git a/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala b/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
index fe6dcc9138..8e445a62db 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/Parsers.scala
@@ -1205,7 +1205,7 @@ self =>
*/
def wildcardType(start: Int) = {
val pname = freshTypeName("_$")
- val t = atPos(start) { Ident(pname) }
+ val t = atPos(start)(Ident(pname))
val bounds = typeBounds()
val param = atPos(t.pos union bounds.pos) { makeSyntheticTypeParam(pname, bounds) }
placeholderTypes = param :: placeholderTypes
@@ -1423,15 +1423,14 @@ self =>
def implicitClosure(start: Int, location: Int): Tree = {
val param0 = convertToParam {
atPos(in.offset) {
- var paramexpr: Tree = Ident(ident())
- if (in.token == COLON) {
- in.nextToken()
- paramexpr = Typed(paramexpr, typeOrInfixType(location))
+ Ident(ident()) match {
+ case expr if in.token == COLON =>
+ in.nextToken() ; Typed(expr, typeOrInfixType(location))
+ case expr => expr
}
- paramexpr
}
}
- val param = treeCopy.ValDef(param0, param0.mods | Flags.IMPLICIT, param0.name, param0.tpt, param0.rhs)
+ val param = copyValDef(param0)(mods = param0.mods | Flags.IMPLICIT)
atPos(start, in.offset) {
accept(ARROW)
Function(List(param), if (location != InBlock) expr() else block())
@@ -2689,8 +2688,8 @@ self =>
val (self, body) = templateBody(true)
if (in.token == WITH && self.isEmpty) {
val earlyDefs: List[Tree] = body flatMap {
- case vdef @ ValDef(mods, name, tpt, rhs) if !mods.isDeferred =>
- List(treeCopy.ValDef(vdef, mods | Flags.PRESUPER, name, tpt, rhs))
+ case vdef @ ValDef(mods, _, _, _) if !mods.isDeferred =>
+ List(copyValDef(vdef)(mods = mods | Flags.PRESUPER))
case tdef @ TypeDef(mods, name, tparams, rhs) =>
List(treeCopy.TypeDef(tdef, mods | Flags.PRESUPER, name, tparams, rhs))
case stat if !stat.isEmpty =>
diff --git a/src/compiler/scala/tools/nsc/ast/parser/TreeBuilder.scala b/src/compiler/scala/tools/nsc/ast/parser/TreeBuilder.scala
index 13f608ed4e..ad93b4753f 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/TreeBuilder.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/TreeBuilder.scala
@@ -470,15 +470,11 @@ abstract class TreeBuilder {
def makeVisitor(cases: List[CaseDef], checkExhaustive: Boolean): Tree =
makeVisitor(cases, checkExhaustive, "x$")
- private def makeUnchecked(expr: Tree): Tree = atPos(expr.pos) {
- Annotated(New(scalaDot(definitions.UncheckedClass.name), List(Nil)), expr)
- }
-
/** Create visitor <x => x match cases> */
def makeVisitor(cases: List[CaseDef], checkExhaustive: Boolean, prefix: String): Tree = {
- val x = freshTermName(prefix)
- val id = Ident(x)
- val sel = if (checkExhaustive) id else makeUnchecked(id)
+ val x = freshTermName(prefix)
+ val id = Ident(x)
+ val sel = if (checkExhaustive) id else gen.mkUnchecked(id)
Function(List(makeSyntheticParam(x)), Match(sel, cases))
}
@@ -563,7 +559,7 @@ abstract class TreeBuilder {
val vars = getVariables(pat1)
val matchExpr = atPos((pat1.pos union rhs.pos).makeTransparent) {
Match(
- makeUnchecked(rhs),
+ gen.mkUnchecked(rhs),
List(
atPos(pat1.pos) {
CaseDef(pat1, EmptyTree, makeTupleTerm(vars map (_._1) map Ident, true))
diff --git a/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala b/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
index 05571b2424..aab944f65a 100644
--- a/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
+++ b/src/compiler/scala/tools/nsc/backend/ScalaPrimitives.scala
@@ -565,7 +565,7 @@ abstract class ScalaPrimitives {
import definitions._
val code = getPrimitive(fun)
- def elementType = atPhase(currentRun.typerPhase) {
+ def elementType = beforeTyper {
val arrayParent = tpe :: tpe.parents collectFirst {
case TypeRef(_, ArrayClass, elem :: Nil) => elem
}
diff --git a/src/compiler/scala/tools/nsc/backend/icode/GenICode.scala b/src/compiler/scala/tools/nsc/backend/icode/GenICode.scala
index 6aee52a354..9e801e3ea8 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/GenICode.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/GenICode.scala
@@ -94,7 +94,7 @@ abstract class GenICode extends SubComponent {
// !! modules should be eliminated by refcheck... or not?
case ModuleDef(mods, name, impl) =>
- abort("Modules should not reach backend!")
+ abort("Modules should not reach backend! " + tree)
case ValDef(mods, name, tpt, rhs) =>
ctx // we use the symbol to add fields
@@ -393,15 +393,15 @@ abstract class GenICode extends SubComponent {
for (CaseDef(pat, _, body) <- catches.reverse) yield {
def genWildcardHandler(sym: Symbol): (Symbol, TypeKind, Context => Context) =
(sym, kind, ctx => {
- ctx.bb.emit(DROP(REFERENCE(sym)))
+ ctx.bb.emit(DROP(REFERENCE(sym))) // drop the loaded exception
genLoad(body, ctx, kind)
})
pat match {
case Typed(Ident(nme.WILDCARD), tpt) => genWildcardHandler(tpt.tpe.typeSymbol)
case Ident(nme.WILDCARD) => genWildcardHandler(ThrowableClass)
- case Bind(name, _) =>
- val exception = ctx.method addLocal new Local(pat.symbol, toTypeKind(pat.symbol.tpe), false)
+ case Bind(_, _) =>
+ val exception = ctx.method addLocal new Local(pat.symbol, toTypeKind(pat.symbol.tpe), false) // the exception will be loaded and stored into this local
(pat.symbol.tpe.typeSymbol, kind, {
ctx: Context =>
@@ -704,7 +704,8 @@ abstract class GenICode extends SubComponent {
ctx1
case New(tpt) =>
- abort("Unexpected New")
+ abort("Unexpected New(" + tpt.summaryString + "/" + tpt + ") received in icode.\n" +
+ " Call was genLoad" + ((tree, ctx, expectedType)))
case Apply(TypeApply(fun, targs), _) =>
val sym = fun.symbol
@@ -1054,7 +1055,7 @@ abstract class GenICode extends SubComponent {
case Match(selector, cases) =>
debuglog("Generating SWITCH statement.");
- var ctx1 = genLoad(selector, ctx, INT)
+ var ctx1 = genLoad(selector, ctx, INT) // TODO: Java 7 allows strings in switches (so, don't assume INT and don't convert the literals using intValue)
val afterCtx = ctx1.newBlock
var caseCtx: Context = null
generatedType = toTypeKind(tree.tpe)
@@ -2086,12 +2087,12 @@ abstract class GenICode extends SubComponent {
exh
}) else None
- val exhs = handlers.map { handler =>
- val exh = this.newExceptionHandler(handler._1, handler._2, tree.pos)
+ val exhs = handlers.map { case (sym, kind, handler) => // def genWildcardHandler(sym: Symbol): (Symbol, TypeKind, Context => Context) =
+ val exh = this.newExceptionHandler(sym, kind, tree.pos)
var ctx1 = outerCtx.enterExceptionHandler(exh)
ctx1.addFinalizer(finalizer, finalizerCtx)
loadException(ctx1, exh, tree.pos)
- ctx1 = handler._3(ctx1)
+ ctx1 = handler(ctx1)
// emit finalizer
val ctx2 = emitFinalizer(ctx1)
ctx2.bb.closeWith(JUMP(afterCtx.bb))
diff --git a/src/compiler/scala/tools/nsc/backend/icode/Members.scala b/src/compiler/scala/tools/nsc/backend/icode/Members.scala
index 298c9171a1..44a58e75b4 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/Members.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/Members.scala
@@ -212,6 +212,12 @@ trait Members {
def isStatic: Boolean = symbol.isStaticMember
override def toString() = symbol.fullName
+
+ def matchesSignature(other: IMethod) = {
+ (symbol.name == other.symbol.name) &&
+ (params corresponds other.params)(_.kind == _.kind) &&
+ (returnType == other.returnType)
+ }
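A hedged usage sketch (`clazz` and `target` are assumed, not part of this patch):

    // hypothetical: spot methods whose icode-level signatures collide with `target`
    val clashes = clazz.methods filter (m => (m ne target) && (m matchesSignature target))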
import opcodes._
def checkLocals(): Unit = {
diff --git a/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala b/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
index a485272ca6..5eef02f2cb 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/TypeKinds.scala
@@ -145,7 +145,7 @@ trait TypeKinds { self: ICodes =>
* Here we make the adjustment by rewinding to a pre-erasure state and
* sifting through the parents for a class type.
*/
- def lub0(tk1: TypeKind, tk2: TypeKind): Type = atPhase(currentRun.uncurryPhase) {
+ def lub0(tk1: TypeKind, tk2: TypeKind): Type = beforeUncurry {
import definitions._
val tp = global.lub(List(tk1.toType, tk2.toType))
val (front, rest) = tp.parents span (_.typeSymbol.hasTraitFlag)
diff --git a/src/compiler/scala/tools/nsc/backend/icode/analysis/DataFlowAnalysis.scala b/src/compiler/scala/tools/nsc/backend/icode/analysis/DataFlowAnalysis.scala
index 60cb679782..9f43e1b84c 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/analysis/DataFlowAnalysis.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/analysis/DataFlowAnalysis.scala
@@ -60,20 +60,17 @@ trait DataFlowAnalysis[L <: SemiLattice] {
val output = f(point, in(point))
if ((lattice.bottom == out(point)) || output != out(point)) {
-// Console.println("Output changed at " + point
-// + " from: " + out(point) + " to: " + output
-// + " for input: " + in(point) + " and they are different: " + (output != out(point)))
+ // Console.println("Output changed at " + point
+ // + " from: " + out(point) + " to: " + output
+ // + " for input: " + in(point) + " and they are different: " + (output != out(point)))
out(point) = output
val succs = point.successors
succs foreach { p =>
- if (!worklist(p))
- worklist += p;
- if (!in.isDefinedAt(p))
- assert(false, "Invalid successor for: " + point + " successor " + p + " does not exist")
-// if (!p.exceptionHandlerHeader) {
-// println("lubbing " + p.predecessors + " outs: " + p.predecessors.map(out.apply).mkString("\n", "\n", ""))
- in(p) = lattice.lub(in(p) :: (p.predecessors map out.apply), p.exceptionHandlerStart)
-// }
+ val updated = lattice.lub(in(p) :: (p.predecessors map out.apply), p.exceptionHandlerStart)
+ if(updated != in(p)) {
+ in(p) = updated
+ if (!worklist(p)) { worklist += p; }
+ }
}
}
}
diff --git a/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala b/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
index c06bd2e097..69de0dfa90 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/analysis/ReachingDefinitions.scala
@@ -105,11 +105,9 @@ abstract class ReachingDefinitions {
def genAndKill(b: BasicBlock): (ListSet[Definition], ListSet[Local]) = {
var genSet = ListSet[Definition]()
var killSet = ListSet[Local]()
- for ((i, idx) <- b.toList.zipWithIndex) i match {
- case STORE_LOCAL(local) =>
- killSet = killSet + local
- genSet = updateReachingDefinition(b, idx, genSet)
- case _ => ()
+ for ((STORE_LOCAL(local), idx) <- b.toList.zipWithIndex) {
+ killSet = killSet + local
+ genSet = updateReachingDefinition(b, idx, genSet)
}
(genSet, killSet)
}
diff --git a/src/compiler/scala/tools/nsc/backend/icode/analysis/TypeFlowAnalysis.scala b/src/compiler/scala/tools/nsc/backend/icode/analysis/TypeFlowAnalysis.scala
index 6421d6c8ef..877c51ebc1 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/analysis/TypeFlowAnalysis.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/analysis/TypeFlowAnalysis.scala
@@ -127,34 +127,6 @@ abstract class TypeFlowAnalysis {
}
}
- /** reinitialize the analysis, keeping around solutions from a previous run. */
- def reinit(m: icodes.IMethod) {
- if (this.method == null || this.method.symbol != m.symbol)
- init(m)
- else reinit {
- m foreachBlock { b =>
- if (!in.contains(b)) {
- for (p <- b.predecessors) {
- if (out.isDefinedAt(p)) {
- in(b) = out(p)
- worklist += p
- }
- /* else
- in(b) = typeFlowLattice.bottom
- */ }
- out(b) = typeFlowLattice.bottom
- }
- }
- for (handler <- m.exh) {
- val start = handler.startBlock
- if (!in.contains(start)) {
- worklist += start
- in(start) = lattice.IState(in(start).vars, typeStackLattice.exceptionHandlerStack)
- }
- }
- }
- }
-
def this(m: icodes.IMethod) {
this()
init(m)
@@ -162,7 +134,7 @@ abstract class TypeFlowAnalysis {
def run = {
timer.start
-// icodes.lubs0 = 0
+ // icodes.lubs0 = 0
forwardAnalysis(blockTransfer)
val t = timer.stop
if (settings.debug.value) {
@@ -170,216 +142,35 @@ abstract class TypeFlowAnalysis {
assert(visited.contains(b),
"Block " + b + " in " + this.method + " has input equal to bottom -- not visited? .." + visited));
}
-// log("" + method.symbol.fullName + " [" + method.code.blocks.size + " blocks] "
-// + "\n\t" + iterations + " iterations: " + t + " ms."
-// + "\n\tlubs: " + typeFlowLattice.lubs + " out of which " + icodes.lubs0 + " typer lubs")
+ // log("" + method.symbol.fullName + " [" + method.code.blocks.size + " blocks] "
+ // + "\n\t" + iterations + " iterations: " + t + " ms."
+ // + "\n\tlubs: " + typeFlowLattice.lubs + " out of which " + icodes.lubs0 + " typer lubs")
}
def blockTransfer(b: BasicBlock, in: lattice.Elem): lattice.Elem = {
- b.iterator.foldLeft(in)(interpret)
- }
- /** The flow function of a given basic block. */
- /* var flowFun: immutable.Map[BasicBlock, TransferFunction] = new immutable.HashMap */
-
- /** Fill flowFun with a transfer function per basic block. */
-/*
- private def buildFlowFunctions(blocks: List[BasicBlock]) {
- def transfer(b: BasicBlock): TransferFunction = {
- var gens: List[Gen] = Nil
- var consumed: Int = 0
- val stack = new SimulatedStack
-
- for (instr <- b) instr match {
- case THIS(clasz) =>
- stack push toTypeKind(clasz.tpe)
-
- case CONSTANT(const) =>
- stack push toTypeKind(const.tpe)
-
- case LOAD_ARRAY_ITEM(kind) =>
- stack.pop2
- stack.push(kind)
-
- case LOAD_LOCAL(local) =>
- val t = bindings(local)
- stack push (if (t == typeLattice.bottom) local.kind else t)
-
- case LOAD_FIELD(field, isStatic) =>
- if (!isStatic)
- stack.pop
- stack push toTypeKind(field.tpe)
-
- case LOAD_MODULE(module) =>
- stack push toTypeKind(module.tpe)
-
- case STORE_ARRAY_ITEM(kind) =>
- stack.pop3
-
- case STORE_LOCAL(local) =>
- val t = stack.pop
- bindings += (local -> t)
-
- case STORE_THIS(_) =>
- stack.pop
-
- case STORE_FIELD(field, isStatic) =>
- if (isStatic)
- stack.pop
- else
- stack.pop2
-
- case CALL_PRIMITIVE(primitive) =>
- primitive match {
- case Negation(kind) =>
- stack.pop; stack.push(kind)
- case Test(_, kind, zero) =>
- stack.pop
- if (!zero) stack.pop
- stack push BOOL;
- case Comparison(_, _) =>
- stack.pop2
- stack push INT
-
- case Arithmetic(op, kind) =>
- stack.pop
- if (op != NOT)
- stack.pop
- val k = kind match {
- case BYTE | SHORT | CHAR => INT
- case _ => kind
- }
- stack push k
-
- case Logical(op, kind) =>
- stack.pop2
- stack push kind
-
- case Shift(op, kind) =>
- stack.pop2
- stack push kind
-
- case Conversion(src, dst) =>
- stack.pop
- stack push dst
-
- case ArrayLength(kind) =>
- stack.pop
- stack push INT
-
- case StartConcat =>
- stack.push(ConcatClass)
-
- case EndConcat =>
- stack.pop
- stack.push(STRING)
-
- case StringConcat(el) =>
- stack.pop2
- stack push ConcatClass
- }
-
- case CALL_METHOD(method, style) => style match {
- case Dynamic =>
- stack.pop(1 + method.info.paramTypes.length)
- stack.push(toTypeKind(method.info.resultType))
-
- case Static(onInstance) =>
- if (onInstance) {
- stack.pop(1 + method.info.paramTypes.length)
- if (!method.isConstructor)
- stack.push(toTypeKind(method.info.resultType));
- } else {
- stack.pop(method.info.paramTypes.length)
- stack.push(toTypeKind(method.info.resultType))
- }
-
- case SuperCall(mix) =>
- stack.pop(1 + method.info.paramTypes.length)
- stack.push(toTypeKind(method.info.resultType))
- }
-
- case BOX(kind) =>
- stack.pop
- stack.push(BOXED(kind))
-
- case UNBOX(kind) =>
- stack.pop
- stack.push(kind)
-
- case NEW(kind) =>
- stack.push(kind)
-
- case CREATE_ARRAY(elem, dims) =>
- stack.pop(dims)
- stack.push(ARRAY(elem))
-
- case IS_INSTANCE(tpe) =>
- stack.pop
- stack.push(BOOL)
-
- case CHECK_CAST(tpe) =>
- stack.pop
- stack.push(tpe)
-
- case SWITCH(tags, labels) =>
- stack.pop
-
- case JUMP(whereto) =>
- ()
-
- case CJUMP(success, failure, cond, kind) =>
- stack.pop2
-
- case CZJUMP(success, failure, cond, kind) =>
- stack.pop
-
- case RETURN(kind) =>
- if (kind != UNIT)
- stack.pop;
-
- case THROW() =>
- stack.pop
-
- case DROP(kind) =>
- stack.pop
-
- case DUP(kind) =>
- stack.push(stack.head)
-
- case MONITOR_ENTER() =>
- stack.pop
-
- case MONITOR_EXIT() =>
- stack.pop
-
- case SCOPE_ENTER(_) | SCOPE_EXIT(_) =>
- ()
-
- case LOAD_EXCEPTION(_) =>
- stack.pop(stack.length)
- stack.push(typeLattice.Object)
-
- case _ =>
- dumpClassesAndAbort("Unknown instruction: " + i)
- }
-
- new TransferFunction(consumed, gens)
- }
-
- for (b <- blocks) {
- flowFun = flowFun + (b -> transfer(b))
+ var result = lattice.IState(new VarBinding(in.vars), new TypeStack(in.stack))
+ var instrs = b.toList
+ while(!instrs.isEmpty) {
+ val i = instrs.head
+ result = mutatingInterpret(result, i)
+ instrs = instrs.tail
}
+ result
}
-*/
+
/** Abstract interpretation for one instruction. */
def interpret(in: typeFlowLattice.Elem, i: Instruction): typeFlowLattice.Elem = {
val out = lattice.IState(new VarBinding(in.vars), new TypeStack(in.stack))
+ mutatingInterpret(out, i)
+ }
+
+ def mutatingInterpret(out: typeFlowLattice.Elem, i: Instruction): typeFlowLattice.Elem = {
val bindings = out.vars
val stack = out.stack
if (settings.debug.value) {
-// Console.println("[before] Stack: " + stack);
-// Console.println(i);
+ // Console.println("[before] Stack: " + stack);
+ // Console.println(i);
}
i match {
@@ -619,11 +410,292 @@ abstract class TypeFlowAnalysis {
}
}
+ case class CallsiteInfo(bb: icodes.BasicBlock, receiver: Symbol, stackLength: Int, concreteMethod: Symbol)
+
+ /**
+
+ A full type-flow analysis on a method computes in- and out-flows for each basic block (that's what MethodTFA does).
+
+ For the purposes of Inliner, doing so guarantees that an abstract typestack-slot is available by the time an inlining candidate (a CALL_METHOD instruction) is visited.
+ This subclass (MTFAGrowable) of MethodTFA also aims at performing such analysis on CALL_METHOD instructions, with some differences:
+
+ (a) early screening is performed while the type-flow is being computed (in an override of `blockTransfer`) by testing a subset of the conditions that Inliner checks later.
+ The reasoning here is: if the early check fails at some iteration, there's no chance a follow-up iteration (with a yet more lub-ed typestack-slot) will succeed.
+ Failure is sufficient to remove that particular CALL_METHOD from the typeflow's `remainingCALLs`.
+ A forward note: in case inlining occurs at some basic block B, all blocks reachable from B get their CALL_METHOD instructions considered again as candidates
+ (because of the more precise types that -- perhaps -- can be computed).
+
+      (b) in case the early check does not fail, no conclusive decision can be made, and thus the CALL_METHOD stays in `isOnWatchlist`.
+
+ In other words, `remainingCALLs` tracks those callsites that still remain as candidates for inlining, so that Inliner can focus on those.
+      `remainingCALLs` also caches info about the typestack just before the callsite, so as to spare recomputing it at inlining time.
+
+      Besides caching, a further optimization involves skipping those basic blocks whose in-flow and out-flow aren't needed anyway (as explained next).
+      A basic block lacking a callsite in `remainingCALLs`, when visited by the standard algorithm, won't cause any inlining.
+ But as we know from the way type-flows are computed, computing the in- and out-flow for a basic block relies in general on those of other basic blocks.
+ In detail, we want to focus on that sub-graph of the CFG such that control flow may reach a remaining candidate callsite.
+ Those basic blocks not in that subgraph can be skipped altogether. That's why:
+        - `forwardAnalysis()` in `MTFAGrowable` now checks for inclusion of a basic block in `relevantBBs`
+        - the same check is performed before adding a block to the worklist, and as part of choosing successors.
+      The bookkeeping supporting on-the-fly pruning of irrelevant blocks requires overriding most methods of the dataflow analysis.
+
+      The rest of the story takes place in Inliner, which does not visit all of the method's basic blocks but only those represented in `remainingCALLs`.
+
+ @author Miguel Garcia, http://lampwww.epfl.ch/~magarcia/ScalaCompilerCornerReloaded/
+
+ */
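A hedged sketch of what Inliner can later read back from that cache (field names as in the CallsiteInfo above; the formatting is illustrative):

    // each surviving candidate carries its enclosing block plus pre-call typestack info
    for ((cm, CallsiteInfo(bb, receiver, stackLength, concrete)) <- remainingCALLs)
      log("candidate " + concrete.fullName + " on " + receiver.name +
          " in " + bb + " (stack depth " + stackLength + ")")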
class MTFAGrowable extends MethodTFA {
import icodes._
- /** discards what must be discarded, blanks what needs to be blanked out, and keeps the rest. */
+ val remainingCALLs = mutable.Map.empty[opcodes.CALL_METHOD, CallsiteInfo]
+
+ val preCandidates = mutable.Set.empty[BasicBlock]
+
+ var callerLin: Traversable[BasicBlock] = null
+
+ override def run {
+
+ timer.start
+ forwardAnalysis(blockTransfer)
+ val t = timer.stop
+
+ /* Now that `forwardAnalysis(blockTransfer)` has finished, all inlining candidates can be found in `remainingCALLs`,
+ whose keys are callsites and whose values are pieces of information about the typestack just before the callsite in question.
+ In order to keep `analyzeMethod()` simple, we collect in `preCandidates` those basic blocks containing at least one candidate. */
+ preCandidates.clear()
+ for(rc <- remainingCALLs) {
+ preCandidates += rc._2.bb
+ }
+
+ if (settings.debug.value) {
+ for(b <- callerLin; if (b != method.startBlock) && preCandidates(b)) {
+ assert(visited.contains(b),
+ "Block " + b + " in " + this.method + " has input equal to bottom -- not visited? .." + visited)
+ }
+ }
+
+ }
+
+ var shrinkedWatchlist = false
+
+ /*
+ This is the method where information cached elsewhere is put to use. References are given those other places that populate those caches.
+
+       The goal is to avoid computing type-flows for blocks we don't need (i.e. blocks not tracked in `relevantBBs`). The method used to add to `relevantBBs` is `putOnRadar`.
+
+       Moreover, it's often the case that the last CALL_METHOD of interest ("of interest" equating to "tracked in `isOnWatchlist`") isn't the last instruction of the block.
+       There are cases where the typeflows computed past this `lastInstruction` are needed, and cases where they aren't.
+       The reasoning behind this decision is described in `populatePerimeter()`. All `blockTransfer()` needs to do (in order to know at which instruction it can stop)
+       is query `isOnPerimeter`.
+
+ Upon visiting a CALL_METHOD that's an inlining candidate, the relevant pieces of information about the pre-instruction typestack are collected for future use.
+       That is, unless the candidacy test fails. The reasoning here is: if such an early check fails at some iteration, there's no chance a follow-up iteration
+ (with a yet more lub-ed typestack-slot) will succeed. In case of failure we can safely remove the CALL_METHOD from both `isOnWatchlist` and `remainingCALLs`.
+
+ */
+ override def blockTransfer(b: BasicBlock, in: lattice.Elem): lattice.Elem = {
+ var result = lattice.IState(new VarBinding(in.vars), new TypeStack(in.stack))
+
+ val stopAt = if(isOnPerimeter(b)) lastInstruction(b) else null;
+ var isPastLast = false
+
+ var instrs = b.toList
+ while(!isPastLast && !instrs.isEmpty) {
+ val i = instrs.head
+
+ if(isOnWatchlist(i)) {
+ val cm = i.asInstanceOf[opcodes.CALL_METHOD]
+ val msym = cm.method
+ val paramsLength = msym.info.paramTypes.size
+ val receiver = result.stack.types.drop(paramsLength).head match {
+ case REFERENCE(s) => s
+ case _ => NoSymbol // e.g. the scrutinee is BOX(s) or ARRAY
+ }
+ val concreteMethod = inliner.lookupImplFor(msym, receiver)
+ val isCandidate = {
+ ( inliner.isClosureClass(receiver) || concreteMethod.isEffectivelyFinal || receiver.isEffectivelyFinal ) &&
+ !blackballed(concreteMethod)
+ }
+ if(isCandidate) {
+ remainingCALLs += Pair(cm, CallsiteInfo(b, receiver, result.stack.length, concreteMethod))
+ } else {
+ remainingCALLs.remove(cm)
+ isOnWatchlist.remove(cm)
+ shrinkedWatchlist = true
+ }
+ }
+
+ isPastLast = (i eq stopAt)
+
+ if(!isPastLast) {
+ result = mutatingInterpret(result, i)
+ instrs = instrs.tail
+ }
+ }
+
+ result
+ } // end of method blockTransfer
+
+ val isOnWatchlist = mutable.Set.empty[Instruction]
+
+ /* Each time CallerCalleeInfo.isSafeToInline determines a concrete callee is unsafe to inline in the current caller,
+       the fact is recorded in this TFA instance so as to avoid devoting any further processing to that callsite next time.
+ The condition of "being unsafe to inline in the current caller" sticks across inlinings and TFA re-inits
+ because it depends on the instructions of the callee, which stay unchanged during the course of `analyzeInc(caller)`
+ (with the caveat of the side-effecting `makePublic` in `helperIsSafeToInline`).*/
+ val knownUnsafe = mutable.Set.empty[Symbol]
+ val knownSafe = mutable.Set.empty[Symbol]
+    val knownNever  = mutable.Set.empty[Symbol] // `knownNever` needs to be cleared only at the very end of the inlining phase (unlike `knownUnsafe` and `knownSafe`)
+ @inline final def blackballed(msym: Symbol): Boolean = { knownUnsafe(msym) || knownNever(msym) }
+
+ val relevantBBs = mutable.Set.empty[BasicBlock]
+
+ private def isPreCandidate(cm: opcodes.CALL_METHOD): Boolean = {
+ val msym = cm.method
+ val style = cm.style
+ // Dynamic == normal invocations
+ // Static(true) == calls to private members
+ !msym.isConstructor && !blackballed(msym) &&
+ (style.isDynamic || (style.hasInstance && style.isStatic))
+ // && !(msym hasAnnotation definitions.ScalaNoInlineClass)
+ }
+
+ override def init(m: icodes.IMethod) {
+ super.init(m)
+ remainingCALLs.clear()
+ knownUnsafe.clear()
+ knownSafe.clear()
+ // initially populate the watchlist with all callsites standing a chance of being inlined
+ isOnWatchlist.clear()
+ relevantBBs.clear()
+ /* TODO Do we want to perform inlining in non-finally exception handlers?
+       * Seems counterproductive (the larger the method, the less likely it will be JITed).
+       * Putting on radar only `linearizer linearizeAt (m, m.startBlock)` doesn't make for much shorter inlining times (only a minor speedup),
+       * but the effect on method size could be explored. */
+ putOnRadar(m.linearizedBlocks(linearizer))
+ populatePerimeter()
+ assert(relevantBBs.isEmpty || relevantBBs.contains(m.startBlock), "you gave me dead code")
+ }
+
+ def conclusives(b: BasicBlock): List[opcodes.CALL_METHOD] = {
+ knownBeforehand(b) filter { cm => inliner.isMonadicMethod(cm.method) || inliner.hasInline(cm.method) }
+ }
+
+ def knownBeforehand(b: BasicBlock): List[opcodes.CALL_METHOD] = {
+ b.toList collect { case c : opcodes.CALL_METHOD => c } filter { cm => isPreCandidate(cm) && isReceiverKnown(cm) }
+ }
+
+ private def isReceiverKnown(cm: opcodes.CALL_METHOD): Boolean = {
+ cm.method.isEffectivelyFinal && cm.method.owner.isEffectivelyFinal
+ }
+
+ private def putOnRadar(blocks: Traversable[BasicBlock]) {
+ for(bb <- blocks) {
+ val preCands = bb.toList collect {
+ case cm : opcodes.CALL_METHOD
+ if isPreCandidate(cm) /* && !isReceiverKnown(cm) */
+ => cm
+ }
+ isOnWatchlist ++= preCands
+ }
+ relevantBBs ++= blocks
+ }
+
+ /* the argument is also included in the result */
+ private def transitivePreds(b: BasicBlock): Set[BasicBlock] = { transitivePreds(List(b)) }
+
+ /* those BBs in the argument are also included in the result */
+ private def transitivePreds(starters: Traversable[BasicBlock]): Set[BasicBlock] = {
+ val result = mutable.Set.empty[BasicBlock]
+ var toVisit: List[BasicBlock] = starters.toList.distinct
+ while(toVisit.nonEmpty) {
+ val h = toVisit.head
+ toVisit = toVisit.tail
+ result += h
+ for(p <- h.predecessors; if !result(p) && !toVisit.contains(p)) { toVisit = p :: toVisit }
+ }
+ result.toSet
+ }
+
+ /* those BBs in the argument are also included in the result */
+ private def transitiveSuccs(starters: Traversable[BasicBlock]): Set[BasicBlock] = {
+ val result = mutable.Set.empty[BasicBlock]
+ var toVisit: List[BasicBlock] = starters.toList.distinct
+ while(toVisit.nonEmpty) {
+ val h = toVisit.head
+ toVisit = toVisit.tail
+ result += h
+ for(p <- h.successors; if !result(p) && !toVisit.contains(p)) { toVisit = p :: toVisit }
+ }
+ result.toSet
+ }
+
+ /* A basic block B is "on the perimeter" of the current control-flow subgraph if none of its successors belongs to that subgraph.
+ * In that case, for the purposes of inlining, we're interested in the typestack right before the last inline candidate in B, not in those afterwards.
+ * In particular we can do without computing the outflow at B. */
+ private def populatePerimeter() {
+ isOnPerimeter.clear()
+ var done = true
+ do {
+ val (frontier, toPrune) = (relevantBBs filter hasNoRelevantSuccs) partition isWatching
+ isOnPerimeter ++= frontier
+ relevantBBs --= toPrune
+ done = toPrune.isEmpty
+ } while(!done)
+
+ lastInstruction.clear()
+ for(b <- isOnPerimeter; val lastIns = b.toList.reverse find isOnWatchlist) {
+ lastInstruction += (b -> lastIns.get.asInstanceOf[opcodes.CALL_METHOD])
+ }
+
+ // assertion: "no relevant block can have a predecessor that is on perimeter"
+ assert((for (b <- relevantBBs; if transitivePreds(b.predecessors) exists isOnPerimeter) yield b).isEmpty)
+ }
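An illustrative reading of the computation above (block names are hypothetical):

    // Suppose relevantBBs = {B1, B2, B3} with B1 -> B2 -> B3, and none of B3's
    // successors is relevant. If B3 contains a watched CALL_METHOD then B3 is kept
    // and lands on the perimeter: isOnPerimeter(B3) holds, lastInstruction(B3) is
    // that CALL_METHOD, and blockTransfer stops interpreting B3 right there, so
    // out(B3) is never computed.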
+
+ private val isOnPerimeter = mutable.Set.empty[BasicBlock]
+ private val lastInstruction = mutable.Map.empty[BasicBlock, opcodes.CALL_METHOD]
+
+ def hasNoRelevantSuccs(x: BasicBlock): Boolean = { !(x.successors exists relevantBBs) }
+
+ def isWatching(x: BasicBlock): Boolean = (x.toList exists isOnWatchlist)
+
+
+
+
+ /**
+
+ This method is invoked after one or more inlinings have been performed in basic blocks whose in-flow is non-bottom (this makes a difference later).
+ What we know about those inlinings is given by:
+
+ - `staleOut`: These are the blocks where a callsite was inlined.
+ For each callsite, all instructions in that block before the callsite were left in the block, and the rest moved to an `afterBlock`.
+ The out-flow of these basic blocks is thus in general stale, that's why we'll add them to the TFA worklist.
+
+ - `inlined` : These blocks were spliced into the method's CFG as part of inlining. Being new blocks, they haven't been visited yet by the typeflow analysis.
+
+         - `staleIn`  : These blocks are what `doInline()` calls `afterBlock`s, i.e. the new home for instructions that previously appeared
+ after a callsite in a `staleOut` block.
+
+ Based on the above information, we have to bring up-to-date the caches that `forwardAnalysis` and `blockTransfer` use to skip blocks and instructions.
+ Those caches are `relevantBBs` and `isOnPerimeter` (for blocks) and `isOnWatchlist` and `lastInstruction` (for CALL_METHODs).
+ Please notice that all `inlined` and `staleIn` blocks are reachable from `staleOut` blocks.
+
+ The update takes place in two steps:
+
+ (1) `staleOut foreach { so => putOnRadar(linearizer linearizeAt (m, so)) }`
+ This results in initial populations for `relevantBBs` and `isOnWatchlist`.
+ Because of the way `isPreCandidate` reuses previous decision-outcomes that are still valid,
+ this already prunes some candidates standing no chance of being inlined.
+
+ (2) `populatePerimeter()`
+ Based on the CFG-subgraph determined in (1) as reflected in `relevantBBs`,
+ this method detects some blocks whose typeflows aren't needed past a certain CALL_METHOD
+ (not needed because none of its successors is relevant for the purposes of inlining, see `hasNoRelevantSuccs`).
+ The blocks thus chosen are said to be "on the perimeter" of the CFG-subgraph.
+ For each of them, its `lastInstruction` (after which no more typeflows are needed) is found.
+
+ */
def reinit(m: icodes.IMethod, staleOut: List[BasicBlock], inlined: collection.Set[BasicBlock], staleIn: collection.Set[BasicBlock]) {
if (this.method == null || this.method.symbol != m.symbol) {
init(m)
@@ -633,31 +705,102 @@ abstract class TypeFlowAnalysis {
return;
}
- reinit {
- // asserts conveying an idea what CFG shapes arrive here.
- // staleIn foreach (p => assert( !in.isDefinedAt(p), p))
- // staleIn foreach (p => assert(!out.isDefinedAt(p), p))
- // inlined foreach (p => assert( !in.isDefinedAt(p), p))
- // inlined foreach (p => assert(!out.isDefinedAt(p), p))
- // inlined foreach (p => assert(!p.successors.isEmpty || p.lastInstruction.isInstanceOf[icodes.opcodes.THROW], p))
- // staleOut foreach (p => assert( in.isDefinedAt(p), p))
-
- // never rewrite in(m.startBlock)
- staleOut foreach { b =>
- if(!inlined.contains(b)) { worklist += b }
- out(b) = typeFlowLattice.bottom
- }
- // nothing else is added to the worklist, bb's reachable via succs will be tfa'ed
- blankOut(inlined)
- blankOut(staleIn)
- // no need to add startBlocks from m.exh
+ worklist.clear // calling reinit(f: => Unit) would also clear visited, thus forgetting about blocks visited before reinit.
+
+ // asserts conveying an idea what CFG shapes arrive here:
+ // staleIn foreach (p => assert( !in.isDefinedAt(p), p))
+ // staleIn foreach (p => assert(!out.isDefinedAt(p), p))
+ // inlined foreach (p => assert( !in.isDefinedAt(p), p))
+ // inlined foreach (p => assert(!out.isDefinedAt(p), p))
+ // inlined foreach (p => assert(!p.successors.isEmpty || p.lastInstruction.isInstanceOf[icodes.opcodes.THROW], p))
+ // staleOut foreach (p => assert( in.isDefinedAt(p), p))
+
+ // remainingCALLs.clear()
+ isOnWatchlist.clear()
+ relevantBBs.clear()
+
+ // never rewrite in(m.startBlock)
+ staleOut foreach { b =>
+ enqueue(b)
+ out(b) = typeFlowLattice.bottom
}
+ // nothing else is added to the worklist, bb's reachable via succs will be tfa'ed
+ blankOut(inlined)
+ blankOut(staleIn)
+ // no need to add startBlocks from m.exh
+
+ staleOut foreach { so => putOnRadar(linearizer linearizeAt (m, so)) }
+ populatePerimeter()
+
+ } // end of method reinit
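A hedged sketch of the call pattern expected from Inliner (names are assumptions, not part of this patch):

    // after splicing callee blocks into caller `m` at a callsite in block `b`:
    //   tfa.reinit(m, staleOut = List(b), inlined = splicedBlocks, staleIn = Set(afterBlock))
    //   tfa.run   // resumes the pruned forward analysis from the stale blocks only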
+
+ /* this is not a general purpose method to add to the worklist,
+ * because the assert is expected to hold only when called from MTFAGrowable.reinit() */
+ private def enqueue(b: BasicBlock) {
+ assert(in(b) ne typeFlowLattice.bottom)
+ if(!worklist.contains(b)) { worklist += b }
+ }
+
+ /* this is not a general purpose method to add to the worklist,
+ * because the assert is expected to hold only when called from MTFAGrowable.reinit() */
+ private def enqueue(bs: Traversable[BasicBlock]) {
+ bs foreach enqueue
}
private def blankOut(blocks: collection.Set[BasicBlock]) {
blocks foreach { b =>
- in(b) = typeFlowLattice.bottom
- out(b) = typeFlowLattice.bottom
+ in(b) = typeFlowLattice.bottom
+ out(b) = typeFlowLattice.bottom
+ }
+ }
+
+ /*
+ This is basically the plain-old forward-analysis part of a dataflow algorithm,
+ adapted to skip non-relevant blocks (as determined by `reinit()` via `populatePerimeter()`).
+
+ The adaptations are:
+
+ - only relevant blocks dequeued from the worklist move on to have the transfer function applied
+
+ - `visited` now means the transfer function was applied to the block,
+         but please notice that this no longer implies its out-flow is different from bottom,
+         because a block on the perimeter will have per-instruction typeflows computed only up to its `lastInstruction`.
+         In case you need to know whether a visited block `v` has been "fully visited", evaluate `out(v) ne typeFlowLattice.bottom`
+
+ - given that the transfer function may remove callsite-candidates from the watchlist (thus, they are not candidates anymore)
+ there's an opportunity to detect whether a previously relevant block has been left without candidates.
+         That's what `shrinkedWatchlist` detects. Provided the block was on the perimeter, we know we can skip it from now on,
+ and we can also constrain the CFG-subgraph by finding a new perimeter (thus the invocation to `populatePerimeter()`).
+ */
+ override def forwardAnalysis(f: (P, lattice.Elem) => lattice.Elem): Unit = {
+ while (!worklist.isEmpty && relevantBBs.nonEmpty) {
+ if (stat) iterations += 1
+ val point = worklist.iterator.next; worklist -= point;
+ if(relevantBBs(point)) {
+ shrinkedWatchlist = false
+ val output = f(point, in(point))
+ visited += point;
+ if(isOnPerimeter(point)) {
+ if(shrinkedWatchlist && !isWatching(point)) {
+ relevantBBs -= point;
+ populatePerimeter()
+ }
+ } else {
+ val propagate = ((lattice.bottom == out(point)) || output != out(point))
+ if (propagate) {
+ out(point) = output
+ val succs = point.successors filter relevantBBs
+ succs foreach { p =>
+ assert((p.predecessors filter isOnPerimeter).isEmpty)
+ val updated = lattice.lub(List(output, in(p)), p.exceptionHandlerStart)
+ if(updated != in(p)) {
+ in(p) = updated
+ enqueue(p)
+ }
+ }
+ }
+ }
+ }
}
}
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala b/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
index 865bacffaa..32177c309a 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BytecodeWriters.scala
@@ -23,9 +23,7 @@ trait BytecodeWriters {
import global._
private def outputDirectory(sym: Symbol): AbstractFile = (
- settings.outputDirs.outputDirFor {
- atPhase(currentRun.flattenPhase.prev)(sym.sourceFile)
- }
+ settings.outputDirs.outputDirFor(beforeFlatten(sym.sourceFile))
)
private def getFile(base: AbstractFile, cls: JClass, suffix: String): AbstractFile = {
var dir = base
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala b/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
index 9de0ec6610..50e84a81a8 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/GenJVM.scala
@@ -37,11 +37,9 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
/** Create a new phase */
override def newPhase(p: Phase): Phase = new JvmPhase(p)
- private def outputDirectory(sym: Symbol): AbstractFile = (
- settings.outputDirs.outputDirFor {
- atPhase(currentRun.flattenPhase.prev)(sym.sourceFile)
- }
- )
+ private def outputDirectory(sym: Symbol): AbstractFile =
+ settings.outputDirs outputDirFor beforeFlatten(sym.sourceFile)
+
private def getFile(base: AbstractFile, cls: JClass, suffix: String): AbstractFile = {
var dir = base
val pathParts = cls.getName().split("[./]").toList
@@ -87,7 +85,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
// succeed or warn that it isn't.
hasApproximate && {
// Before erasure so we can identify generic mains.
- atPhase(currentRun.erasurePhase) {
+ beforeErasure {
val companion = sym.linkedClassOfClass
val companionMain = companion.tpe.member(nme.main)
@@ -161,7 +159,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
}
val codeGenerator = new BytecodeGenerator(bytecodeWriter)
- log("Created new bytecode generator for " + classes.size + " classes.")
+ debuglog("Created new bytecode generator for " + classes.size + " classes.")
sortedClasses foreach { c =>
try codeGenerator.genClass(c)
@@ -212,7 +210,8 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
final val ExcludedForwarderFlags = {
import Flags._
- ( CASE | SPECIALIZED | LIFTED | PROTECTED | STATIC | BridgeAndPrivateFlags )
+ // Should include DEFERRED but this breaks findMember.
+ ( CASE | SPECIALIZED | LIFTED | PROTECTED | STATIC | EXPANDEDNAME | BridgeAndPrivateFlags )
}
// Additional interface parents based on annotations and other cues
@@ -272,7 +271,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
* of inner class all until root class.
*/
def collectInnerClass(s: Symbol): Unit = {
- // TODO: something atPhase(currentRun.flattenPhase.prev) which accounts for
+ // TODO: something like beforeFlatten { ... } which accounts for
// being nested in parameterized classes (if we're going to selectively flatten.)
val x = innerClassSymbolFor(s)
val isInner = x.isClass && !x.rawowner.isPackageClass
@@ -393,7 +392,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
// it must be a top level class (name contains no $s)
def isCandidateForForwarders(sym: Symbol): Boolean =
- atPhase(currentRun.picklerPhase.next) {
+ afterPickler {
!(sym.name.toString contains '$') && sym.hasModuleFlag && !sym.isImplClass && !sym.isNestedClass
}
@@ -433,7 +432,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
private def addEnclosingMethodAttribute(jclass: JClass, clazz: Symbol) {
val sym = clazz.originalEnclosingMethod
if (sym.isMethod) {
- log("enclosing method for %s is %s (in %s)".format(clazz, sym, sym.enclClass))
+ debuglog("enclosing method for %s is %s (in %s)".format(clazz, sym, sym.enclClass))
jclass addAttribute fjbgContext.JEnclosingMethodAttribute(
jclass,
javaName(sym.enclClass),
@@ -449,7 +448,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
enclClass, clazz)
)
else {
- log("enclosing method for %s is %s (in %s)".format(clazz, sym, enclClass))
+ debuglog("enclosing method for %s is %s (in %s)".format(clazz, sym, enclClass))
jclass addAttribute fjbgContext.JEnclosingMethodAttribute(
jclass,
javaName(enclClass),
@@ -681,7 +680,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
)
def addGenericSignature(jmember: JMember, sym: Symbol, owner: Symbol) {
if (needsGenericSignature(sym)) {
- val memberTpe = atPhase(currentRun.erasurePhase)(owner.thisType.memberInfo(sym))
+ val memberTpe = beforeErasure(owner.thisType.memberInfo(sym))
// println("addGenericSignature sym: " + sym.fullName + " : " + memberTpe + " sym.info: " + sym.info)
// println("addGenericSignature: "+ (sym.ownerChain map (x => (x.name, x.isImplClass))))
erasure.javaSig(sym, memberTpe) foreach { sig =>
@@ -700,7 +699,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
return
}
if ((settings.check.value contains "genjvm")) {
- val normalizedTpe = atPhase(currentRun.erasurePhase)(erasure.prepareSigMap(memberTpe))
+ val normalizedTpe = beforeErasure(erasure.prepareSigMap(memberTpe))
val bytecodeTpe = owner.thisType.memberInfo(sym)
if (!sym.isType && !sym.isConstructor && !(erasure.erasure(sym, normalizedTpe) =:= bytecodeTpe)) {
clasz.cunit.warning(sym.pos,
@@ -716,9 +715,8 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
}
val index = jmember.getConstantPool.addUtf8(sig).toShort
if (opt.verboseDebug)
- atPhase(currentRun.erasurePhase) {
- println("add generic sig "+sym+":"+sym.info+" ==> "+sig+" @ "+index)
- }
+ beforeErasure(println("add generic sig "+sym+":"+sym.info+" ==> "+sig+" @ "+index))
+
val buf = ByteBuffer.allocate(2)
buf putShort index
addAttribute(jmember, tpnme.SignatureATTR, buf)
@@ -793,14 +791,14 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
innerSym.rawname + innerSym.moduleSuffix
// add inner classes which might not have been referenced yet
- atPhase(currentRun.erasurePhase.next) {
+ afterErasure {
for (sym <- List(clasz.symbol, clasz.symbol.linkedClassOfClass); m <- sym.info.decls.map(innerClassSymbolFor) if m.isClass)
innerClassBuffer += m
}
val allInners = innerClassBuffer.toList
if (allInners.nonEmpty) {
- log(clasz.symbol.fullName('.') + " contains " + allInners.size + " inner classes.")
+ debuglog(clasz.symbol.fullName('.') + " contains " + allInners.size + " inner classes.")
val innerClassesAttr = jclass.getInnerClasses()
// sort them so inner classes succeed their enclosing class
// to satisfy the Eclipse Java compiler
@@ -1231,7 +1229,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
val jtype = javaType(method).asInstanceOf[JMethodType]
def emit(invoke: String) {
- log("%s %s %s.%s:%s".format(invoke, receiver.accessString, jowner, jname, jtype))
+ debuglog("%s %s %s.%s:%s".format(invoke, receiver.accessString, jowner, jname, jtype))
invoke match {
case "invokeinterface" => jcode.emitINVOKEINTERFACE(jowner, jname, jtype)
case "invokevirtual" => jcode.emitINVOKEVIRTUAL(jowner, jname, jtype)
@@ -1917,7 +1915,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
if (sym.isInterface) ACC_INTERFACE else 0,
if (finalFlag) ACC_FINAL else 0,
if (sym.isStaticMember) ACC_STATIC else 0,
- if (sym.isBridge) ACC_BRIDGE else 0,
+ if (sym.isBridge) ACC_BRIDGE | ACC_SYNTHETIC else 0,
if (sym.isClass && !sym.isInterface) ACC_SUPER else 0,
if (sym.isVarargsMethod) ACC_VARARGS else 0
)
@@ -1931,9 +1929,7 @@ abstract class GenJVM extends SubComponent with GenJVMUtil with GenAndroid with
}
def isTopLevelModule(sym: Symbol): Boolean =
- atPhase (currentRun.picklerPhase.next) {
- sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass
- }
+ afterPickler { sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass }
def isStaticModule(sym: Symbol): Boolean = {
sym.isModuleClass && !sym.isImplClass && !sym.isLifted
diff --git a/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala b/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
index d2e54ff3f1..2fb615f893 100644
--- a/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
+++ b/src/compiler/scala/tools/nsc/backend/msil/GenMSIL.scala
@@ -1125,7 +1125,7 @@ abstract class GenMSIL extends SubComponent {
}
// method: implicit view(FunctionX[PType0, PType1, ...,PTypeN, ResType]):DelegateType
- val (isDelegateView, paramType, resType) = atPhase(currentRun.typerPhase) {
+ val (isDelegateView, paramType, resType) = beforeTyper {
msym.tpe match {
case MethodType(params, resultType)
if (params.length == 1 && msym.name == nme.view_) =>
@@ -1954,7 +1954,7 @@ abstract class GenMSIL extends SubComponent {
} // createClassMembers0
private def isTopLevelModule(sym: Symbol): Boolean =
- atPhase (currentRun.refchecksPhase) {
+ beforeRefchecks {
sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass
}
diff --git a/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala b/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
index 5fc7329955..95c371fa8b 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/DeadCodeElimination.scala
@@ -225,9 +225,9 @@ abstract class DeadCodeElimination extends SubComponent {
m foreachBlock { bb =>
assert(bb.closed, "Open block in computeCompensations")
- for ((i, idx) <- bb.toList.zipWithIndex) {
+ foreachWithIndex(bb.toList) { (i, idx) =>
if (!useful(bb)(idx)) {
- for ((consumedType, depth) <- i.consumedTypes.reverse.zipWithIndex) {
+ foreachWithIndex(i.consumedTypes.reverse) { (consumedType, depth) =>
log("Finding definitions of: " + i + "\n\t" + consumedType + " at depth: " + depth)
val defs = rdef.findDefs(bb, idx, 1, depth)
for (d <- defs) {
diff --git a/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala b/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
index 4598141e8b..09be682123 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/Inliners.scala
@@ -38,6 +38,33 @@ abstract class Inliners extends SubComponent {
res
}
+ /** Look up the implementation of method `sym` in `clazz`.
+ */
+ def lookupImplFor(sym: Symbol, clazz: Symbol): Symbol = {
+ // TODO: verify that clazz.superClass is equivalent here to clazz.tpe.parents(0).typeSymbol (.tpe vs .info)
+ def needsLookup = (
+ (clazz != NoSymbol)
+ && (clazz != sym.owner)
+ && !sym.isEffectivelyFinal
+ && clazz.isEffectivelyFinal
+ )
+ def lookup(clazz: Symbol): Symbol = {
+ // println("\t\tlooking up " + meth + " in " + clazz.fullName + " meth.owner = " + meth.owner)
+ if (sym.owner == clazz || isBottomType(clazz)) sym
+ else sym.overridingSymbol(clazz) match {
+ case NoSymbol => if (sym.owner.isTrait) sym else lookup(clazz.superClass)
+ case imp => imp
+ }
+ }
+ if (needsLookup) {
+ val concreteMethod = lookup(clazz)
+ debuglog("\tlooked up method: " + concreteMethod.fullName)
+
+ concreteMethod
+ }
+ else sym
+ }
+
/* A warning threshold */
private final val MAX_INLINE_MILLIS = 2000
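Since `lookupImplFor` is the basis for deciding which concrete body gets inlined, a toy rendition of its superclass walk may help; `Clazz` and the string-named methods are hypothetical stand-ins for compiler Symbols.

    // Toy model of lookupImplFor's walk up the superclass chain.
    object LookupImplSketch {
      final case class Clazz(name: String, parent: Option[Clazz],
                             overrides: Set[String] = Set.empty)

      // First class from `receiver` upwards that overrides `method` wins;
      // reaching the declaring `owner` (or the root) falls back to owner's version.
      def lookupImpl(method: String, owner: Clazz, receiver: Clazz): Clazz = {
        @annotation.tailrec
        def walk(c: Clazz): Clazz =
          if (c == owner || c.overrides(method)) c
          else c.parent match {
            case Some(p) => walk(p)
            case None    => owner
          }
        walk(receiver)
      }

      def main(args: Array[String]): Unit = {
        val a = Clazz("A", None)                              // declares f
        val b = Clazz("B", Some(a), overrides = Set("f"))     // overrides f
        val c = Clazz("C", Some(b))                           // inherits B.f
        assert(lookupImpl("f", owner = a, receiver = c).name == "B")
      }
    }

The real lookup also bails out early via `needsLookup` (receiver known, distinct from the owner, method not final, receiver class effectively final), which the toy omits.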
@@ -67,8 +94,7 @@ abstract class Inliners extends SubComponent {
try {
super.run()
} finally {
- inliner.NonPublicRefs.usesNonPublics.clear()
- inliner.recentTFAs.clear
+ inliner.clearCaches()
}
}
}
@@ -80,6 +106,21 @@ abstract class Inliners extends SubComponent {
def isClosureClass(cls: Symbol): Boolean =
cls.isFinal && cls.isSynthetic && !cls.isModuleClass && cls.isAnonymousFunction
+ /*
+ TODO now that the Inliner runs faster we could consider additional "monadic methods" (in the limit, all those taking a closure as last argument).
+ Any "monadic method" occurring in a given caller C that is not recognized by `isMonadicMethod()` will prevent CloseElim from eliminating
+ any anonymous closure class whose instances are passed as arguments at its invocations in C.
+ */
+ def isMonadicMethod(sym: Symbol) = {
+ nme.unspecializedName(sym.name) match {
+ case nme.foreach | nme.filter | nme.withFilter | nme.map | nme.flatMap => true
+ case _ => false
+ }
+ }
+
+ def hasInline(sym: Symbol) = sym hasAnnotation ScalaInlineClass
+ def hasNoInline(sym: Symbol) = sym hasAnnotation ScalaNoInlineClass
+
/**
* Simple inliner.
*/
@@ -92,9 +133,6 @@ abstract class Inliners extends SubComponent {
}
import NonPublicRefs._
- private def hasInline(sym: Symbol) = sym hasAnnotation ScalaInlineClass
- private def hasNoInline(sym: Symbol) = sym hasAnnotation ScalaNoInlineClass
-
/** The current iclass */
private var currentIClazz: IClass = _
private def warn(pos: Position, msg: String) = currentIClazz.cunit.warning(pos, msg)
@@ -121,6 +159,21 @@ abstract class Inliners extends SubComponent {
(hasRETURN, a)
}
+ def clearCaches() {
+ // methods
+ NonPublicRefs.usesNonPublics.clear()
+ recentTFAs.clear
+ tfa.knownUnsafe.clear()
+ tfa.knownSafe.clear()
+ tfa.knownNever.clear()
+ // basic blocks
+ tfa.preCandidates.clear()
+ tfa.relevantBBs.clear()
+ // callsites
+ tfa.remainingCALLs.clear()
+ tfa.isOnWatchlist.clear()
+ }
+
def analyzeClass(cls: IClass): Unit =
if (settings.inline.value) {
debuglog("Analyzing " + cls)
@@ -142,7 +195,38 @@ abstract class Inliners extends SubComponent {
val splicedBlocks = mutable.Set.empty[BasicBlock]
val staleIn = mutable.Set.empty[BasicBlock]
+ /**
+ * A transformation local to the body of the method received as argument.
+ * An inlining decision consists in replacing a callsite with the body of the callee.
+ * Please notice that, because `analyzeMethod()` itself may modify a method body,
+ * the particular callee bodies that end up being inlined depend on the particular order in which methods are visited
+ * (no topological ordering over the call-graph is attempted).
+ *
+ * Making an inlining decision requires type-flow information for both caller and callee.
+ * Regarding the caller, such information is needed only for basic blocks containing inlining candidates
+ * (and their transitive predecessors). This observation leads to using a custom type-flow analysis (MTFAGrowable)
+ * that can be re-inited, i.e. that reuses lattice elements (type-flow information) computed in a previous iteration
+ * as starting point for faster convergence in a new iteration.
+ *
+ * The mechanics of inlining are iterative for a given invocation of `analyzeMethod(m)`,
+ * with later iterations also considering the basic blocks that successful inlinings added in earlier ones:
+ *
+ * (1) before the iterations proper start, so-called preinlining is performed.
+ * Those callsites whose (receiver, concreteMethod) are both known statically
+ * can be analyzed for inlining before computing a type-flow. Details in `preInline()`
+ *
+ * (2) the first iteration computes type-flow information for basic blocks containing inlining candidates
+ * (and their transitive predecessors), the so-called `relevantBBs`.
+ * The ensuing analysis of each candidate (performed by `analyzeInc()`)
+ * may result in a CFG isomorphic to that of the callee being inserted where the callsite was
+ * (i.e. a CALL_METHOD instruction is replaced with a single-entry single-exit CFG, which we call "successful inlining").
+ *
+ * (3) following iterations have their relevant basic blocks updated to focus
+ * on the inlined basic blocks and their successors only. Details in `MTFAGrowable.reinit()`
+ */
def analyzeMethod(m: IMethod): Unit = {
+ // m.normalize
+
var sizeBeforeInlining = m.code.blockCount
var instrBeforeInlining = m.code.instructionCount
var retry = false
@@ -154,17 +238,53 @@ abstract class Inliners extends SubComponent {
val inlinedMethodCount = mutable.HashMap.empty[Symbol, Int] withDefaultValue 0
val caller = new IMethodInfo(m)
- var info: tfa.lattice.Elem = null
- def analyzeInc(msym: Symbol, i: Instruction, bb: BasicBlock): Boolean = {
- var inlined = false
- def paramTypes = msym.info.paramTypes
- val receiver = (info.stack.types drop paramTypes.length) match {
- case Nil => log("analyzeInc(" + msym + "), no type on the stack!") ; NoSymbol
- case REFERENCE(s) :: _ => s
- case _ => NoSymbol
+ def preInline(isFirstRound: Boolean): Int = {
+ val inputBlocks = caller.m.linearizedBlocks()
+ val callsites: Function1[BasicBlock, List[opcodes.CALL_METHOD]] = {
+ if(isFirstRound) tfa.conclusives else tfa.knownBeforehand
}
- val concreteMethod = lookupImplFor(msym, receiver)
+ inlineWithoutTFA(inputBlocks, callsites)
+ }
+
+ /**
+ * Inline straightforward callsites (those that can be inlined without a TFA).
+ *
+ * To perform inlining, all we need to know is listed as the formal params of `analyzeInc()`:
+ * - the callsite and the block containing it
+ * - the actual (i.e. runtime) class of the receiver
+ * - the actual (i.e. runtime) method being invoked
+ * - the stack length just before the callsite (to check whether enough arguments have been pushed).
+ * The assert below lists the conditions under which "no TFA is needed"
+ * (the statically known receiver and method are both final, so at runtime they cannot be anything other than those).
+ */
+ def inlineWithoutTFA(inputBlocks: Traversable[BasicBlock], callsites: Function1[BasicBlock, List[opcodes.CALL_METHOD]]): Int = {
+ var inlineCount = 0
+ import scala.util.control.Breaks._
+ for(x <- inputBlocks; val easyCake = callsites(x); if easyCake.nonEmpty) {
+ breakable {
+ for(ocm <- easyCake) {
+ assert(ocm.method.isEffectivelyFinal && ocm.method.owner.isEffectivelyFinal)
+ if(analyzeInc(ocm, x, ocm.method.owner, -1, ocm.method)) {
+ inlineCount += 1
+ break
+ }
+ }
+ }
+ }
+
+ inlineCount
+ }
+
+ /**
+ Decides whether it's feasible and desirable to inline the body of the method given by `concreteMethod`
+ at the program point given by `i` (a callsite). The boolean result indicates whether inlining was performed.
+ */
+ def analyzeInc(i: CALL_METHOD, bb: BasicBlock, receiver: Symbol, stackLength: Int, concreteMethod: Symbol): Boolean = {
+ var inlined = false
+ val msym = i.method
def warnNoInline(reason: String) = {
if (hasInline(msym) && !caller.isBridge)
@@ -209,7 +329,7 @@ abstract class Inliners extends SubComponent {
val inc = new IMethodInfo(callee)
val pair = new CallerCalleeInfo(caller, inc, fresh, inlinedMethodCount)
- if (pair isStampedForInlining info.stack) {
+ if (pair isStampedForInlining stackLength) {
retry = true
inlined = true
if (isCountable)
@@ -228,9 +348,9 @@ abstract class Inliners extends SubComponent {
}
else {
if (settings.debug.value)
- pair logFailure info.stack
+ pair logFailure stackLength
- warnNoInline(pair failureReason info.stack)
+ warnNoInline(pair failureReason stackLength)
}
case None =>
warnNoInline("bytecode was not available")
@@ -241,38 +361,96 @@ abstract class Inliners extends SubComponent {
if (!isAvailable) "bytecode was not available"
else "it can be overridden"
)
+
inlined
}
- import scala.util.control.Breaks._
+ /* Pre-inlining consists in invoking the usual inlining subroutine with (receiver class, concrete method) pairs as input
+ * where both method and receiver are final, which implies that the receiver computed via TFA will always match `concreteMethod.owner`.
+ *
+ * As with any invocation of `analyzeInc()`, the inlining outcome is based on heuristics which favor inlining monadic methods (see `isMonadicMethod`) before other methods.
+ * That's why preInline() is invoked twice: any inlinings downplayed by the heuristics during the first round get an opportunity to rank higher during the second.
+ *
+ * As a whole, both `preInline()` invocations amount to priming the inlining process,
+ * so that the first TFA run afterwards is able to gain more information as compared to a cold-start.
+ */
+ val totalPreInlines = {
+ val firstRound = preInline(true)
+ if(firstRound == 0) 0 else (firstRound + preInline(false))
+ }
+ staleOut.clear()
+ splicedBlocks.clear()
+ staleIn.clear()
+
do {
retry = false
log("Analyzing " + m + " count " + count + " with " + caller.length + " blocks")
+
+ /* it's important not to inline in unreachable basic blocks. linearizedBlocks() returns only reachable ones. */
+ tfa.callerLin = caller.m.linearizedBlocks()
+ /* TODO Do we want to perform inlining in non-finally exception handlers?
+ * Seems counterproductive (the larger the method the less likely it will be JITed).
+ * The alternative above would be `linearizer.linearizeAt(caller.m, caller.m.startBlock)`.
+ * See also comment on the same topic in TypeFlowAnalysis. */
+
tfa.reinit(m, staleOut.toList, splicedBlocks, staleIn)
tfa.run
staleOut.clear()
splicedBlocks.clear()
staleIn.clear()
- caller.m.linearizedBlocks() foreach { bb =>
- info = tfa in bb
-
+ import scala.util.control.Breaks._
+ for(bb <- tfa.callerLin; if tfa.preCandidates(bb)) {
+ val cms = bb.toList collect { case cm : CALL_METHOD => cm }
breakable {
- for (i <- bb) {
- i match {
- // Dynamic == normal invocations
- // Static(true) == calls to private members
- case CALL_METHOD(msym, Dynamic | Static(true)) if !msym.isConstructor =>
- if (analyzeInc(msym, i, bb)) {
- break
- }
- case _ => ()
+ for (cm <- cms; if tfa.remainingCALLs.isDefinedAt(cm)) {
+ val analysis.CallsiteInfo(_, receiver, stackLength, concreteMethod) = tfa.remainingCALLs(cm)
+ if (analyzeInc(cm, bb, receiver, stackLength, concreteMethod)) {
+ break
}
- info = tfa.interpret(info, i)
}
}
+ }
+
+ /* As part of inlining, some instructions are moved to a new block.
+ * In detail: the instructions moved to a new block originally appeared after a (by now inlined) callsite.
+ * Their new home is an `afterBlock` created by `doInline()` to that effect.
+ * Each block in staleIn is one such `afterBlock`.
+ *
+ * Some of those instructions may be CALL_METHODs, possibly tracked in `remainingCALLs`
+ * (with an entry still noting the old containing block). However, that causes no problem:
+ *
+ * (1) such callsites won't be analyzed for inlining by `analyzeInc()` (*in this iteration*)
+ * because of the `break` that abandons the original basic block where they were contained.
+ *
+ * (2) Additionally, their new containing block won't be visited either (*in this iteration*)
+ * because the new blocks don't show up in the linearization computed before inlinings started:
+ * `for(bb <- tfa.callerLin; if tfa.preCandidates(bb)) {`
+ *
+ * In the next iteration, the new home of any instructions that have moved
+ * will be tracked properly in `remainingCALLs` once `MTFAGrowable.reinit()` puts their new homes on the radar.
+ *
+ */
+ if(retry) {
+ for(afterBlock <- staleIn) {
+ val justCALLsAfter = afterBlock.toList collect { case c : opcodes.CALL_METHOD => c }
+ for(ia <- justCALLsAfter) { tfa.remainingCALLs.remove(ia) }
+ }
+ }
+ /*
+ if(splicedBlocks.nonEmpty) { // TODO explore (saves time but leads to slightly different inlining decisions)
+ // opportunistically perform straightforward inlinings before the next typeflow round
+ val savedRetry = retry
+ val savedStaleOut = staleOut.toSet; staleOut.clear()
+ val savedStaleIn = staleIn.toSet ; staleIn.clear()
+ val howmany = inlineWithoutTFA(splicedBlocks, tfa.knownBeforehand)
+ splicedBlocks ++= staleIn
+ staleOut.clear(); staleOut ++= savedStaleOut;
+ staleIn.clear(); staleIn ++= savedStaleIn;
+ retry = savedRetry
}
+ */
if (tfa.stat)
log(m.symbol.fullName + " iterations: " + tfa.iterations + " (size: " + caller.length + ")")
@@ -288,15 +466,10 @@ abstract class Inliners extends SubComponent {
}
}
- private def isMonadicMethod(sym: Symbol) = {
- nme.unspecializedName(sym.name) match {
- case nme.foreach | nme.filter | nme.withFilter | nme.map | nme.flatMap => true
- case _ => false
- }
- }
-
- private def isHigherOrderMethod(sym: Symbol) =
- sym.isMethod && atPhase(currentRun.erasurePhase.prev)(sym.info.paramTypes exists isFunctionType)
+ private def isHigherOrderMethod(sym: Symbol) = (
+ sym.isMethod
+ && beforeExplicitOuter(sym.info.paramTypes exists isFunctionType) // was "at erasurePhase.prev"
+ )
/** Should method 'sym' being called in 'receiver' be loaded from disk? */
def shouldLoadImplFor(sym: Symbol, receiver: Symbol): Boolean = {
@@ -308,33 +481,6 @@ abstract class Inliners extends SubComponent {
res
}
- /** Look up implementation of method 'sym in 'clazz'.
- */
- def lookupImplFor(sym: Symbol, clazz: Symbol): Symbol = {
- // TODO: verify that clazz.superClass is equivalent here to clazz.tpe.parents(0).typeSymbol (.tpe vs .info)
- def needsLookup = (
- (clazz != NoSymbol)
- && (clazz != sym.owner)
- && !sym.isEffectivelyFinal
- && clazz.isEffectivelyFinal
- )
- def lookup(clazz: Symbol): Symbol = {
- // println("\t\tlooking up " + meth + " in " + clazz.fullName + " meth.owner = " + meth.owner)
- if (sym.owner == clazz || isBottomType(clazz)) sym
- else sym.overridingSymbol(clazz) match {
- case NoSymbol => if (sym.owner.isTrait) sym else lookup(clazz.superClass)
- case imp => imp
- }
- }
- if (needsLookup) {
- val concreteMethod = lookup(clazz)
- debuglog("\tlooked up method: " + concreteMethod.fullName)
-
- concreteMethod
- }
- else sym
- }
-
class IMethodInfo(val m: IMethod) {
val sym = m.symbol
val name = sym.name
@@ -386,10 +532,13 @@ abstract class Inliners extends SubComponent {
/** Inline 'inc' into 'caller' at the given block and instruction.
* The instruction must be a CALL_METHOD.
*/
- def doInline(block: BasicBlock, instr: Instruction) {
+ def doInline(block: BasicBlock, instr: CALL_METHOD) {
staleOut += block
+ tfa.remainingCALLs.remove(instr) // this bookkeeping is done here and not in MTFAGrowable.reinit due to (1st) convenience and (2nd) necessity.
+ tfa.isOnWatchlist.remove(instr) // ditto
+
val targetPos = instr.pos
log("Inlining " + inc.m + " in " + caller.m + " at pos: " + posToStr(targetPos))
@@ -403,9 +552,9 @@ abstract class Inliners extends SubComponent {
val activeHandlers = caller.handlers filter (_ covered block)
/* Map 'original' blocks to the ones inlined in the caller. */
- val inlinedBlock: mutable.Map[BasicBlock, BasicBlock] = new mutable.HashMap
+ val inlinedBlock = mutable.Map[BasicBlock, BasicBlock]()
- val varsInScope: mutable.Set[Local] = mutable.HashSet() ++= block.varsInScope
+ val varsInScope = mutable.HashSet[Local]() ++= block.varsInScope
/** Side effects varsInScope when it sees SCOPE_ENTERs. */
def instrBeforeFilter(i: Instruction): Boolean = {
@@ -560,7 +709,7 @@ abstract class Inliners extends SubComponent {
def isStampedForInlining(stackLength: Int) =
!sameSymbols && inc.m.hasCode && shouldInline && isSafeToInline(stackLength) && !inc.m.symbol.isSynchronized
- def logFailure(stack: TypeStack) = log(
+ def logFailure(stackLength: Int) = log(
"""|inline failed for %s:
| pair.sameSymbols: %s
| inc.numInlined < 2: %s
@@ -569,11 +718,11 @@ abstract class Inliners extends SubComponent {
| shouldInline: %s
""".stripMargin.format(
inc.m, sameSymbols, inlinedMethodCount(inc.sym) < 2,
- inc.m.hasCode, isSafeToInline(stack), shouldInline
+ inc.m.hasCode, isSafeToInline(stackLength), shouldInline
)
)
- def failureReason(stack: TypeStack) =
+ def failureReason(stackLength: Int) =
if (!inc.m.hasCode) "bytecode was unavailable"
else if (!isSafeToInline(stackLength)) "it is unsafe (target may reference private fields)"
else if (inc.m.symbol.isSynchronized) "method is synchronized"
@@ -588,15 +737,26 @@ abstract class Inliners extends SubComponent {
private def sameOwner = caller.owner == inc.owner
/** A method is safe to inline when:
- * - it does not contain calls to private methods when
- * called from another class
+ * - it does not contain calls to private methods when called from another class
* - it is not inlined into a position with non-empty stack,
* while having a top-level finalizer (see liftedTry problem)
* - it is not recursive
* Note:
* - synthetic private members are made public in this pass.
*/
- def isSafeToInline(stack: TypeStack): Boolean = {
+ def isSafeToInline(stackLength: Int): Boolean = {
+
+ if(tfa.blackballed(inc.sym)) { return false }
+ if(tfa.knownSafe(inc.sym)) { return true }
+
+ if(helperIsSafeToInline(stackLength)) {
+ tfa.knownSafe += inc.sym; true
+ } else {
+ tfa.knownUnsafe += inc.sym; false
+ }
+ }
+
+ private def helperIsSafeToInline(stackLength: Int): Boolean = {
def makePublic(f: Symbol): Boolean =
(inc.m.sourceFile ne NoSourceFile) && (f.isSynthetic || f.isParamAccessor) && {
debuglog("Making not-private symbol out of synthetic: " + f)
@@ -643,9 +803,10 @@ abstract class Inliners extends SubComponent {
})
canAccess(accessNeeded) && {
- val isIllegalStack = (stack.length > inc.minimumStack && inc.hasNonFinalizerHandler)
+ val isIllegalStack = (stackLength > inc.minimumStack && inc.hasNonFinalizerHandler)
+
!isIllegalStack || {
- debuglog("method " + inc.sym + " is used on a non-empty stack with finalizer. Stack: " + stack)
+ debuglog("method " + inc.sym + " is used on a non-empty stack with finalizer.")
false
}
}
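Condensing the three numbered steps from the `analyzeMethod` comment above into runnable shape, with the type-flow analysis and the inlining test reduced to toys (every name below is illustrative, not the actual driver):

    // Toy driver mirroring analyzeMethod's structure: two priming rounds of
    // pre-inlining, then TFA + inlining rounds until no callsite is inlined.
    object InlinerLoopSketch {
      final case class Callsite(id: Int, staticallyDecidable: Boolean)
      private var pending =
        List(Callsite(1, staticallyDecidable = true), Callsite(2, false), Callsite(3, false))

      def preInline(): Int = {                 // (1) inline what needs no TFA
        val (easy, rest) = pending.partition(_.staticallyDecidable)
        pending = rest
        easy.size
      }
      def typeFlow(): Unit = ()                // (2) stands in for tfa.reinit + tfa.run
      def tryInline(c: Callsite): Boolean = {  // stands in for analyzeInc
        pending = pending.filterNot(_ == c); true
      }

      def main(args: Array[String]): Unit = {
        val primed = { val first = preInline(); if (first == 0) 0 else first + preInline() }
        var retry = true
        while (retry && pending.nonEmpty) {    // (3) iterate to a fixpoint
          retry = false
          typeFlow()
          pending.headOption foreach { c => if (tryInline(c)) retry = true }
        }
        assert(primed == 1 && pending.isEmpty)
      }
    }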
diff --git a/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala b/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
index bd890b7194..395757237b 100644
--- a/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
+++ b/src/compiler/scala/tools/nsc/dependencies/DependencyAnalysis.scala
@@ -145,10 +145,8 @@ trait DependencyAnalysis extends SubComponent with Files {
val name = d.toString
d.symbol match {
case s : ModuleClassSymbol =>
- val isTopLevelModule =
- atPhase (currentRun.picklerPhase.next) {
- !s.isImplClass && !s.isNestedClass
- }
+ val isTopLevelModule = afterPickler { !s.isImplClass && !s.isNestedClass }
+
if (isTopLevelModule && (s.companionModule != NoSymbol)) {
dependencies.emits(source, nameToFile(unit.source.file, name))
}
@@ -182,16 +180,18 @@ trait DependencyAnalysis extends SubComponent with Files {
|| (tree.symbol.sourceFile.path != file.path))
&& (!tree.symbol.isClassConstructor)) {
updateReferences(tree.symbol.fullName)
- atPhase(currentRun.uncurryPhase.prev) {
- checkType(tree.symbol.tpe)
- }
+ // was "at uncurryPhase.prev", which is actually non-deterministic
+ // because the continuations plugin may or may not supply uncurry's
+ // immediately preceding phase.
+ beforeRefchecks(checkType(tree.symbol.tpe))
}
tree match {
case cdef: ClassDef if !cdef.symbol.hasPackageFlag &&
!cdef.symbol.isAnonymousFunction =>
if (cdef.symbol != NoSymbol) buf += cdef.symbol
- atPhase(currentRun.erasurePhase.prev) {
+ // was "at erasurePhase.prev"
+ beforeExplicitOuter {
for (s <- cdef.symbol.info.decls)
s match {
case ts: TypeSymbol if !ts.isClass =>
@@ -202,9 +202,8 @@ trait DependencyAnalysis extends SubComponent with Files {
super.traverse(tree)
case ddef: DefDef =>
- atPhase(currentRun.typerPhase.prev) {
- checkType(ddef.symbol.tpe)
- }
+ // was "at typer.prev"
+ beforeTyper { checkType(ddef.symbol.tpe) }
super.traverse(tree)
case a @ Select(q, n) if ((a.symbol != NoSymbol) && (q.symbol != null)) => // #2556
if (!a.symbol.isConstructor &&
diff --git a/src/compiler/scala/tools/nsc/interactive/Global.scala b/src/compiler/scala/tools/nsc/interactive/Global.scala
index 477cec8c8e..0f28407f5a 100644
--- a/src/compiler/scala/tools/nsc/interactive/Global.scala
+++ b/src/compiler/scala/tools/nsc/interactive/Global.scala
@@ -1052,6 +1052,7 @@ class Global(settings: Settings, _reporter: Reporter, projectName: String = "")
def newTyperRun() {
currentTyperRun = new TyperRun
+ perRunCaches.clearAll()
}
class TyperResult(val tree: Tree) extends ControlThrowable
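`perRunCaches` shows up twice in this commit: caches created through it (as in Constructors below) can all be dropped with a single `clearAll()`, which the interactive compiler now does per typer run. A minimal registry with that behavior might look as follows; the real one also references its caches weakly so they remain collectable.

    import scala.collection.mutable

    // Minimal per-run cache registry: every cache created here registers a
    // clearing thunk, and clearAll() resets them all between runs.
    object PerRunCachesSketch {
      private val clearers = mutable.Buffer[() => Unit]()

      def newMap[K, V](): mutable.Map[K, V] = {
        val m = mutable.Map[K, V]()
        clearers += (() => m.clear())
        m
      }
      def newSet[T](): mutable.Set[T] = {
        val s = mutable.Set[T]()
        clearers += (() => s.clear())
        s
      }
      def clearAll(): Unit = clearers foreach (_())
    }

With `guardedCtorStats` and `ctorParams` registered this way (see the Constructors hunk below), stale entries cannot leak from one run into the next.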
diff --git a/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala b/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
index f251fd83fb..bad181eb76 100644
--- a/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
+++ b/src/compiler/scala/tools/nsc/interactive/RefinedBuildManager.scala
@@ -22,6 +22,7 @@ import scala.tools.util.PathResolver
* changes require a compilation. It repeats this process until
* a fixpoint is reached.
*/
+@deprecated("Use sbt incremental compilation mechanism", "2.10.0")
class RefinedBuildManager(val settings: Settings) extends Changes with BuildManager {
class BuilderGlobal(settings: Settings, reporter : Reporter) extends scala.tools.nsc.Global(settings, reporter) {
@@ -47,7 +48,7 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
protected def newCompiler(settings: Settings) = new BuilderGlobal(settings)
val compiler = newCompiler(settings)
- import compiler.{Symbol, Type, atPhase, currentRun}
+ import compiler.{ Symbol, Type, beforeErasure }
import compiler.dependencyAnalysis.Inherited
private case class SymWithHistory(sym: Symbol, befErasure: Type)
@@ -159,10 +160,8 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
isCorrespondingSym(s.sym, sym)) match {
case Some(SymWithHistory(oldSym, info)) =>
val changes = changeSet(oldSym.info, sym)
- val changesErasure =
- atPhase(currentRun.erasurePhase.prev) {
- changeSet(info, sym)
- }
+ val changesErasure = beforeErasure(changeSet(info, sym))
+
changesOf(oldSym) = (changes ++ changesErasure).distinct
case _ =>
// a new top level definition
@@ -332,11 +331,7 @@ class RefinedBuildManager(val settings: Settings) extends Changes with BuildMana
for (src <- files; localDefs = compiler.dependencyAnalysis.definitions(src)) {
definitions(src) = (localDefs map (s => {
this.classes += s.fullName -> src
- SymWithHistory(
- s.cloneSymbol,
- atPhase(currentRun.erasurePhase.prev) {
- s.info.cloneInfo(s)
- })
+ SymWithHistory(s.cloneSymbol, beforeErasure(s.info.cloneInfo(s)))
}))
}
this.references = compiler.dependencyAnalysis.references
diff --git a/src/compiler/scala/tools/nsc/interpreter/ILoop.scala b/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
index 7c71438b98..16085c07d6 100644
--- a/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/ILoop.scala
@@ -324,7 +324,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
private def implicitsCommand(line: String): Result = {
val intp = ILoop.this.intp
import intp._
- import global.Symbol
+ import global.{ Symbol, afterTyper }
def p(x: Any) = intp.reporter.printMessage("" + x)
@@ -348,7 +348,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
// This groups the members by where the symbol is defined
val byOwner = syms groupBy (_.owner)
- val sortedOwners = byOwner.toList sortBy { case (owner, _) => intp.afterTyper(source.info.baseClasses indexOf owner) }
+ val sortedOwners = byOwner.toList sortBy { case (owner, _) => afterTyper(source.info.baseClasses indexOf owner) }
sortedOwners foreach {
case (owner, members) =>
@@ -440,7 +440,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
else {
val tp = intp.typeOfExpression(line, false)
if (tp == NoType) "" // the error message was already printed
- else intp.afterTyper(tp.toString)
+ else intp.global.afterTyper(tp.toString)
}
}
private def warningsCommand(): Result = {
diff --git a/src/compiler/scala/tools/nsc/interpreter/IMain.scala b/src/compiler/scala/tools/nsc/interpreter/IMain.scala
index de408f083f..9a12bc1471 100644
--- a/src/compiler/scala/tools/nsc/interpreter/IMain.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/IMain.scala
@@ -230,9 +230,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
} with MemberHandlers
import memberHandlers._
- def atPickler[T](op: => T): T = atPhase(currentRun.picklerPhase)(op)
- def afterTyper[T](op: => T): T = atPhase(currentRun.typerPhase.next)(op)
-
/** Temporarily be quiet */
def beQuietDuring[T](body: => T): T = {
val saved = printResults
@@ -787,10 +784,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
}
def compile(source: String): Boolean = compileAndSaveRun("<console>", source)
- def lineAfterTyper[T](op: => T): T = {
- assert(lastRun != null, "Internal error: trying to use atPhase, but Run is null." + this)
- atPhase(lastRun.typerPhase.next)(op)
- }
/** The innermost object inside the wrapper, found by
* following accessPath into the outer one.
@@ -799,7 +792,7 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
val readRoot = getRequiredModule(readPath) // the outermost wrapper
(accessPath split '.').foldLeft(readRoot) { (sym, name) =>
if (name == "") sym else
- lineAfterTyper(sym.info member newTermName(name))
+ afterTyper(sym.info member newTermName(name))
}
}
/** We get a bunch of repeated warnings for reasons I haven't
@@ -842,7 +835,6 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
// private
class Request(val line: String, val trees: List[Tree]) {
val lineRep = new ReadEvalPrint()
- import lineRep.lineAfterTyper
private var _originalLine: String = null
def withOriginalLine(s: String): this.type = { _originalLine = s ; this }
@@ -961,7 +953,7 @@ class IMain(initialSettings: Settings, protected val out: JPrintWriter) extends
}
lazy val resultSymbol = lineRep.resolvePathToSymbol(accessPath)
- def applyToResultMember[T](name: Name, f: Symbol => T) = lineAfterTyper(f(resultSymbol.info.nonPrivateDecl(name)))
+ def applyToResultMember[T](name: Name, f: Symbol => T) = afterTyper(f(resultSymbol.info.nonPrivateDecl(name)))
/* typeOf lookup with encoding */
def lookupTypeOf(name: Name) = typeOf.getOrElse(name, typeOf(global.encode(name.toString)))
diff --git a/src/compiler/scala/tools/nsc/interpreter/Imports.scala b/src/compiler/scala/tools/nsc/interpreter/Imports.scala
index d34ca8bbca..073501912a 100644
--- a/src/compiler/scala/tools/nsc/interpreter/Imports.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/Imports.scala
@@ -191,5 +191,5 @@ trait Imports {
prevRequestList flatMap (req => req.handlers map (req -> _))
private def membersAtPickler(sym: Symbol): List[Symbol] =
- atPickler(sym.info.nonPrivateMembers)
+ beforePickler(sym.info.nonPrivateMembers)
} \ No newline at end of file
diff --git a/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala b/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
index d96e8b07fc..0e2c34efbf 100644
--- a/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/JLineCompletion.scala
@@ -18,7 +18,7 @@ class JLineCompletion(val intp: IMain) extends Completion with CompletionOutput
import global._
import definitions.{ PredefModule, RootClass, AnyClass, AnyRefClass, ScalaPackage, JavaLangPackage, getModuleIfDefined }
type ExecResult = Any
- import intp.{ debugging, afterTyper }
+ import intp.{ debugging }
// verbosity goes up with consecutive tabs
private var verbosity: Int = 0
diff --git a/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala b/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
index c742ab89c0..7e032753f2 100644
--- a/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/MemberHandlers.scala
@@ -13,7 +13,7 @@ import scala.reflect.internal.Chars
trait MemberHandlers {
val intp: IMain
- import intp.{ Request, global, naming, atPickler }
+ import intp.{ Request, global, naming }
import global._
import naming._
@@ -118,8 +118,9 @@ trait MemberHandlers {
class DefHandler(member: DefDef) extends MemberDefHandler(member) {
private def vparamss = member.vparamss
- // true if 0-arity
- override def definesValue = vparamss.isEmpty || vparamss.head.isEmpty
+ private def isMacro = member.mods.hasFlag(scala.reflect.internal.Flags.MACRO)
+ // true if not a macro and 0-arity
+ override def definesValue = !isMacro && (vparamss.isEmpty || vparamss.head.isEmpty)
override def resultExtractionCode(req: Request) =
if (mods.isPublic) codegenln(name, ": ", req.typeOf(name)) else ""
}
@@ -199,10 +200,10 @@ trait MemberHandlers {
def importedSymbols = individualSymbols ++ wildcardSymbols
lazy val individualSymbols: List[Symbol] =
- atPickler(individualNames map (targetType nonPrivateMember _))
+ beforePickler(individualNames map (targetType nonPrivateMember _))
lazy val wildcardSymbols: List[Symbol] =
- if (importsWildcard) atPickler(targetType.nonPrivateMembers)
+ if (importsWildcard) beforePickler(targetType.nonPrivateMembers)
else Nil
/** Complete list of names imported by a wildcard */
diff --git a/src/compiler/scala/tools/nsc/interpreter/Power.scala b/src/compiler/scala/tools/nsc/interpreter/Power.scala
index 835fbb5638..ef84876b94 100644
--- a/src/compiler/scala/tools/nsc/interpreter/Power.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/Power.scala
@@ -15,6 +15,31 @@ import scala.io.Codec
import java.net.{ URL, MalformedURLException }
import io.{ Path }
+/** Collecting some power mode examples.
+
+scala> trait F[@specialized(Int) T] { def f: T = ??? }
+defined trait F
+
+scala> trait G[@specialized(Long, Int) T] extends F[T] { override def f: T = super.f }
+defined trait G
+
+scala> changesAfterEachPhase(intp("G").info.members filter (_.name.toString contains "super")) >
+Gained after 1/parser {
+ method super$f
+}
+
+Gained after 12/specialize {
+ method super$f$mcJ$sp
+ method super$f$mcI$sp
+}
+
+Lost after 18/flatten {
+ method super$f$mcJ$sp
+ method super$f$mcI$sp
+ method super$f
+}
+*/
+
/** A class for methods to be injected into the intp in power mode.
*/
class Power[ReplValsImpl <: ReplVals : Manifest](val intp: IMain, replVals: ReplValsImpl) {
diff --git a/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala b/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
index 6e5dec4205..b20017c1d3 100644
--- a/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
+++ b/src/compiler/scala/tools/nsc/interpreter/ReplVals.scala
@@ -62,10 +62,12 @@ object ReplVals {
class AppliedTypeFromManifests(sym: Symbol) {
def apply[M](implicit m1: Manifest[M]): Type =
- appliedType(sym.typeConstructor, List(m1) map (x => manifestToType(x).asInstanceOf[Type]))
+ if (sym eq NoSymbol) NoType
+ else appliedType(sym.typeConstructor, List(m1) map (x => manifestToType(x).asInstanceOf[Type]))
def apply[M1, M2](implicit m1: Manifest[M1], m2: Manifest[M2]): Type =
- appliedType(sym.typeConstructor, List(m1, m2) map (x => manifestToType(x).asInstanceOf[Type]))
+ if (sym eq NoSymbol) NoType
+ else appliedType(sym.typeConstructor, List(m1, m2) map (x => manifestToType(x).asInstanceOf[Type]))
}
(sym: Symbol) => new AppliedTypeFromManifests(sym)
diff --git a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
index 0c94e40d68..06b06c50a6 100644
--- a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
+++ b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
@@ -393,8 +393,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
// assumed true unless we see public/private/protected
var isPackageAccess = true
var annots: List[Tree] = Nil
- def addAnnot(sym: Symbol) =
- annots :+= New(TypeTree(sym.tpe), List(Nil))
+ def addAnnot(sym: Symbol) = annots :+= New(sym.tpe)
while (true) {
in.token match {
@@ -654,15 +653,12 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
// leaves auxiliary constructors unable to access members of the companion object
// as unqualified identifiers.
def addCompanionObject(statics: List[Tree], cdef: ClassDef): List[Tree] = {
- def implWithImport(importStmt: Tree) = {
- import cdef.impl._
- treeCopy.Template(cdef.impl, parents, self, importStmt :: body)
- }
+ def implWithImport(importStmt: Tree) = deriveTemplate(cdef.impl)(importStmt :: _)
// if there are no statics we can use the original cdef, but we always
// create the companion so import A._ is not an error (see ticket #1700)
val cdefNew =
if (statics.isEmpty) cdef
- else treeCopy.ClassDef(cdef, cdef.mods, cdef.name, cdef.tparams, implWithImport(importCompanionObject(cdef)))
+ else deriveClassDef(cdef)(_ => implWithImport(importCompanionObject(cdef)))
List(makeCompanionObject(cdefNew, statics), cdefNew)
}
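`deriveTemplate`, `deriveClassDef`, `deriveDefDef` and friends appear all over this commit in place of arity-heavy `treeCopy.X(...)` calls. On a toy AST the pattern is simply "copy the node, rewriting one field"; the sketch below is a stand-in, with the real helpers doing the same over the actual Tree classes.

    // The derive* pattern on a toy AST: copy a node while transforming one field.
    object DeriveSketch {
      final case class Template(parents: List[String], body: List[String])
      final case class ClassDef(mods: Int, name: String, impl: Template)

      def deriveTemplate(t: Template)(f: List[String] => List[String]): Template =
        t.copy(body = f(t.body))
      def deriveClassDef(c: ClassDef)(f: Template => Template): ClassDef =
        c.copy(impl = f(c.impl))

      def main(args: Array[String]): Unit = {
        val cd = ClassDef(0, "C", Template(List("AnyRef"), List("def x = 1")))
        // cf. JavaParsers above: prepend the companion-object import to the body
        val cd2 = deriveClassDef(cd)(impl => deriveTemplate(impl)("import C._" :: _))
        assert(cd2.impl.body == List("import C._", "def x = 1"))
      }
    }

`deriveDefDef(stat)(Literal(c) setPos _.pos setType tp)` in the Constructors hunk below reads the same way: keep everything, swap only the rhs.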
diff --git a/src/compiler/scala/tools/nsc/settings/MutableSettings.scala b/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
index f99d1399c0..e7959f36b2 100644
--- a/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
+++ b/src/compiler/scala/tools/nsc/settings/MutableSettings.scala
@@ -533,7 +533,7 @@ class MutableSettings(val errorFn: String => Unit)
Some(rest)
}
override def tryToSetColon(args: List[String]) = tryToSet(args)
- override def tryToSetFromPropertyValue(s: String) = tryToSet(s.trim.split(" +").toList)
+ override def tryToSetFromPropertyValue(s: String) = tryToSet(s.trim.split(',').toList)
def unparse: List[String] = value map { name + ":" + _ }
withHelpSyntax(name + ":<" + arg + ">")
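One consequence of the change above: a multi-string setting supplied through a system property is now tokenized on commas instead of runs of spaces. The difference, isolated from the settings machinery (file names here are made up):

    // Old vs. new tokenization of a property value for a multi-string setting.
    object PropertySplitSketch extends App {
      val prop = "foo.jar,bar.jar"
      val oldTokens = prop.trim.split(" +").toList // whitespace: List("foo.jar,bar.jar")
      val newTokens = prop.trim.split(',').toList  // commas:     List("foo.jar", "bar.jar")
      assert(oldTokens == List("foo.jar,bar.jar"))
      assert(newTokens == List("foo.jar", "bar.jar"))
    }

Values containing spaces thus survive intact, while previously space-separated property values now need commas.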
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
index a8083d7a2d..a7ddfae819 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
@@ -424,19 +424,21 @@ abstract class ClassfileParser {
def forceMangledName(name: Name, module: Boolean): Symbol = {
val parts = name.decode.toString.split(Array('.', '$'))
var sym: Symbol = definitions.RootClass
- atPhase(currentRun.flattenPhase.prev) {
+
+ // was "at flatten.prev"
+ beforeFlatten {
for (part0 <- parts; if !(part0 == ""); part = newTermName(part0)) {
- val sym1 = atPhase(currentRun.icodePhase) {
+ val sym1 = beforeIcode {
sym.linkedClassOfClass.info
sym.info.decl(part.encode)
}//.suchThat(module == _.isModule)
- if (sym1 == NoSymbol)
- sym = sym.info.decl(part.encode.toTypeName)
- else
- sym = sym1
+
+ sym = (
+ if (sym1 ne NoSymbol) sym1
+ else sym.info.decl(part.encode.toTypeName)
+ )
}
}
-// println("found: " + sym)
sym
}
@@ -1205,7 +1207,7 @@ abstract class ClassfileParser {
// if loading during initialization of `definitions` typerPhase is not yet set.
// in that case we simply load the member at the current phase
if (currentRun.typerPhase != null)
- atPhase(currentRun.typerPhase)(getMember(sym, innerName.toTypeName))
+ beforeTyper(getMember(sym, innerName.toTypeName))
else
getMember(sym, innerName.toTypeName)
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
index 7d42dabc08..68af518d3a 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
@@ -179,7 +179,7 @@ abstract class ICodeReader extends ClassfileParser {
}
else {
forceMangledName(name, false)
- atPhase(currentRun.flattenPhase.next)(definitions.getClass(name))
+ afterFlatten(definitions.getClass(name.toTypeName))
}
if (sym.isModule)
sym.moduleClass
diff --git a/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala b/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
index b4ec8a23ce..1abaf1c1d6 100644
--- a/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
+++ b/src/compiler/scala/tools/nsc/transform/AddInterfaces.scala
@@ -82,7 +82,9 @@ abstract class AddInterfaces extends InfoTransform {
implClassMap.getOrElse(iface, {
atPhase(implClassPhase) {
- log("%s.implClass == %s".format(iface, iface.implClass))
+ if (iface.implClass ne NoSymbol)
+ log("%s.implClass == %s".format(iface, iface.implClass))
+
val implName = nme.implClassName(iface.name)
var impl = if (iface.owner.isClass) iface.owner.info.decl(implName) else NoSymbol
@@ -193,7 +195,7 @@ abstract class AddInterfaces extends InfoTransform {
case PolyType(_, restpe) =>
implType(restpe)
}
- sym setInfo implType(atPhase(currentRun.erasurePhase)(iface.info))
+ sym setInfo implType(beforeErasure(iface.info))
}
override def load(clazz: Symbol) { complete(clazz) }
@@ -316,13 +318,11 @@ abstract class AddInterfaces extends InfoTransform {
override def transform(tree: Tree): Tree = {
val sym = tree.symbol
val tree1 = tree match {
- case ClassDef(mods, name, tparams, impl) if (sym.needsImplClass) =>
+ case ClassDef(mods, _, _, impl) if sym.needsImplClass =>
implClass(sym).initialize // to force lateDEFERRED flags
- treeCopy.ClassDef(tree, mods | INTERFACE, name, tparams, ifaceTemplate(impl))
- case DefDef(mods, name, tparams, vparamss, tpt, rhs)
- if (sym.isClassConstructor && sym.isPrimaryConstructor && sym.owner != ArrayClass) =>
- treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt,
- addMixinConstructorCalls(rhs, sym.owner)) // (3)
+ copyClassDef(tree)(mods = mods | INTERFACE, impl = ifaceTemplate(impl))
+ case DefDef(_,_,_,_,_,_) if sym.isClassConstructor && sym.isPrimaryConstructor && sym.owner != ArrayClass =>
+ deriveDefDef(tree)(addMixinConstructorCalls(_, sym.owner)) // (3)
case Template(parents, self, body) =>
val parents1 = sym.owner.info.parents map (t => TypeTree(t) setPos tree.pos)
treeCopy.Template(tree, parents1, emptyValDef, body)
@@ -339,7 +339,7 @@ abstract class AddInterfaces extends InfoTransform {
val mix1 = mix
if (mix == tpnme.EMPTY) mix
else {
- val ps = atPhase(currentRun.erasurePhase) {
+ val ps = beforeErasure {
sym.info.parents dropWhile (p => p.symbol.name != mix)
}
assert(!ps.isEmpty, tree);
diff --git a/src/compiler/scala/tools/nsc/transform/CleanUp.scala b/src/compiler/scala/tools/nsc/transform/CleanUp.scala
index 50e6139e65..a6ecb16b43 100644
--- a/src/compiler/scala/tools/nsc/transform/CleanUp.scala
+++ b/src/compiler/scala/tools/nsc/transform/CleanUp.scala
@@ -47,7 +47,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
val Template(parents, self, body) = tree
clearStatics()
val newBody = transformTrees(body)
- val templ = treeCopy.Template(tree, parents, self, transformTrees(newStaticMembers.toList) ::: newBody)
+ val templ = deriveTemplate(tree)(_ => transformTrees(newStaticMembers.toList) ::: newBody)
try addStaticInits(templ) // postprocess to include static ctors
finally clearStatics()
}
@@ -85,6 +85,11 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
case "poly-cache" => POLY_CACHE
}
+ def shouldRewriteTry(tree: Try) = {
+ val sym = tree.tpe.typeSymbol
+ forMSIL && (sym != UnitClass) && (sym != NothingClass)
+ }
+
private def typedWithPos(pos: Position)(tree: Tree) =
localTyper.typedPos(pos)(tree)
@@ -560,8 +565,7 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
* Hence, we here rewrite all try blocks with a result != {Unit, All} such that they
* store their result in a local variable. The catch blocks are adjusted as well.
* The try tree is substituted by a block whose result expression is read from that variable. */
- case theTry @ Try(block, catches, finalizer)
- if theTry.tpe.typeSymbol != definitions.UnitClass && theTry.tpe.typeSymbol != definitions.NothingClass =>
+ case theTry @ Try(block, catches, finalizer) if shouldRewriteTry(theTry) =>
val tpe = theTry.tpe.widen
val tempVar = currentOwner.newVariable(mkTerm(nme.EXCEPTION_RESULT_PREFIX), theTry.pos).setInfo(tpe)
def assignBlock(rhs: Tree) = super.transform(BLOCK(Ident(tempVar) === transform(rhs)))
@@ -669,9 +673,9 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
val newCtor = findStaticCtor(template) match {
// in case there already were static ctors - augment existing ones
// currently, however, static ctors aren't being generated anywhere else
- case Some(ctor @ DefDef(mods, name, tparams, vparamss, tpt, rhs)) =>
+ case Some(ctor @ DefDef(_,_,_,_,_,_)) =>
// modify existing static ctor
- val newBlock = rhs match {
+ deriveDefDef(ctor) {
case block @ Block(stats, expr) =>
// need to add inits to existing block
treeCopy.Block(block, newStaticInits.toList ::: stats, expr)
@@ -679,15 +683,14 @@ abstract class CleanUp extends Transform with ast.TreeDSL {
// need to create a new block with inits and the old term
treeCopy.Block(term, newStaticInits.toList, term)
}
- treeCopy.DefDef(ctor, mods, name, tparams, vparamss, tpt, newBlock)
case None =>
// create new static ctor
val staticCtorSym = currentClass.newStaticConstructor(template.pos)
- val rhs = Block(newStaticInits.toList, Literal(Constant()))
+ val rhs = Block(newStaticInits.toList, Literal(Constant(())))
localTyper.typedPos(template.pos)(DefDef(staticCtorSym, rhs))
}
- treeCopy.Template(template, template.parents, template.self, newCtor :: template.body)
+ deriveTemplate(template)(newCtor :: _)
}
}
diff --git a/src/compiler/scala/tools/nsc/transform/Constructors.scala b/src/compiler/scala/tools/nsc/transform/Constructors.scala
index d1c71faf1e..d8f19f85c0 100644
--- a/src/compiler/scala/tools/nsc/transform/Constructors.scala
+++ b/src/compiler/scala/tools/nsc/transform/Constructors.scala
@@ -24,8 +24,8 @@ abstract class Constructors extends Transform with ast.TreeDSL {
protected def newTransformer(unit: CompilationUnit): Transformer =
new ConstructorTransformer(unit)
- private val guardedCtorStats: mutable.Map[Symbol, List[Tree]] = new mutable.HashMap[Symbol, List[Tree]]
- private val ctorParams: mutable.Map[Symbol, List[Symbol]] = new mutable.HashMap[Symbol, List[Symbol]]
+ private val guardedCtorStats: mutable.Map[Symbol, List[Tree]] = perRunCaches.newMap[Symbol, List[Tree]]
+ private val ctorParams: mutable.Map[Symbol, List[Symbol]] = perRunCaches.newMap[Symbol, List[Symbol]]
class ConstructorTransformer(unit: CompilationUnit) extends Transformer {
@@ -126,7 +126,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
if (from.name != nme.OUTER) result
else localTyper.typedPos(to.pos) {
- IF (from OBJ_EQ NULL) THEN THROW(NullPointerExceptionClass) ELSE result
+ IF (from OBJ_EQ NULL) THEN Throw(NullPointerExceptionClass.tpe) ELSE result
}
}
@@ -164,20 +164,18 @@ abstract class Constructors extends Transform with ast.TreeDSL {
// Triage all template definitions to go into defBuf/auxConstructorBuf, constrStatBuf, or constrPrefixBuf.
for (stat <- stats) stat match {
- case DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
+ case DefDef(_,_,_,_,_,rhs) =>
// methods with constant result type get literals as their body
// all methods except the primary constructor go into template
stat.symbol.tpe match {
case MethodType(List(), tp @ ConstantType(c)) =>
- defBuf += treeCopy.DefDef(
- stat, mods, name, tparams, vparamss, tpt,
- Literal(c) setPos rhs.pos setType tp)
+ defBuf += deriveDefDef(stat)(Literal(c) setPos _.pos setType tp)
case _ =>
if (stat.symbol.isPrimaryConstructor) ()
else if (stat.symbol.isConstructor) auxConstructorBuf += stat
else defBuf += stat
}
- case ValDef(mods, name, tpt, rhs) =>
+ case ValDef(_, _, _, rhs) =>
// val defs with constant right-hand sides are eliminated.
// for all other val defs, an empty valdef goes into the template and
// the initializer goes as an assignment into the constructor
@@ -190,7 +188,7 @@ abstract class Constructors extends Transform with ast.TreeDSL {
(if (canBeMoved(stat)) constrPrefixBuf else constrStatBuf) += mkAssign(
stat.symbol, rhs1)
}
- defBuf += treeCopy.ValDef(stat, mods, name, tpt, EmptyTree)
+ defBuf += deriveValDef(stat)(_ => EmptyTree)
}
case ClassDef(_, _, _, _) =>
// classes are treated recursively, and left in the template
@@ -228,11 +226,11 @@ abstract class Constructors extends Transform with ast.TreeDSL {
tree match {
case DefDef(_, _, _, _, _, body)
if (tree.symbol.isOuterAccessor && tree.symbol.owner == clazz && clazz.isEffectivelyFinal) =>
- log("outerAccessors += " + tree.symbol.fullName)
+ debuglog("outerAccessors += " + tree.symbol.fullName)
outerAccessors ::= ((tree.symbol, body))
case Select(_, _) =>
if (!mustbeKept(tree.symbol)) {
- log("accessedSyms += " + tree.symbol.fullName)
+ debuglog("accessedSyms += " + tree.symbol.fullName)
accessedSyms addEntry tree.symbol
}
super.traverse(tree)
@@ -516,14 +514,9 @@ abstract class Constructors extends Transform with ast.TreeDSL {
}
}
- def delayedInitCall(closure: Tree) =
- localTyper.typed {
- atPos(impl.pos) {
- Apply(
- Select(This(clazz), delayedInitMethod),
- List(New(TypeTree(closure.symbol.tpe), List(List(This(clazz))))))
- }
- }
+ def delayedInitCall(closure: Tree) = localTyper.typedPos(impl.pos) {
+ gen.mkMethodCall(This(clazz), delayedInitMethod, Nil, List(New(closure.symbol.tpe, This(clazz))))
+ }
/** Return a pair consisting of (all statements up to and including superclass and trait constr calls, rest) */
def splitAtSuper(stats: List[Tree]) = {
@@ -552,13 +545,12 @@ abstract class Constructors extends Transform with ast.TreeDSL {
}
// Assemble final constructor
- defBuf += treeCopy.DefDef(
- constr, constr.mods, constr.name, constr.tparams, constr.vparamss, constr.tpt,
+ defBuf += deriveDefDef(constr)(_ =>
treeCopy.Block(
constrBody,
paramInits ::: constrPrefixBuf.toList ::: uptoSuperStats :::
guardSpecializedInitializer(remainingConstrStats),
- constrBody.expr));
+ constrBody.expr))
// Followed by any auxiliary constructors
defBuf ++= auxConstructorBuf
@@ -568,14 +560,13 @@ abstract class Constructors extends Transform with ast.TreeDSL {
clazz.info.decls unlink sym
// Eliminate all field definitions that can be dropped from template
- treeCopy.Template(impl, impl.parents, impl.self,
- defBuf.toList filter (stat => mustbeKept(stat.symbol)))
+ deriveTemplate(impl)(_ => defBuf.toList filter (stat => mustbeKept(stat.symbol)))
} // transformClassTemplate
override def transform(tree: Tree): Tree =
tree match {
- case ClassDef(mods, name, tparams, impl) if !tree.symbol.isInterface && !isValueClass(tree.symbol) =>
- treeCopy.ClassDef(tree, mods, name, tparams, transformClassTemplate(impl))
+ case ClassDef(_,_,_,_) if !tree.symbol.isInterface && !isValueClass(tree.symbol) =>
+ deriveClassDef(tree)(transformClassTemplate)
case _ =>
super.transform(tree)
}
diff --git a/src/compiler/scala/tools/nsc/transform/Erasure.scala b/src/compiler/scala/tools/nsc/transform/Erasure.scala
index 5f84d765b9..2412c90962 100644
--- a/src/compiler/scala/tools/nsc/transform/Erasure.scala
+++ b/src/compiler/scala/tools/nsc/transform/Erasure.scala
@@ -223,7 +223,7 @@ abstract class Erasure extends AddInterfaces
/** The Java signature of type 'info', for symbol sym. The symbol is used to give the right return
* type for constructors.
*/
- def javaSig(sym0: Symbol, info: Type): Option[String] = atPhase(currentRun.erasurePhase) {
+ def javaSig(sym0: Symbol, info: Type): Option[String] = beforeErasure {
val isTraitSignature = sym0.enclClass.isTrait
def superSig(parents: List[Type]) = traceSig("superSig", parents) {
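From here on the file uses named phase-travel helpers instead of raw atPhase calls. Presumably these are thin wrappers of roughly this shape (sketch; the real definitions were added to reflect.internal.SymbolTable in this change set):

    @inline final def beforeErasure[T](op: => T): T  = atPhase(currentRun.erasurePhase)(op)
    @inline final def afterErasure[T](op: => T): T   = atPhase(currentRun.erasurePhase.next)(op)
    @inline final def afterRefchecks[T](op: => T): T = atPhase(currentRun.refchecksPhase.next)(op)

Besides brevity, the named forms retire the fragile atPhase(phase.next) idiom, which silently changes meaning if a transform is ever re-ordered in the phase pipeline.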
@@ -257,7 +257,7 @@ abstract class Erasure extends AddInterfaces
// Anything which could conceivably be a module (i.e. isn't known to be
// a type parameter or similar) must go through here or the signature is
// likely to end up with Foo<T>.Empty where it needs Foo<T>.Empty$.
- def fullNameInSig(sym: Symbol) = "L" + atPhase(currentRun.icodePhase)(sym.javaBinaryName)
+ def fullNameInSig(sym: Symbol) = "L" + beforeIcode(sym.javaBinaryName)
def jsig(tp0: Type, existentiallyBound: List[Symbol] = Nil, toplevel: Boolean = false, primitiveOK: Boolean = true): String = {
val tp = tp0.dealias
@@ -421,9 +421,9 @@ abstract class Erasure extends AddInterfaces
/** Box `tree` of unboxed type */
private def box(tree: Tree): Tree = tree match {
- case LabelDef(name, params, rhs) =>
- val rhs1 = box(rhs)
- treeCopy.LabelDef(tree, name, params, rhs1) setType rhs1.tpe
+ case LabelDef(_, _, _) =>
+ val ldef = deriveLabelDef(tree)(box)
+ ldef setType ldef.rhs.tpe
case _ =>
typedPos(tree.pos)(tree.tpe.typeSymbol match {
case UnitClass =>
@@ -440,7 +440,7 @@ abstract class Erasure extends AddInterfaces
* fields (see TupleX). (ID)
*/
case Apply(boxFun, List(arg)) if isUnbox(tree.symbol) && safeToRemoveUnbox(arg.tpe.typeSymbol) =>
- log("boxing an unbox: " + tree + " and replying with " + arg)
+ log("boxing an unbox: " + tree + "/" + tree.symbol + " and replying with " + arg + " of type " + arg.tpe)
arg
case _ =>
(REF(boxMethod(x)) APPLY tree) setPos (tree.pos) setType ObjectClass.tpe
@@ -460,9 +460,9 @@ abstract class Erasure extends AddInterfaces
println("unbox shorten: "+tree) // this never seems to kick in during build and test; therefore disabled.
adaptToType(unboxed, pt)
*/
- case LabelDef(name, params, rhs) =>
- val rhs1 = unbox(rhs, pt)
- treeCopy.LabelDef(tree, name, params, rhs1) setType rhs1.tpe
+ case LabelDef(_, _, _) =>
+ val ldef = deriveLabelDef(tree)(unbox(_, pt))
+ ldef setType ldef.rhs.tpe
case _ =>
typedPos(tree.pos)(pt.typeSymbol match {
case UnitClass =>
@@ -604,8 +604,8 @@ abstract class Erasure extends AddInterfaces
throw ex
}
def adaptCase(cdef: CaseDef): CaseDef = {
- val body1 = adaptToType(cdef.body, tree1.tpe)
- treeCopy.CaseDef(cdef, cdef.pat, cdef.guard, body1) setType body1.tpe
+ val newCdef = deriveCaseDef(cdef)(adaptToType(_, tree1.tpe))
+ newCdef setType newCdef.body.tpe
}
def adaptBranch(branch: Tree): Tree =
if (branch == EmptyTree) branch else adaptToType(branch, tree1.tpe);
@@ -648,21 +648,20 @@ abstract class Erasure extends AddInterfaces
private def checkNoDoubleDefs(root: Symbol) {
def doubleDefError(sym1: Symbol, sym2: Symbol) {
// the .toString must also be computed at the earlier phase
- def atRefc[T](op: => T) = atPhase[T](currentRun.refchecksPhase.next)(op)
- val tpe1 = atRefc(root.thisType.memberType(sym1))
- val tpe2 = atRefc(root.thisType.memberType(sym2))
+ val tpe1 = afterRefchecks(root.thisType.memberType(sym1))
+ val tpe2 = afterRefchecks(root.thisType.memberType(sym2))
if (!tpe1.isErroneous && !tpe2.isErroneous)
unit.error(
if (sym1.owner == root) sym1.pos else root.pos,
(if (sym1.owner == sym2.owner) "double definition:\n"
else if (sym1.owner == root) "name clash between defined and inherited member:\n"
else "name clash between inherited members:\n") +
- sym1 + ":" + atRefc(tpe1.toString) +
+ sym1 + ":" + afterRefchecks(tpe1.toString) +
(if (sym1.owner == root) "" else sym1.locationString) + " and\n" +
- sym2 + ":" + atRefc(tpe2.toString) +
+ sym2 + ":" + afterRefchecks(tpe2.toString) +
(if (sym2.owner == root) " at line " + (sym2.pos).line else sym2.locationString) +
"\nhave same type" +
- (if (atRefc(tpe1 =:= tpe2)) "" else " after erasure: " + atPhase(phase.next)(sym1.tpe)))
+ (if (afterRefchecks(tpe1 =:= tpe2)) "" else " after erasure: " + afterErasure(sym1.tpe)))
sym1.setInfo(ErrorType)
}
@@ -672,7 +671,7 @@ abstract class Erasure extends AddInterfaces
if (e.sym.isTerm) {
var e1 = decls.lookupNextEntry(e)
while (e1 ne null) {
- if (atPhase(phase.next)(e1.sym.info =:= e.sym.info)) doubleDefError(e.sym, e1.sym)
+ if (afterErasure(e1.sym.info =:= e.sym.info)) doubleDefError(e.sym, e1.sym)
e1 = decls.lookupNextEntry(e1)
}
}
@@ -686,10 +685,10 @@ abstract class Erasure extends AddInterfaces
|| !sym.hasTypeAt(currentRun.refchecksPhase.id))
override def matches(sym1: Symbol, sym2: Symbol): Boolean =
- atPhase(phase.next)(sym1.tpe =:= sym2.tpe)
+ afterErasure(sym1.tpe =:= sym2.tpe)
}
while (opc.hasNext) {
- if (!atPhase(currentRun.refchecksPhase.next)(
+ if (!afterRefchecks(
root.thisType.memberType(opc.overriding) matches
root.thisType.memberType(opc.overridden))) {
debuglog("" + opc.overriding.locationString + " " +
@@ -708,8 +707,8 @@ abstract class Erasure extends AddInterfaces
for (member <- root.info.nonPrivateMember(other.name).alternatives) {
if (member != other &&
!(member hasFlag BRIDGE) &&
- atPhase(phase.next)(member.tpe =:= other.tpe) &&
- !atPhase(refchecksPhase.next)(
+ afterErasure(member.tpe =:= other.tpe) &&
+ !afterRefchecks(
root.thisType.memberType(member) matches root.thisType.memberType(other))) {
debuglog("" + member.locationString + " " + member.infosString + other.locationString + " " + other.infosString);
doubleDefError(member, other)
@@ -733,13 +732,13 @@ abstract class Erasure extends AddInterfaces
*/
private def bridgeDefs(owner: Symbol): (List[Tree], immutable.Set[Symbol]) = {
var toBeRemoved: immutable.Set[Symbol] = immutable.Set()
- //println("computing bridges for " + owner)//DEBUG
- assert(phase == currentRun.erasurePhase)
+ debuglog("computing bridges for " + owner)//DEBUG
+ assert(phase == currentRun.erasurePhase, phase)
val site = owner.thisType
val bridgesScope = newScope
val bridgeTarget = new mutable.HashMap[Symbol, Symbol]
var bridges: List[Tree] = List()
- val opc = atPhase(currentRun.explicitouterPhase) {
+ val opc = beforeExplicitOuter {
new overridingPairs.Cursor(owner) {
override def parents: List[Type] = List(owner.info.parents.head)
override def exclude(sym: Symbol): Boolean =
@@ -750,9 +749,9 @@ abstract class Erasure extends AddInterfaces
val member = opc.overriding
val other = opc.overridden
//println("bridge? " + member + ":" + member.tpe + member.locationString + " to " + other + ":" + other.tpe + other.locationString)//DEBUG
- if (atPhase(currentRun.explicitouterPhase)(!member.isDeferred)) {
+ if (beforeExplicitOuter(!member.isDeferred)) {
val otpe = erasure(owner, other.tpe)
- val bridgeNeeded = atPhase(phase.next) (
+ val bridgeNeeded = afterErasure (
!(other.tpe =:= member.tpe) &&
!(deconstMap(other.tpe) =:= deconstMap(member.tpe)) &&
{ var e = bridgesScope.lookupEntry(member.name)
@@ -767,15 +766,15 @@ abstract class Erasure extends AddInterfaces
// the parameter symbols need to have the new owner
bridge.setInfo(otpe.cloneInfo(bridge))
bridgeTarget(bridge) = member
- atPhase(phase.next) { owner.info.decls.enter(bridge) }
+ afterErasure { owner.info.decls.enter(bridge) }
if (other.owner == owner) {
//println("bridge to same: "+other+other.locationString)//DEBUG
- atPhase(phase.next) { owner.info.decls.unlink(other) }
+ afterErasure { owner.info.decls.unlink(other) }
toBeRemoved += other
}
bridgesScope enter bridge
bridges =
- atPhase(phase.next) {
+ afterErasure {
atPos(bridge.pos) {
val bridgeDef =
DefDef(bridge,
@@ -789,7 +788,7 @@ abstract class Erasure extends AddInterfaces
if ( member.isSynthetic // TODO: should we do this for user-defined unapplies as well?
&& ((member.name == nme.unapply) || (member.name == nme.unapplySeq))
// && (bridge.paramss.nonEmpty && bridge.paramss.head.nonEmpty && bridge.paramss.head.tail.isEmpty) // does the first argument list have exactly one argument -- for user-defined unapplies we can't be sure
- && !(atPhase(phase.next)(member.tpe <:< other.tpe))) { // no static guarantees (TODO: is the subtype test ever true?)
+ && !(afterErasure(member.tpe <:< other.tpe))) { // no static guarantees (TODO: is the subtype test ever true?)
import CODE._
val typeTest = gen.mkIsInstanceOf(REF(bridge.firstParam), member.tpe.params.head.tpe, any = true, wrapInApply = true) // any = true since we're before erasure (?), wrapInApply is true since we're after uncurry
// println("unapp type test: "+ typeTest)
@@ -846,11 +845,11 @@ abstract class Erasure extends AddInterfaces
*/
private val preTransformer = new TypingTransformer(unit) {
def preErase(tree: Tree): Tree = tree match {
- case ClassDef(mods, name, tparams, impl) =>
+ case ClassDef(_,_,_,_) =>
debuglog("defs of " + tree.symbol + " = " + tree.symbol.info.decls)
- treeCopy.ClassDef(tree, mods, name, List(), impl)
- case DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
- treeCopy.DefDef(tree, mods, name, List(), vparamss, tpt, rhs)
+ copyClassDef(tree)(tparams = Nil)
+ case DefDef(_,_,_,_,_,_) =>
+ copyDefDef(tree)(tparams = Nil)
case TypeDef(_, _, _, _) =>
EmptyTree
case Apply(instanceOf @ TypeApply(fun @ Select(qual, name), args @ List(arg)), List()) // !!! todo: simplify by having GenericArray also extract trees
@@ -1055,7 +1054,7 @@ abstract class Erasure extends AddInterfaces
*/
override def transform(tree: Tree): Tree = {
val tree1 = preTransformer.transform(tree)
- atPhase(phase.next) {
+ afterErasure {
val tree2 = mixinTransformer.transform(tree1)
debuglog("tree after addinterfaces: \n" + tree2)
diff --git a/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala b/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
index 7f7f7e7b65..c15da6e9a9 100644
--- a/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
+++ b/src/compiler/scala/tools/nsc/transform/ExplicitOuter.scala
@@ -170,7 +170,7 @@ abstract class ExplicitOuter extends InfoTransform
}
if (!clazz.isTrait && !parents.isEmpty) {
for (mc <- clazz.mixinClasses) {
- val mixinOuterAcc: Symbol = atPhase(phase.next)(outerAccessor(mc))
+ val mixinOuterAcc: Symbol = afterExplicitOuter(outerAccessor(mc))
if (mixinOuterAcc != NoSymbol) {
if (decls1 eq decls) decls1 = decls.cloneScope
val newAcc = mixinOuterAcc.cloneSymbol(clazz, mixinOuterAcc.flags & ~DEFERRED)
@@ -468,10 +468,12 @@ abstract class ExplicitOuter extends InfoTransform
}
}
super.transform(
- treeCopy.Template(tree, parents, self,
- if (newDefs.isEmpty) decls else decls ::: newDefs.toList)
+ deriveTemplate(tree)(decls =>
+ if (newDefs.isEmpty) decls
+ else decls ::: newDefs.toList
+ )
)
- case DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
+ case DefDef(_, _, _, vparamss, _, rhs) =>
if (sym.isClassConstructor) {
rhs match {
case Literal(_) =>
@@ -484,7 +486,7 @@ abstract class ExplicitOuter extends InfoTransform
sym.newValueParameter(nme.OUTER, sym.pos) setInfo outerField(clazz).info
((ValDef(outerParam) setType NoType) :: vparamss.head) :: vparamss.tail
} else vparamss
- super.transform(treeCopy.DefDef(tree, mods, name, tparams, vparamss1, tpt, rhs))
+ super.transform(copyDefDef(tree)(vparamss = vparamss1))
}
} else
super.transform(tree)
@@ -517,7 +519,7 @@ abstract class ExplicitOuter extends InfoTransform
super.transform(treeCopy.Apply(tree, sel, outerVal :: args))
// entry point for pattern matcher translation
- case mch: Match =>
+ case mch: Match if (!opt.virtPatmat) => // don't use old pattern matcher as fallback when the user wants the virtualizing one
matchTranslation(mch)
case _ =>
@@ -559,7 +561,7 @@ abstract class ExplicitOuter extends InfoTransform
/** The transformation method for whole compilation units */
override def transformUnit(unit: CompilationUnit) {
- atPhase(phase.next)(super.transformUnit(unit))
+ afterExplicitOuter(super.transformUnit(unit))
}
}
diff --git a/src/compiler/scala/tools/nsc/transform/Flatten.scala b/src/compiler/scala/tools/nsc/transform/Flatten.scala
index 89f1cc26e0..8856024a30 100644
--- a/src/compiler/scala/tools/nsc/transform/Flatten.scala
+++ b/src/compiler/scala/tools/nsc/transform/Flatten.scala
@@ -20,16 +20,14 @@ abstract class Flatten extends InfoTransform {
/** Updates the owning scope with the given symbol; returns the old symbol.
*/
- private def replaceSymbolInCurrentScope(sym: Symbol): Symbol = {
- atPhase(phase.next) {
- val scope = sym.owner.info.decls
- val old = scope lookup sym.name
- if (old ne NoSymbol)
- scope unlink old
+ private def replaceSymbolInCurrentScope(sym: Symbol): Symbol = afterFlatten {
+ val scope = sym.owner.info.decls
+ val old = scope lookup sym.name
+ if (old ne NoSymbol)
+ scope unlink old
- scope enter sym
- old
- }
+ scope enter sym
+ old
}
private def liftClass(sym: Symbol) {
@@ -53,7 +51,8 @@ abstract class Flatten extends InfoTransform {
val clazz = pre.typeSymbol
clazz.isClass && !clazz.isPackageClass && {
// Cannot flatten here: class A[T] { object B }
- atPhase(currentRun.erasurePhase.prev)(clazz.typeParams.isEmpty)
+ // was "at erasurePhase.prev"
+ beforeErasure(clazz.typeParams.isEmpty)
}
}
@@ -67,10 +66,11 @@ abstract class Flatten extends InfoTransform {
val decls1 = scopeTransform(clazz) {
val decls1 = newScope
if (clazz.isPackageClass) {
- atPhase(phase.next)(decls foreach (decls1 enter _))
- } else {
+ afterFlatten { decls foreach (decls1 enter _) }
+ }
+ else {
val oldowner = clazz.owner
- atPhase(phase.next)(oldowner.info)
+ afterFlatten { oldowner.info }
parents1 = parents mapConserve (this)
for (sym <- decls) {
@@ -102,7 +102,7 @@ abstract class Flatten extends InfoTransform {
class Flattener extends Transformer {
/** Buffers for lifted out classes */
- private val liftedDefs = new mutable.HashMap[Symbol, ListBuffer[Tree]]
+ private val liftedDefs = perRunCaches.newMap[Symbol, ListBuffer[Tree]]()
override def transform(tree: Tree): Tree = {
tree match {
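liftedDefs joining perRunCaches is part of a broader sweep in this commit: long-lived compiler maps move into a per-run registry so they are cleared between runs rather than pinning symbols from earlier runs. A minimal sketch of the registry idea (the real implementation lives in SymbolTable; names here are schematic):

    import scala.collection.mutable

    object perRunCaches {
      private val clearers = mutable.ListBuffer[() => Unit]()
      def newMap[K, V]() = { val m = mutable.HashMap[K, V](); clearers += (() => m.clear()); m }
      def newSet[T]()    = { val s = mutable.HashSet[T]();    clearers += (() => s.clear()); s }
      def clearAll()     = clearers foreach (_.apply())   // invoked at the start of each run
    }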
@@ -122,11 +122,7 @@ abstract class Flatten extends InfoTransform {
liftedDefs(sym.enclosingTopLevelClass.owner) += tree
EmptyTree
case Select(qual, name) if (sym.isStaticModule && !sym.owner.isPackageClass) =>
- atPhase(phase.next) {
- atPos(tree.pos) {
- gen.mkAttributedRef(sym)
- }
- }
+ afterFlatten(atPos(tree.pos)(gen.mkAttributedRef(sym)))
case _ =>
tree
}
diff --git a/src/compiler/scala/tools/nsc/transform/LambdaLift.scala b/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
index 4fc7b9f92f..dafce76d45 100644
--- a/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
+++ b/src/compiler/scala/tools/nsc/transform/LambdaLift.scala
@@ -128,7 +128,7 @@ abstract class LambdaLift extends InfoTransform {
if (!ss(sym)) {
ss addEntry sym
renamable addEntry sym
- atPhase(currentRun.picklerPhase) {
+ beforePickler {
// The param symbol in the MethodType should not be renamed, only the symbol in scope. This way,
// parameter names for named arguments are not changed. Example: without cloning the MethodType,
// def closure(x: Int) = { () => x }
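The example in the comment is worth spelling out: named arguments resolve against the parameter name recorded in the method's type, so only the local symbol in scope may be renamed, never the MethodType's copy.

    def closure(x: Int) = () => x
    closure(x = 1)   // keeps compiling after lambda lift precisely because the
                     // MethodType retains the original parameter name `x`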
@@ -262,7 +262,7 @@ abstract class LambdaLift extends InfoTransform {
}
}
- atPhase(phase.next) {
+ afterOwnPhase {
for ((owner, freeValues) <- free.toList) {
val newFlags = SYNTHETIC | ( if (owner.isClass) PARAMACCESSOR | PrivateLocal else PARAM )
debuglog("free var proxy: %s, %s".format(owner.fullLocationString, freeValues.toList.mkString(", ")))
@@ -320,12 +320,13 @@ abstract class LambdaLift extends InfoTransform {
case Some(ps) =>
val freeParams = ps map (p => ValDef(p) setPos tree.pos setType NoType)
tree match {
- case DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
+ case DefDef(_, _, _, vparams :: _, _, _) =>
val addParams = cloneSymbols(ps).map(_.setFlag(PARAM))
sym.updateInfo(
lifted(MethodType(sym.info.params ::: addParams, sym.info.resultType)))
- treeCopy.DefDef(tree, mods, name, tparams, List(vparamss.head ++ freeParams), tpt, rhs)
- case ClassDef(mods, name, tparams, impl @ Template(parents, self, body)) =>
+
+ copyDefDef(tree)(vparamss = List(vparams ++ freeParams))
+ case ClassDef(_, _, _, _) =>
// Disabled attempt to add getters to freeParams
// this does not work yet. Problem is that local symbols need local names
// and references to local symbols need to be transformed into
@@ -337,8 +338,7 @@ abstract class LambdaLift extends InfoTransform {
// DefDef(getter, rhs) setPos tree.pos setType NoType
// }
// val newDefs = if (sym.isTrait) freeParams ::: (ps map paramGetter) else freeParams
- treeCopy.ClassDef(tree, mods, name, tparams,
- treeCopy.Template(impl, parents, self, body ::: freeParams))
+ deriveClassDef(tree)(impl => deriveTemplate(impl)(_ ::: freeParams))
}
case None =>
tree
@@ -420,7 +420,7 @@ abstract class LambdaLift extends InfoTransform {
case Try(block, catches, finalizer) =>
Try(refConstr(block), catches map refConstrCase, finalizer)
case _ =>
- Apply(Select(New(TypeTree(sym.tpe)), nme.CONSTRUCTOR), List(expr))
+ New(sym.tpe, expr)
}
def refConstrCase(cdef: CaseDef): CaseDef =
CaseDef(cdef.pat, cdef.guard, refConstr(cdef.body))
@@ -480,17 +480,16 @@ abstract class LambdaLift extends InfoTransform {
/** Transform statements and add lifted definitions to them. */
override def transformStats(stats: List[Tree], exprOwner: Symbol): List[Tree] = {
def addLifted(stat: Tree): Tree = stat match {
- case ClassDef(mods, name, tparams, impl @ Template(parents, self, body)) =>
+ case ClassDef(_, _, _, _) =>
val lifted = liftedDefs get stat.symbol match {
case Some(xs) => xs reverseMap addLifted
case _ => log("unexpectedly no lifted defs for " + stat.symbol) ; Nil
}
- val result = treeCopy.ClassDef(
- stat, mods, name, tparams, treeCopy.Template(impl, parents, self, body ::: lifted))
- liftedDefs -= stat.symbol
- result
- case DefDef(mods, name, tp, vp, tpt, Block(Nil, expr)) if !stat.symbol.isConstructor =>
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt, expr)
+ try deriveClassDef(stat)(impl => deriveTemplate(impl)(_ ::: lifted))
+ finally liftedDefs -= stat.symbol
+
+ case DefDef(_, _, _, _, _, Block(Nil, expr)) if !stat.symbol.isConstructor =>
+ deriveDefDef(stat)(_ => expr)
case _ =>
stat
}
@@ -499,7 +498,7 @@ abstract class LambdaLift extends InfoTransform {
override def transformUnit(unit: CompilationUnit) {
computeFreeVars
- atPhase(phase.next)(super.transformUnit(unit))
+ afterOwnPhase(super.transformUnit(unit))
assert(liftedDefs.isEmpty, liftedDefs.keys mkString ", ")
}
} // class LambdaLifter
diff --git a/src/compiler/scala/tools/nsc/transform/LazyVals.scala b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
index f8c5f5bfc6..75d3e443d4 100644
--- a/src/compiler/scala/tools/nsc/transform/LazyVals.scala
+++ b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
@@ -50,9 +50,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
*/
class LazyValues(unit: CompilationUnit) extends TypingTransformer(unit) {
/** map from method symbols to the number of lazy values each defines. */
- private val lazyVals = new mutable.HashMap[Symbol, Int] {
- override def default(meth: Symbol) = 0
- }
+ private val lazyVals = perRunCaches.newMap[Symbol, Int]() withDefaultValue 0
import symtab.Flags._
import lazyVals._
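The anonymous HashMap-with-default subclass gives way to withDefaultValue, here and in the bitmaps map below; the perRunCaches variant is additionally reset between runs. Usage is unchanged, e.g. (illustrative):

    val lazyCounts = perRunCaches.newMap[Symbol, Int]() withDefaultValue 0
    lazyCounts(meth) += 1   // a missing key reads as 0, so no pre-seeding is needed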
@@ -70,7 +68,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
curTree = tree
tree match {
- case DefDef(mods, name, tparams, vparams, tpt, rhs) => atOwner(tree.symbol) {
+ case DefDef(_, _, _, _, _, rhs) => atOwner(tree.symbol) {
val res = if (!sym.owner.isClass && sym.isLazy) {
val enclosingClassOrDummyOrMethod = {
val enclMethod = sym.enclMethod
@@ -92,11 +90,10 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
} else
super.transform(rhs)
- treeCopy.DefDef(tree, mods, name, tparams, vparams, tpt,
- if (LocalLazyValFinder.find(res)) typed(addBitmapDefs(sym, res)) else res)
+ deriveDefDef(tree)(_ => if (LocalLazyValFinder.find(res)) typed(addBitmapDefs(sym, res)) else res)
}
- case Template(parents, self, body) => atOwner(currentOwner) {
+ case Template(_, _, body) => atOwner(currentOwner) {
val body1 = super.transformTrees(body)
var added = false
val stats =
@@ -108,8 +105,8 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
added = true
typed(addBitmapDefs(sym, stat))
} else stat
- case ValDef(mods, name, tpt, rhs) =>
- typed(treeCopy.ValDef(stat, mods, name, tpt, addBitmapDefs(stat.symbol, rhs)))
+ case ValDef(_, _, _, _) =>
+ typed(deriveValDef(stat)(addBitmapDefs(stat.symbol, _)))
case _ =>
stat
}
@@ -124,29 +121,29 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
})
toAdd0
} else List()
- treeCopy.Template(tree, parents, self, innerClassBitmaps ++ stats)
+ deriveTemplate(tree)(_ => innerClassBitmaps ++ stats)
}
- case ValDef(mods, name, tpt, rhs0) if (!sym.owner.isModule && !sym.owner.isClass) =>
- val rhs = super.transform(rhs0)
- treeCopy.ValDef(tree, mods, name, tpt,
- if (LocalLazyValFinder.find(rhs)) typed(addBitmapDefs(sym, rhs)) else rhs)
+ case ValDef(_, _, _, _) if !sym.owner.isModule && !sym.owner.isClass =>
+ deriveValDef(tree) { rhs0 =>
+ val rhs = super.transform(rhs0)
+ if (LocalLazyValFinder.find(rhs)) typed(addBitmapDefs(sym, rhs)) else rhs
+ }
case l@LabelDef(name0, params0, ifp0@If(_, _, _)) if name0.startsWith(nme.WHILE_PREFIX) =>
val ifp1 = super.transform(ifp0)
val If(cond0, thenp0, elsep0) = ifp1
+
if (LocalLazyValFinder.find(thenp0))
- treeCopy.LabelDef(l, name0, params0,
- treeCopy.If(ifp1, cond0, typed(addBitmapDefs(sym.owner, thenp0)), elsep0))
+ deriveLabelDef(l)(_ => treeCopy.If(ifp1, cond0, typed(addBitmapDefs(sym.owner, thenp0)), elsep0))
else
l
- case l@LabelDef(name0, params0, block@Block(stats0, _))
+ case l@LabelDef(name0, params0, block@Block(stats0, expr))
if name0.startsWith(nme.WHILE_PREFIX) || name0.startsWith(nme.DO_WHILE_PREFIX) =>
val stats1 = super.transformTrees(stats0)
if (LocalLazyValFinder.find(stats1))
- treeCopy.LabelDef(l, name0, params0,
- treeCopy.Block(block, typed(addBitmapDefs(sym.owner, stats1.head))::stats1.tail, block.expr))
+ deriveLabelDef(l)(_ => treeCopy.Block(block, typed(addBitmapDefs(sym.owner, stats1.head))::stats1.tail, expr))
else
l
@@ -171,9 +168,9 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
def isMatch(params: List[Ident]) = (params.tail corresponds methSym.tpe.params)(_.tpe == _.tpe)
if (bmps.isEmpty) rhs else rhs match {
- case Block(assign, l @ LabelDef(name, params, rhs1))
+ case Block(assign, l @ LabelDef(name, params, _))
if name.toString == ("_" + methSym.name) && isMatch(params) =>
- Block(assign, treeCopy.LabelDef(l, name, params, typed(prependStats(bmps, rhs1))))
+ Block(assign, deriveLabelDef(l)(rhs => typed(prependStats(bmps, rhs))))
case _ => prependStats(bmps, rhs)
}
@@ -233,9 +230,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
private def mkSetFlag(bmp: Symbol, mask: Tree, bmpRef: Tree): Tree =
bmpRef === (bmpRef INT_| mask)
- val bitmaps = new mutable.HashMap[Symbol, List[Symbol]] {
- override def default(meth: Symbol) = Nil
- }
+ val bitmaps = mutable.Map[Symbol, List[Symbol]]() withDefaultValue Nil
/** Return the symbol corresponding to the right bitmap int inside meth,
* given the offset.
@@ -247,7 +242,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
bmps(n)
else {
val sym = meth.newVariable(nme.newBitmapName(nme.BITMAP_NORMAL, n), meth.pos).setInfo(IntClass.tpe)
- atPhase(currentRun.typerPhase) {
+ beforeTyper {
sym addAnnotation VolatileAttr
}
diff --git a/src/compiler/scala/tools/nsc/transform/Mixin.scala b/src/compiler/scala/tools/nsc/transform/Mixin.scala
index b3b7596f9a..3a2482e816 100644
--- a/src/compiler/scala/tools/nsc/transform/Mixin.scala
+++ b/src/compiler/scala/tools/nsc/transform/Mixin.scala
@@ -71,7 +71,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
* maps all other types to themselves.
*/
private def toInterface(tp: Type): Type =
- atPhase(currentRun.mixinPhase)(tp.typeSymbol.toInterface).tpe
+ beforeMixin(tp.typeSymbol.toInterface).tpe
private def isFieldWithBitmap(field: Symbol) = {
field.info // ensure that nested objects are transformed
@@ -103,7 +103,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
private val toInterfaceMap = new TypeMap {
def apply(tp: Type): Type = mapOver( tp match {
case TypeRef(pre, sym, args) if (sym.isImplClass) =>
- typeRef(pre, atPhase(currentRun.mixinPhase)(sym.toInterface), args)
+ typeRef(pre, beforeMixin(sym.toInterface), args)
case _ => tp
})
}
@@ -123,7 +123,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
* @param mixinClass The mixin class that produced the superaccessor
*/
private def rebindSuper(base: Symbol, member: Symbol, mixinClass: Symbol): Symbol =
- atPhase(currentRun.picklerPhase.next) {
+ afterPickler {
var bcs = base.info.baseClasses.dropWhile(mixinClass !=).tail
var sym: Symbol = NoSymbol
debuglog("starting rebindsuper " + base + " " + member + ":" + member.tpe +
@@ -131,7 +131,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
while (!bcs.isEmpty && sym == NoSymbol) {
if (settings.debug.value) {
val other = bcs.head.info.nonPrivateDecl(member.name);
- log("rebindsuper " + bcs.head + " " + other + " " + other.tpe +
+ debuglog("rebindsuper " + bcs.head + " " + other + " " + other.tpe +
" " + other.isDeferred)
}
sym = member.matchingSymbol(bcs.head, base.thisType).suchThat(sym => !sym.hasFlag(DEFERRED | BRIDGE))
@@ -147,7 +147,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
member.hasAccessorFlag && (!member.isDeferred || (member hasFlag lateDEFERRED))
/** Is member overridden (either directly or via a bridge) in base class sequence `bcs`? */
- def isOverriddenAccessor(member: Symbol, bcs: List[Symbol]): Boolean = atPhase(ownPhase) {
+ def isOverriddenAccessor(member: Symbol, bcs: List[Symbol]): Boolean = atOwnPhase {
def hasOverridingAccessor(clazz: Symbol) = {
clazz.info.nonPrivateDecl(member.name).alternatives.exists(
sym =>
@@ -155,8 +155,9 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
!sym.hasFlag(MIXEDIN) &&
matchesType(sym.tpe, member.tpe, true))
}
- bcs.head != member.owner &&
- (hasOverridingAccessor(bcs.head) || isOverriddenAccessor(member, bcs.tail))
+ ( bcs.head != member.owner
+ && (hasOverridingAccessor(bcs.head) || isOverriddenAccessor(member, bcs.tail))
+ )
}
/** Add given member to given class, and mark member as mixed-in.
@@ -241,7 +242,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
*/
def addMixedinMembers(clazz: Symbol, unit : CompilationUnit) {
def cloneBeforeErasure(iface: Symbol, clazz: Symbol, imember: Symbol): Symbol = {
- val newSym = atPhase(currentRun.erasurePhase) {
+ val newSym = beforeErasure {
val res = imember.cloneSymbol(clazz)
// since we used the member (imember) from the interface that represents the trait that's being mixed in,
// have to instantiate the interface type params (that may occur in imember's info) as they are seen from the class
@@ -337,8 +338,8 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
case _ => // otherwise mixin a field as well
// atPhase: the private field is moved to the implementation class by erasure,
// so it can no longer be found in the member's owner (the trait)
- val accessed = atPhase(currentRun.picklerPhase)(member.accessed)
- val sym = atPhase(currentRun.erasurePhase){ // #3857, need to retain info before erasure when cloning (since cloning only carries over the current entry in the type history)
+ val accessed = beforePickler(member.accessed)
+ val sym = beforeErasure { // #3857, need to retain info before erasure when cloning (since cloning only carries over the current entry in the type history)
clazz.newValue(nme.getterToLocal(member.name), member.pos).setInfo(member.tpe.resultType) // so we have a type history entry before erasure
}
sym.updateInfo(member.tpe.resultType) // info at current phase
@@ -349,13 +350,15 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
setAnnotations accessed.annotations)
}
}
- } else if (member.isSuperAccessor) { // mixin super accessors
+ }
+ else if (member.isSuperAccessor) { // mixin super accessors
val member1 = addMember(clazz, member.cloneSymbol(clazz)) setPos clazz.pos
assert(member1.alias != NoSymbol, member1)
val alias1 = rebindSuper(clazz, member.alias, mixinClass)
member1.asInstanceOf[TermSymbol] setAlias alias1
- } else if (member.isMethod && member.isModule && member.hasNoFlags(LIFTED | BRIDGE)) {
+ }
+ else if (member.isMethod && member.isModule && member.hasNoFlags(LIFTED | BRIDGE)) {
// mixin objects: todo what happens with abstract objects?
addMember(clazz, member.cloneSymbol(clazz, member.flags & ~(DEFERRED | lateDEFERRED)) setPos clazz.pos)
}
@@ -383,7 +386,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
var parents1 = parents
var decls1 = decls
if (!clazz.isPackageClass) {
- atPhase(phase.next)(clazz.owner.info)
+ afterMixin(clazz.owner.info)
if (clazz.isImplClass) {
clazz setFlag lateMODULE
var sourceModule = clazz.owner.info.decls.lookup(sym.name.toTermName)
@@ -449,7 +452,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
&& sym.owner == templ.symbol.owner
&& !sym.isLazy
&& !tree.isDef) {
- log("added use in: " + currentOwner + " -- " + tree)
+ debuglog("added use in: " + currentOwner + " -- " + tree)
usedIn(sym) ::= currentOwner
}
@@ -459,7 +462,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
}
}
SingleUseTraverser(templ)
- log("usedIn: " + usedIn)
+ debuglog("usedIn: " + usedIn)
usedIn filter {
case (_, member :: Nil) => member.isValue && member.isLazy
case _ => false
@@ -515,7 +518,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
tree match {
case Template(parents, self, body) =>
localTyper = erasure.newTyper(rootContext.make(tree, currentOwner))
- atPhase(phase.next)(currentOwner.owner.info)//todo: needed?
+ afterMixin(currentOwner.owner.info)//todo: needed?
if (!currentOwner.isTrait && !isValueClass(currentOwner))
addMixedinMembers(currentOwner, unit)
@@ -523,18 +526,18 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
addLateInterfaceMembers(currentOwner)
tree
- case DefDef(mods, name, tparams, List(vparams), tpt, rhs) =>
+ case DefDef(_, _, _, vparams :: Nil, _, _) =>
if (currentOwner.isImplClass) {
if (isImplementedStatically(sym)) {
sym setFlag notOVERRIDE
self = sym.newValueParameter(nme.SELF, sym.pos) setInfo toInterface(currentOwner.typeOfThis)
val selfdef = ValDef(self) setType NoType
- treeCopy.DefDef(tree, mods, name, tparams, List(selfdef :: vparams), tpt, rhs)
- } else {
- EmptyTree
- }
- } else {
- if (currentOwner.isTrait && sym.isSetter && !atPhase(currentRun.picklerPhase)(sym.isDeferred)) {
+ copyDefDef(tree)(vparamss = List(selfdef :: vparams))
+ }
+ else EmptyTree
+ }
+ else {
+ if (currentOwner.isTrait && sym.isSetter && !beforePickler(sym.isDeferred)) {
sym.addAnnotation(TraitSetterAnnotationClass)
}
tree
@@ -699,15 +702,11 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
* This rhs is typed and then mixin transformed.
*/
def completeSuperAccessor(stat: Tree) = stat match {
- case DefDef(mods, name, tparams, List(vparams), tpt, EmptyTree) if stat.symbol.isSuperAccessor =>
+ case DefDef(_, _, _, vparams :: Nil, _, EmptyTree) if stat.symbol.isSuperAccessor =>
val rhs0 = (Super(clazz, tpnme.EMPTY) DOT stat.symbol.alias)(vparams map (v => Ident(v.symbol)): _*)
val rhs1 = localTyped(stat.pos, rhs0, stat.symbol.tpe.resultType)
- val rhs2 = atPhase(currentRun.mixinPhase)(transform(rhs1))
- debuglog("complete super acc " + stat.symbol.fullLocationString +
- " " + rhs1 + " " + stat.symbol.alias.fullLocationString +
- "/" + stat.symbol.alias.owner.hasFlag(lateINTERFACE))//debug
- treeCopy.DefDef(stat, mods, name, tparams, List(vparams), tpt, rhs2)
+ deriveDefDef(stat)(_ => beforeMixin(transform(rhs1)))
case _ =>
stat
}
@@ -738,7 +737,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
def createBitmap: Symbol = {
val sym = clazz0.newVariable(bitmapName, clazz0.pos) setInfo IntClass.tpe
- atPhase(currentRun.typerPhase)(sym addAnnotation VolatileAttr)
+ beforeTyper(sym addAnnotation VolatileAttr)
category match {
case nme.BITMAP_TRANSIENT | nme.BITMAP_CHECKINIT_TRANSIENT => sym addAnnotation TransientAttr
@@ -846,7 +845,9 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val nulls = lazyValNullables(lzyVal).toList sortBy (_.id) map nullify
def syncBody = init ::: List(mkSetFlag(clazz, offset, lzyVal), UNIT)
- log("nulling fields inside " + lzyVal + ": " + nulls)
+ if (nulls.nonEmpty)
+ log("nulling fields inside " + lzyVal + ": " + nulls)
+
val result = gen.mkDoubleCheckedLocking(clazz, cond, syncBody, nulls)
typedPos(init.head.pos)(BLOCK(result, retVal))
}
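For orientation, the tree mkDoubleCheckedLocking assembles for a lazy val has roughly this shape once compiled (schematic; bitmap$0 and value$lzy stand in for the generated names):

    class Host(compute: () => Int) {
      @volatile private var bitmap$0 = 0
      private var value$lzy: Int = _

      def value: Int = {
        if ((bitmap$0 & 1) == 0) this.synchronized {
          if ((bitmap$0 & 1) == 0) {
            value$lzy = compute()     // the original right-hand side (syncBody's init)
            bitmap$0 = bitmap$0 | 1   // mkSetFlag
            // any fields collected in `nulls` are cleared here to free memory
          }
        }
        value$lzy
      }
    }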
@@ -883,14 +884,13 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
*/
def addCheckedGetters(clazz: Symbol, stats: List[Tree]): List[Tree] = {
def dd(stat: DefDef) = {
- val DefDef(mods, name, tp, vp, tpt, rhs) = stat
- val sym = stat.symbol
- def isUnit = sym.tpe.resultType.typeSymbol == UnitClass
- def isEmpty = rhs == EmptyTree
+ val sym = stat.symbol
+ def isUnit = sym.tpe.resultType.typeSymbol == UnitClass
+ def isEmpty = stat.rhs == EmptyTree
if (sym.isLazy && !isEmpty && !clazz.isImplClass) {
assert(fieldOffset contains sym, sym)
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt,
+ deriveDefDef(stat)(rhs =>
if (isUnit)
mkLazyDef(clazz, sym, List(rhs), UNIT, fieldOffset(sym))
else {
@@ -901,7 +901,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
}
else if (needsInitFlag(sym) && !isEmpty && !clazz.hasFlag(IMPLCLASS | TRAIT)) {
assert(fieldOffset contains sym, sym)
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt,
+ deriveDefDef(stat)(rhs =>
(mkCheckedAccessor(clazz, _: Tree, fieldOffset(sym), stat.pos, sym))(
if (sym.tpe.resultType.typeSymbol == UnitClass) UNIT
else rhs
@@ -909,26 +909,24 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
)
}
else if (sym.isConstructor) {
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt, addInitBits(clazz, rhs))
+ deriveDefDef(stat)(addInitBits(clazz, _))
}
else if (settings.checkInit.value && !clazz.isTrait && sym.isSetter) {
val getter = sym.getter(clazz)
if (needsInitFlag(getter) && fieldOffset.isDefinedAt(getter))
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt,
- Block(List(rhs, localTyper.typed(mkSetFlag(clazz, fieldOffset(getter), getter))), UNIT)
- )
+ deriveDefDef(stat)(rhs => Block(List(rhs, localTyper.typed(mkSetFlag(clazz, fieldOffset(getter), getter))), UNIT))
else stat
}
else if (sym.isModule && (!clazz.isTrait || clazz.isImplClass) && !sym.isBridge) {
- treeCopy.DefDef(stat, mods, name, tp, vp, tpt,
- typedPos(stat.pos) {
+ deriveDefDef(stat)(rhs =>
+ typedPos(stat.pos)(
mkInnerClassAccessorDoubleChecked(
// Martin to Hubert: I think this can be replaced by selfRef(tree.pos)
// @PP: It does not seem so, it crashes for me trying to bootstrap.
- if (clazz.isImplClass) gen.mkAttributedIdent(vp.head.head.symbol) else gen.mkAttributedThis(clazz),
+ if (clazz.isImplClass) gen.mkAttributedIdent(stat.vparamss.head.head.symbol) else gen.mkAttributedThis(clazz),
rhs
)
- }
+ )
)
}
else stat
@@ -943,7 +941,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
private def checkedGetter(lhs: Tree) = {
val sym = clazz.info decl lhs.symbol.getterName suchThat (_.isGetter)
if (needsInitAndHasOffset(sym)) {
- log("adding checked getter for: " + sym + " " + lhs.symbol.defaultFlagString)
+ debuglog("adding checked getter for: " + sym + " " + lhs.symbol.defaultFlagString)
List(localTyper typed mkSetFlag(clazz, fieldOffset(sym), sym))
}
else Nil
@@ -1130,7 +1128,6 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
*/
private def postTransform(tree: Tree): Tree = {
val sym = tree.symbol
- // assert(tree.tpe ne null, tree.getClass +" : "+ tree +" in "+ localTyper.context.tree)
// change every node type that refers to an implementation class to its
// corresponding interface, unless the node's symbol is an implementation class.
if (tree.tpe.typeSymbol.isImplClass && ((sym eq null) || !sym.isImplClass))
@@ -1237,7 +1234,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
val tree1 = super.transform(preTransform(tree))
// localTyper needed when not flattening inner classes. parts after an
// inner class will otherwise be typechecked with a wrong scope
- try atPhase(phase.next)(postTransform(tree1))
+ try afterMixin(postTransform(tree1))
finally localTyper = saved
}
}
diff --git a/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala b/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
index 05f5dbc379..e5d1e348d6 100644
--- a/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
+++ b/src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala
@@ -69,7 +69,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
ScalaValueClasses, isValueClass, isScalaValueType,
SpecializedClass, RepeatedParamClass, JavaRepeatedParamClass,
AnyRefClass, ObjectClass, AnyRefModule,
- GroupOfSpecializable, uncheckedVarianceClass
+ GroupOfSpecializable, uncheckedVarianceClass, ScalaInlineClass
}
/** TODO - this is a lot of maps.
@@ -79,17 +79,17 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val specializedClass: mutable.Map[(Symbol, TypeEnv), Symbol] = new mutable.LinkedHashMap
/** Map a method symbol to a list of its specialized overloads in the same class. */
- private val overloads: mutable.Map[Symbol, List[Overload]] = mutable.HashMap[Symbol, List[Overload]]() withDefaultValue Nil
+ private val overloads = perRunCaches.newMap[Symbol, List[Overload]]() withDefaultValue Nil
/** Map a symbol to additional information on specialization. */
- private val info: mutable.Map[Symbol, SpecializedInfo] = perRunCaches.newMap[Symbol, SpecializedInfo]()
+ private val info = perRunCaches.newMap[Symbol, SpecializedInfo]()
/** Map class symbols to the type environments where they were created. */
- private val typeEnv = mutable.HashMap[Symbol, TypeEnv]() withDefaultValue emptyEnv
+ private val typeEnv = perRunCaches.newMap[Symbol, TypeEnv]() withDefaultValue emptyEnv
- // holds mappings from regular type parameter symbols to symbols of
- // specialized type parameters which are subtypes of AnyRef
- private val anyrefSpecCache = perRunCaches.newMap[Symbol, Symbol]()
+ // Key: a specialized class or method
+ // Value: a map from tparams in the original class to tparams in the specialized class.
+ private val anyrefSpecCache = perRunCaches.newMap[Symbol, mutable.Map[Symbol, Symbol]]()
// holds mappings from members to the type variables in the class
// that they were already specialized for, so that they don't get
@@ -97,22 +97,24 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
private val wasSpecializedForTypeVars = perRunCaches.newMap[Symbol, Set[Symbol]]() withDefaultValue Set()
/** Concrete methods that use a specialized type, or override such methods. */
- private val concreteSpecMethods = new mutable.HashSet[Symbol]()
+ private val concreteSpecMethods = perRunCaches.newSet[Symbol]()
private def isSpecialized(sym: Symbol) = sym hasAnnotation SpecializedClass
private def hasSpecializedFlag(sym: Symbol) = sym hasFlag SPECIALIZED
private def specializedTypes(tps: List[Symbol]) = tps filter isSpecialized
private def specializedOn(sym: Symbol): List[Symbol] = {
sym getAnnotation SpecializedClass match {
- case Some(ann @ AnnotationInfo(_, args, _)) =>
+ case Some(AnnotationInfo(_, Nil, _)) => specializableTypes.map(_.typeSymbol)
+ case Some(ann @ AnnotationInfo(_, args, _)) => {
args map (_.tpe) flatMap { tp =>
tp baseType GroupOfSpecializable match {
case TypeRef(_, GroupOfSpecializable, arg :: Nil) =>
arg.typeArgs map (_.typeSymbol)
- case _ =>
+ case _ =>
List(tp.typeSymbol)
}
}
+ }
case _ => Nil
}
}
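The new first case changes what a bare annotation means: @specialized with no arguments now expands to every specializable type. In user terms (illustrative):

    class Box[@specialized T](t: T)                 // specialized on all primitive types
    class Pair[@specialized(Int, Double) A](a: A)   // explicit argument lists behave as before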
@@ -120,17 +122,28 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
// If we replace `isBoundedGeneric` with (tp <:< AnyRefClass.tpe),
// then pos/spec-List.scala fails - why? Does this kind of check fail
// for similar reasons? Does `sym.isAbstractType` make a difference?
- private def isSpecializedAnyRefSubtype(tp: Type, sym: Symbol) = (
- (specializedOn(sym) contains AnyRefModule)
- && !isValueClass(tp.typeSymbol)
- && isBoundedGeneric(tp)
- )
+ private def isSpecializedAnyRefSubtype(tp: Type, sym: Symbol) = {
+ specializedOn(sym).exists(s => !isValueClass(s)) &&
+ !isValueClass(tp.typeSymbol) &&
+ isBoundedGeneric(tp)
+ //(tp <:< AnyRefClass.tpe)
+ }
private def isBoundedGeneric(tp: Type) = tp match {
case TypeRef(_, sym, _) if sym.isAbstractType => (tp <:< AnyRefClass.tpe)
case TypeRef(_, sym, _) => !isValueClass(sym)
case _ => false
}
+ def unspecializedSymbol(sym: Symbol): Symbol = {
+ if (sym hasFlag SPECIALIZED) {
+ // add initialization from its generic class constructor
+ val genericName = nme.unspecializedName(sym.name)
+ val member = sym.owner.info.decl(genericName.toTypeName)
+ member
+ }
+ else NoSymbol
+ }
+
object TypeEnv {
/** Return a new type environment binding specialized type parameters of sym to
* the given args. Expects the lists to have the same length.
@@ -248,7 +261,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val stvTypeParams = specializedTypeVars(target.info.typeParams map (_.info))
val stvResult = specializedTypeVars(target.info.resultType)
- log("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
+ debuglog("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
(stvTypeParams -- stvResult).nonEmpty
}
@@ -279,10 +292,10 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val pre1 = this(pre)
// when searching for a specialized class, take care to map all
// type parameters that are subtypes of AnyRef to AnyRef
- val args1 = map2(args, sym.typeParams) {
- case (tp, orig) if isSpecializedAnyRefSubtype(tp, orig) => AnyRefClass.tpe
- case (tp, _) => tp
- }
+ val args1 = map2(args, sym.info.typeParams)((tp, orig) =>
+ if (isSpecializedAnyRefSubtype(tp, orig)) AnyRefClass.tpe
+ else tp
+ )
specializedClass.get((sym, TypeEnv.fromSpecialization(sym, args1))) match {
case Some(sym1) => typeRef(pre1, sym1, survivingArgs(sym, args))
case None => typeRef(pre1, sym, args)
@@ -322,10 +335,9 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
nme.getterToLocal(specializedName(nme.localToGetter(name), types1, types2))
else {
val (base, cs, ms) = nme.splitSpecializedName(name)
- val abbrevs = definitions.abbrvTag withDefaultValue definitions.abbrvTag(ObjectClass)
newTermName(base.toString + "$"
- + "m" + ms + types1.map(t => abbrevs(t.typeSymbol)).mkString("", "", "")
- + "c" + cs + types2.map(t => abbrevs(t.typeSymbol)).mkString("", "", "$sp"))
+ + "m" + ms + types1.map(t => definitions.abbrvTag(t.typeSymbol)).mkString("", "", "")
+ + "c" + cs + types2.map(t => definitions.abbrvTag(t.typeSymbol)).mkString("", "", "$sp"))
}
}
@@ -345,13 +357,11 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
* These are in a meaningful order for stability purposes.
*/
def concreteTypes(sym: Symbol): List[Type] = {
- val types = (
- if (!isSpecialized(sym)) Nil // no @specialized Annotation
- else specializedOn(sym) match {
- case Nil => specializableTypes // specialized on everything
- case args => args map (s => specializesClass(s).tpe) sorted // specialized on args
- }
- )
+ val types = if (!isSpecialized(sym))
+ Nil // no @specialized Annotation
+ else
+ specializedOn(sym) map (s => specializesClass(s).tpe) sorted
+
if (isBoundedGeneric(sym.tpe) && (types contains AnyRefClass))
reporter.warning(sym.pos, sym + " is always a subtype of " + AnyRefClass.tpe + ".")
@@ -370,8 +380,10 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case set :: Nil => set map (x => List(x))
case set :: sets => for (x <- set ; xs <- loop(sets)) yield x :: xs
}
- // zip the keys with each permutation to create a TypeEnv
- loop(keys map concreteTypes) map (xss => Map(keys zip xss: _*))
+ // zip the keys with each permutation to create a TypeEnv.
+ // If we don't exclude the "all AnyRef" specialization, we will
+ // incur duplicate members and crash during mixin.
+ loop(keys map concreteTypes) filterNot (_ forall (_ <:< AnyRefClass.tpe)) map (xss => Map(keys zip xss: _*))
}
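A worked example of loop, which takes the cartesian product of the per-parameter type sets (written informally, with type symbols abbreviated):

    // keys = List(T, U); concreteTypes gives List(Set(Int, Long), Set(Double))
    // loop(...)        == List(List(Int, Double), List(Long, Double))
    // after zipping    == List(Map(T -> Int, U -> Double), Map(T -> Long, U -> Double))
    // filterNot then drops any environment whose values are all AnyRef subtypes,
    // which is what previously produced the duplicate members mentioned above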
/** Does the given 'sym' need to be specialized in the environment 'env'?
@@ -395,8 +407,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
tpes foreach (tp => buf ++= specializedTypeVars(tp))
buf.result
}
- def specializedTypeVars(sym: Symbol): immutable.Set[Symbol] =
- atPhase(currentRun.typerPhase)(specializedTypeVars(sym.info))
+ def specializedTypeVars(sym: Symbol): immutable.Set[Symbol] = beforeTyper(specializedTypeVars(sym.info))
/** Return the set of @specialized type variables mentioned by the given type.
* It only counts type variables that appear:
@@ -428,27 +439,24 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case _ => Set()
}
- /** Returns the type parameter in the specialized class `clazz` that corresponds to type parameter
- * `sym` in the original class. It will create it if needed or use the one from the cache.
+ /** Returns the type parameter in the specialized class `sClass` that corresponds to type parameter
+ * `tparam` in the original class. It will create it if needed or use the one from the cache.
*/
- private def typeParamSubAnyRef(sym: Symbol, clazz: Symbol) = (
- anyrefSpecCache.getOrElseUpdate(sym,
- clazz.newTypeParameter(sym.name append nme.SPECIALIZED_SUFFIX_NAME toTypeName, sym.pos)
- setInfo TypeBounds(sym.info.bounds.lo, AnyRefClass.tpe)
+ private def typeParamSubAnyRef(tparam: Symbol, sClass: Symbol): Type = {
+ val sClassMap = anyrefSpecCache.getOrElseUpdate(sClass, mutable.Map[Symbol, Symbol]())
+
+ sClassMap.getOrElseUpdate(tparam,
+ tparam.cloneSymbol(sClass, tparam.flags, tparam.name append tpnme.SPECIALIZED_SUFFIX)
+ modifyInfo (info => TypeBounds(info.bounds.lo, AnyRefClass.tpe))
).tpe
- )
+ }
/** Cleans the anyrefSpecCache of all type parameter symbols of a class.
*/
- private def cleanAnyRefSpecCache(clazz: Symbol, decls: List[Symbol]) = (
+ private def cleanAnyRefSpecCache(clazz: Symbol, decls: List[Symbol]) {
// remove class type parameters and those of normalized members.
- clazz :: decls foreach {
- _.tpe match {
- case PolyType(tparams, _) => anyrefSpecCache --= tparams
- case _ => ()
- }
- }
- )
+ clazz :: decls foreach (anyrefSpecCache remove _)
+ }
/** Type parameters that survive when specializing in the specified environment. */
def survivingParams(params: List[Symbol], env: TypeEnv) =
@@ -540,7 +548,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
var res: List[Type] = Nil
// log(specializedClass + ": seeking specialized parents of class with parents: " + parents.map(_.typeSymbol))
for (p <- parents) {
- val stp = atPhase(phase.next)(specializedType(p))
+ val stp = afterSpecialize(specializedType(p))
if (stp != p)
if (p.typeSymbol.isTrait) res ::= stp
else if (currentRun.compiles(clazz))
@@ -550,7 +558,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
res
}
- var parents = List(applyContext(atPhase(currentRun.typerPhase)(clazz.tpe)))
+ var parents = List(applyContext(beforeTyper(clazz.tpe)))
// log("!!! Parents: " + parents + ", sym: " + parents.map(_.typeSymbol))
if (parents.head.typeSymbol.isTrait)
parents = parents.head.parents.head :: parents
@@ -564,7 +572,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
// as with the parents and assign it to typeOfThis.
if (clazz.typeOfThis.typeConstructor ne clazz.typeConstructor) {
sClass.typeOfThis = applyContext(clazz.typeOfThis)
- log("Rewriting self-type for specialized class:\n" +
+ debuglog("Rewriting self-type for specialized class:\n" +
" " + clazz.defStringSeenAs(clazz.typeOfThis) + "\n" +
" => " + sClass.defStringSeenAs(sClass.typeOfThis)
)
@@ -572,7 +580,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
polyType(newClassTParams, ClassInfoType(parents ::: extraSpecializedMixins, decls1, sClass))
}
- atPhase(phase.next)(sClass setInfo specializedInfoType)
+ afterSpecialize(sClass setInfo specializedInfoType)
val fullEnv = outerEnv ++ env
/** Enter 'sym' in the scope of the current specialized class. Its type is
@@ -646,7 +654,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
})
}
else
- log("conflicting env for " + m + " env: " + env)
+ debuglog("conflicting env for " + m + " env: " + env)
}
else if (m.isDeferred) { // abstract methods
val specMember = enterMember(cloneInSpecializedClass(m, _ | DEFERRED))
@@ -715,7 +723,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
typeEnv(specClass) = fullEnv
specClass.name = specializedName(specClass, fullEnv).toTypeName
enterMember(specClass)
- log("entered specialized class " + specClass.fullName)
+ debuglog("entered specialized class " + specClass.fullName)
info(specClass) = SpecializedInnerClass(m, fullEnv)
}
}
@@ -752,7 +760,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (existing != NoSymbol)
clazz.owner.info.decls.unlink(existing)
- atPhase(phase.next)(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
+ afterSpecialize(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
}
if (subclasses.nonEmpty) clazz.resetFlag(FINAL)
cleanAnyRefSpecCache(clazz, decls1)
@@ -771,7 +779,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
private def normalizeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv): List[Symbol] = {
debuglog("normalizeMember: " + sym.fullName)
sym :: (
- if (!sym.isMethod || atPhase(currentRun.typerPhase)(sym.typeParams.isEmpty)) Nil
+ if (!sym.isMethod || beforeTyper(sym.typeParams.isEmpty)) Nil
else {
var specializingOn = specializedParams(sym)
val unusedStvars = specializingOn filterNot specializedTypeVars(sym.info)
@@ -829,10 +837,9 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (sym.isPrivate/* || sym.isProtected*/) {
//sym.privateWithin = sym.enclosingPackage
sym.resetFlag(PRIVATE).setFlag(PROTECTED)
- log("-->d SETTING PRIVATE WITHIN TO " + sym.enclosingPackage + " for " + sym)
+ debuglog("-->d SETTING PRIVATE WITHIN TO " + sym.enclosingPackage + " for " + sym)
}
- sym.resetFlag(FINAL)
val specMember = subst(outerEnv)(specializedOverload(owner, sym, spec))
typeEnv(specMember) = typeEnv(sym) ++ outerEnv ++ spec
wasSpecializedForTypeVars(specMember) ++= spec collect { case (s, tp) if s.tpe == tp => s }
@@ -913,7 +920,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
checkOverriddenTParams(overridden)
val env = unify(overridden.info, overriding.info, emptyEnv, false)
- def atNext = atPhase(phase.next)(overridden.owner.info.decl(specializedName(overridden, env)))
+ def atNext = afterSpecialize(overridden.owner.info.decl(specializedName(overridden, env)))
debuglog("\t\tenv: " + env + "isValid: " + TypeEnv.isValid(env, overridden) + "found: " + atNext)
if (TypeEnv.restrict(env, stvars).nonEmpty && TypeEnv.isValid(env, overridden) && atNext != NoSymbol)
@@ -928,7 +935,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case (NoSymbol, _) => None
case (overridden, env) =>
val om = specializedOverload(clazz, overridden, env)
- log("Added specialized overload %s for %s in env: %s with type: %s".format(om, overriding.fullName, env, om.info))
+ debuglog("Added specialized overload %s for %s in env: %s with type: %s".format(om, overriding.fullName, env, om.info))
typeEnv(om) = env
addConcreteSpecMethod(overriding)
info(om) = (
@@ -953,7 +960,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
}
)
overloads(overriding) ::= Overload(om, env)
- ifDebug(atPhase(phase.next)(assert(
+ ifDebug(afterSpecialize(assert(
overridden.owner.info.decl(om.name) != NoSymbol,
"Could not find " + om.name + " in " + overridden.owner.info.decls))
)
@@ -963,6 +970,10 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
}
case object UnifyError extends scala.util.control.ControlThrowable
+ private[this] def unifyError(tp1: Any, tp2: Any): Nothing = {
+ log("unifyError" + ((tp1, tp2)))
+ throw UnifyError
+ }
/** Return the most general type environment that specializes tp1 to tp2.
* It only allows binding of type parameters annotated with @specialized.
@@ -973,29 +984,34 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
private def unify(tp1: Type, tp2: Type, env: TypeEnv, strict: Boolean): TypeEnv = (tp1, tp2) match {
case (TypeRef(_, sym1, _), _) if isSpecialized(sym1) =>
debuglog("Unify - basic case: " + tp1 + ", " + tp2)
- if (isValueClass(tp2.typeSymbol) || isSpecializedAnyRefSubtype(tp2, sym1))
+ if (isValueClass(tp2.typeSymbol))
env + ((sym1, tp2))
+ else if (isSpecializedAnyRefSubtype(tp2, sym1))
+ env + ((sym1, tp2)) // env + ((sym1, AnyRefClass.tpe))
+ else if (strict)
+ unifyError(tp1, tp2)
else
- if (strict) throw UnifyError else env
+ env
case (TypeRef(_, sym1, args1), TypeRef(_, sym2, args2)) =>
debuglog("Unify TypeRefs: " + tp1 + " and " + tp2 + " with args " + (args1, args2) + " - ")
- if (strict && args1.length != args2.length) throw UnifyError
+ if (strict && args1.length != args2.length) unifyError(tp1, tp2)
val e = unify(args1, args2, env, strict)
debuglog("unified to: " + e)
e
case (TypeRef(_, sym1, _), _) if sym1.isTypeParameterOrSkolem =>
env
case (MethodType(params1, res1), MethodType(params2, res2)) =>
- if (strict && params1.length != params2.length) throw UnifyError
+ if (strict && params1.length != params2.length) unifyError(tp1, tp2)
debuglog("Unify MethodTypes: " + tp1 + " and " + tp2)
unify(res1 :: (params1 map (_.tpe)), res2 :: (params2 map (_.tpe)), env, strict)
case (PolyType(tparams1, res1), PolyType(tparams2, res2)) =>
- if (strict && tparams1.length != tparams2.length) throw UnifyError
debuglog("Unify PolyTypes: " + tp1 + " and " + tp2)
- unify(res1, res2, env, strict)
- case (PolyType(_, res), other) =>
- unify(res, other, env, strict)
- case (ThisType(_), ThisType(_)) => env
+ if (strict && tparams1.length != tparams2.length)
+ unifyError(tp1, tp2)
+ else
+ unify(res1, res2, env, strict)
+ case (PolyType(_, res), other) => unify(res, other, env, strict)
+ case (ThisType(_), ThisType(_)) => env
case (_, SingleType(_, _)) => unify(tp1, tp2.underlying, env, strict)
case (SingleType(_, _), _) => unify(tp1.underlying, tp2, env, strict)
case (ThisType(_), _) => unify(tp1.widen, tp2, env, strict)
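A concrete instance of unify, written informally:

    // unify((T, T) => T, (Int, Int) => Int, emptyEnv, strict = false)
    //   walks the two MethodTypes in lockstep and yields Map(T -> Int)
    // under strict = true, an arity mismatch or an unbindable pair now goes
    // through unifyError, which logs the offending types before throwing UnifyError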
@@ -1017,7 +1033,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (env.keySet intersect nenv.keySet isEmpty) env ++ nenv
else {
debuglog("could not unify: u(" + args._1 + ", " + args._2 + ") yields " + nenv + ", env: " + env)
- throw UnifyError
+ unifyError(tp1, tp2)
}
}
}
@@ -1095,7 +1111,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case cinfo @ ClassInfoType(parents, decls, clazz) if !unspecializableClass(cinfo) =>
val tparams = tpe.typeParams
if (tparams.isEmpty)
- atPhase(phase.next)(parents map (_.typeSymbol.info))
+ afterSpecialize(parents map (_.typeSymbol.info))
val parents1 = parents map specializedType
debuglog("transformInfo %s %s with parents1 %s ph: %s".format(
@@ -1141,7 +1157,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if (warnings)
reporter.warning(tvar.pos, "Bounds prevent specialization of " + tvar)
- log("specvars: " +
+ debuglog("specvars: " +
tvar.info.bounds.lo + ": " +
specializedTypeVars(tvar.info.bounds.lo) + " " +
subst(env, tvar.info.bounds.hi) + ": " +
@@ -1216,21 +1232,21 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
def specializeCalls(unit: CompilationUnit) = new TypingTransformer(unit) {
/** Map a specializable method to its rhs, when not deferred. */
- val body: mutable.Map[Symbol, Tree] = new mutable.HashMap
+ val body = perRunCaches.newMap[Symbol, Tree]()
/** Map a specializable method to its value parameter symbols. */
- val parameters: mutable.Map[Symbol, List[List[Symbol]]] = new mutable.HashMap
+ val parameters = perRunCaches.newMap[Symbol, List[Symbol]]()
/** Collect method bodies that are concrete specialized methods.
*/
class CollectMethodBodies extends Traverser {
override def traverse(tree: Tree) = tree match {
- case DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
+ case DefDef(_, _, _, vparams :: Nil, _, rhs) =>
if (concreteSpecMethods(tree.symbol) || tree.symbol.isConstructor) {
debuglog("!!! adding body of a defdef %s, symbol %s: %s".format(tree, tree.symbol, rhs))
body(tree.symbol) = rhs
// body(tree.symbol) = tree // whole method
- parameters(tree.symbol) = mmap(vparamss)(_.symbol)
+ parameters(tree.symbol) = vparams.map(_.symbol)
concreteSpecMethods -= tree.symbol
} // no need to descend further down inside method bodies
@@ -1243,7 +1259,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
}
}
- def doesConform(origSymbol: Symbol, treeType: Type, memberType: Type, env: TypeEnv) =
+ def doesConform(origSymbol: Symbol, treeType: Type, memberType: Type, env: TypeEnv) = {
(treeType =:= memberType) || { // anyref specialization
memberType match {
case PolyType(_, resTpe) =>
@@ -1260,6 +1276,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case _ => false
}
}
+ }
override def transform(tree: Tree): Tree = {
val symbol = tree.symbol
@@ -1267,7 +1284,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
/** The specialized symbol of 'tree.symbol' for tree.tpe, if there is one */
def specSym(qual: Tree): Option[Symbol] = {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, false)
- log("[specSym] checking for rerouting: %s with \n\tsym.tpe: %s, \n\ttree.tpe: %s \n\tenv: %s \n\tname: %s"
+ debuglog("[specSym] checking for rerouting: %s with \n\tsym.tpe: %s, \n\ttree.tpe: %s \n\tenv: %s \n\tname: %s"
.format(tree, symbol.tpe, tree.tpe, env, specializedName(symbol, env)))
if (!env.isEmpty) { // a method?
val specCandidates = qual.tpe.member(specializedName(symbol, env))
@@ -1275,12 +1292,12 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
doesConform(symbol, tree.tpe, qual.tpe.memberType(s), env)
}
- log("[specSym] found: " + specCandidates.tpe + ", instantiated as: " + tree.tpe)
- log("[specSym] found specMember: " + specMember)
+ debuglog("[specSym] found: " + specCandidates.tpe + ", instantiated as: " + tree.tpe)
+ debuglog("[specSym] found specMember: " + specMember)
if (specMember ne NoSymbol)
if (TypeEnv.includes(typeEnv(specMember), env)) Some(specMember)
else {
- log("wrong environments for specialized member: \n\ttypeEnv(%s) = %s\n\tenv = %s".format(specMember, typeEnv(specMember), env))
+ debuglog("wrong environments for specialized member: \n\ttypeEnv(%s) = %s\n\tenv = %s".format(specMember, typeEnv(specMember), env))
None
}
else None
@@ -1290,16 +1307,13 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
curTree = tree
tree match {
case Apply(Select(New(tpt), nme.CONSTRUCTOR), args) =>
- if (findSpec(tpt.tpe).typeSymbol ne tpt.tpe.typeSymbol) {
+ debuglog("Attempting to specialize new %s(%s)".format(tpt, args.mkString(", ")))
+ val found = findSpec(tpt.tpe)
+ if (found.typeSymbol ne tpt.tpe.typeSymbol) {
// the ctor can be specialized
- log("** instantiated specialized type: " + findSpec(tpt.tpe))
- try {
- atPos(tree.pos)(
- localTyper.typed(
- Apply(
- Select(New(TypeTree(findSpec(tpt.tpe))), nme.CONSTRUCTOR),
- transformTrees(args))))
- } catch {
+ debuglog("** instantiated specialized type: " + found)
+ try localTyper.typedPos(tree.pos)(New(found, transformTrees(args): _*))
+ catch {
case te: TypeError =>
reporter.error(tree.pos, te.msg)
super.transform(tree)
@@ -1326,7 +1340,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
)
val tree1 = gen.mkTypeApply(Select(qual1, specMember), residualTargs)
- log("rewrote " + tree + " to " + tree1)
+ debuglog("rewrote " + tree + " to " + tree1)
localTyper.typedOperator(atPos(tree.pos)(tree1)) // being polymorphic, it must be a method
case None => super.transform(tree)
@@ -1334,8 +1348,8 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
case Select(Super(_, _), name) if illegalSpecializedInheritance(currentClass) =>
val pos = tree.pos
- log(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.")
- log(pos.lineContent)
+ debuglog(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.")
+ debuglog(pos.lineContent)
tree
case Select(qual, name) =>
@@ -1397,7 +1411,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
self,
atOwner(currentOwner)(transformTrees(body ::: specMembers)))
- case ddef @ DefDef(mods, name, tparams, vparamss, tpt, rhs) if info.isDefinedAt(symbol) =>
+ case ddef @ DefDef(_, _, _, vparamss, _, _) if info.isDefinedAt(symbol) =>
// log("--> method: " + ddef + " in " + ddef.symbol.owner + ", " + info(symbol))
if (symbol.isConstructor) {
@@ -1405,110 +1419,90 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
val superRef: Tree = Select(Super(This(tpnme.EMPTY), tpnme.EMPTY), nme.CONSTRUCTOR)
forwardCtorCall(tree.pos, superRef, vparamss, symbol.owner)
}
- if (symbol.isPrimaryConstructor) localTyper typed {
- atPos(symbol.pos)(treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt, Block(List(t), Literal(Constant()))))
- } else {
- // duplicate the original constructor
+ if (symbol.isPrimaryConstructor)
+ localTyper.typedPos(symbol.pos)(deriveDefDef(tree)(_ => Block(List(t), Literal(Constant()))))
+ else // duplicate the original constructor
duplicateBody(ddef, info(symbol).target)
- }
- } else info(symbol) match {
-
+ }
+ else info(symbol) match {
case Implementation(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
// we have an rhs, specialize it
val tree1 = duplicateBody(ddef, target)
debuglog("implementation: " + tree1)
- val DefDef(mods, name, tparams, vparamss, tpt, rhs) = tree1
- treeCopy.DefDef(tree1, mods, name, tparams, vparamss, tpt, transform(rhs))
+ deriveDefDef(tree1)(transform)
case NormalizedMember(target) =>
- log("Normalized member: " + symbol + ", target: " + target)
+ debuglog("Normalized member: " + symbol + ", target: " + target)
if (target.isDeferred || conflicting(typeEnv(symbol))) {
- treeCopy.DefDef(
- tree, mods, name, tparams, vparamss, tpt,
- localTyper typed gen.mkSysErrorCall("boom! you stepped on a bug. This method should never be called.")
- )
+ deriveDefDef(tree)(_ => localTyper typed gen.mkSysErrorCall("Fatal error in code generation: this should never be called."))
}
else {
// we have an rhs, specialize it
val tree1 = duplicateBody(ddef, target)
debuglog("implementation: " + tree1)
- val DefDef(mods, name, tparams, vparamss, tpt, rhs) = tree1
- treeCopy.DefDef(tree1, mods, name, tparams, vparamss, tpt, transform(rhs))
+ deriveDefDef(tree1)(transform)
}
case SpecialOverride(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
//debuglog("moving implementation, body of target " + target + ": " + body(target))
- log("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
+ debuglog("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
// we have an rhs, specialize it
val tree1 = addBody(ddef, target)
(new ChangeOwnerTraverser(target, tree1.symbol))(tree1.rhs)
debuglog("changed owners, now: " + tree1)
- val DefDef(mods, name, tparams, vparamss, tpt, rhs) = tree1
- treeCopy.DefDef(tree1, mods, name, tparams, vparamss, tpt, transform(rhs))
-
+ deriveDefDef(tree1)(transform)
case SpecialOverload(original, env) =>
debuglog("completing specialized " + symbol.fullName + " calling " + original)
- log("special overload " + original + " -> " + env)
+ debuglog("special overload " + original + " -> " + env)
val t = DefDef(symbol, { vparamss =>
val fun = Apply(Select(This(symbol.owner), original),
makeArguments(original, vparamss.head))
- log("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
+ debuglog("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
gen.maybeMkAsInstanceOf(fun,
symbol.owner.thisType.memberType(symbol).finalResultType,
symbol.owner.thisType.memberType(original).finalResultType)
})
- log("created special overload tree " + t)
+ debuglog("created special overload tree " + t)
debuglog("created " + t)
localTyper.typed(t)
case fwd @ Forward(_) =>
- log("forward: " + fwd + ", " + ddef)
+ debuglog("forward: " + fwd + ", " + ddef)
val rhs1 = forwardCall(tree.pos, gen.mkAttributedRef(symbol.owner.thisType, fwd.target), vparamss)
- log("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
- localTyper.typed(treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt, rhs1))
+ debuglog("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
+ localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case SpecializedAccessor(target) =>
val rhs1 = if (symbol.isGetter)
gen.mkAttributedRef(target)
else
Assign(gen.mkAttributedRef(target), Ident(vparamss.head.head.symbol))
- log("specialized accessor: " + target + " -> " + rhs1)
- localTyper.typed(treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt, rhs1))
+ debuglog("specialized accessor: " + target + " -> " + rhs1)
+ localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case Abstract(targ) =>
- log("abstract: " + targ)
- val DefDef(mods, name, tparams, vparamss, tpt, rhs) = tree
- val t = treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt, rhs)
- localTyper.typed(t)
+ debuglog("abstract: " + targ)
+ localTyper.typed(deriveDefDef(tree)(rhs => rhs))
}
- case ValDef(mods, name, tpt, rhs) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
+ case ValDef(_, _, _, _) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
assert(body.isDefinedAt(symbol.alias), body)
- val tree1 = treeCopy.ValDef(tree, mods, name, tpt, body(symbol.alias).duplicate)
+ val tree1 = deriveValDef(tree)(_ => body(symbol.alias).duplicate)
debuglog("now typing: " + tree1 + " in " + tree.symbol.owner.fullName)
+
val d = new Duplicator
- val ValDef(mods1, name1, tpt1, rhs1) = d.retyped(
+ val newValDef = d.retyped(
localTyper.context1.asInstanceOf[d.Context],
tree1,
symbol.alias.enclClass,
symbol.enclClass,
typeEnv(symbol.alias) ++ typeEnv(tree.symbol)
)
- val t = treeCopy.ValDef(tree1, mods1, name1, tpt1, transform(rhs1))
- log("valdef " + tree + " -> " + t)
- t
-
-// val tree1 =
-// treeCopy.ValDef(tree, mods, name, tpt,
-// localTyper.typed(
-// Apply(Select(Super(currentClass, nme.EMPTY), symbol.alias.getter(symbol.alias.owner)),
-// List())))
-// debuglog("replaced ValDef: " + tree1 + " in " + tree.symbol.owner.fullName)
-// tree1
+ deriveValDef(newValDef)(transform)
case Apply(sel @ Select(sup @ Super(qual, name), name1), args)
if (sup.symbol.info.parents != atPhase(phase.prev)(sup.symbol.info.parents)) =>
@@ -1526,18 +1520,12 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
}
}
- private def reskolemize(tparams: List[TypeDef]): (List[Symbol], List[Symbol]) = {
- val saved = tparams map (_.symbol)
- localTyper skolemizeTypeParams tparams
- (saved, tparams map (_.symbol))
- }
-
private def duplicateBody(tree: DefDef, source: Symbol) = {
val symbol = tree.symbol
val meth = addBody(tree, source)
val d = new Duplicator
- log("-->d DUPLICATING: " + meth)
+ debuglog("-->d DUPLICATING: " + meth)
d.retyped(
localTyper.context1.asInstanceOf[d.Context],
meth,
@@ -1555,8 +1543,8 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
*/
private def addBody(tree: DefDef, source: Symbol): DefDef = {
val symbol = tree.symbol
- debuglog("specializing body of" + symbol.fullName + ": " + symbol.info)
- val DefDef(mods, name, tparams, vparamss, tpt, _) = tree
+ debuglog("specializing body of" + symbol.defString)
+ val DefDef(_, _, tparams, vparams :: Nil, tpt, _) = tree
// val (_, origtparams) = splitParams(source.typeParams)
val env = typeEnv(symbol)
val boundTvars = env.keySet
@@ -1564,12 +1552,12 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
debuglog("substituting " + origtparams + " for " + symbol.typeParams)
// skolemize type parameters
- val (oldtparams, newtparams) = reskolemize(tparams)
+ val oldtparams = tparams map (_.symbol)
+ val newtparams = deriveFreshSkolems(oldtparams)
+ map2(tparams, newtparams)(_ setSymbol _)
// create fresh symbols for value parameters to hold the skolem types
- val vparamss1 = List(for (vdef <- vparamss.head; param = vdef.symbol) yield {
- ValDef(param cloneSymbol symbol substInfo (oldtparams, newtparams))
- })
+ val newSyms = cloneSymbolsAtOwnerAndModify(vparams map (_.symbol), symbol, _.substSym(oldtparams, newtparams))
// replace value and type parameters of the old method with the new ones
// log("Adding body for " + tree.symbol + " - origtparams: " + origtparams + "; tparams: " + tparams)
@@ -1577,14 +1565,15 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
// log("Type env of: " + tree.symbol + ": " + boundTvars)
// log("newtparams: " + newtparams)
val symSubstituter = new ImplementationAdapter(
- parameters(source).flatten ::: origtparams,
- vparamss1.flatten.map(_.symbol) ::: newtparams,
+ parameters(source) ::: origtparams,
+ newSyms ::: newtparams,
source.enclClass,
false) // don't make private fields public
- val tmp = symSubstituter(body(source).duplicate)
+
+ val newBody = symSubstituter(body(source).duplicate)
tpt.tpe = tpt.tpe.substSym(oldtparams, newtparams)
- treeCopy.DefDef(tree, mods, name, tparams, vparamss1, tpt, tmp)
+ copyDefDef(tree)(vparamss = List(newSyms map ValDef), rhs = newBody)
}
/** Create trees for specialized members of 'sClass', based on the
@@ -1601,13 +1590,13 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
if m.hasFlag(SPECIALIZED)
&& (m.sourceFile ne null)
&& satisfiable(typeEnv(m), !sClass.hasFlag(SPECIALIZED))) {
- log("creating tree for " + m.fullName)
+ debuglog("creating tree for " + m.fullName)
if (m.isMethod) {
if (info(m).target.hasAccessorFlag) hasSpecializedFields = true
if (m.isClassConstructor) {
- val origParamss = parameters(info(m).target)
+ val origParams = parameters(info(m).target)
val vparams = (
- map2(m.info.paramTypes, origParamss(0))((tp, sym) =>
+ map2(m.info.paramTypes, origParams)((tp, sym) =>
m.newValue(specializedName(sym, typeEnv(sClass)), sym.pos, sym.flags) setInfo tp
)
)
@@ -1657,7 +1646,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
buf +=
ClassDef(specCls, atPos(impl.pos)(Template(parents, emptyValDef, List()))
.setSymbol(specCls.newLocalDummy(sym1.pos))) setPos tree.pos
- log("created synthetic class: " + specCls + " of " + sym1 + " in env: " + env)
+ debuglog("created synthetic class: " + specCls + " of " + sym1 + " in env: " + env)
}
case _ =>
}
@@ -1698,7 +1687,7 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
* - there is a getter for the specialized field in the same class
*/
def initializesSpecializedField(f: Symbol) = (
- (f.name endsWith nme.SPECIALIZED_SUFFIX_NAME)
+ (f.name endsWith nme.SPECIALIZED_SUFFIX)
&& clazz.info.member(nme.originalName(f.name)).isPublic
&& clazz.info.decl(f.name).suchThat(_.isGetter) != NoSymbol
)
@@ -1733,9 +1722,27 @@ abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
class SpecializationTransformer(unit: CompilationUnit) extends Transformer {
informProgress("specializing " + unit)
- override def transform(tree: Tree) =
- if (settings.nospecialization.value) tree
- else atPhase(phase.next)(specializeCalls(unit).transform(tree))
+ override def transform(tree: Tree) = {
+ val resultTree = if (settings.nospecialization.value) tree
+ else afterSpecialize(specializeCalls(unit).transform(tree))
+
+ // Remove the final modifier and @inline annotation from anything in the
+ // original class (since it's being overridden in at least one subclass).
+ //
+ // We do this here so that the specialized subclasses will correctly copy
+ // final and @inline.
+ info.foreach {
+ case (sym, SpecialOverload(target, _)) => {
+ sym.resetFlag(FINAL)
+ target.resetFlag(FINAL)
+ sym.removeAnnotation(ScalaInlineClass)
+ target.removeAnnotation(ScalaInlineClass)
+ }
+ case _ => {}
+ }
+
+ resultTree
+ }
}
def printSpecStats() {
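
A note on the recurring rewrite in the SpecializeTypes hunks above: the six-argument treeCopy.DefDef(tree, mods, name, tparams, vparamss, tpt, rhs) boilerplate keeps being replaced by deriveDefDef(tree)(rhs => ...), which rebuilds the tree with only the rhs changed. A minimal, self-contained sketch of that pattern, using a toy case class in place of the compiler's internal DefDef (not the real API):

// Toy model of the derive* helpers: rewrite one field, copy the rest.
case class DefDefLike(mods: Long, name: String,
                      vparamss: List[List[String]], tpt: String, rhs: String)

def deriveDefDef(ddef: DefDefLike)(f: String => String): DefDefLike =
  ddef.copy(rhs = f(ddef.rhs))

// One call replaces a destructuring `val DefDef(mods, name, ...) = tree`
// followed by a full treeCopy.DefDef(...):
val stubbed = deriveDefDef(DefDefLike(0L, "m", Nil, "Int", "???"))(_ => "0")
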
diff --git a/src/compiler/scala/tools/nsc/transform/TailCalls.scala b/src/compiler/scala/tools/nsc/transform/TailCalls.scala
index 97b9caaa73..39ca60d870 100644
--- a/src/compiler/scala/tools/nsc/transform/TailCalls.scala
+++ b/src/compiler/scala/tools/nsc/transform/TailCalls.scala
@@ -227,7 +227,7 @@ abstract class TailCalls extends Transform {
fail(reason)
}
def rewriteTailCall(recv: Tree): Tree = {
- log("Rewriting tail recursive call: " + fun.pos.lineContent.trim)
+ debuglog("Rewriting tail recursive call: " + fun.pos.lineContent.trim)
ctx.accessed = true
typedPos(fun.pos)(Apply(Ident(ctx.label), recv :: transformArgs))
@@ -248,11 +248,10 @@ abstract class TailCalls extends Transform {
transformSynchronizedMethods(tree) match {
- case dd @ DefDef(mods, name, tparams, vparams, tpt, rhs) =>
+ case dd @ DefDef(_, _, _, vparamss0, _, rhs0) =>
val newCtx = new Context(dd)
- debuglog("Considering " + name + " for tailcalls")
- val newRHS = transform(rhs, newCtx)
-
- treeCopy.DefDef(tree, mods, name, tparams, vparams, tpt, {
+ debuglog("Considering " + dd.name + " for tailcalls")
+ val newRHS = transform(rhs0, newCtx)
+ deriveDefDef(tree)(rhs =>
if (newCtx.isTransformed) {
/** We have rewritten the tree, but there may be nested recursive calls remaining.
* If @tailrec is given we need to fail those now.
@@ -264,7 +263,7 @@ abstract class TailCalls extends Transform {
}
}
val newThis = newCtx.newThis(tree.pos)
- val vpSyms = vparams.flatten map (_.symbol)
+ val vpSyms = vparamss0.flatten map (_.symbol)
typedPos(tree.pos)(Block(
List(ValDef(newThis, This(currentClass))),
@@ -277,7 +276,7 @@ abstract class TailCalls extends Transform {
newRHS
}
- })
+ )
case Block(stats, expr) =>
treeCopy.Block(tree,
@@ -286,11 +285,7 @@ abstract class TailCalls extends Transform {
)
case CaseDef(pat, guard, body) =>
- treeCopy.CaseDef(tree,
- pat,
- guard,
- transform(body)
- )
+ deriveCaseDef(tree)(transform)
case If(cond, thenp, elsep) =>
treeCopy.If(tree,
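
For orientation on rewriteTailCall above: a recursive call in tail position is replaced by a jump to the method-entry label with rebound arguments, Apply(Ident(ctx.label), recv :: transformArgs). Expressed in source-level terms purely as a hedged illustration (the phase emits LabelDefs, not while loops):

// Before: a tail-recursive method.
def len(xs: List[Int], acc: Int): Int =
  if (xs.isEmpty) acc else len(xs.tail, acc + 1)

// After, conceptually: parameters become mutable slots and the
// recursive call becomes a back-edge to the method entry.
def lenLoop(xs0: List[Int], acc0: Int): Int = {
  var xs = xs0; var acc = acc0
  while (true) {
    if (xs.isEmpty) return acc
    xs = xs.tail; acc += 1
  }
  acc // unreachable; keeps the sketch well-typed
}
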
diff --git a/src/compiler/scala/tools/nsc/transform/UnCurry.scala b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
index ab4a2141a9..b9b115b7c8 100644
--- a/src/compiler/scala/tools/nsc/transform/UnCurry.scala
+++ b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
@@ -129,7 +129,7 @@ abstract class UnCurry extends InfoTransform
appliedType(NonLocalReturnControlClass.typeConstructor, List(argtype))
/** A hashmap from method symbols to non-local return keys */
- private val nonLocalReturnKeys = new mutable.HashMap[Symbol, Symbol]
+ private val nonLocalReturnKeys = perRunCaches.newMap[Symbol, Symbol]()
/** Return non-local return key for given method */
private def nonLocalReturnKey(meth: Symbol) =
@@ -144,13 +144,13 @@ abstract class UnCurry extends InfoTransform
* todo: maybe clone a pre-existing exception instead?
* (but what to do about exceptions that miss their targets?)
*/
- private def nonLocalReturnThrow(expr: Tree, meth: Symbol) =
- localTyper.typed {
- Throw(
- New(
- TypeTree(nonLocalReturnExceptionType(expr.tpe)),
- List(List(Ident(nonLocalReturnKey(meth)), expr))))
- }
+ private def nonLocalReturnThrow(expr: Tree, meth: Symbol) = localTyper typed {
+ Throw(
+ nonLocalReturnExceptionType(expr.tpe.widen),
+ Ident(nonLocalReturnKey(meth)),
+ expr
+ )
+ }
/** Transform (body, key) to:
*
@@ -166,31 +166,18 @@ abstract class UnCurry extends InfoTransform
* }
*/
private def nonLocalReturnTry(body: Tree, key: Symbol, meth: Symbol) = {
- localTyper.typed {
- val extpe = nonLocalReturnExceptionType(meth.tpe.finalResultType)
- val ex = meth.newValue(nme.ex, body.pos) setInfo extpe
- val pat = Bind(ex,
- Typed(Ident(nme.WILDCARD),
- AppliedTypeTree(Ident(NonLocalReturnControlClass),
- List(Bind(tpnme.WILDCARD,
- EmptyTree)))))
- val rhs =
- If(
- Apply(
- Select(
- Apply(Select(Ident(ex), "key"), List()),
- Object_eq),
- List(Ident(key))),
- Apply(
- TypeApply(
- Select(
- Apply(Select(Ident(ex), "value"), List()),
- Any_asInstanceOf),
- List(TypeTree(meth.tpe.finalResultType))),
- List()),
- Throw(Ident(ex)))
- val keyDef = ValDef(key, New(TypeTree(ObjectClass.tpe), List(List())))
- val tryCatch = Try(body, List(CaseDef(pat, EmptyTree, rhs)), EmptyTree)
+ localTyper typed {
+ val extpe = nonLocalReturnExceptionType(meth.tpe.finalResultType)
+ val ex = meth.newValue(body.pos, nme.ex) setInfo extpe
+ val pat = gen.mkBindForCase(ex, NonLocalReturnControlClass, List(meth.tpe.finalResultType))
+ val rhs = (
+ IF ((ex DOT nme.key)() OBJ_EQ Ident(key))
+ THEN ((ex DOT nme.value)())
+ ELSE (Throw(Ident(ex)))
+ )
+ val keyDef = ValDef(key, New(ObjectClass.tpe))
+ val tryCatch = Try(body, pat -> rhs)
+
Block(List(keyDef), tryCatch)
}
}
@@ -228,9 +215,9 @@ abstract class UnCurry extends InfoTransform
* case P_1 if G_1 => E_1
* ...
* case P_n if G_n => true
- * case _ => this.missingCase(x)
+ * case _ => this.missingCase(expr)
* }
- * def isDefinedAtCurrent(x: T): boolean = (x: @unchecked) match {
+ * def _isDefinedAt(x: T): boolean = (x: @unchecked) match {
* case P_1 if G_1 => true
* ...
* case P_n if G_n => true
@@ -240,7 +227,7 @@ abstract class UnCurry extends InfoTransform
* new $anon()
*
* However, if one of the patterns P_i if G_i is a default pattern,
- * drop the last default clause in tghe definition of `apply` and generate for `isDefinedAtCurrent` instead
+ * drop the last default clause in the definition of `apply` and generate for `_isDefinedAt` instead
*
* def isDefinedAtCurrent(x: T): boolean = true
*/
@@ -291,73 +278,26 @@ abstract class UnCurry extends InfoTransform
val substParam = new TreeSymSubstituter(fun.vparams map (_.symbol), params)
def substTree[T <: Tree](t: T): T = substParam(resetLocalAttrs(t))
- // waiting here until we can mix case classes and extractors reliably (i.e., when virtpatmat becomes the default)
- // object VirtPatmatOpt {
- // object Last {
- // def unapply[T](xs: List[T]) = xs.lastOption
- // }
- // // keep this in synch by what's generated by combineCases/runOrElse
- // object MatcherBlock {
- // def unapply(matcher: Tree): Option[(ValDef, ValDef, ValDef, ValDef, List[Tree])] = matcher match { // TODO: BUG the unapplySeq version of the case below does not seem to work in virtpatmat??
- // case Block((zero: ValDef) :: (x: ValDef) :: (matchRes: ValDef) :: (keepGoing: ValDef) :: stats, _) => Some(zero, x, matchRes, keepGoing, stats)
- // case _ => None
- // }
- // }
- // // TODO: virtpatmat use case: would be nice if could abstract over the repeated pattern more easily
- // // case Block(Last(P)) =>
- // // case P =>
- // def unapply(matcher: Tree): Option[(ValDef, ValDef, ValDef, ValDef, List[Tree], Tree => Tree)] = matcher match {
- // case MatcherBlock(zero, x, matchRes, keepGoing, stats) => Some(zero, x, matchRes, keepGoing, stats, identity[Tree])
- // case Block(outerStats, MatcherBlock(zero, x, matchRes, keepGoing, stats)) => Some(zero, x, matchRes, keepGoing, stats, inner => Block(outerStats, inner))
- // case b => treeBrowser browse b; None
- // }
- // }
-
- // TODO: optimize duplication, but make sure ValDef's introduced by wrap are treated correctly
- def dupMatch(selector: Tree, cases: List[CaseDef], wrap: Match => Tree = identity) = {
- def transformCase(cdef: CaseDef): CaseDef =
- CaseDef(cdef.pat, cdef.guard, Literal(Constant(true)))
- def defaultCase = CaseDef(Ident(nme.WILDCARD), EmptyTree, Literal(Constant(false)))
-
- gen.mkUncheckedMatch(
- if (cases exists treeInfo.isDefaultCase) Literal(Constant(true))
- else substTree(wrap(Match(selector, (cases map transformCase) :+ defaultCase)).duplicate)
- )
- }
+ object isDefinedAtTransformer extends gen.MatchMatcher {
+ // TODO: optimize duplication, but make sure ValDef's introduced by wrap are treated correctly
+ override def caseMatch(orig: Tree, selector: Tree, cases: List[CaseDef], wrap: Tree => Tree): Tree = {
+ def transformCase(cdef: CaseDef): CaseDef =
+ CaseDef(cdef.pat, cdef.guard, Literal(Constant(true)))
- def dupVirtMatch(zero: ValDef, x: ValDef, matchRes: ValDef, keepGoing: ValDef, stats: List[Tree], wrap: Block => Tree = identity) = {
- object dropMatchResAssign extends Transformer {
- // override val treeCopy = newStrictTreeCopier // will duplicate below
- override def transform(tree: Tree): Tree = tree match {
- // don't compute the result of the match -- remove the block for the RHS (emitted by pmgen.one), except for the assignment to keepGoing
- case Block(List(matchRes, ass@Assign(keepGoingLhs, falseLit)), zero) if keepGoingLhs.symbol eq keepGoing.symbol =>
- Block(List(ass), zero)
- case _ =>
- super.transform(tree)
- }
+ def defaultCase = CaseDef(Ident(nme.WILDCARD), EmptyTree, Literal(Constant(false)))
+
+ val casesNoSynthCatchAll = dropSyntheticCatchAll(cases)
+
+ gen.mkUncheckedMatch(
+ if (casesNoSynthCatchAll exists treeInfo.isDefaultCase) Literal(Constant(true))
+ else substTree(wrap(Match(selector, (casesNoSynthCatchAll map transformCase) :+ defaultCase)).duplicate)
+ )
}
- val statsNoMatchRes: List[Tree] = stats map (dropMatchResAssign.transform) toList
- val idaBlock = wrap(Block(
- zero ::
- x ::
- /* drop matchRes def */
- keepGoing ::
- statsNoMatchRes,
- NOT(REF(keepGoing.symbol)) // replace `if (keepGoing) throw new MatchError(...) else matchRes` by `!keepGoing`
- ))
- substTree(idaBlock.duplicate) // duplicate on block as a whole to ensure valdefs are properly cloned and substed
- }
- DefDef(m, (fun.body: @unchecked) match {
- case Match(selector, cases) =>
- dupMatch(selector, cases)
- case Block((vd: ValDef) :: Nil, Match(selector, cases)) => // can't factor this out using an extractor due to bugs in the old pattern matcher
- dupMatch(selector, cases, m => Block(List(vd), m))
- // virtpatmat -- TODO: find a better way to keep this in synch with the code generated by patmatvirtualizer
- case Apply(Apply(TypeApply(Select(tgt, nme.runOrElse), targs), args_scrut), args_pm) if opt.virtPatmat =>
+ override def caseVirtualizedMatch(orig: Tree, _match: Tree, targs: List[Tree], scrut: Tree, matcher: Tree): Tree = {
object noOne extends Transformer {
override val treeCopy = newStrictTreeCopier // must duplicate everything
- val one = tgt.tpe member newTermName("one")
+ val one = _match.tpe member newTermName("one")
override def transform(tree: Tree): Tree = tree match {
case Apply(fun, List(a)) if fun.symbol == one =>
// blow one's argument away since all we want to know is whether the match succeeds or not
@@ -367,15 +307,34 @@ abstract class UnCurry extends InfoTransform
super.transform(tree)
}
}
- substTree(Apply(Apply(TypeApply(Select(tgt.duplicate, tgt.tpe.member(newTermName("isSuccess"))), targs map (_.duplicate)), args_scrut map (_.duplicate)), args_pm map (noOne.transform)))
- // for the optimized version of virtpatmat
- case Block((zero: ValDef) :: (x: ValDef) :: (matchRes: ValDef) :: (keepGoing: ValDef) :: stats, _) if opt.virtPatmat =>
- dupVirtMatch(zero, x, matchRes, keepGoing, stats)
- case Block(outerStats, Block((zero: ValDef) :: (x: ValDef) :: (matchRes: ValDef) :: (keepGoing: ValDef) :: stats, _)) if opt.virtPatmat => // can't factor this out using an extractor due to bugs in the old pattern matcher
- dupVirtMatch(zero, x, matchRes, keepGoing, stats, m => Block(outerStats, m))
- // case other =>
- // treeBrowser browse other
- })
+ substTree(Apply(Apply(TypeApply(Select(_match.duplicate, _match.tpe.member(newTermName("isSuccess"))), targs map (_.duplicate)), List(scrut.duplicate)), List(noOne.transform(matcher))))
+ }
+
+ override def caseVirtualizedMatchOpt(orig: Tree, zero: ValDef, x: ValDef, matchRes: ValDef, keepGoing: ValDef, stats: List[Tree], epilogue: Tree, wrap: Tree => Tree) = {
+ object dropMatchResAssign extends Transformer {
+ // override val treeCopy = newStrictTreeCopier // will duplicate below
+ override def transform(tree: Tree): Tree = tree match {
+ // don't compute the result of the match -- remove the block for the RHS (emitted by pmgen.one), except for the assignment to keepGoing
+ case gen.VirtualCaseDef(assignKeepGoing, matchRes, zero) if assignKeepGoing.lhs.symbol eq keepGoing.symbol =>
+ Block(List(assignKeepGoing), zero)
+ case _ =>
+ super.transform(tree)
+ }
+ }
+ val statsNoMatchRes: List[Tree] = stats map (dropMatchResAssign.transform) toList
+ val idaBlock = wrap(Block(
+ zero ::
+ x ::
+ /* drop matchRes def */
+ keepGoing ::
+ statsNoMatchRes,
+ NOT(REF(keepGoing.symbol)) // replace `if (keepGoing) throw new MatchError(...) else matchRes` epilogue by `!keepGoing`
+ ))
+ substTree(idaBlock.duplicate) // duplicate on block as a whole to ensure valdefs are properly cloned and substed
+ }
+ }
+
+ DefDef(m, isDefinedAtTransformer(fun.body))
}
val members =
@@ -385,9 +344,7 @@ abstract class UnCurry extends InfoTransform
localTyper.typedPos(fun.pos) {
Block(
List(ClassDef(anonClass, NoMods, List(List()), List(List()), members, fun.pos)),
- Typed(
- New(TypeTree(anonClass.tpe), List(List())),
- TypeTree(fun.tpe)))
+ Typed(New(anonClass.tpe), TypeTree(fun.tpe)))
}
}
}
@@ -400,7 +357,7 @@ abstract class UnCurry extends InfoTransform
// when calling into scala varargs, make sure it's a sequence.
def arrayToSequence(tree: Tree, elemtp: Type) = {
- atPhase(phase.next) {
+ afterUncurry {
localTyper.typedPos(pos) {
val pt = arrayType(elemtp)
val adaptedTree = // might need to cast to Array[elemtp], as arrays are not covariant
@@ -424,7 +381,7 @@ abstract class UnCurry extends InfoTransform
else if (tp.bounds.hi ne tp) getManifest(tp.bounds.hi)
else localTyper.getManifestTree(tree, tp, false)
}
- atPhase(phase.next) {
+ afterUncurry {
localTyper.typedPos(pos) {
Apply(gen.mkAttributedSelect(tree, toArraySym),
List(getManifest(tree.tpe.baseType(TraversableClass).typeArgs.head)))
@@ -449,7 +406,7 @@ abstract class UnCurry extends InfoTransform
else arrayToSequence(mkArray, varargsElemType)
}
- atPhase(phase.next) {
+ afterUncurry {
if (isJava && isPrimitiveArray(suffix.tpe) && isArrayOfSymbol(fun.tpe.params.last.tpe, ObjectClass)) {
suffix = localTyper.typedPos(pos) {
gen.mkRuntimeCall(nme.toObjectArray, List(suffix))
@@ -482,16 +439,16 @@ abstract class UnCurry extends InfoTransform
}
}
- /** For removing calls to specially designated methods.
+ /** Called if a tree's symbol is elidable. If it's a DefDef,
+ * replace only the body/rhs with 0/false/()/null; otherwise replace
+ * the whole tree with it.
*/
- def elideIntoUnit(tree: Tree): Tree = Literal(Constant()) setPos tree.pos setType UnitClass.tpe
- def isElidable(tree: Tree) = {
- val sym = treeInfo.methPart(tree).symbol
- // XXX settings.noassertions.value temporarily retained to avoid
- // breakage until a reasonable interface is settled upon.
- sym != null && sym.elisionLevel.exists(x => x < settings.elidebelow.value || settings.noassertions.value) && {
- log("Eliding call from " + tree.symbol.owner + " to " + sym + " based on its elision threshold of " + sym.elisionLevel.get)
- true
+ private def replaceElidableTree(tree: Tree): Tree = {
+ tree match {
+ case DefDef(_,_,_,_,_,_) =>
+ deriveDefDef(tree)(rhs => Block(Nil, gen.mkZero(rhs.tpe)) setType rhs.tpe) setSymbol tree.symbol setType tree.tpe
+ case _ =>
+ gen.mkZero(tree.tpe) setType tree.tpe
}
}
@@ -532,112 +489,99 @@ abstract class UnCurry extends InfoTransform
finally this.inConstructorFlag = saved
}
- if (isElidable(tree)) elideIntoUnit(tree)
- else tree match {
- case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
- if (dd.symbol hasAnnotation VarargsClass) saveRepeatedParams(dd)
- withNeedLift(false) {
- if (tree.symbol.isClassConstructor) {
- atOwner(tree.symbol) {
- val rhs1 = (rhs: @unchecked) match {
- case Block(stats, expr) =>
- def transformInConstructor(stat: Tree) =
- withInConstructorFlag(INCONSTRUCTOR) { transform(stat) }
- val presupers = treeInfo.preSuperFields(stats) map transformInConstructor
- val rest = stats drop presupers.length
- val supercalls = rest take 1 map transformInConstructor
- val others = rest drop 1 map transform
- treeCopy.Block(rhs, presupers ::: supercalls ::: others, transform(expr))
+ val sym = tree.symbol
+ val result = (
+ // TODO - settings.noassertions.value temporarily retained to avoid
+ // breakage until a reasonable interface is settled upon.
+ if ((sym ne null) && (sym.elisionLevel.exists (_ < settings.elidebelow.value || settings.noassertions.value)))
+ replaceElidableTree(tree)
+ else tree match {
+ case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
+ if (dd.symbol hasAnnotation VarargsClass) saveRepeatedParams(dd)
+ withNeedLift(false) {
+ if (dd.symbol.isClassConstructor) {
+ atOwner(sym) {
+ val rhs1 = (rhs: @unchecked) match {
+ case Block(stats, expr) =>
+ def transformInConstructor(stat: Tree) =
+ withInConstructorFlag(INCONSTRUCTOR) { transform(stat) }
+ val presupers = treeInfo.preSuperFields(stats) map transformInConstructor
+ val rest = stats drop presupers.length
+ val supercalls = rest take 1 map transformInConstructor
+ val others = rest drop 1 map transform
+ treeCopy.Block(rhs, presupers ::: supercalls ::: others, transform(expr))
+ }
+ treeCopy.DefDef(
+ tree, mods, name, transformTypeDefs(tparams),
+ transformValDefss(vparamss), transform(tpt), rhs1)
}
- treeCopy.DefDef(
- tree, mods, name, transformTypeDefs(tparams),
- transformValDefss(vparamss), transform(tpt), rhs1)
+ } else {
+ super.transform(tree)
}
- } else {
- super.transform(tree)
}
- }
- case ValDef(_, _, _, rhs) =>
- val sym = tree.symbol
- if (sym eq NoSymbol) throw new IllegalStateException("Encountered Valdef without symbol: "+ tree + " in "+ unit)
- // a local variable that is mutable and free somewhere later should be lifted
- // as lambda lifting (coming later) will wrap 'rhs' in an Ref object.
- if (!sym.owner.isSourceMethod)
- withNeedLift(true) { super.transform(tree) }
- else
- super.transform(tree)
-/*
- case Apply(Select(Block(List(), Function(vparams, body)), nme.apply), args) =>
- // perform beta-reduction; this helps keep view applications small
- println("beta-reduce1: "+tree)
- withNeedLift(true) {
- mainTransform(new TreeSubstituter(vparams map (_.symbol), args).transform(body))
- }
+ case ValDef(_, _, _, rhs) =>
+ if (sym eq NoSymbol) throw new IllegalStateException("Encountered ValDef without symbol: "+ tree + " in "+ unit)
+ // a local variable that is mutable and free somewhere later should be lifted
+ // as lambda lifting (coming later) will wrap 'rhs' in a Ref object.
+ if (!sym.owner.isSourceMethod)
+ withNeedLift(true) { super.transform(tree) }
+ else
+ super.transform(tree)
+ case UnApply(fn, args) =>
+ val fn1 = withInPattern(false)(transform(fn))
+ val args1 = transformTrees(fn.symbol.name match {
+ case nme.unapply => args
+ case nme.unapplySeq => transformArgs(tree.pos, fn.symbol, args, analyzer.unapplyTypeListFromReturnTypeSeq(fn.tpe))
+ case _ => sys.error("internal error: UnApply node has wrong symbol")
+ })
+ treeCopy.UnApply(tree, fn1, args1)
+
+ case Apply(fn, args) =>
+ if (fn.symbol == Object_synchronized && shouldBeLiftedAnyway(args.head))
+ transform(treeCopy.Apply(tree, fn, List(liftTree(args.head))))
+ else
+ withNeedLift(true) {
+ val formals = fn.tpe.paramTypes
+ treeCopy.Apply(tree, transform(fn), transformTrees(transformArgs(tree.pos, fn.symbol, args, formals)))
+ }
- case Apply(Select(Function(vparams, body), nme.apply), args) =>
-// if (List.forall2(vparams, args)((vparam, arg) => treeInfo.isAffineIn(body) ||
-// treeInfo.isExprSafeToInline(arg))) =>
- // perform beta-reduction; this helps keep view applications small
- println("beta-reduce2: "+tree)
- withNeedLift(true) {
- mainTransform(new TreeSubstituter(vparams map (_.symbol), args).transform(body))
- }
-*/
- case UnApply(fn, args) =>
- val fn1 = withInPattern(false)(transform(fn))
- val args1 = transformTrees(fn.symbol.name match {
- case nme.unapply => args
- case nme.unapplySeq => transformArgs(tree.pos, fn.symbol, args, analyzer.unapplyTypeListFromReturnTypeSeq(fn.tpe))
- case _ => sys.error("internal error: UnApply node has wrong symbol")
- })
- treeCopy.UnApply(tree, fn1, args1)
-
- case Apply(fn, args) =>
- if (fn.symbol == Object_synchronized && shouldBeLiftedAnyway(args.head))
- transform(treeCopy.Apply(tree, fn, List(liftTree(args.head))))
- else
- withNeedLift(true) {
- val formals = fn.tpe.paramTypes
- treeCopy.Apply(tree, transform(fn), transformTrees(transformArgs(tree.pos, fn.symbol, args, formals)))
- }
+ case Assign(Select(_, _), _) =>
+ withNeedLift(true) { super.transform(tree) }
- case Assign(Select(_, _), _) =>
- withNeedLift(true) { super.transform(tree) }
+ case Assign(lhs, _) if lhs.symbol.owner != currentMethod || lhs.symbol.hasFlag(LAZY | ACCESSOR) =>
+ withNeedLift(true) { super.transform(tree) }
- case Assign(lhs, _) if lhs.symbol.owner != currentMethod || lhs.symbol.hasFlag(LAZY | ACCESSOR) =>
- withNeedLift(true) { super.transform(tree) }
+ case Try(block, catches, finalizer) =>
+ if (needTryLift || shouldBeLiftedAnyway(tree)) transform(liftTree(tree))
+ else super.transform(tree)
- case Try(block, catches, finalizer) =>
- if (needTryLift || shouldBeLiftedAnyway(tree)) transform(liftTree(tree))
- else super.transform(tree)
+ case CaseDef(pat, guard, body) =>
+ val pat1 = withInPattern(true)(transform(pat))
+ treeCopy.CaseDef(tree, pat1, transform(guard), transform(body))
- case CaseDef(pat, guard, body) =>
- val pat1 = withInPattern(true)(transform(pat))
- treeCopy.CaseDef(tree, pat1, transform(guard), transform(body))
+ case fun @ Function(_, _) =>
+ mainTransform(transformFunction(fun))
- case fun @ Function(_, _) =>
- mainTransform(transformFunction(fun))
+ case Template(_, _, _) =>
+ withInConstructorFlag(0) { super.transform(tree) }
- case Template(_, _, _) =>
- withInConstructorFlag(0) { super.transform(tree) }
-
- case _ =>
- val tree1 = super.transform(tree)
- if (isByNameRef(tree1)) {
- val tree2 = tree1 setType functionType(Nil, tree1.tpe)
- return {
- if (noApply contains tree2) tree2
- else localTyper.typedPos(tree1.pos)(Apply(Select(tree2, nme.apply), Nil))
+ case _ =>
+ val tree1 = super.transform(tree)
+ if (isByNameRef(tree1)) {
+ val tree2 = tree1 setType functionType(Nil, tree1.tpe)
+ return {
+ if (noApply contains tree2) tree2
+ else localTyper.typedPos(tree1.pos)(Apply(Select(tree2, nme.apply), Nil))
+ }
}
- }
- tree1
- }
- } setType {
- assert(tree.tpe != null, tree + " tpe is null")
- uncurryTreeType(tree.tpe)
+ tree1
+ }
+ )
+ assert(result.tpe != null, result + " tpe is null")
+ result setType uncurryTreeType(result.tpe)
}
- def postTransform(tree: Tree): Tree = atPhase(phase.next) {
+ def postTransform(tree: Tree): Tree = afterUncurry {
def applyUnary(): Tree = {
// TODO_NMT: verify that the inner tree of a type-apply also gets parens if the
// whole tree is a polymorphic nullary method application
@@ -663,23 +607,24 @@ abstract class UnCurry extends InfoTransform
* In particular, this case will add:
* - synthetic Java varargs forwarders for repeated parameters
*/
- case Template(parents, self, body) =>
+ case Template(_, _, _) =>
localTyper = typer.atOwner(tree, currentClass)
- val tmpl = if (!forMSIL || forMSIL) {
- treeCopy.Template(tree, parents, self, transformTrees(newMembers.toList) ::: body)
- } else super.transform(tree).asInstanceOf[Template]
- newMembers.clear
- tmpl
- case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
- val rhs1 = nonLocalReturnKeys.get(tree.symbol) match {
- case None => rhs
- case Some(k) => atPos(rhs.pos)(nonLocalReturnTry(rhs, k, tree.symbol))
- }
- val flatdd = treeCopy.DefDef(tree, mods, name, tparams, List(vparamss.flatten), tpt, rhs1)
- if (dd.symbol hasAnnotation VarargsClass) addJavaVarargsForwarders(dd, flatdd, tree)
- flatdd
+ try deriveTemplate(tree)(transformTrees(newMembers.toList) ::: _)
+ finally newMembers.clear()
+
+ case dd @ DefDef(_, _, _, vparamss0, _, rhs0) =>
+ val flatdd = copyDefDef(dd)(
+ vparamss = List(vparamss0.flatten),
+ rhs = nonLocalReturnKeys get dd.symbol match {
+ case Some(k) => atPos(rhs0.pos)(nonLocalReturnTry(rhs0, k, dd.symbol))
+ case None => rhs0
+ }
+ )
+ addJavaVarargsForwarders(dd, flatdd)
+
case Try(body, catches, finalizer) =>
- if (catches forall treeInfo.isCatchCase) tree
+ if (opt.virtPatmat) { if(catches exists (cd => !treeInfo.isCatchCase(cd))) debugwarn("VPM BUG! illegal try/catch "+ catches); tree }
+ else if (catches forall treeInfo.isCatchCase) tree
else {
val exname = unit.freshTermName("ex$")
val cases =
@@ -701,7 +646,7 @@ abstract class UnCurry extends InfoTransform
}
debuglog("rewrote try: " + catches + " ==> " + catchall);
val catches1 = localTyper.typedCases(
- tree, List(catchall), ThrowableClass.tpe, WildcardType)
+ List(catchall), ThrowableClass.tpe, WildcardType)
treeCopy.Try(tree, body, catches1, finalizer)
}
case Apply(Apply(fn, args), args1) =>
@@ -739,9 +684,9 @@ abstract class UnCurry extends InfoTransform
* It looks for the method in the `repeatedParams` map, and generates a Java-style
* varargs forwarder. It then adds the forwarder to the `newMembers` sequence.
*/
- private def addJavaVarargsForwarders(dd: DefDef, flatdd: DefDef, tree: Tree): Unit = {
- if (!repeatedParams.contains(dd.symbol))
- return
+ private def addJavaVarargsForwarders(dd: DefDef, flatdd: DefDef): DefDef = {
+ if (!dd.symbol.hasAnnotation(VarargsClass) || !repeatedParams.contains(dd.symbol))
+ return flatdd
def toSeqType(tp: Type): Type = {
val arg = elementType(ArrayClass, tp)
@@ -762,7 +707,7 @@ abstract class UnCurry extends InfoTransform
val reps = repeatedParams(dd.symbol)
val rpsymbols = reps.map(_.symbol).toSet
- val theTyper = typer.atOwner(tree, currentClass)
+ val theTyper = typer.atOwner(dd, currentClass)
val flatparams = flatdd.vparamss.head
// create the type
@@ -814,10 +759,11 @@ abstract class UnCurry extends InfoTransform
case None =>
// enter symbol into scope
currentClass.info.decls enter forwsym
-
// add the method to `newMembers`
newMembers += forwtree
}
+
+ flatdd
}
}
}
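
The nonLocalReturnThrow/nonLocalReturnTry pair rewritten above implements `return` from inside a closure: the return becomes a throw of a control exception carrying a per-invocation key, and the enclosing method catches it, comparing keys by reference identity. A self-contained sketch of the encoding, with a toy stand-in for scala.runtime.NonLocalReturnControl (the real class extends ControlThrowable, so no stack trace is filled in):

import scala.util.control.ControlThrowable

class NonLocalReturn[T](val key: AnyRef, val value: T) extends ControlThrowable

def firstNegative(xs: List[Int]): Option[Int] = {
  val key = new AnyRef // fresh per invocation, as in nonLocalReturnKey
  try {
    xs.foreach { x =>
      // what a `return Some(x)` inside the closure turns into:
      if (x < 0) throw new NonLocalReturn(key, Some(x))
    }
    None
  } catch {
    case ex: NonLocalReturn[_] if ex.key eq key =>
      ex.value.asInstanceOf[Option[Int]] // cast, like UnCurry's Any_asInstanceOf
  }
}
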
diff --git a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
index 6ee09d064f..140df53816 100644
--- a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
@@ -624,11 +624,21 @@ trait ContextErrors {
setError(tree)
}
- // checkNoDoubleDefs...
- def DefDefinedTwiceError(sym0: Symbol, sym1: Symbol) =
- issueSymbolTypeError(sym0, sym1+" is defined twice"+
- {if(!settings.debug.value) "" else " in "+context0.unit}+
- {if (sym0.isMacro && sym1.isMacro) " \n(note that macros cannot be overloaded)" else ""})
+ // checkNoDoubleDefs...
+ // @PP: I hacked the filename in (context0.unit) to work around SI-4893. It would be
+ // much better if every symbol could offer some idea of where it came from, else
+ // the obviously untrue claim that something has been defined twice can only frustrate.
+ // There's no direct test because partest doesn't work, but to reproduce, separately
+ // compile the next two lines:
+ // package object foo { val x: Class[_] = null }
+ // package foo
+ def DefDefinedTwiceError(sym0: Symbol, sym1: Symbol) = {
+ val isBug = sym0.isAbstractType && sym1.isAbstractType && (sym0.name startsWith "_$")
+ issueSymbolTypeError(sym0, sym1+" is defined twice in " + context0.unit
+ + ( if (sym0.isMacro && sym1.isMacro) "\n(note that macros cannot be overloaded)" else "" )
+ + ( if (isBug) "\n(this error is likely due to a bug in the scala compiler involving wildcards in package objects)" else "" )
+ )
+ }
// cyclic errors
def CyclicAliasingOrSubtypingError(errPos: Position, sym0: Symbol) =
@@ -636,11 +646,6 @@ trait ContextErrors {
def CyclicReferenceError(errPos: Position, lockedSym: Symbol) =
issueTypeError(PosAndMsgTypeError(errPos, "illegal cyclic reference involving " + lockedSym))
-
- def MacroExpandError(tree: Tree, t: Any) = {
- issueNormalTypeError(tree, "macros must return a compiler-specific tree; returned class is: " + t.getClass)
- setError(tree)
- }
}
}
@@ -713,10 +718,17 @@ trait ContextErrors {
"constructor cannot be instantiated to expected type" + foundReqMsg(restpe, pt))
setError(tree)
}
-
- def NoBestMethodAlternativeError(tree: Tree, argtpes: List[Type], pt: Type) =
+
+ def NoBestMethodAlternativeError(tree: Tree, argtpes: List[Type], pt: Type) = {
issueNormalTypeError(tree,
applyErrorMsg(tree, " cannot be applied to ", argtpes, pt))
+ // since inferMethodAlternative modifies the state of the tree
+ // we have to set the type of tree to ErrorType only in the very last
+ // fallback action that is done in the inference (tracking it manually is error prone).
+ // This avoids entering an infinite loop in doTypeApply.
+ // TODO: maybe we should do the same thing with inferExprAlternative.
+ if (implicitly[Context].reportErrors) setError(tree)
+ }
def AmbiguousMethodAlternativeError(tree: Tree, pre: Type, best: Symbol,
firstCompeting: Symbol, argtpes: List[Type], pt: Type) = {
diff --git a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
index 740acbd10f..8586ebf0d4 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
@@ -128,6 +128,8 @@ trait Contexts { self: Analyzer =>
var typingIndentLevel: Int = 0
def typingIndent = " " * typingIndentLevel
+
+ var buffer: Set[AbsTypeError] = _
def enclClassOrMethod: Context =
if ((owner eq NoSymbol) || (owner.isClass) || (owner.isMethod)) this
@@ -146,7 +148,6 @@ trait Contexts { self: Analyzer =>
}
private[this] var mode = 0
- private[this] val buffer = LinkedHashSet[AbsTypeError]()
def errBuffer = buffer
def hasErrors = buffer.nonEmpty
@@ -161,7 +162,7 @@ trait Contexts { self: Analyzer =>
def setReportErrors() = mode = (ReportErrors | AmbiguousErrors)
def setBufferErrors() = {
- assert(bufferErrors || !hasErrors, "When entering the buffer state, context has to be clean. Current buffer: " + buffer)
+ //assert(bufferErrors || !hasErrors, "When entering the buffer state, context has to be clean. Current buffer: " + buffer)
mode = BufferErrors
}
def setThrowErrors() = mode &= (~AllMask)
@@ -226,6 +227,7 @@ trait Contexts { self: Analyzer =>
c.checking = this.checking
c.retyping = this.retyping
c.openImplicits = this.openImplicits
+ c.buffer = if (this.buffer == null) LinkedHashSet[AbsTypeError]() else this.buffer // need to initialize
registerContext(c.asInstanceOf[analyzer.Context])
debuglog("[context] ++ " + c.unit + " / " + tree.summaryString)
c
@@ -266,6 +268,7 @@ trait Contexts { self: Analyzer =>
val c = make(newtree)
c.setBufferErrors()
c.setAmbiguousErrors(reportAmbiguousErrors)
+ c.buffer = new LinkedHashSet[AbsTypeError]()
c
}
@@ -309,6 +312,7 @@ trait Contexts { self: Analyzer =>
unit.error(pos, if (checking) "\n**** ERROR DURING INTERNAL CHECKING ****\n" + msg else msg)
def issue(err: AbsTypeError) {
+ if (settings.debug.value) println("issue error: " + err.errMsg)
if (reportErrors) unitError(err.errPos, addDiagString(err.errMsg))
else if (bufferErrors) { buffer += err }
else throw new TypeError(err.errPos, err.errMsg)
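
The Contexts change above makes `buffer` a var that child contexts inherit from their parent in make (allocated lazily at the root, or fresh in makeSilent) instead of each context owning its own LinkedHashSet. The effect, in a toy model with invented names:

import scala.collection.mutable.LinkedHashSet

class MiniCtx {
  var buffer: LinkedHashSet[String] = _
  def make(): MiniCtx = {
    val c = new MiniCtx
    c.buffer = if (buffer == null) LinkedHashSet[String]() else buffer // share, allocate once
    c
  }
}

// errors buffered while typing a subtree are now visible to the parent:
val root = new MiniCtx().make()
val child = root.make()
child.buffer += "ambiguous implicit"
assert(root.buffer.nonEmpty)
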
diff --git a/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala b/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
index 179bea0035..29831c8469 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Duplicators.scala
@@ -36,7 +36,7 @@ abstract class Duplicators extends Analyzer {
} else resetClassOwners
envSubstitution = new SubstSkolemsTypeMap(env.keysIterator.toList, env.valuesIterator.toList)
- log("retyped with env: " + env)
+ debuglog("retyped with env: " + env)
(new BodyDuplicator(context)).typed(tree)
}
@@ -82,14 +82,14 @@ abstract class Duplicators extends Analyzer {
val sym1 = context.scope.lookup(sym.name)
// assert(sym1 ne NoSymbol, tpe)
if ((sym1 ne NoSymbol) && (sym1 ne sym)) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
typeRef(NoPrefix, sym1, mapOverArgs(args, sym1.typeParams))
} else super.mapOver(tpe)
case TypeRef(pre, sym, args) =>
val newsym = updateSym(sym)
if (newsym ne sym) {
- log("fixing " + sym + " -> " + newsym)
+ debuglog("fixing " + sym + " -> " + newsym)
typeRef(mapOver(pre), newsym, mapOverArgs(args, newsym.typeParams))
} else
super.mapOver(tpe)
@@ -97,7 +97,7 @@ abstract class Duplicators extends Analyzer {
case SingleType(pre, sym) =>
val sym1 = updateSym(sym)
if (sym1 ne sym) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
singleType(mapOver(pre), sym1)
} else
super.mapOver(tpe)
@@ -105,7 +105,7 @@ abstract class Duplicators extends Analyzer {
case ThisType(sym) =>
val sym1 = updateSym(sym)
if (sym1 ne sym) {
- log("fixing " + sym + " -> " + sym1)
+ debuglog("fixing " + sym + " -> " + sym1)
ThisType(sym1)
} else
super.mapOver(tpe)
@@ -136,26 +136,26 @@ abstract class Duplicators extends Analyzer {
private def invalidate(tree: Tree) {
debuglog("attempting to invalidate " + tree.symbol + ", owner - " + (if (tree.symbol ne null) tree.symbol.owner else "<NULL>"))
if (tree.isDef && tree.symbol != NoSymbol) {
- log("invalid " + tree.symbol)
+ debuglog("invalid " + tree.symbol)
invalidSyms(tree.symbol) = tree
tree match {
case ldef @ LabelDef(name, params, rhs) =>
- log("LabelDef " + name + " sym.info: " + ldef.symbol.info)
+ debuglog("LabelDef " + name + " sym.info: " + ldef.symbol.info)
invalidSyms(ldef.symbol) = ldef
// breakIf(true, this, ldef, context)
val newsym = ldef.symbol.cloneSymbol(context.owner)
newsym.setInfo(fixType(ldef.symbol.info))
ldef.symbol = newsym
- log("newsym: " + newsym + " info: " + newsym.info)
+ debuglog("newsym: " + newsym + " info: " + newsym.info)
case vdef @ ValDef(mods, name, _, rhs) if mods.hasFlag(Flags.LAZY) =>
- log("ValDef " + name + " sym.info: " + vdef.symbol.info)
+ debuglog("ValDef " + name + " sym.info: " + vdef.symbol.info)
invalidSyms(vdef.symbol) = vdef
val newsym = vdef.symbol.cloneSymbol(context.owner)
newsym.setInfo(fixType(vdef.symbol.info))
vdef.symbol = newsym
- log("newsym: " + newsym + " info: " + newsym.info)
+ debuglog("newsym: " + newsym + " info: " + newsym.info)
case DefDef(_, name, tparams, vparamss, _, rhs) =>
// invalidate parameters
@@ -182,7 +182,7 @@ abstract class Duplicators extends Analyzer {
}
ddef.symbol = NoSymbol
enterSym(context, ddef)
- log("remapping this of " + oldClassOwner + " to " + newClassOwner)
+ debuglog("remapping this of " + oldClassOwner + " to " + newClassOwner)
typed(ddef)
}
@@ -228,7 +228,7 @@ abstract class Duplicators extends Analyzer {
ttree
case Block(stats, res) =>
- log("invalidating block")
+ debuglog("invalidating block")
invalidate(stats)
invalidate(res)
tree.tpe = null
@@ -256,7 +256,7 @@ abstract class Duplicators extends Analyzer {
case ldef @ LabelDef(name, params, rhs) =>
// log("label def: " + ldef)
ldef.tpe = null
- val params1 = params map { p => Ident(updateSym(p.symbol)) }
+ val params1 = params map (p => Ident(updateSym(p.symbol)))
super.typed(treeCopy.LabelDef(tree, name, params1, rhs), mode, pt)
case Bind(name, _) =>
@@ -266,13 +266,13 @@ abstract class Duplicators extends Analyzer {
super.typed(tree, mode, pt)
case Ident(_) if tree.symbol.isLabel =>
- log("Ident to labeldef " + tree + " switched to ")
+ debuglog("Ident to labeldef " + tree + " switched to ")
tree.symbol = updateSym(tree.symbol)
tree.tpe = null
super.typed(tree, mode, pt)
case Ident(_) if (origtreesym ne null) && origtreesym.isLazy =>
- log("Ident to a lazy val " + tree + ", " + tree.symbol + " updated to " + origtreesym)
+ debuglog("Ident to a lazy val " + tree + ", " + tree.symbol + " updated to " + origtreesym)
tree.symbol = updateSym(origtreesym)
tree.tpe = null
super.typed(tree, mode, pt)
@@ -336,7 +336,7 @@ abstract class Duplicators extends Analyzer {
tree
case _ =>
- log("default: " + tree)
+ debuglog("Duplicators default case: " + tree.summaryString)
if (tree.hasSymbol && tree.symbol != NoSymbol && (tree.symbol.owner == definitions.AnyClass)) {
tree.symbol = NoSymbol // maybe we can find a more specific member in a subclass of Any (see AnyVal members, like ==)
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/Infer.scala b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
index b97fbebec2..e1aa8b46eb 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Infer.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
@@ -1388,7 +1388,7 @@ trait Infer {
NoBestExprAlternativeError(tree, pt)
} else if (!competing.isEmpty) {
if (secondTry) NoBestExprAlternativeError(tree, pt)
- else { if (!pt.isErroneous) AmbiguousExprAlternativeError(tree, pre, best, competing.head, pt) }
+ else if (!pt.isErroneous) AmbiguousExprAlternativeError(tree, pre, best, competing.head, pt)
} else {
// val applicable = alts1 filter (alt =>
// global.typer.infer.isWeaklyCompatible(pre.memberType(alt), pt))
@@ -1398,10 +1398,14 @@ trait Infer {
}
}
- @inline private def inSilentMode(expr: Typer => Boolean): Boolean = {
- val silentContext = context.makeSilent(context.ambiguousErrors)
- val res = expr(newTyper(silentContext))
- if (silentContext.hasErrors) false else res
+ @inline private def inSilentMode(context: Context)(expr: => Boolean): Boolean = {
+ val oldState = context.state
+ context.setBufferErrors()
+ val res = expr
+ val contextWithErrors = context.hasErrors
+ context.flushBuffer()
+ context.restoreState(oldState)
+ res && !contextWithErrors
}
// Checks against the name of the parameter and also any @deprecatedName.
@@ -1472,7 +1476,7 @@ trait Infer {
val applicable = resolveOverloadedMethod(argtpes, {
alts filter { alt =>
- inSilentMode(typer0 => typer0.infer.isApplicable(undetparams, followApply(pre.memberType(alt)), argtpes, pt)) &&
+ inSilentMode(context)(isApplicable(undetparams, followApply(pre.memberType(alt)), argtpes, pt)) &&
(!varArgsOnly || isVarArgsList(alt.tpe.params))
}
})
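
The new inSilentMode above stops building a throwaway silent context and typer; instead it mutates the current context in place: snapshot the reporting state, switch to buffering, evaluate, then flush the buffer and restore, succeeding only if the expression held and nothing was buffered. The shape of that save/buffer/restore pattern as a standalone sketch (simplified types; the real Context tracks a mode bitmask):

import scala.collection.mutable.ListBuffer

final class ErrCtx {
  private val buffered = ListBuffer[String]()
  private var buffering = false          // stands in for the mode bits
  def state: Boolean = buffering
  def restoreState(s: Boolean): Unit = buffering = s
  def setBufferErrors(): Unit = buffering = true
  def issue(msg: String): Unit = if (buffering) buffered += msg else sys.error(msg)
  def hasErrors: Boolean = buffered.nonEmpty
  def flushBuffer(): Unit = buffered.clear()
}

def inSilentMode(ctx: ErrCtx)(expr: => Boolean): Boolean = {
  val oldState = ctx.state
  ctx.setBufferErrors()
  val res = expr                         // may call ctx.issue(...)
  val hadErrors = ctx.hasErrors
  ctx.flushBuffer()                      // don't leak buffered errors
  ctx.restoreState(oldState)
  res && !hadErrors
}
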
diff --git a/src/compiler/scala/tools/nsc/typechecker/Macros.scala b/src/compiler/scala/tools/nsc/typechecker/Macros.scala
index 470f3e7117..161e4b7a9a 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Macros.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Macros.scala
@@ -90,8 +90,19 @@ trait Macros { self: Analyzer =>
lazy val mirror = new scala.reflect.runtime.Mirror {
lazy val libraryClassLoader = {
+ // todo. this is more or less okay, but not completely correct
+ // see https://issues.scala-lang.org/browse/SI-5433 for more info
val classpath = global.classPath.asURLs
- ScalaClassLoader.fromURLs(classpath, self.getClass.getClassLoader)
+ var loader: ClassLoader = ScalaClassLoader.fromURLs(classpath, self.getClass.getClassLoader)
+
+ // a heuristic to detect the REPL
+ if (global.settings.exposeEmptyPackage.value) {
+ import scala.tools.nsc.interpreter._
+ val virtualDirectory = global.settings.outputDirs.getSingleOutput.get
+ loader = new AbstractFileClassLoader(virtualDirectory, loader) {}
+ }
+
+ loader
}
override def defaultReflectiveClassLoader() = libraryClassLoader
@@ -112,19 +123,42 @@ trait Macros { self: Analyzer =>
if (mmeth == NoSymbol) None
else {
- val receiverClass: mirror.Symbol = mirror.symbolForName(mmeth.owner.fullName)
+ trace("loading implementation class: ")(mmeth.owner.fullName)
+ trace("classloader is: ")("%s of type %s".format(mirror.libraryClassLoader, mirror.libraryClassLoader.getClass))
+ def inferClasspath(cl: ClassLoader) = cl match {
+ case cl: java.net.URLClassLoader => "[" + (cl.getURLs mkString ",") + "]"
+ case _ => "<unknown>"
+ }
+ trace("classpath is: ")(inferClasspath(mirror.libraryClassLoader))
+
+ // @xeno.by: relies on the fact that macros can only be defined in static classes
+ def classfile(sym: Symbol): String = {
+ def recur(sym: Symbol): String = sym match {
+ case sym if sym.owner.isPackageClass =>
+ val suffix = if (sym.isModuleClass) "$" else ""
+ sym.fullName + suffix
+ case sym =>
+ val separator = if (sym.owner.isModuleClass) "" else "$"
+ recur(sym.owner) + separator + sym.javaSimpleName
+ }
+
+ if (sym.isClass || sym.isModule) recur(sym)
+ else recur(sym.enclClass)
+ }
+
+ // @xeno.by: this doesn't work for inner classes
+ // neither does mmeth.owner.javaClassName, so I had to roll my own implementation
+ //val receiverName = mmeth.owner.fullName
+ val receiverName = classfile(mmeth.owner)
+ val receiverClass: mirror.Symbol = mirror.symbolForName(receiverName)
+
if (debug) {
println("receiverClass is: " + receiverClass.fullNameString)
val jreceiverClass = mirror.classToJava(receiverClass)
val jreceiverSource = jreceiverClass.getProtectionDomain.getCodeSource
println("jreceiverClass is %s from %s".format(jreceiverClass, jreceiverSource))
-
- val jreceiverClasspath = jreceiverClass.getClassLoader match {
- case cl: java.net.URLClassLoader => "[" + (cl.getURLs mkString ",") + "]"
- case _ => "<unknown>"
- }
- println("jreceiverClassLoader is %s with classpath %s".format(jreceiverClass.getClassLoader, jreceiverClasspath))
+ println("jreceiverClassLoader is %s with classpath %s".format(jreceiverClass.getClassLoader, inferClasspath(jreceiverClass.getClassLoader)))
}
val receiverObj = receiverClass.companionModule
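The `classfile` helper reconstructs a JVM binary name by hand: `$` joins nested definitions where `fullName` would use `.`, and module classes get a trailing `$`. A small illustration of the naming scheme this relies on (ordinary user code, not part of the patch):

    object Outer {
      object Inner {          // module class is compiled as Outer$Inner$
        def marker() = ()
      }
    }

    object BinaryNames {
      def main(args: Array[String]): Unit = {
        // '$' separates the nesting levels and a trailing '$' marks the
        // module (object) class -- exactly what classfile(sym) computes.
        println(Outer.Inner.getClass.getName) // prints Outer$Inner$
      }
    }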
@@ -132,7 +166,11 @@ trait Macros { self: Analyzer =>
if (receiverObj == mirror.NoSymbol) None
else {
- val receiver = mirror.companionInstance(receiverClass)
+ // @xeno.by: yet another reflection method that doesn't work for inner classes
+ //val receiver = mirror.companionInstance(receiverClass)
+ val clazz = java.lang.Class.forName(receiverName, true, mirror.libraryClassLoader)
+ val receiver = clazz getField "MODULE$" get null
+
val rmeth = receiverObj.info.member(mirror.newTermName(mmeth.name.toString))
if (debug) {
println("rmeth is: " + rmeth.fullNameString)
@@ -147,6 +185,7 @@ trait Macros { self: Analyzer =>
}
} catch {
case ex: ClassNotFoundException =>
+ trace("implementation class failed to load: ")(ex.toString)
None
}
}
@@ -155,7 +194,7 @@ trait Macros { self: Analyzer =>
* Or, if that fails and the macro overrides a method, return a
* tree that calls this method instead of the macro.
*/
- def macroExpand(tree: Tree, context: Context): Option[Any] = {
+ def macroExpand(tree: Tree, typer: Typer): Option[Any] = {
val trace = scala.tools.nsc.util.trace when settings.Ymacrodebug.value
trace("macroExpand: ")(tree)
@@ -180,7 +219,19 @@ trait Macros { self: Analyzer =>
// if one of those children involves macro expansion, things might get nasty
// that's why I'm temporarily turning this behavior off
nodePrinters.infolevel = nodePrinters.InfoLevel.Quiet
- Some(mirror.invoke(receiver, rmeth)(rawArgs: _*))
+ val expanded = mirror.invoke(receiver, rmeth)(rawArgs: _*)
+ expanded match {
+ case expanded: Tree =>
+ val expectedTpe = tree.tpe
+ val typed = typer.typed(expanded, EXPRmode, expectedTpe)
+ Some(typed)
+ case expanded if expanded.isInstanceOf[Tree] =>
+ typer.context.unit.error(tree.pos, "macro must return a compiler-specific tree; returned value is Tree, but it doesn't belong to this compiler's universe")
+ None
+ case expanded =>
+ typer.context.unit.error(tree.pos, "macro must return a compiler-specific tree; returned value is of class: " + expanded.getClass)
+ None
+ }
} catch {
case ex =>
val realex = ReflectionUtils.unwrapThrowable(ex)
@@ -191,14 +242,14 @@ trait Macros { self: Analyzer =>
} else {
realex.getMessage
}
- context.unit.error(tree.pos, "exception during macro expansion: " + msg)
+ typer.context.unit.error(tree.pos, "exception during macro expansion: " + msg)
None
} finally {
nodePrinters.infolevel = savedInfolevel
}
case None =>
def notFound() = {
- context.unit.error(tree.pos, "macro implementation not found: " + macroDef.name)
+ typer.context.unit.error(tree.pos, "macro implementation not found: " + macroDef.name)
None
}
def fallBackToOverridden(tree: Tree): Option[Tree] = {
diff --git a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
index 915d7a98db..3ba8cefca8 100644
--- a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
@@ -82,7 +82,7 @@ trait MethodSynthesis {
}
private def finishMethod(method: Symbol, f: Symbol => Tree): Tree =
- logResult("finishMethod")(localTyper typed ValOrDefDef(method, f(method)))
+ localTyper typed ValOrDefDef(method, f(method))
private def createInternal(name: Name, f: Symbol => Tree, info: Type): Tree = {
val m = clazz.newMethod(name.toTermName, clazz.pos.focus, newMethodFlags(name))
@@ -200,7 +200,7 @@ trait MethodSynthesis {
map (acc => atPos(vd.pos.focus)(acc derive annotations))
filterNot (_ eq EmptyTree)
)
- log(trees.mkString("Accessor trees:\n ", "\n ", "\n"))
+ // log(trees.mkString("Accessor trees:\n ", "\n ", "\n"))
if (vd.symbol.isLazy) List(stat)
else trees
case _ =>
@@ -282,7 +282,7 @@ trait MethodSynthesis {
}
}
private def logDerived(result: Tree): Tree = {
- log("[+derived] " + ojoin(mods.defaultFlagString, basisSym.accurateKindString, basisSym.getterName.decode)
+ debuglog("[+derived] " + ojoin(mods.defaultFlagString, basisSym.accurateKindString, basisSym.getterName.decode)
+ " (" + derivedSym + ")\n " + result)
result
@@ -376,7 +376,7 @@ trait MethodSynthesis {
override def keepClean = !mods.isParamAccessor
override def derivedTree = (
if (mods.isDeferred) EmptyTree
- else treeCopy.ValDef(tree, mods | flagsExtra, name, tree.tpt, tree.rhs)
+ else copyValDef(tree)(mods = mods | flagsExtra, name = this.name)
)
}
case class Param(tree: ValDef) extends DerivedFromValDef {
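Several hunks in this commit replace positional `treeCopy.X(tree, a, b, c, ...)` calls with `copyX(tree)(field = ...)` and `deriveX(tree)(f)` helpers that name only the fields being changed. A sketch of why that shape is less error-prone, on a stand-in case class (all names here hypothetical):

    case class ValDefLike(mods: Long, name: String, tpt: String, rhs: String)

    object Copiers {
      // Copy-style helper: unchanged fields default to the original's values.
      def copyValDefLike(vd: ValDefLike)(
          mods: Long   = vd.mods,
          name: String = vd.name,
          tpt: String  = vd.tpt,
          rhs: String  = vd.rhs): ValDefLike =
        ValDefLike(mods, name, tpt, rhs)

      def main(args: Array[String]): Unit = {
        val vd = ValDefLike(0L, "x", "Int", "1")
        // Only the fields that change are spelled out, so a reordering of the
        // underlying constructor can't silently scramble arguments.
        println(copyValDefLike(vd)(mods = 4L, name = "y")) // ValDefLike(4,y,Int,1)
      }
    }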
diff --git a/src/compiler/scala/tools/nsc/typechecker/Namers.scala b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
index 51542ec757..eb7ea51d2b 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Namers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
@@ -277,12 +277,16 @@ trait Namers extends MethodSynthesis {
def assignAndEnterFinishedSymbol(tree: MemberDef): Symbol = {
val sym = assignAndEnterSymbol(tree)
sym setInfo completerOf(tree)
- log("[+info] " + sym.fullLocationString)
+ // log("[+info] " + sym.fullLocationString)
sym
}
private def logAssignSymbol(tree: Tree, sym: Symbol): Symbol = {
- log("[+symbol] " + sym.debugLocationString)
+ sym.name.toTermName match {
+ case nme.IMPORT | nme.OUTER | nme.ANON_CLASS_NAME | nme.ANON_FUN_NAME | nme.CONSTRUCTOR => ()
+ case _ =>
+ log("[+symbol] " + sym.debugLocationString)
+ }
tree.symbol = sym
sym
}
@@ -1299,7 +1303,7 @@ trait Namers extends MethodSynthesis {
catch typeErrorHandler(tree, ErrorType)
result match {
- case PolyType(tparams @ (tp :: _), _) if tp.owner.isTerm => typer.deskolemizeTypeParams(tparams)(result)
+ case PolyType(tparams @ (tp :: _), _) if tp.owner.isTerm => deskolemizeTypeParams(tparams)(result)
case _ => result
}
}
@@ -1439,8 +1443,11 @@ trait Namers extends MethodSynthesis {
private val ownerSym = owner.symbol
override val typeParams = tparams map (_.symbol) //@M
override val tree = restp.tree
- if (ownerSym.isTerm)
- typer skolemizeTypeParams tparams
+
+ if (ownerSym.isTerm) {
+ val skolems = deriveFreshSkolems(tparams map (_.symbol))
+ map2(tparams, skolems)(_ setSymbol _)
+ }
def completeImpl(sym: Symbol) = {
// @M an abstract type's type parameters are entered.
diff --git a/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala b/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
index 3a3c244d1c..44a3abf1b2 100644
--- a/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/NamesDefaults.scala
@@ -37,21 +37,17 @@ trait NamesDefaults { self: Analyzer =>
}
def isNamed(arg: Tree) = nameOf(arg).isDefined
- /** @param pos maps indicies from old to new */
+ /** @param pos maps indices from old to new */
def reorderArgs[T: ClassManifest](args: List[T], pos: Int => Int): List[T] = {
val res = new Array[T](args.length)
- // (hopefully) faster than zipWithIndex
- (0 /: args) { case (index, arg) => res(pos(index)) = arg; index + 1 }
+ foreachWithIndex(args)((arg, index) => res(pos(index)) = arg)
res.toList
}
- /** @param pos maps indicies from new to old (!) */
+ /** @param pos maps indices from new to old (!) */
def reorderArgsInv[T: ClassManifest](args: List[T], pos: Int => Int): List[T] = {
val argsArray = args.toArray
- val res = new mutable.ListBuffer[T]
- for (i <- 0 until argsArray.length)
- res += argsArray(pos(i))
- res.toList
+ argsArray.indices map (i => argsArray(pos(i))) toList
}
/** returns `true` if every element is equal to its index */
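`reorderArgs` writes each argument into the slot its index maps to, so `pos` is a permutation from old to new positions. A self-contained sketch of the contract (the ClassManifest bound is dropped for brevity):

    object Reorder {
      def reorderArgs[T](args: List[T], pos: Int => Int): List[T] = {
        val res = new Array[Any](args.length)
        // Place each argument at its destination slot, as the patched
        // foreachWithIndex version does.
        args.zipWithIndex foreach { case (arg, i) => res(pos(i)) = arg }
        res.toList.asInstanceOf[List[T]]
      }

      def main(args: Array[String]): Unit = {
        // pos: old index -> new index
        println(reorderArgs(List("a", "b", "c"), Map(0 -> 2, 1 -> 0, 2 -> 1)))
        // prints List(b, c, a)
      }
    }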
@@ -507,7 +503,7 @@ trait NamesDefaults { self: Analyzer =>
/**
* Removes name assignments from args. Additionally, returns an array mapping
- * argument indicies from call-site-order to definition-site-order.
+ * argument indices from call-site-order to definition-site-order.
*
* Verifies that names are not specified twice, positional args don't appear
* after named ones.
diff --git a/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala b/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
index 6d31243fd0..8bf5fc3557 100644
--- a/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/PatMatVirtualiser.scala
@@ -43,7 +43,7 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
val outer = newTermName("<outer>")
val runOrElse = newTermName("runOrElse")
val zero = newTermName("zero")
- val __match = newTermName("__match")
+ val _match = newTermName("__match") // don't call it __match, since that will trigger virtual pattern matching...
def counted(str: String, i: Int) = newTermName(str+i)
}
@@ -51,8 +51,8 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
object MatchTranslator {
def apply(typer: Typer): MatchTranslation = {
import typer._
- // typing `__match` to decide which MatchTranslator to create adds 4% to quick.comp.timer
- newTyper(context.makeImplicit(reportAmbiguousErrors = false)).silent(_.typed(Ident(vpmName.__match), EXPRmode, WildcardType), reportAmbiguousErrors = false) match {
+ // typing `_match` to decide which MatchTranslator to create adds 4% to quick.comp.timer
+ newTyper(context.makeImplicit(reportAmbiguousErrors = false)).silent(_.typed(Ident(vpmName._match), EXPRmode, WildcardType), reportAmbiguousErrors = false) match {
case SilentResultValue(ms) => new PureMatchTranslator(typer, ms)
case _ => new OptimizingMatchTranslator(typer)
}
@@ -116,6 +116,10 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
trait MatchTranslation extends MatchMonadInterface { self: TreeMakers with CodegenCore =>
import typer.{typed, context, silent, reallyExists}
+ private def repeatedToSeq(tp: Type): Type = (tp baseType RepeatedParamClass) match {
+ case TypeRef(_, RepeatedParamClass, args) => appliedType(SeqClass.typeConstructor, args)
+ case _ => tp
+ }
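`repeatedToSeq` widens a repeated-parameter type `T*` to `Seq[T]` before matching, mirroring what a vararg parameter already is at runtime. An illustration in ordinary user code, not compiler internals:

    object Varargs {
      // Inside the method body, xs: Int* is a Seq[Int] -- the same widening
      // repeatedToSeq performs on the scrutinee's type.
      def firstOrElse(xs: Int*): Int = (xs: Seq[Int]) match {
        case Seq(head, _*) => head
        case _             => -1
      }

      def main(args: Array[String]): Unit = {
        println(firstOrElse(1, 2, 3)) // prints 1
        println(firstOrElse())        // prints -1
      }
    }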
/** Implement a pattern match by turning its cases (including the implicit failure case)
* into the corresponding (monadic) extractors, and combining them with the `orElse` combinator.
@@ -133,11 +137,6 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
// and the only place that emits Matches after typers is for exception handling anyway)
assert(phase.id <= currentRun.typerPhase.id, phase)
- def repeatedToSeq(tp: Type): Type = (tp baseType RepeatedParamClass) match {
- case TypeRef(_, RepeatedParamClass, args) => appliedType(SeqClass.typeConstructor, args)
- case _ => tp
- }
-
val scrutType = repeatedToSeq(elimAnonymousClass(scrut.tpe.widen))
val scrutSym = freshSym(scrut.pos, pureType(scrutType))
@@ -146,6 +145,47 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
combineCases(scrut, scrutSym, cases map translateCase(scrutSym, okPt), okPt, matchOwner)
}
+ // return list of typed CaseDefs that are supported by the backend (typed/bind/wildcard)
+ // we don't have a global scrutinee -- the caught exception must be bound in each of the casedefs
+ // there's no need to check the scrutinee for null -- "throw null" becomes "throw new NullPointerException"
+ // try to simplify to a type-based switch, or fall back to a catch-all case that runs a normal pattern match
+ // unlike translateMatch, we type our result before returning it
+ def translateTry(caseDefs: List[CaseDef], pt: Type, pos: Position): List[CaseDef] =
+ // if they're already simple enough to be handled by the back-end, we're done
+ if (caseDefs forall treeInfo.isCatchCase) caseDefs
+ else {
+ val okPt = repeatedToSeq(pt)
+ val switch = {
+ val bindersAndCases = caseDefs map { caseDef =>
+ // generate a fresh symbol for each case, hoping we'll end up emitting a type-switch (we don't have a global scrut there)
+ // if we fail to emit a fine-grained switch, we have to do translateCase again with a single scrutSym (TODO: uniformize substitution on treemakers so we can avoid this)
+ val caseScrutSym = freshSym(pos, pureType(ThrowableClass.tpe))
+ (caseScrutSym, propagateSubstitution(translateCase(caseScrutSym, okPt)(caseDef), EmptySubstitution))
+ }
+
+ (emitTypeSwitch(bindersAndCases, pt) map (_.map(fixerUpper(matchOwner, pos).apply(_).asInstanceOf[CaseDef])))
+ }
+
+ val catches = switch getOrElse {
+ val scrutSym = freshSym(pos, pureType(ThrowableClass.tpe))
+ val casesNoSubstOnly = caseDefs map { caseDef => (propagateSubstitution(translateCase(scrutSym, okPt)(caseDef), EmptySubstitution))}
+
+ val exSym = freshSym(pos, pureType(ThrowableClass.tpe), "ex")
+
+ List(
+ atPos(pos) {
+ CaseDef(
+ Bind(exSym, Ident(nme.WILDCARD)), // TODO: does this need fixing upping?
+ EmptyTree,
+ combineCasesNoSubstOnly(CODE.REF(exSym), scrutSym, casesNoSubstOnly, pt, matchOwner, scrut => Throw(CODE.REF(exSym)))
+ )
+ })
+ }
+
+ typer.typedCases(catches, ThrowableClass.tpe, WildcardType)
+ }
+
+
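`translateTry` leaves catch clauses alone when they are already simple enough for the back-end (`treeInfo.isCatchCase`); otherwise it binds the exception once and runs a full pattern match that rethrows on failure. Both target shapes, written out as source for illustration:

    object TryShapes {
      // Already a back-end-friendly catch case: a bare type pattern.
      def simple(body: => Int): Int =
        try body catch { case _: ArithmeticException => -1 }

      // The fallback shape: one catch-all binder (the fresh `ex` symbol in the
      // hunk above), a full match on it, and a rethrow if nothing matches.
      def complex(body: => Int): Int =
        try body catch {
          case ex: Throwable => ex match {
            case e: ArithmeticException if e.getMessage != null => -1
            case other => throw other
          }
        }

      def main(args: Array[String]): Unit = {
        println(simple(1 / 0))  // prints -1
        println(complex(1 / 0)) // prints -1
      }
    }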
/** The translation of `pat if guard => body` has two aspects:
* 1) the substitution due to the variables bound by patterns
@@ -213,13 +253,14 @@ trait PatMatVirtualiser extends ast.TreeDSL { self: Analyzer =>
withSubPats(typeTestTreeMaker :+ extractor.treeMaker(patBinderOrCasted, pos), extractor.subBindersAndPatterns: _*)
}
- /** Decompose the pattern in `tree`, of shape C(p_1, ..., p_N), into a list of N symbols, and a list of its N sub-trees
- * The list of N symbols contains symbols for every bound name as well as the un-named sub-patterns (fresh symbols are generated here for these)
- *
- * @arg patBinder symbol used to refer to the result of the previous pattern's extractor (will later be replaced by the outer tree with the correct tree to refer to that patterns result)
- */
+
object MaybeBoundTyped {
- // the returned type is the one inferred by inferTypedPattern (`owntype`)
+ /** Decompose the pattern in `tree`, of shape C(p_1, ..., p_N), into a list of N symbols, and a list of its N sub-trees
+ * The list of N symbols contains symbols for every bound name as well as the un-named sub-patterns (fresh symbols are generated here for these).
+ * The returned type is the one inferred by inferTypedPattern (`owntype`)
+ *
+ * @arg patBinder symbol used to refer to the result of the previous pattern's extractor (will later be replaced by the outer tree with the correct tree to refer to that pattern's result)
+ */
def unapply(tree: Tree): Option[(Symbol, Type)] = tree match {
case Bound(subpatBinder, typed@Typed(expr, tpt)) => Some((subpatBinder, typed.tpe))
case Bind(_, typed@Typed(expr, tpt)) => Some((patBinder, typed.tpe))
@@ -668,6 +709,10 @@ class Foo(x: Other) { x._1 } // no error in this order
def emitSwitch(scrut: Tree, scrutSym: Symbol, cases: List[List[TreeMaker]], pt: Type): Option[Tree] =
None
+ // for catch
+ def emitTypeSwitch(bindersAndCases: List[(Symbol, List[TreeMaker])], pt: Type): Option[List[CaseDef]] =
+ None
+
abstract class TreeMaker {
/** captures the scope and the value of the bindings in patterns
* important *when* the substitution happens (can't accumulate and do at once after the full matcher has been constructed)
@@ -788,6 +833,7 @@ class Foo(x: Other) { x._1 } // no error in this order
}
// implements the run-time aspects of (§8.2) (typedPattern has already done the necessary type transformations)
+ // TODO: normalize construction, which yields a combination of a EqualityTestTreeMaker (when necessary) and a TypeTestTreeMaker
case class TypeAndEqualityTestTreeMaker(prevBinder: Symbol, patBinder: Symbol, pt: Type, pos: Position) extends CondTreeMaker {
val nextBinderTp = glb(List(patBinder.info.widen, pt))
@@ -843,6 +889,10 @@ class Foo(x: Other) { x._1 } // no error in this order
val cond = typeAndEqualityTest(patBinder, pt)
val res = codegen._asInstanceOf(patBinder, nextBinderTp)
+
+ // TODO: remove this
+ def isStraightTypeTest = cond match { case TypeApply(_, _) => cond.symbol == Any_isInstanceOf case _ => false }
+
override def toString = "TET"+(patBinder, pt)
}
@@ -926,25 +976,30 @@ class Foo(x: Other) { x._1 } // no error in this order
}
// calls propagateSubstitution on the treemakers
- def combineCases(scrut: Tree, scrutSym: Symbol, casesRaw: List[List[TreeMaker]], pt: Type, owner: Symbol): Tree = fixerUpper(owner, scrut.pos){
- val casesUnOpt = casesRaw map (propagateSubstitution(_, EmptySubstitution)) // drops SubstOnlyTreeMakers, since their effect is now contained in the TreeMakers that follow them
+ def combineCases(scrut: Tree, scrutSym: Symbol, casesRaw: List[List[TreeMaker]], pt: Type, owner: Symbol): Tree = {
+ val casesNoSubstOnly = casesRaw map (propagateSubstitution(_, EmptySubstitution)) // drops SubstOnlyTreeMakers, since their effect is now contained in the TreeMakers that follow them
+ combineCasesNoSubstOnly(scrut, scrutSym, casesNoSubstOnly, pt, owner, CODE.MATCHERROR(_))
+ }
- emitSwitch(scrut, scrutSym, casesUnOpt, pt).getOrElse{
+ def combineCasesNoSubstOnly(scrut: Tree, scrutSym: Symbol, casesNoSubstOnly: List[List[TreeMaker]], pt: Type, owner: Symbol, matchFail: Tree => Tree): Tree = fixerUpper(owner, scrut.pos){
+ emitSwitch(scrut, scrutSym, casesNoSubstOnly, pt).getOrElse{
val (matcher, hasDefault, toHoist) =
- if (casesUnOpt nonEmpty) {
+ if (casesNoSubstOnly nonEmpty) {
// when specified, need to propagate pt explicitly (type inferencer can't handle it)
val optPt =
if (isFullyDefined(pt)) inMatchMonad(pt)
else NoType
- // do this check on casesUnOpt, since DCE will eliminate trivial cases like `case _ =>`, even if they're the last one
+ // do this check on casesNoSubstOnly, since DCE will eliminate trivial cases like `case _ =>`, even if they're the last one
// exhaustivity and reachability must be checked before optimization as well
- val hasDefault = casesUnOpt.nonEmpty && {
- val nonTrivLast = casesUnOpt.last
+ // TODO: improve, a trivial type test before the body still makes for a default case
+ // ("trivial" depends on whether we're emitting a straight match or an exception, or more generally, any supertype of scrutSym.tpe is a no-op)
+ val hasDefault = casesNoSubstOnly.nonEmpty && {
+ val nonTrivLast = casesNoSubstOnly.last
nonTrivLast.nonEmpty && nonTrivLast.head.isInstanceOf[BodyTreeMaker]
}
- val (cases, toHoist) = optimizeCases(scrutSym, casesUnOpt, pt)
+ val (cases, toHoist) = optimizeCases(scrutSym, casesNoSubstOnly, pt)
val combinedCases =
cases.map(combineExtractors(_, pt)).reduceLeft(codegen.typedOrElse(optPt))
@@ -952,7 +1007,11 @@ class Foo(x: Other) { x._1 } // no error in this order
(combinedCases, hasDefault, toHoist)
} else (codegen.zero, false, Nil)
- val expr = codegen.runOrElse(scrut, scrutSym, matcher, if (isFullyDefined(pt)) pt else NoType, hasDefault)
+ // catch-all
+ val catchAll =
+ if (hasDefault) None // no need for a catch-all when there's already a default
+ else Some(matchFail)
+ val expr = codegen.runOrElse(scrut, scrutSym, matcher, if (isFullyDefined(pt)) pt else NoType, catchAll)
if (toHoist isEmpty) expr
else Block(toHoist, expr)
}
@@ -966,7 +1025,7 @@ class Foo(x: Other) { x._1 } // no error in this order
// TODO: do this during tree construction, but that will require tracking the current owner in treemakers
// TODO: assign more fine-grained positions
// fixes symbol nesting, assigns positions
- private def fixerUpper(origOwner: Symbol, pos: Position) = new Traverser {
+ protected def fixerUpper(origOwner: Symbol, pos: Position) = new Traverser {
currentOwner = origOwner
override def traverse(t: Tree) {
@@ -1019,7 +1078,7 @@ class Foo(x: Other) { x._1 } // no error in this order
// codegen relevant to the structure of the translation (how extractors are combined)
trait AbsCodegen {
- def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, hasDefault: Boolean): Tree
+ def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, catchAll: Option[Tree => Tree]): Tree
def one(res: Tree, bodyPt: Type, matchPt: Type): Tree
def zero: Tree
def flatMap(prev: Tree, b: Symbol, next: Tree): Tree
@@ -1098,10 +1157,10 @@ class Foo(x: Other) { x._1 } // no error in this order
protected def matchMonadSym = oneSig.finalResultType.typeSymbol
import CODE._
- def __match(n: Name): SelectStart = matchStrategy DOT n
+ def _match(n: Name): SelectStart = matchStrategy DOT n
private lazy val oneSig: Type =
- typer.typed(__match(vpmName.one), EXPRmode | POLYmode | TAPPmode | FUNmode, WildcardType).tpe // TODO: error message
+ typer.typed(_match(vpmName.one), EXPRmode | POLYmode | TAPPmode | FUNmode, WildcardType).tpe // TODO: error message
}
trait PureCodegen extends CodegenCore with PureMatchMonadInterface {
@@ -1110,14 +1169,15 @@ class Foo(x: Other) { x._1 } // no error in this order
object pureCodegen extends CommonCodegen { import CODE._
//// methods in MatchingStrategy (the monad companion) -- used directly in translation
// __match.runOrElse(`scrut`)(`scrutSym` => `matcher`)
- def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, hasDefault: Boolean): Tree
- = __match(vpmName.runOrElse) APPLY (scrut) APPLY (fun(scrutSym, matcher))
+ // TODO: consider catchAll, or virtualized matching will break in exception handlers
+ def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, catchAll: Option[Tree => Tree]): Tree
+ = _match(vpmName.runOrElse) APPLY (scrut) APPLY (fun(scrutSym, matcher))
// __match.one(`res`)
- def one(res: Tree, bodyPt: Type, matchPt: Type): Tree = (__match(vpmName.one)) (res)
+ def one(res: Tree, bodyPt: Type, matchPt: Type): Tree = (_match(vpmName.one)) (res)
// __match.zero
- def zero: Tree = __match(vpmName.zero)
+ def zero: Tree = _match(vpmName.zero)
// __match.guard(`c`, `then`)
- def guard(c: Tree, then: Tree, tp: Type): Tree = __match(vpmName.guard) APPLY (c, then)
+ def guard(c: Tree, then: Tree, tp: Type): Tree = _match(vpmName.guard) APPLY (c, then)
//// methods in the monad instance -- used directly in translation
// `prev`.flatMap(`b` => `next`)
@@ -1437,94 +1497,145 @@ class Foo(x: Other) { x._1 } // no error in this order
}
}
- //// SWITCHES
+ //// SWITCHES -- TODO: operate on Tests rather than TreeMakers
trait SwitchEmission extends TreeMakers with OptimizedMatchMonadInterface { self: CodegenCore =>
- object SwitchablePattern { def unapply(pat: Tree) = pat match {
- case Literal(Constant((_: Byte ) | (_: Short) | (_: Int ) | (_: Char ))) => true // TODO: Java 7 allows strings in switches
- case _ => false
- }}
-
- // def isSwitchable(cases: List[(List[TreeMaker], Tree)]): Boolean = {
- // def isSwitchableTreeMaker(tm: TreeMaker) = tm match {
- // case tm@EqualityTestTreeMaker(_, SwitchablePattern(), _) => true
- // case SubstOnlyTreeMaker(_) => true
- // case AlternativesTreeMaker(_, altss, _) => altss forall (_.forall(isSwitchableTreeMaker))
- // case _ => false
- // }
- // }
+ abstract class SwitchMaker {
+ abstract class SwitchableTreeMakerExtractor { def unapply(x: TreeMaker): Option[Tree] }
+ val SwitchableTreeMaker: SwitchableTreeMakerExtractor
- private val switchableTpes = Set(ByteClass.tpe, ShortClass.tpe, IntClass.tpe, CharClass.tpe)
+ def alternativesSupported: Boolean
- override def emitSwitch(scrut: Tree, scrutSym: Symbol, cases: List[List[TreeMaker]], pt: Type): Option[Tree] = {
- def sequence[T](xs: List[Option[T]]): Option[List[T]] =
+ def isDefault(x: CaseDef): Boolean
+ def defaultSym: Symbol
+ def defaultBody: Tree
+ def defaultCase(scrutSym: Symbol = defaultSym, body: Tree = defaultBody): CaseDef
+
+ private def sequence[T](xs: List[Option[T]]): Option[List[T]] =
if (xs exists (_.isEmpty)) None else Some(xs.flatten)
- def isSwitchableTpe(tpe: Type): Boolean =
- switchableTpes contains tpe
- def switchableConstToInt(x: Tree): Tree = {
- val Literal(const) = x
- const.tag match {
- case IntTag => x
- case ByteTag | ShortTag | CharTag => Literal(Constant(const.intValue))
+ // empty list ==> failure
+ def apply(cases: List[(Symbol, List[TreeMaker])], pt: Type): List[CaseDef] = {
+ val caseDefs = cases map { case (scrutSym, makers) =>
+ makers match {
+ // default case
+ case (btm@BodyTreeMaker(body, _)) :: Nil =>
+ Some(defaultCase(scrutSym, btm.substitution(body)))
+ // constant (or typetest for typeSwitch)
+ case SwitchableTreeMaker(pattern) :: (btm@BodyTreeMaker(body, _)) :: Nil =>
+ Some(CaseDef(pattern, EmptyTree, btm.substitution(body)))
+ // alternatives
+ case AlternativesTreeMaker(_, altss, _) :: (btm@BodyTreeMaker(body, _)) :: Nil if alternativesSupported =>
+ val casePatterns = altss map {
+ case SwitchableTreeMaker(pattern) :: Nil =>
+ Some(pattern)
+ case _ =>
+ None
+ }
+
+ sequence(casePatterns) map { patterns =>
+ val substedBody = btm.substitution(body)
+ CaseDef(Alternative(patterns), EmptyTree, substedBody)
+ }
+ case _ => //println("can't emit switch for "+ makers)
+ None //failure (can't translate pattern to a switch)
+ }
}
- }
- val caseDefs = cases map { makers =>
- removeSubstOnly(makers) match {
- // default case (don't move this to unfold, as it may only occur on the top level, not as an alternative -- well, except in degenerate matches)
- case (btm@BodyTreeMaker(body, _)) :: Nil =>
- Some(CaseDef(Ident(nme.WILDCARD), EmptyTree, btm.substitution(body)))
- // constant
- case (EqualityTestTreeMaker(_, const@SwitchablePattern(), _)) :: (btm@BodyTreeMaker(body, _)) :: Nil =>
- Some(CaseDef(switchableConstToInt(const), EmptyTree, btm.substitution(body)))
- // alternatives
- case AlternativesTreeMaker(_, altss, _) :: (btm@BodyTreeMaker(body, _)) :: Nil => // assert(currLabel.isEmpty && nextLabel.isEmpty)
- val caseConstants = altss map {
- case EqualityTestTreeMaker(_, const@SwitchablePattern(), _) :: Nil =>
- Some(switchableConstToInt(const))
- case _ =>
- None
+ (for(
+ caseDefs <- sequence(caseDefs)) yield
+ if (caseDefs exists isDefault) caseDefs
+ else {
+ caseDefs :+ defaultCase()
}
+ ) getOrElse Nil
+ }
+ }
- sequence(caseConstants) map { contants =>
- val substedBody = btm.substitution(body)
- CaseDef(Alternative(contants), EmptyTree, substedBody)
- }
- case _ =>
- None //failure (can't translate pattern to a switch)
+ class RegularSwitchMaker(scrutSym: Symbol) extends SwitchMaker {
+ val switchableTpe = Set(ByteClass.tpe, ShortClass.tpe, IntClass.tpe, CharClass.tpe)
+ val alternativesSupported = true
+
+ object SwitchablePattern { def unapply(pat: Tree): Option[Tree] = pat match {
+ case Literal(const@Constant((_: Byte ) | (_: Short) | (_: Int ) | (_: Char ))) =>
+ Some(Literal(Constant(const.intValue))) // TODO: Java 7 allows strings in switches
+ case _ => None
+ }}
+
+ object SwitchableTreeMaker extends SwitchableTreeMakerExtractor {
+ def unapply(x: TreeMaker): Option[Tree] = x match {
+ case EqualityTestTreeMaker(_, SwitchablePattern(const), _) => Some(const)
+ case _ => None
}
}
- if (!isSwitchableTpe(scrut.tpe))
- None // TODO: emit a cast of the scrutinee and a switch on the cast scrutinee if patterns allow switch but the type of the scrutinee doesn't
- else {
- sequence(caseDefs) map { caseDefs =>
- import CODE._
- val caseDefsWithDefault = {
- def isDefault(x: CaseDef): Boolean = x match {
- case CaseDef(Ident(nme.WILDCARD), EmptyTree, _) => true
- case _ => false
- }
- val hasDefault = caseDefs exists isDefault
- if (hasDefault) caseDefs else {
- val default = atPos(scrut.pos) { DEFAULT ==> MATCHERROR(REF(scrutSym)) }
- caseDefs :+ default
- }
- }
- val matcher = BLOCK(
- if (scrut.tpe != IntClass.tpe) {
- scrutSym setInfo IntClass.tpe
- VAL(scrutSym) === (scrut DOT nme.toInt)
- } else {
- VAL(scrutSym) === scrut
- },
- Match(REF(scrutSym), caseDefsWithDefault) // match on scrutSym, not scrut to avoid duplicating scrut
- )
- // matcher filter (tree => tree.tpe == null) foreach println
- // treeBrowser browse matcher
- matcher // set type to avoid recursion in typedMatch
+ def isDefault(x: CaseDef): Boolean = x match {
+ case CaseDef(Ident(nme.WILDCARD), EmptyTree, _) => true
+ case _ => false
+ }
+
+ def defaultSym: Symbol = scrutSym
+ def defaultBody: Tree = { import CODE._; MATCHERROR(REF(scrutSym)) }
+ def defaultCase(scrutSym: Symbol = defaultSym, body: Tree = defaultBody): CaseDef = { import CODE._; atPos(body.pos) {
+ DEFAULT ==> body
+ }}
+ }
+
+ override def emitSwitch(scrut: Tree, scrutSym: Symbol, cases: List[List[TreeMaker]], pt: Type): Option[Tree] = { import CODE._
+ val regularSwitchMaker = new RegularSwitchMaker(scrutSym)
+ // TODO: if patterns allow switch but the type of the scrutinee doesn't, cast (type-test) the scrutinee to the corresponding switchable type and switch on the result
+ if (regularSwitchMaker.switchableTpe(scrutSym.tpe)) {
+ val caseDefsWithDefault = regularSwitchMaker(cases map {c => (scrutSym, c)}, pt)
+ if (caseDefsWithDefault isEmpty) None
+ else {
+ // match on scrutSym -- converted to an int if necessary -- not on scrut directly (to avoid duplicating scrut)
+ val scrutToInt: Tree =
+ if(scrutSym.tpe =:= IntClass.tpe) REF(scrutSym)
+ else (REF(scrutSym) DOT (nme.toInt))
+ Some(BLOCK(
+ VAL(scrutSym) === scrut,
+ Match(scrutToInt, caseDefsWithDefault)
+ ))
}
+ } else None
+ }
+
+ // for the catch-cases in a try/catch
+ private object typeSwitchMaker extends SwitchMaker {
+ def switchableTpe(tp: Type) = true
+ val alternativesSupported = false // TODO: needs either back-end support or flattening of alternatives during typers
+
+ // TODO: there are more treemaker-sequences that can be handled by type tests
+ // analyze the result of approximateTreeMaker rather than the TreeMaker itself
+ object SwitchableTreeMaker extends SwitchableTreeMakerExtractor {
+ def unapply(x: TreeMaker): Option[Tree] = x match {
+ case tm@TypeTestTreeMaker(_, _, _) =>
+ Some(Bind(tm.nextBinder, Typed(Ident(nme.WILDCARD), TypeTree(tm.nextBinderTp)) /* not used by back-end */)) // -- TODO: use this if binder does not occur in the body
+ case tm@TypeAndEqualityTestTreeMaker(_, patBinder, pt, _) if tm.isStraightTypeTest =>
+ Some(Bind(tm.nextBinder, Typed(Ident(nme.WILDCARD), TypeTree(tm.nextBinderTp)) /* not used by back-end */))
+ case _ =>
+ None
+ }
+ }
+
+ def isDefault(x: CaseDef): Boolean = x match {
+ case CaseDef(Typed(Ident(nme.WILDCARD), tpt), EmptyTree, _) if (tpt.tpe =:= ThrowableClass.tpe) => true
+ case CaseDef(Bind(_, Typed(Ident(nme.WILDCARD), tpt)), EmptyTree, _) if (tpt.tpe =:= ThrowableClass.tpe) => true
+ case CaseDef(Ident(nme.WILDCARD), EmptyTree, _) => true
+ case _ => false
}
+
+ lazy val defaultSym: Symbol = freshSym(NoPosition, ThrowableClass.tpe)
+ def defaultBody: Tree = Throw(CODE.REF(defaultSym))
+ def defaultCase(scrutSym: Symbol = defaultSym, body: Tree = defaultBody): CaseDef = { import CODE._; atPos(body.pos) {
+ CASE (Bind(scrutSym, Typed(Ident(nme.WILDCARD), TypeTree(ThrowableClass.tpe)))) ==> body
+ }}
+ }
+
+ // TODO: drop null checks
+ override def emitTypeSwitch(bindersAndCases: List[(Symbol, List[TreeMaker])], pt: Type): Option[List[CaseDef]] = {
+ val caseDefsWithDefault = typeSwitchMaker(bindersAndCases, pt)
+ if (caseDefsWithDefault isEmpty) None
+ else Some(caseDefsWithDefault)
}
}
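`RegularSwitchMaker` only fires when the scrutinee has one of the switchable primitive types and every case is a constant pattern (or an alternative of constants), with the scrutinee bound once and widened to `Int`. A match of the shape it targets:

    object SwitchFriendly {
      // Char scrutinee, literal patterns, one alternative, and a default:
      // the conditions under which the translation can emit a JVM switch.
      def describe(c: Char): String = c match {
        case 'y' | 'Y' => "yes"
        case 'n'       => "no"
        case _         => "unknown" // default present, so no MatchError case is added
      }

      def main(args: Array[String]): Unit = {
        println(describe('Y')) // prints yes
        println(describe('x')) // prints unknown
      }
    }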
@@ -1551,33 +1662,31 @@ class Foo(x: Other) { x._1 } // no error in this order
/** Inline runOrElse and get rid of Option allocations
*
- * runOrElse(scrut: scrutTp)(matcher): resTp = matcher(scrut) getOrElse (throw new MatchError(x))
+ * runOrElse(scrut: scrutTp)(matcher): resTp = matcher(scrut) getOrElse ${catchAll(`scrut`)}
* the matcher's optional result is encoded as a flag, keepGoing, where keepGoing == true encodes result.isEmpty,
* if keepGoing is false, the result Some(x) of the naive translation is encoded as matchRes == x
*/
@inline private def dontStore(tp: Type) = (tp.typeSymbol eq UnitClass) || (tp.typeSymbol eq NothingClass)
lazy val keepGoing = freshSym(NoPosition, BooleanClass.tpe, "keepGoing") setFlag MUTABLE
lazy val matchRes = freshSym(NoPosition, AnyClass.tpe, "matchRes") setFlag MUTABLE
- def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, hasDefault: Boolean) = {
+ def runOrElse(scrut: Tree, scrutSym: Symbol, matcher: Tree, resTp: Type, catchAll: Option[Tree => Tree]) = {
matchRes.info = if (resTp ne NoType) resTp.widen else AnyClass.tpe // we don't always know resTp, and it might be AnyVal, in which case we can't assign NULL
if (dontStore(resTp)) matchRes resetFlag MUTABLE // don't assign to Unit-typed var's, in fact, make it a val -- conveniently also works around SI-5245
BLOCK(
VAL(zeroSym) === REF(NoneModule), // TODO: can we just get rid of explicitly emitted zero? don't know how to do that as a local rewrite...
- VAL(scrutSym) === scrut, // reuse the symbol of the function's argument to avoid creating a fresh one and substituting it for scrutSym in `matcher` -- the owner structure is repaired by fixerUpper
+ VAL(scrutSym) === scrut,
VAL(matchRes) === mkZero(matchRes.info), // must cast to deal with GADT typing, hence the private mkZero above
VAL(keepGoing) === TRUE,
matcher,
- if(hasDefault) REF(matchRes)
- else (IF (REF(keepGoing)) THEN MATCHERROR(REF(scrutSym)) ELSE REF(matchRes))
+ catchAll map { catchAllGen => (IF (REF(keepGoing)) THEN catchAllGen(REF(scrutSym)) ELSE REF(matchRes)) } getOrElse REF(matchRes)
)
}
// only used to wrap the RHS of a body
def one(res: Tree, bodyPt: Type, matchPt: Type): Tree = {
BLOCK(
- if (dontStore(matchPt)) res // runOrElse hasn't been called yet, so matchRes.isMutable is irrelevant, also, tp may be a subtype of resTp used in runOrElse...
- else (REF(matchRes) === res), // _asInstanceOf(res, tp.widen, force = true)
- REF(keepGoing) === FALSE,
+ REF(keepGoing) === FALSE, // comes before the assignment to matchRes, so the latter is in tail position (we can ignore the trailing zero -- it will disappear when we flatten blocks, which is TODO)
+ if (dontStore(matchPt)) res else (REF(matchRes) === res), // runOrElse hasn't been called yet, so matchRes.isMutable is irrelevant, also, tp may be a subtype of resTp used in runOrElse...
zero // to have a nice lub for lubs -- otherwise we'll get a boxed unit here -- TODO: get rid of all those dangling else zero's
)
}
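The optimized codegen flattens the `Option` result of the naive translation into two mutable locals: `keepGoing` encodes `isEmpty`, `matchRes` holds the value once a case succeeds, and the catch-all (when present) runs only if every case failed. The control flow, sketched as plain Scala:

    object FlattenedMatch {
      // cases: each returns Some(result) on a match, None on failure.
      def run[T, R](scrut: T)(cases: List[T => Option[R]])(catchAll: T => R): R = {
        var keepGoing = true                    // true <=> no case has matched yet
        var matchRes: R = null.asInstanceOf[R]  // stand-in for mkZero(resTp)
        val it = cases.iterator
        while (keepGoing && it.hasNext) it.next()(scrut) match {
          case Some(r) => keepGoing = false; matchRes = r
          case None    => ()
        }
        if (keepGoing) catchAll(scrut) else matchRes
      }

      def main(args: Array[String]): Unit = {
        val even: Int => Option[String] = n => if (n % 2 == 0) Some("even") else None
        println(run(4)(List(even))(n => "no match: " + n)) // prints even
        println(run(3)(List(even))(n => "no match: " + n)) // prints no match: 3
      }
    }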
diff --git a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
index e313edb3f6..4a92458403 100644
--- a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
@@ -282,6 +282,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
def otherTp = self.memberType(other)
def noErrorType = other.tpe != ErrorType && member.tpe != ErrorType
def isRootOrNone(sym: Symbol) = sym == RootClass || sym == NoSymbol
+ def isNeitherInClass = (member.owner != clazz) && (other.owner != clazz)
def objectOverrideErrorMsg = (
"overriding " + other.fullLocationString + " with " + member.fullLocationString + ":\n" +
"an overriding object must conform to the overridden object's class bound" +
@@ -383,7 +384,14 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
overrideError("cannot override final member");
// synthetic exclusion needed for (at least) default getters.
} else if (!other.isDeferred && !member.isAnyOverride && !member.isSynthetic) {
- overrideError("needs `override' modifier");
+ if (isNeitherInClass && !(other.owner isSubClass member.owner))
+ emitOverrideError(
+ clazz + " inherits conflicting members:\n "
+ + infoStringWithLocation(other) + " and\n " + infoStringWithLocation(member)
+ + "\n(Note: this can be resolved by declaring an override in " + clazz + ".)"
+ )
+ else
+ overrideError("needs `override' modifier")
} else if (other.isAbstractOverride && other.isIncompleteIn(clazz) && !member.isAbstractOverride) {
overrideError("needs `abstract override' modifiers")
} else if (member.isAnyOverride && (other hasFlag ACCESSOR) && other.accessed.isVariable && !other.accessed.isLazy) {
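The new branch distinguishes a genuine missing `override` from the case where both members are inherited from unrelated parents, producing the more helpful "inherits conflicting members" message. A program that triggers the diagnostic, together with the fix the message suggests:

    trait A { def name: String = "a" }
    trait B { def name: String = "b" }

    // class Bad extends A with B
    //   => error: class Bad inherits conflicting members: ... and ...
    //      (Note: this can be resolved by declaring an override in class Bad.)

    class Good extends A with B {
      override def name: String = "c" // the override the message asks for
    }

    object Conflict {
      def main(args: Array[String]): Unit = println(new Good().name) // prints c
    }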
@@ -520,13 +528,13 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
def uncurryAndErase(tp: Type) = erasure.erasure(sym, uncurry.transformInfo(sym, tp))
val tp1 = uncurryAndErase(clazz.thisType.memberType(sym))
val tp2 = uncurryAndErase(clazz.thisType.memberType(other))
- atPhase(currentRun.erasurePhase.next)(tp1 matches tp2)
+ afterErasure(tp1 matches tp2)
})
def ignoreDeferred(member: Symbol) = (
(member.isAbstractType && !member.isFBounded) || (
member.isJavaDefined &&
- // the test requires atPhase(erasurePhase.next) so shouldn't be
+ // the test requires afterErasure so shouldn't be
// done if the compiler has no erasure phase available
(currentRun.erasurePhase == NoPhase || javaErasedOverridingSym(member) != NoSymbol)
)
@@ -1167,7 +1175,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
case vsym => ValDef(vsym)
}
}
- def createStaticModuleAccessor() = atPhase(phase.next) {
+ def createStaticModuleAccessor() = afterRefchecks {
val method = (
sym.owner.newMethod(sym.name.toTermName, sym.pos, (sym.flags | STABLE) & ~MODULE)
setInfoAndEnter NullaryMethodType(sym.moduleClass.tpe)
@@ -1178,7 +1186,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
vdef,
localTyper.typedPos(tree.pos) {
val vsym = vdef.symbol
- atPhase(phase.next) {
+ afterRefchecks {
val rhs = gen.newModule(sym, vsym.tpe)
val body = if (sym.owner.isTrait) rhs else gen.mkAssignAndReturn(vsym, rhs)
DefDef(sym, body.changeOwner(vsym -> sym))
@@ -1214,12 +1222,12 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
else gen.mkAssignAndReturn(vsym, rhs)
)
val lazyDef = atPos(tree.pos)(DefDef(lazySym, body.changeOwner(vsym -> lazySym)))
- log("Made lazy def: " + lazyDef)
+ debuglog("Created lazy accessor: " + lazyDef)
if (hasUnitType) List(typed(lazyDef))
else List(
typed(ValDef(vsym)),
- atPhase(phase.next)(typed(lazyDef))
+ afterRefchecks(typed(lazyDef))
)
}
@@ -1443,26 +1451,6 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
transform(qual)
- case Apply(Select(New(tpt), name), args)
- if (tpt.tpe.typeSymbol == ArrayClass && args.length >= 2) =>
- unit.deprecationWarning(tree.pos,
- "new Array(...) with multiple dimensions has been deprecated; use Array.ofDim(...) instead")
- val manif = {
- var etpe = tpt.tpe
- for (_ <- args) { etpe = etpe.typeArgs.headOption.getOrElse(NoType) }
- if (etpe == NoType) {
- unit.error(tree.pos, "too many dimensions for array creation")
- Literal(Constant(null))
- } else {
- localTyper.getManifestTree(tree, etpe, false)
- }
- }
- val newResult = localTyper.typedPos(tree.pos) {
- new ApplyToImplicitArgs(gen.mkMethodCall(ArrayModule, nme.ofDim, args), List(manif))
- }
- currentApplication = tree
- newResult
-
case Apply(fn, args) =>
checkSensible(tree.pos, fn, args)
currentApplication = tree
@@ -1540,12 +1528,9 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
// inside annotations.
applyRefchecksToAnnotations(tree)
var result: Tree = tree match {
- case DefDef(mods, name, tparams, vparams, tpt, EmptyTree) if tree.symbol.hasAnnotation(NativeAttr) =>
- tree.symbol.resetFlag(DEFERRED)
- transform(treeCopy.DefDef(
- tree, mods, name, tparams, vparams, tpt,
- typed(gen.mkSysErrorCall("native method stub"))
- ))
+ case DefDef(_, _, _, _, _, EmptyTree) if sym hasAnnotation NativeAttr =>
+ sym resetFlag DEFERRED
+ transform(deriveDefDef(tree)(_ => typed(gen.mkSysErrorCall("native method stub"))))
case ValDef(_, _, _, _) | DefDef(_, _, _, _, _, _) =>
checkDeprecatedOvers(tree)
@@ -1563,8 +1548,7 @@ abstract class RefChecks extends InfoTransform with reflect.internal.transform.R
checkOverloadedRestrictions(currentOwner)
val bridges = addVarargBridges(currentOwner)
checkAllOverrides(currentOwner)
- if (bridges.nonEmpty) treeCopy.Template(tree, parents, self, body ::: bridges)
- else tree
+ if (bridges.nonEmpty) deriveTemplate(tree)(_ ::: bridges) else tree
case dc@TypeTreeWithDeferredRefCheck() => assert(false, "adapt should have turned dc: TypeTreeWithDeferredRefCheck into tpt: TypeTree, with tpt.original == dc"); dc
case tpt@TypeTree() =>
diff --git a/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala b/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
index 0ab09b4fec..64f1662a22 100644
--- a/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/SuperAccessors.scala
@@ -177,7 +177,7 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
case ModuleDef(_, _, _) =>
checkCompanionNameClashes(sym)
super.transform(tree)
- case Template(parents, self, body) =>
+ case Template(_, _, body) =>
val ownAccDefs = new ListBuffer[Tree]
accDefs(currentOwner) = ownAccDefs
@@ -189,7 +189,7 @@ abstract class SuperAccessors extends transform.Transform with transform.TypingT
val body1 = atOwner(currentOwner)(transformTrees(body))
accDefs -= currentOwner
ownAccDefs ++= body1
- treeCopy.Template(tree, parents, self, ownAccDefs.toList)
+ deriveTemplate(tree)(_ => ownAccDefs.toList)
case TypeApply(sel @ Select(This(_), name), args) =>
mayNeedProtectedAccessor(sel, args, false)
diff --git a/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala b/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
index cf90577959..c53b92c5be 100644
--- a/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/SyntheticMethods.scala
@@ -248,11 +248,11 @@ trait SyntheticMethods extends ast.TreeDSL {
}
if (phase.id > currentRun.typerPhase.id) templ
- else treeCopy.Template(templ, templ.parents, templ.self,
+ else deriveTemplate(templ)(body =>
if (clazz.isCase) caseTemplateBody()
else synthesize() match {
- case Nil => templ.body // avoiding unnecessary copy
- case ms => templ.body ++ ms
+ case Nil => body // avoiding unnecessary copy
+ case ms => body ++ ms
}
)
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/Typers.scala b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
index ef69e1525e..0a1a385846 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Typers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
@@ -614,7 +614,12 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (tree.isErrorTyped) tree
else if ((mode & (PATTERNmode | FUNmode)) == PATTERNmode && tree.isTerm) { // (1)
- if (sym.isValue) checkStable(tree)
+ if (sym.isValue) {
+ val tree1 = checkStable(tree)
+ // A module reference in a pattern has type Foo.type, not "object Foo"
+ if (sym.isModule && !sym.isMethod) tree1 setType singleType(pre, sym)
+ else tree1
+ }
else fail()
} else if ((mode & (EXPRmode | QUALmode)) == EXPRmode && !sym.isValue && !phase.erasedTypes) { // (2)
fail()
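With the fix above, a stable module reference used as a pattern is typed as its singleton type (`Foo.type`), matching its meaning as an identity test rather than a reference to "object Foo". For example:

    object ModulePattern {
      def isEmptyList(xs: List[Int]): Boolean = xs match {
        // The pattern Nil is an identity test; under the fix it is typed as
        // the singleton type Nil.type rather than "object Nil".
        case Nil => true
        case _   => false
      }

      def main(args: Array[String]): Unit = {
        println(isEmptyList(Nil))     // prints true
        println(isEmptyList(List(1))) // prints false
      }
    }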
@@ -941,10 +946,14 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
if (tree.isType)
adaptType()
- else if (inExprModeButNot(mode, FUNmode) && tree.symbol != null && tree.symbol.isMacro && !tree.isDef) {
- val tree1 = expandMacro(tree)
- if (tree1.isErroneous) tree1 else typed(tree1, mode, pt)
- } else if ((mode & (PATTERNmode | FUNmode)) == (PATTERNmode | FUNmode))
+ else if (inExprModeButNot(mode, FUNmode) && tree.symbol != null && tree.symbol.isMacro && !tree.isDef && !(tree exists (_.isErroneous)))
+ macroExpand(tree, this) match {
+ case Some(expanded: Tree) =>
+ typed(expanded, mode, pt)
+ case None =>
+ setError(tree) // error already reported
+ }
+ else if ((mode & (PATTERNmode | FUNmode)) == (PATTERNmode | FUNmode))
adaptConstrPattern()
else if (inAllModes(mode, EXPRmode | FUNmode) &&
!tree.tpe.isInstanceOf[MethodType] &&
@@ -1499,12 +1508,12 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (templ.symbol == NoSymbol)
templ setSymbol clazz.newLocalDummy(templ.pos)
val self1 = templ.self match {
- case vd @ ValDef(mods, name, tpt, EmptyTree) =>
+ case vd @ ValDef(_, _, tpt, EmptyTree) =>
val tpt1 = checkNoEscaping.privates(
clazz.thisSym,
treeCopy.TypeTree(tpt).setOriginal(tpt) setType vd.symbol.tpe
)
- treeCopy.ValDef(vd, mods, name, tpt1, EmptyTree) setType NoType
+ copyValDef(vd)(tpt = tpt1, rhs = EmptyTree) setType NoType
}
// was:
// val tpt1 = checkNoEscaping.privates(clazz.thisSym, typedType(tpt))
@@ -1857,8 +1866,9 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
val restpe = ldef.symbol.tpe.resultType
val rhs1 = typed(ldef.rhs, restpe)
ldef.params foreach (param => param.tpe = param.symbol.tpe)
- treeCopy.LabelDef(ldef, ldef.name, ldef.params, rhs1) setType restpe
- } else {
+ deriveLabelDef(ldef)(_ => rhs1) setType restpe
+ }
+ else {
val initpe = ldef.symbol.tpe.resultType
val rhs1 = typed(ldef.rhs)
val restpe = rhs1.tpe
@@ -1871,7 +1881,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
context.owner.newLabel(ldef.name, ldef.pos) setInfo MethodType(List(), restpe))
val rhs2 = typed(resetAllAttrs(ldef.rhs), restpe)
ldef.params foreach (param => param.tpe = param.symbol.tpe)
- treeCopy.LabelDef(ldef, ldef.name, ldef.params, rhs2) setSymbol sym2 setType restpe
+ deriveLabelDef(ldef)(_ => rhs2) setSymbol sym2 setType restpe
}
}
}
@@ -1987,7 +1997,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
treeCopy.CaseDef(cdef, pat1, guard1, body1) setType body1.tpe
}
- def typedCases(tree: Tree, cases: List[CaseDef], pattp: Type, pt: Type): List[CaseDef] =
+ def typedCases(cases: List[CaseDef], pattp: Type, pt: Type): List[CaseDef] =
cases mapConserve { cdef =>
newTyper(context.makeNewScope(cdef, context.owner)).typedCase(cdef, pattp, pt)
}
@@ -2169,6 +2179,10 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
// error for this is issued in RefChecks.checkDefaultsInOverloaded
if (!e.sym.isErroneous && !e1.sym.isErroneous && !e.sym.hasDefaultFlag &&
!e.sym.hasAnnotation(BridgeClass) && !e1.sym.hasAnnotation(BridgeClass)) {
+ log("Double definition detected:\n " +
+ ((e.sym.getClass, e.sym.info, e.sym.ownerChain)) + "\n " +
+ ((e1.sym.getClass, e1.sym.info, e1.sym.ownerChain)))
+
DefDefinedTwiceError(e.sym, e1.sym)
scope.unlink(e1) // need to unlink to avoid later problems with lub; see #2779
}
@@ -3028,40 +3042,6 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
packSymbols(localSyms.toList, normalizedTpe)
}
- /** Replace type parameters with their TypeSkolems, which can later
- * be deskolemized to the original type param. (A skolem is a
- * representation of a bound variable when viewed inside its scope)
- * !!!Adriaan: this does not work for hk types.
- */
- def skolemizeTypeParams(tparams: List[TypeDef]): List[TypeDef] = {
- class Deskolemizer extends LazyType {
- override val typeParams = tparams map (_.symbol)
- val typeSkolems = typeParams map (_.newTypeSkolem setInfo this)
- // Replace the symbols
- def substitute() = map2(tparams, typeSkolems)(_ setSymbol _)
- override def complete(sym: Symbol) {
- // The info of a skolem is the skolemized info of the
- // actual type parameter of the skolem
- sym setInfo sym.deSkolemize.info.substSym(typeParams, typeSkolems)
- }
- }
- (new Deskolemizer).substitute()
- }
- /** Convert to corresponding type parameters all skolems of method
- * parameters which appear in `tparams`.
- */
- def deskolemizeTypeParams(tparams: List[Symbol])(tp: Type): Type = {
- class DeSkolemizeMap extends TypeMap {
- def apply(tp: Type): Type = tp match {
- case TypeRef(pre, sym, args) if sym.isTypeSkolem && (tparams contains sym.deSkolemize) =>
- mapOver(typeRef(NoPrefix, sym.deSkolemize, args))
- case _ =>
- mapOver(tp)
- }
- }
- new DeSkolemizeMap mapOver tp
- }
-
def typedClassOf(tree: Tree, tpt: Tree, noGen: Boolean = false) =
if (!checkClassType(tpt, true, false) && noGen) tpt
else atPos(tree.pos)(gen.mkClassOf(tpt.tpe))
@@ -3318,7 +3298,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
typed1(atPos(tree.pos) { Function(params, body) }, mode, pt)
} else {
val selector1 = checkDead(typed(selector, EXPRmode | BYVALmode, WildcardType))
- var cases1 = typedCases(tree, cases, packCaptured(selector1.tpe.widen), pt)
+ var cases1 = typedCases(cases, packCaptured(selector1.tpe.widen), pt)
if (isPastTyper || !opt.virtPatmat) {
val (owntype, needAdapt) = ptOrLub(cases1 map (_.tpe))
@@ -3334,7 +3314,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
(MatchTranslator(this)).translateMatch(selector1, cases1, owntype) match {
case Block(vd :: Nil, tree@Match(selector, cases)) =>
val selector1 = checkDead(typed(selector, EXPRmode | BYVALmode, WildcardType))
- var cases1 = typedCases(tree, cases, packCaptured(selector1.tpe.widen), pt)
+ var cases1 = typedCases(cases, packCaptured(selector1.tpe.widen), pt)
val (owntype, needAdapt) = ptOrLub(cases1 map (_.tpe))
if (needAdapt)
cases1 = cases1 map (adaptCase(_, owntype))
@@ -3670,8 +3650,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
if (ps.isEmpty)
ps = site.parents filter (_.typeSymbol.toInterface.name == mix)
if (ps.isEmpty) {
- if (settings.debug.value)
- Console.println(site.parents map (_.typeSymbol.name))//debug
+ debuglog("Fatal: couldn't find site " + site + " in " + site.parents.map(_.typeSymbol.name))
if (phase.erasedTypes && context.enclClass.owner.isImplClass) {
// println(qual1)
// println(clazz)
@@ -4138,7 +4117,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
def adaptCase(cdef: CaseDef, tpe: Type): CaseDef =
- treeCopy.CaseDef(cdef, cdef.pat, cdef.guard, adapt(cdef.body, mode, tpe))
+ deriveCaseDef(cdef)(adapt(_, mode, tpe))
// begin typed1
val sym: Symbol = tree.symbol
@@ -4242,7 +4221,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
case Try(block, catches, finalizer) =>
var block1 = typed(block, pt)
- var catches1 = typedCases(tree, catches, ThrowableClass.tpe, pt)
+ var catches1 = typedCases(catches, ThrowableClass.tpe, pt)
val finalizer1 = if (finalizer.isEmpty) finalizer
else typed(finalizer, UnitClass.tpe)
val (owntype, needAdapt) = ptOrLub(block1.tpe :: (catches1 map (_.tpe)))
@@ -4250,6 +4229,11 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
block1 = adapt(block1, mode, owntype)
catches1 = catches1 map (adaptCase(_, owntype))
}
+
+ if(!isPastTyper && opt.virtPatmat) {
+ catches1 = (MatchTranslator(this)).translateTry(catches1, owntype, tree.pos)
+ }
+
treeCopy.Try(tree, block1, catches1, finalizer1) setType owntype
case Throw(expr) =>
@@ -4536,13 +4520,6 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
}
}
- def expandMacro(tree: Tree): Tree =
- macroExpand(tree, context) match {
- case Some(t: Tree) => t
- case Some(t) => MacroExpandError(tree, t)
- case None => setError(tree) // error already reported
- }
-
def atOwner(owner: Symbol): Typer =
newTyper(context.make(context.tree, owner))
@@ -4658,7 +4635,7 @@ trait Typers extends Modes with Adaptations with PatMatVirtualiser {
case None => typed(tree, mode, pt)
}
- def findManifest(tp: Type, full: Boolean) = atPhase(currentRun.typerPhase) {
+ def findManifest(tp: Type, full: Boolean) = beforeTyper {
inferImplicit(
EmptyTree,
appliedType((if (full) FullManifestClass else PartialManifestClass).typeConstructor, List(tp)),
diff --git a/src/compiler/scala/tools/nsc/util/ProxyReport.scala b/src/compiler/scala/tools/nsc/util/ProxyReport.scala
index 2f4f029308..4fc86c3a32 100644
--- a/src/compiler/scala/tools/nsc/util/ProxyReport.scala
+++ b/src/compiler/scala/tools/nsc/util/ProxyReport.scala
@@ -141,6 +141,6 @@ object ProxyReportRunner {
s.processArguments(args.toList.tail, true)
val g = new ProxyGlobal(s)
val run = new g.Run()
- g.atPhase(run.typerPhase.next)(g.proxyReport.generate(dir))
+ g.afterTyper(g.proxyReport.generate(dir))
}
}
diff --git a/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala b/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
index ab1510bd7f..9930f28229 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/CPSAnnotationChecker.scala
@@ -50,7 +50,27 @@ abstract class CPSAnnotationChecker extends CPSUtils {
// @plus @cps will fall through and compare the @cps type args
// @cps parameters must match exactly
- (annots1 corresponds annots2)(_.atp <:< _.atp)
+ if ((annots1 corresponds annots2)(_.atp <:< _.atp))
+ return true
+
+ // Need to handle uninstantiated type vars specially:
+
+ // g map (x => x) with expected type List[Int] @cps
+ // results in comparison ?That <:< List[Int] @cps
+
+ // Instantiating ?That to an annotated type would fail during
+ // transformation.
+
+ // Instead we force-compare tpe1 <:< tpe2.withoutAnnotations
+ // to trigger instantiation of the TypeVar to the base type
+
+ // This is a bit unorthodox (we're only supposed to look at
+ // annotations here) but seems to work.
+
+ if (!annots2.isEmpty && !tpe1.isGround)
+ return tpe1 <:< tpe2.withoutAnnotations
+
+ false
}
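The scenario the comment describes comes from collection builders: during `map`, the result type is an uninstantiated type variable (the `?That` of the implicit `CanBuildFrom`) that must be solved against the expected type, and comparing against `tpe2.withoutAnnotations` lets it instantiate to the base type. The un-annotated version of that inference, for reference:

    object ThatInference {
      def main(args: Array[String]): Unit = {
        val g = List(1, 2, 3)
        // The expected type drives the instantiation of map's result type
        // parameter (?That) to List[Int]; with a cps annotation on the expected
        // type, the checker now compares against the type without annotations
        // so the same instantiation can succeed.
        val mapped: List[Int] = g map (x => x)
        println(mapped) // prints List(1, 2, 3)
      }
    }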
/** Refine the computed least upper bound of a list of types.
@@ -222,6 +242,9 @@ abstract class CPSAnnotationChecker extends CPSUtils {
case OverloadedType(pre, alts) =>
OverloadedType(pre, alts.map((sym: Symbol) => updateAttributes(pre.memberType(sym), annots)))
*/
+ case OverloadedType(pre, alts) => tpe //reconstruct correct annotations later
+ case MethodType(params, restpe) => tpe
+ case PolyType(params, restpe) => tpe
case _ =>
assert(childAnnots forall (_ matches MarkerCPSTypes), childAnnots)
/*
@@ -229,7 +252,7 @@ abstract class CPSAnnotationChecker extends CPSUtils {
plus + [] = plus
cps + [] = cps
plus cps + [] = plus cps
- minus cps + [] = minus cp
+ minus cps + [] = minus cps
synth cps + [] = synth cps // <- synth on left - does it happen?
[] + cps = cps
@@ -318,13 +341,27 @@ abstract class CPSAnnotationChecker extends CPSUtils {
}
def transChildrenInOrder(tree: Tree, tpe: Type, childTrees: List[Tree], byName: List[Tree]) = {
- val children = childTrees.flatMap { t =>
+ def inspect(t: Tree): List[AnnotationInfo] = {
if (t.tpe eq null) Nil else {
+ val extra: List[AnnotationInfo] = t.tpe match {
+ case _: MethodType | _: PolyType | _: OverloadedType =>
+ // method types, poly types and overloaded types do not obtain cps annotations by propagation;
+ // need to reconstruct transitively from their children.
+ t match {
+ case Select(qual, name) => inspect(qual)
+ case Apply(fun, args) => (fun::args) flatMap inspect
+ case TypeApply(fun, args) => (fun::args) flatMap inspect
+ case _ => Nil
+ }
+ case _ => Nil
+ }
+
val types = cpsParamAnnotation(t.tpe)
// TODO: check that it has been adapted and if so correctly
- if (types.isEmpty) Nil else List(single(types))
+ extra ++ (if (types.isEmpty) Nil else List(single(types)))
}
}
+ val children = childTrees flatMap inspect
val newtpe = updateAttributesFromChildren(tpe, children, byName)
@@ -359,9 +396,15 @@ abstract class CPSAnnotationChecker extends CPSUtils {
transChildrenInOrder(tree, tpe, qual::(transArgList(fun, args).flatten), Nil)
+ case Apply(TypeApply(fun @ Select(qual, name), targs), args) if fun.isTyped => // not triggered
+
+ vprintln("[checker] checking select apply type-apply " + tree + "/" + tpe)
+
+ transChildrenInOrder(tree, tpe, qual::(transArgList(fun, args).flatten), Nil)
+
case TypeApply(fun @ Select(qual, name), args) if fun.isTyped =>
def stripNullaryMethodType(tp: Type) = tp match { case NullaryMethodType(restpe) => restpe case tp => tp }
- vprintln("[checker] checking select apply " + tree + "/" + tpe)
+ vprintln("[checker] checking select type-apply " + tree + "/" + tpe)
transChildrenInOrder(tree, stripNullaryMethodType(tpe), List(qual, fun), Nil)
@@ -373,7 +416,7 @@ abstract class CPSAnnotationChecker extends CPSUtils {
case TypeApply(fun, args) =>
- vprintln("[checker] checking type apply " + tree + "/" + tpe)
+ vprintln("[checker] checking unknown type apply " + tree + "/" + tpe)
transChildrenInOrder(tree, tpe, List(fun), Nil)
diff --git a/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala b/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
index cea558d2d3..d98169f21a 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/SelectiveANFTransform.scala
@@ -47,20 +47,20 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
// ValDef case here.
case dd @ DefDef(mods, name, tparams, vparamss, tpt, rhs) =>
- log("transforming " + dd.symbol)
+ debuglog("transforming " + dd.symbol)
atOwner(dd.symbol) {
val rhs1 = transExpr(rhs, None, getExternalAnswerTypeAnn(tpt.tpe))
- log("result "+rhs1)
- log("result is of type "+rhs1.tpe)
+ debuglog("result "+rhs1)
+ debuglog("result is of type "+rhs1.tpe)
treeCopy.DefDef(dd, mods, name, transformTypeDefs(tparams), transformValDefss(vparamss),
transform(tpt), rhs1)
}
case ff @ Function(vparams, body) =>
- log("transforming anon function " + ff.symbol)
+ debuglog("transforming anon function " + ff.symbol)
atOwner(ff.symbol) {
@@ -88,14 +88,14 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
transExpr(body, None, ext)
}
- log("result "+body1)
- log("result is of type "+body1.tpe)
+ debuglog("result "+body1)
+ debuglog("result is of type "+body1.tpe)
treeCopy.Function(ff, transformValDefs(vparams), body1)
}
case vd @ ValDef(mods, name, tpt, rhs) => // object-level valdefs
- log("transforming valdef " + vd.symbol)
+ debuglog("transforming valdef " + vd.symbol)
atOwner(vd.symbol) {
@@ -298,8 +298,8 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
if (!expr.isEmpty && (expr.tpe.typeSymbol ne NothingClass)) {
// must convert!
- log("cps type conversion (has: " + cpsA + "/" + spc + "/" + expr.tpe + ")")
- log("cps type conversion (expected: " + cpsR.get + "): " + expr)
+ debuglog("cps type conversion (has: " + cpsA + "/" + spc + "/" + expr.tpe + ")")
+ debuglog("cps type conversion (expected: " + cpsR.get + "): " + expr)
if (!hasPlusMarker(expr.tpe))
unit.warning(tree.pos, "expression " + tree + " is cps-transformed unexpectedly")
@@ -322,10 +322,10 @@ abstract class SelectiveANFTransform extends PluginComponent with Transform with
} else if (!cpsR.isDefined && bot.isDefined) {
// error!
- log("cps type error: " + expr)
+ debuglog("cps type error: " + expr)
//println("cps type error: " + expr + "/" + expr.tpe + "/" + getAnswerTypeAnn(expr.tpe))
- println(cpsR + "/" + spc + "/" + bot)
+ //println(cpsR + "/" + spc + "/" + bot)
unit.error(tree.pos, "found cps expression in non-cps position")
} else {
diff --git a/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala b/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
index b2a1546b4e..6453671eac 100644
--- a/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
+++ b/src/continuations/plugin/scala/tools/selectivecps/SelectiveCPSTransform.scala
@@ -39,10 +39,10 @@ abstract class SelectiveCPSTransform extends PluginComponent with
val newtp = transformCPSType(tp)
if (newtp != tp)
- log("transformInfo changed type for " + sym + " to " + newtp);
+ debuglog("transformInfo changed type for " + sym + " to " + newtp);
if (sym == MethReifyR)
- log("transformInfo (not)changed type for " + sym + " to " + newtp);
+ debuglog("transformInfo (not)changed type for " + sym + " to " + newtp);
newtp
}
@@ -83,13 +83,13 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case Apply(TypeApply(fun, targs), args)
if (fun.symbol == MethShift) =>
- log("found shift: " + tree)
+ debuglog("found shift: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethShiftR) // TODO: correct?
//gen.mkAttributedSelect(gen.mkAttributedSelect(gen.mkAttributedSelect(gen.mkAttributedIdent(ScalaPackage),
//ScalaPackage.tpe.member("util")), ScalaPackage.tpe.member("util").tpe.member("continuations")), MethShiftR)
//gen.mkAttributedRef(ModCPS.tpe, MethShiftR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, targs).setType(appliedType(funR.tpe, targs.map((t:Tree) => t.tpe))),
args.map(transform(_))
@@ -98,10 +98,10 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case Apply(TypeApply(fun, targs), args)
if (fun.symbol == MethShiftUnit) =>
- log("found shiftUnit: " + tree)
+ debuglog("found shiftUnit: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethShiftUnitR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, List(targs(0), targs(1))).setType(appliedType(funR.tpe,
List(targs(0).tpe, targs(1).tpe))),
@@ -114,7 +114,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
log("found reify: " + tree)
atPos(tree.pos) {
val funR = gen.mkAttributedRef(MethReifyR) // TODO: correct?
- log(funR.tpe)
+ debuglog("funR.tpe = " + funR.tpe)
Apply(
TypeApply(funR, targs).setType(appliedType(funR.tpe, targs.map((t:Tree) => t.tpe))),
args.map(transform(_))
@@ -203,7 +203,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
rhs.changeOwner(currentOwner -> fun.symbol)
val exSym = currentOwner.newValueParameter(cpsNames.ex, pos).setInfo(ThrowableClass.tpe)
- val catch2 = { localTyper.typedCases(tree, List(
+ val catch2 = { localTyper.typedCases(List(
CaseDef(Bind(exSym, Typed(Ident("_"), TypeTree(ThrowableClass.tpe))),
Apply(Select(Ident(funSym), nme.isDefinedAt), List(Ident(exSym))),
Apply(Ident(funSym), List(Ident(exSym))))
@@ -258,17 +258,17 @@ abstract class SelectiveCPSTransform extends PluginComponent with
case vd @ ValDef(mods, name, tpt, rhs)
if (vd.symbol.hasAnnotation(MarkerCPSSym)) =>
- log("found marked ValDef "+name+" of type " + vd.symbol.tpe)
+ debuglog("found marked ValDef "+name+" of type " + vd.symbol.tpe)
val tpe = vd.symbol.tpe
val rhs1 = atOwner(vd.symbol) { transform(rhs) }
rhs1.changeOwner(vd.symbol -> currentOwner) // TODO: don't traverse twice
- log("valdef symbol " + vd.symbol + " has type " + tpe)
- log("right hand side " + rhs1 + " has type " + rhs1.tpe)
+ debuglog("valdef symbol " + vd.symbol + " has type " + tpe)
+ debuglog("right hand side " + rhs1 + " has type " + rhs1.tpe)
- log("currentOwner: " + currentOwner)
- log("currentMethod: " + currentMethod)
+ debuglog("currentOwner: " + currentOwner)
+ debuglog("currentMethod: " + currentMethod)
val (bodyStms, bodyExpr) = transBlock(rest, expr)
// FIXME: result will later be traversed again by TreeSymSubstituter and
@@ -308,12 +308,12 @@ abstract class SelectiveCPSTransform extends PluginComponent with
// see note about multiple traversals above
- log("fun.symbol: "+fun.symbol)
- log("fun.symbol.owner: "+fun.symbol.owner)
- log("arg.owner: "+arg.owner)
+ debuglog("fun.symbol: "+fun.symbol)
+ debuglog("fun.symbol.owner: "+fun.symbol.owner)
+ debuglog("arg.owner: "+arg.owner)
- log("fun.tpe:"+fun.tpe)
- log("return type of fun:"+body1.tpe)
+ debuglog("fun.tpe:"+fun.tpe)
+ debuglog("return type of fun:"+body1.tpe)
var methodName = nme.map
@@ -324,7 +324,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
else
unit.error(rhs.pos, "cannot compute type for CPS-transformed function result")
- log("will use method:"+methodName)
+ debuglog("will use method:"+methodName)
localTyper.typed(atPos(vd.symbol.pos) {
Apply(Select(ctxR, ctxR.tpe.member(methodName)), List(fun))
@@ -335,7 +335,7 @@ abstract class SelectiveCPSTransform extends PluginComponent with
try {
if (specialCaseTrivial) {
- log("will optimize possible tail call: " + bodyExpr)
+ debuglog("will optimize possible tail call: " + bodyExpr)
// FIXME: flatMap impl has become more complicated due to
// exceptions. do we need to put a try/catch in the then part??
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
index 3fad92cbf1..e9389e9acb 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
@@ -1,669 +1,2324 @@
/*
+
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.locks.*;
-import java.util.concurrent.atomic.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+//import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+//import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
+import java.util.concurrent.locks.Condition;
+
+interface RunnableFuture<T> extends Runnable {
+ //TR placeholder for java.util.concurrent.RunnableFuture
+}
/**
- * An {@link ExecutorService} for running {@link ForkJoinTask}s. A
- * ForkJoinPool provides the entry point for submissions from
- * non-ForkJoinTasks, as well as management and monitoring operations.
- * Normally a single ForkJoinPool is used for a large number of
- * submitted tasks. Otherwise, use would not usually outweigh the
- * construction and bookkeeping overhead of creating a large set of
- * threads.
+ * An {@link ExecutorService} for running {@link ForkJoinTask}s.
+ * A {@code ForkJoinPool} provides the entry point for submissions
+ * from non-{@code ForkJoinTask} clients, as well as management and
+ * monitoring operations.
*
- * <p>ForkJoinPools differ from other kinds of Executors mainly in
- * that they provide <em>work-stealing</em>: all threads in the pool
- * attempt to find and execute subtasks created by other active tasks
- * (eventually blocking if none exist). This makes them efficient when
- * most tasks spawn other subtasks (as do most ForkJoinTasks), as well
- * as the mixed execution of some plain Runnable- or Callable- based
- * activities along with ForkJoinTasks. When setting
- * <tt>setAsyncMode</tt>, a ForkJoinPools may also be appropriate for
- * use with fine-grained tasks that are never joined. Otherwise, other
- * ExecutorService implementations are typically more appropriate
- * choices.
+ * <p>A {@code ForkJoinPool} differs from other kinds of {@link
+ * ExecutorService} mainly by virtue of employing
+ * <em>work-stealing</em>: all threads in the pool attempt to find and
+ * execute tasks submitted to the pool and/or created by other active
+ * tasks (eventually blocking waiting for work if none exist). This
+ * enables efficient processing when most tasks spawn other subtasks
+ * (as do most {@code ForkJoinTask}s), as well as when many small
+ * tasks are submitted to the pool from external clients. Especially
+ * when setting <em>asyncMode</em> to true in constructors, {@code
+ * ForkJoinPool}s may also be appropriate for use with event-style
+ * tasks that are never joined.
*
- * <p>A ForkJoinPool may be constructed with a given parallelism level
- * (target pool size), which it attempts to maintain by dynamically
- * adding, suspending, or resuming threads, even if some tasks are
- * waiting to join others. However, no such adjustments are performed
- * in the face of blocked IO or other unmanaged synchronization. The
- * nested <code>ManagedBlocker</code> interface enables extension of
- * the kinds of synchronization accommodated. The target parallelism
- * level may also be changed dynamically (<code>setParallelism</code>)
- * and thread construction can be limited using methods
- * <code>setMaximumPoolSize</code> and/or
- * <code>setMaintainsParallelism</code>.
+ * <p>A {@code ForkJoinPool} is constructed with a given target
+ * parallelism level; by default, equal to the number of available
+ * processors. The pool attempts to maintain enough active (or
+ * available) threads by dynamically adding, suspending, or resuming
+ * internal worker threads, even if some tasks are stalled waiting to
+ * join others. However, no such adjustments are guaranteed in the
+ * face of blocked IO or other unmanaged synchronization. The nested
+ * {@link ManagedBlocker} interface enables extension of the kinds of
+ * synchronization accommodated.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
- * <code>getStealCount</code>) that are intended to aid in developing,
+ * {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
- * <code>toString</code> returns indications of pool state in a
+ * {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
+ * <p> As is the case with other ExecutorServices, there are three
+ * main task execution methods summarized in the following table.
+ * These are designed to be used primarily by clients not already
+ * engaged in fork/join computations in the current pool. The main
+ * forms of these methods accept instances of {@code ForkJoinTask},
+ * but overloaded forms also allow mixed execution of plain {@code
+ * Runnable}- or {@code Callable}- based activities as well. However,
+ * tasks that are already executing in a pool should normally instead
+ * use the within-computation forms listed in the table unless using
+ * async event-style tasks that are not usually joined, in which case
+ * there is little difference among choice of methods.
+ *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ * <tr>
+ * <td></td>
+ * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange async execution</td>
+ * <td> {@link #execute(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Await and obtain result</td>
+ * <td> {@link #invoke(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#invoke}</td>
+ * </tr>
+ * <tr>
+ * <td> <b>Arrange exec and obtain Future</td>
+ * <td> {@link #submit(ForkJoinTask)}</td>
+ * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ * </tr>
+ * </table>
+ *
+ * <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
+ * used for all parallel task execution in a program or subsystem.
+ * Otherwise, use would not usually outweigh the construction and
+ * bookkeeping overhead of creating a large set of threads. For
+ * example, a common pool could be used for the {@code SortTasks}
+ * illustrated in {@link RecursiveAction}. Because {@code
+ * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
+ * daemon} mode, there is typically no need to explicitly {@link
+ * #shutdown} such a pool upon program exit.
+ *
+ * <pre> {@code
+ * static final ForkJoinPool mainPool = new ForkJoinPool();
+ * ...
+ * public void sort(long[] array) {
+ * mainPool.invoke(new SortTask(array, 0, array.length));
+ * }}</pre>
+ *
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of running threads to 32767. Attempts to create
- * pools with greater than the maximum result in
- * IllegalArgumentExceptions.
+ * pools with greater than the maximum number result in
+ * {@code IllegalArgumentException}.
+ *
+ * <p>This implementation rejects submitted tasks (that is, by throwing
+ * {@link RejectedExecutionException}) only when the pool is shut down
+ * or internal resources have been exhausted.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public class ForkJoinPool /*extends AbstractExecutorService*/ {
/*
- * See the extended comments interspersed below for design,
- * rationale, and walkthroughs.
+ * Implementation Overview
+ *
+ * This class and its nested classes provide the main
+ * functionality and control for a set of worker threads:
+ * Submissions from non-FJ threads enter into submission queues.
+ * Workers take these tasks and typically split them into subtasks
+ * that may be stolen by other workers. Preference rules give
+ * first priority to processing tasks from their own queues (LIFO
+ * or FIFO, depending on mode), then to randomized FIFO steals of
+ * tasks in other queues.
+ *
+ * WorkQueues
+ * ==========
+ *
+ * Most operations occur within work-stealing queues (in nested
+ * class WorkQueue). These are special forms of Deques that
+ * support only three of the four possible end-operations -- push,
+ * pop, and poll (aka steal), under the further constraints that
+ * push and pop are called only from the owning thread (or, as
+ * extended here, under a lock), while poll may be called from
+ * other threads. (If you are unfamiliar with them, you probably
+ * want to read Herlihy and Shavit's book "The Art of
+ * Multiprocessor Programming", chapter 16 describing these in
+ * more detail before proceeding.) The main work-stealing queue
+ * design is roughly similar to those in the papers "Dynamic
+ * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
+ * (http://research.sun.com/scalable/pubs/index.html) and
+ * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
+ * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
+ * The main differences ultimately stem from GC requirements that
+ * we null out taken slots as soon as we can, to maintain as small
+ * a footprint as possible even in programs generating huge
+ * numbers of tasks. To accomplish this, we shift the CAS
+ * arbitrating pop vs poll (steal) from being on the indices
+ * ("base" and "top") to the slots themselves. So, both a
+ * successful pop and poll mainly entail a CAS of a slot from
+ * non-null to null. Because we rely on CASes of references, we
+ * do not need tag bits on base or top. They are simple ints as
+ * used in any circular array-based queue (see for example
+ * ArrayDeque). Updates to the indices must still be ordered in a
+ * way that guarantees that top == base means the queue is empty,
+ * but otherwise may err on the side of possibly making the queue
+ * appear nonempty when a push, pop, or poll have not fully
+ * committed. Note that this means that the poll operation,
+ * considered individually, is not wait-free. One thief cannot
+ * successfully continue until another in-progress one (or, if
+ * previously empty, a push) completes. However, in the
+ * aggregate, we ensure at least probabilistic non-blockingness.
+ * If an attempted steal fails, a thief always chooses a different
+ * random victim target to try next. So, in order for one thief to
+ * progress, it suffices for any in-progress poll or new push on
+ * any empty queue to complete. (This is why we normally use
+ * method pollAt and its variants that try once at the apparent
+ * base index, else consider alternative actions, rather than
+ * method poll.)
+ *
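As a minimal sketch of the slot-CAS idea described above (toy names, no Unsafe, no padding, no resizing, and without the full ordering guarantees of the real WorkQueue), both ends of such a deque claim an element by CASing its slot from non-null to null:

    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Toy slot-CAS deque: both pop and poll "take" by nulling the slot.
    final class SlotCasDeque<T> {
        final AtomicReferenceArray<T> slots = new AtomicReferenceArray<>(64);
        volatile int base;   // next index to poll (steal)
        int top;             // next index to push; owner thread only

        void push(T t) { slots.set(top & 63, t); top++; }

        T pop() {            // owner end (LIFO)
            int s = top - 1;
            T t = slots.get(s & 63);
            if (t != null && slots.compareAndSet(s & 63, t, null)) {
                top = s;
                return t;
            }
            return null;
        }

        T poll() {           // thief end (FIFO)
            int b = base;
            T t = slots.get(b & 63);
            if (t != null && base == b && slots.compareAndSet(b & 63, t, null)) {
                base = b + 1;
                return t;
            }
            return null;
        }
    }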
+ * This approach also enables support of a user mode in which local
+ * task processing is in FIFO, not LIFO order, simply by using
+ * poll rather than pop. This can be useful in message-passing
+ * frameworks in which tasks are never joined. However, neither
+ * mode considers affinities, loads, cache localities, etc., so
+ * they rarely provide the best possible performance on a given
+ * machine, but portably provide good throughput by averaging over
+ * these factors. (Further, even if we did try to use such
+ * information, we do not usually have a basis for exploiting it.
+ * For example, some sets of tasks profit from cache affinities,
+ * but others are harmed by cache pollution effects.)
+ *
+ * WorkQueues are also used in a similar way for tasks submitted
+ * to the pool. We cannot mix these tasks in the same queues used
+ * for work-stealing (this would contaminate lifo/fifo
+ * processing). Instead, we loosely associate submission queues
+ * with submitting threads, using a form of hashing. The
+ * ThreadLocal Submitter class contains a value initially used as
+ * a hash code for choosing existing queues, but may be randomly
+ * repositioned upon contention with other submitters. In
+ * essence, submitters act like workers except that they never
+ * take tasks, and they are multiplexed on to a finite number of
+ * shared work queues. However, classes are set up so that future
+ * extensions could allow submitters to optionally help perform
+ * tasks as well. Insertion of tasks in shared mode requires a
+ * lock (mainly to protect in the case of resizing) but we use
+ * only a simple spinlock (using bits in field runState), because
+ * submitters encountering a busy queue move on to try or create
+ * other queues -- they block only when creating and registering
+ * new queues.
+ *
+ * Management
+ * ==========
+ *
+ * The main throughput advantages of work-stealing stem from
+ * decentralized control -- workers mostly take tasks from
+ * themselves or each other. We cannot negate this in the
+ * implementation of other management responsibilities. The main
+ * tactic for avoiding bottlenecks is packing nearly all
+ * essentially atomic control state into two volatile variables
+ * that are by far most often read (not written) as status and
+ * consistency checks.
+ *
+ * Field "ctl" contains 64 bits holding all the information needed
+ * to atomically decide to add, inactivate, enqueue (on an event
+ * queue), dequeue, and/or re-activate workers. To enable this
+ * packing, we restrict maximum parallelism to (1<<15)-1 (which is
+ * far in excess of normal operating range) to allow ids, counts,
+ * and their negations (used for thresholding) to fit into 16bit
+ * fields.
+ *
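As a minimal sketch of this kind of 16-bit field packing (the layout and field names below are hypothetical, not the actual ctl encoding):

    // Hypothetical packing of four signed 16-bit counts into one long;
    // illustrates the technique only, not the real ctl layout.
    static long pack(int ac, int tc, int id, int ec) {
        return ((long)(ac & 0xffff) << 48) | ((long)(tc & 0xffff) << 32) |
               ((long)(id & 0xffff) << 16) |  (long)(ec & 0xffff);
    }
    static int activeCount(long c) { return (short)(c >>> 48); } // sign-extended
    static int eventCount(long c)  { return (short) c; }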
+ * Field "runState" contains 32 bits needed to register and
+ * deregister WorkQueues, as well as to enable shutdown. It is
+ * only modified under a lock (normally briefly held, but
+ * occasionally protecting allocations and resizings) but even
+ * when locked remains available to check consistency.
+ *
+ * Recording WorkQueues. WorkQueues are recorded in the
+ * "workQueues" array that is created upon pool construction and
+ * expanded if necessary. Updates to the array while recording
+ * new workers and unrecording terminated ones are protected from
+ * each other by a lock but the array is otherwise concurrently
+ * readable, and accessed directly. To simplify index-based
+ * operations, the array size is always a power of two, and all
+ * readers must tolerate null slots. Shared (submission) queues
+ * are at even indices, worker queues at odd indices. Grouping
+ * them together in this way simplifies and speeds up task
+ * scanning.
+ *
+ * All worker thread creation is on-demand, triggered by task
+ * submissions, replacement of terminated workers, and/or
+ * compensation for blocked workers. However, all other support
+ * code is set up to work with other policies. To ensure that we
+ * do not hold on to worker references that would prevent GC, ALL
+ * accesses to workQueues are via indices into the workQueues
+ * array (which is one source of some of the messy code
+ * constructions here). In essence, the workQueues array serves as
+ * a weak reference mechanism. Thus for example the wait queue
+ * field of ctl stores indices, not references. Access to the
+ * workQueues in associated methods (for example signalWork) must
+ * both index-check and null-check the IDs. All such accesses
+ * ignore bad IDs by returning out early from what they are doing,
+ * since this can only be associated with termination, in which
+ * case it is OK to give up. All uses of the workQueues array
+ * also check that it is non-null (even if previously
+ * non-null). This allows nulling during termination, which is
+ * currently not necessary, but remains an option for
+ * resource-revocation-based shutdown schemes. It also helps
+ * reduce JIT issuance of uncommon-trap code, which tends to
+ * unnecessarily complicate control flow in some methods.
+ *
+ * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
+ * let workers spin indefinitely scanning for tasks when none can
+ * be found immediately, and we cannot start/resume workers unless
+ * there appear to be tasks available. On the other hand, we must
+ * quickly prod them into action when new tasks are submitted or
+ * generated. In many usages, ramp-up time to activate workers is
+ * the main limiting factor in overall performance (this is
+ * compounded at program start-up by JIT compilation and
+ * allocation). So we try to streamline this as much as possible.
+ * We park/unpark workers after placing in an event wait queue
+ * when they cannot find work. This "queue" is actually a simple
+ * Treiber stack, headed by the "id" field of ctl, plus a 15bit
+ * counter value (that reflects the number of times a worker has
+ * been inactivated) to avoid ABA effects (we need only as many
+ * version numbers as worker threads). Successors are held in
+ * field WorkQueue.nextWait. Queuing deals with several intrinsic
+ * races, mainly that a task-producing thread can miss seeing (and
+ * signalling) another thread that gave up looking for work but
+ * has not yet entered the wait queue. We solve this by requiring
+ * a full sweep of all workers (via repeated calls to method
+ * scan()) both before and after a newly waiting worker is added
+ * to the wait queue. During a rescan, the worker might release
+ * some other queued worker rather than itself, which has the same
+ * net effect. Because enqueued workers may actually be rescanning
+ * rather than waiting, we set and clear the "parker" field of
+ * WorkQueues to reduce unnecessary calls to unpark. (This
+ * requires a secondary recheck to avoid missed signals.) Note
+ * the unusual conventions about Thread.interrupts surrounding
+ * parking and other blocking: Because interrupts are used solely
+ * to alert threads to check termination, which is checked anyway
+ * upon blocking, we clear status (using Thread.interrupted)
+ * before any call to park, so that park does not immediately
+ * return due to status being set via some other unrelated call to
+ * interrupt in user code.
+ *
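In its plain reference-based form, a Treiber stack looks like the sketch below; the pool instead threads an index plus a 15-bit counter through ctl, and the "nodes" are the WorkQueues themselves:

    import java.util.concurrent.atomic.AtomicReference;

    // Minimal Treiber stack; illustrative only.
    final class TreiberStack<E> {
        static final class Node<E> {
            final E item; Node<E> next;
            Node(E e) { item = e; }
        }
        final AtomicReference<Node<E>> head = new AtomicReference<>();

        void push(E e) {
            Node<E> n = new Node<>(e);
            do { n.next = head.get(); } while (!head.compareAndSet(n.next, n));
        }
        E pop() {
            Node<E> h;
            do {
                if ((h = head.get()) == null) return null;
            } while (!head.compareAndSet(h, h.next));
            return h.item;
        }
    }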
+ * Signalling. We create or wake up workers only when there
+ * appears to be at least one task they might be able to find and
+ * execute. When a submission is added or another worker adds a
+ * task to a queue that previously had fewer than two tasks, they
+ * signal waiting workers (or trigger creation of new ones if
+ * fewer than the given parallelism level -- see signalWork).
+ * These primary signals are buttressed by signals during rescans;
+ * together these cover the signals needed in cases when more
+ * tasks are pushed but untaken, and improve performance compared
+ * to having one thread wake up all workers.
+ *
+ * Trimming workers. To release resources after periods of lack of
+ * use, a worker starting to wait when the pool is quiescent will
+ * time out and terminate if the pool has remained quiescent for
+ * SHRINK_RATE nanosecs. This will slowly propagate, eventually
+ * terminating all workers after long periods of non-use.
+ *
+ * Shutdown and Termination. A call to shutdownNow atomically sets
+ * a runState bit and then (non-atomically) sets each worker's
+ * runState status, cancels all unprocessed tasks, and wakes up
+ * all waiting workers. Detecting whether termination should
+ * commence after a non-abrupt shutdown() call requires more work
+ * and bookkeeping. We need consensus about quiescence (i.e., that
+ * there is no more work). The active count provides a primary
+ * indication but non-abrupt shutdown still requires a rechecking
+ * scan for any workers that are inactive but not queued.
+ *
+ * Joining Tasks
+ * =============
+ *
+ * Any of several actions may be taken when one worker is waiting
+ * to join a task stolen (or always held) by another. Because we
+ * are multiplexing many tasks on to a pool of workers, we can't
+ * just let them block (as in Thread.join). We also cannot just
+ * reassign the joiner's run-time stack with another and replace
+ * it later, which would be a form of "continuation", that even if
+ * possible is not necessarily a good idea since we sometimes need
+ * both an unblocked task and its continuation to progress.
+ * Instead we combine two tactics:
+ *
+ * Helping: Arranging for the joiner to execute some task that it
+ * would be running if the steal had not occurred.
+ *
+ * Compensating: Unless there are already enough live threads,
+ * method tryCompensate() may create or re-activate a spare
+ * thread to compensate for blocked joiners until they unblock.
+ *
+ * A third form (implemented in tryRemoveAndExec and
+ * tryPollForAndExec) amounts to helping a hypothetical
+ * compensator: If we can readily tell that a possible action of a
+ * compensator is to steal and execute the task being joined, the
+ * joining thread can do so directly, without the need for a
+ * compensation thread (although at the expense of larger run-time
+ * stacks, but the tradeoff is typically worthwhile).
+ *
+ * The ManagedBlocker extension API can't use helping so relies
+ * only on compensation in method awaitBlocker.
+ *
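A typical ManagedBlocker use wraps an arbitrary blocking call so that awaitBlocker can compensate while the caller waits; a sketch, assuming this version's single-argument managedBlock (queue and the result holder box are placeholders, not part of the pool):

    import java.util.concurrent.BlockingQueue;
    import scala.concurrent.forkjoin.ForkJoinPool;

    // Sketch: block through ManagedBlocker so the pool may activate a
    // compensating worker while this thread waits on an unrelated queue.
    static <T> T takeManaged(final BlockingQueue<T> queue) throws InterruptedException {
        final Object[] box = new Object[1];
        ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() {
            public boolean block() throws InterruptedException {
                if (box[0] == null) box[0] = queue.take();
                return true;
            }
            public boolean isReleasable() {
                return box[0] != null || (box[0] = queue.poll()) != null;
            }
        });
        @SuppressWarnings("unchecked") T t = (T) box[0];
        return t;
    }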
+ * The algorithm in tryHelpStealer entails a form of "linear"
+ * helping: Each worker records (in field currentSteal) the most
+ * recent task it stole from some other worker. Plus, it records
+ * (in field currentJoin) the task it is currently actively
+ * joining. Method tryHelpStealer uses these markers to try to
+ * find a worker to help (i.e., steal back a task from and execute
+ * it) that could hasten completion of the actively joined task.
+ * In essence, the joiner executes a task that would be on its own
+ * local deque had the to-be-joined task not been stolen. This may
+ * be seen as a conservative variant of the approach in Wagner &
+ * Calder "Leapfrogging: a portable technique for implementing
+ * efficient futures" SIGPLAN Notices, 1993
+ * (http://portal.acm.org/citation.cfm?id=155354). It differs in
+ * that: (1) We only maintain dependency links across workers upon
+ * steals, rather than use per-task bookkeeping. This sometimes
+ * requires a linear scan of workQueues array to locate stealers,
+ * but often doesn't because stealers leave hints (that may become
+ * stale/wrong) of where to locate them. A stealHint is only a
+ * hint because a worker might have had multiple steals and the
+ * hint records only one of them (usually the most current).
+ * Hinting isolates cost to when it is needed, rather than adding
+ * to per-task overhead. (2) It is "shallow", ignoring nesting
+ * and potentially cyclic mutual steals. (3) It is intentionally
+ * racy: field currentJoin is updated only while actively joining,
+ * which means that we miss links in the chain during long-lived
+ * tasks, GC stalls etc (which is OK since blocking in such cases
+ * is usually a good idea). (4) We bound the number of attempts
+ * to find work (see MAX_HELP) and fall back to suspending the
+ * worker and if necessary replacing it with another.
+ *
+ * It is impossible to keep exactly the target parallelism number
+ * of threads running at any given time. Determining the
+ * existence of conservatively safe helping targets, the
+ * availability of already-created spares, and the apparent need
+ * to create new spares are all racy, so we rely on multiple
+ * retries of each. Compensation in the apparent absence of
+ * helping opportunities is challenging to control on JVMs, where
+ * GC and other activities can stall progress of tasks that in
+ * turn stall out many other dependent tasks, without us being
+ * able to determine whether they will ever require compensation.
+ * Even though work-stealing otherwise encounters little
+ * degradation in the presence of more threads than cores,
+ * aggressively adding new threads in such cases entails risk of
+ * unwanted positive feedback control loops in which more threads
+ * cause more dependent stalls (as well as delayed progress of
+ * unblocked threads to the point that we know they are available)
+ * leading to more situations requiring more threads, and so
+ * on. This aspect of control can be seen as an (analytically
+ * intractable) game with an opponent that may choose the worst
+ * (for us) active thread to stall at any time. We take several
+ * precautions to bound losses (and thus bound gains), mainly in
+ * methods tryCompensate and awaitJoin: (1) We only try
+ * compensation after attempting enough helping steps (measured
+ * via counting and timing) that we have already consumed the
+ * estimated cost of creating and activating a new thread. (2) We
+ * allow up to 50% of threads to be blocked before initially
+ * adding any others, and unless completely saturated, check that
+ * some work is available for a new worker before adding. Also, we
+ * create up to only 50% more threads until entering a mode that
+ * only adds a thread if all others are possibly blocked. All
+ * together, this means that we might be half as fast to react,
+ * and create half as many threads as possible in the ideal case,
+ * but present vastly fewer anomalies in all other cases compared
+ * to both more aggressive and more conservative alternatives.
+ *
+ * Style notes: There is a lot of representation-level coupling
+ * among classes ForkJoinPool, ForkJoinWorkerThread, and
+ * ForkJoinTask. The fields of WorkQueue maintain data structures
+ * managed by ForkJoinPool, so are directly accessed. There is
+ * little point trying to reduce this, since any associated future
+ * changes in representations will need to be accompanied by
+ * algorithmic changes anyway. Several methods intrinsically
+ * sprawl because they must accumulate sets of consistent reads of
+ * volatiles held in local variables. Methods signalWork() and
+ * scan() are the main bottlenecks, so are especially heavily
+ * micro-optimized/mangled. There are lots of inline assignments
+ * (of form "while ((local = field) != 0)") which are usually the
+ * simplest way to ensure the required read orderings (which are
+ * sometimes critical). This leads to a "C"-like style of listing
+ * declarations of these locals at the heads of methods or blocks.
+ * There are several occurrences of the unusual "do {} while
+ * (!cas...)" which is the simplest way to force an update of a
+ * CAS'ed variable. There are also other coding oddities that help
+ * some methods perform reasonably even when interpreted (not
+ * compiled).
+ *
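The empty-body retry works because the fresh read happens inside the CAS argument list itself; a minimal sketch with AtomicLong standing in for the Unsafe-accessed field:

    import java.util.concurrent.atomic.AtomicLong;

    // "do {} while (!cas...)": the re-read (c = ctl.get()) is evaluated
    // as part of the CAS arguments, so the loop body can stay empty.
    static final AtomicLong ctl = new AtomicLong();
    static void add(long unit) {
        long c;
        do {} while (!ctl.compareAndSet(c = ctl.get(), c + unit));
    }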
+ * The order of declarations in this file is:
+ * (1) Static utility functions
+ * (2) Nested (static) classes
+ * (3) Static fields
+ * (4) Fields, along with constants used when unpacking some of them
+ * (5) Internal control methods
+ * (6) Callbacks and other support for ForkJoinTask methods
+ * (7) Exported methods
+ * (8) Static block initializing statics in minimally dependent order
*/
- /** Mask for packing and unpacking shorts */
- private static final int shortMask = 0xffff;
-
- /** Max pool size -- must be a power of two minus 1 */
- private static final int MAX_THREADS = 0x7FFF;
+ // Static utilities
- // placeholder for java.util.concurrent.RunnableFuture
- interface RunnableFuture<T> extends Runnable {
+ /**
+ * If there is a security manager, makes sure caller has
+ * permission to modify threads.
+ */
+ private static void checkPermission() {
+ SecurityManager security = System.getSecurityManager();
+ if (security != null)
+ security.checkPermission(modifyThreadPermission);
}
+ // Nested classes
+
/**
- * Factory for creating new ForkJoinWorkerThreads. A
- * ForkJoinWorkerThreadFactory must be defined and used for
- * ForkJoinWorkerThread subclasses that extend base functionality
- * or initialize threads with different contexts.
+ * Factory for creating new {@link ForkJoinWorkerThread}s.
+ * A {@code ForkJoinWorkerThreadFactory} must be defined and used
+ * for {@code ForkJoinWorkerThread} subclasses that extend base
+ * functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
- * @throws NullPointerException if pool is null;
+ * @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
/**
- * Default ForkJoinWorkerThreadFactory implementation, creates a
+ * Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
- static class DefaultForkJoinWorkerThreadFactory
+ static class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
- try {
- return new ForkJoinWorkerThread(pool);
- } catch (OutOfMemoryError oom) {
- return null;
- }
+ return new ForkJoinWorkerThread(pool);
}
}
/**
- * Creates a new ForkJoinWorkerThread. This factory is used unless
- * overridden in ForkJoinPool constructors.
+ * A simple non-reentrant lock used for exclusion when managing
+ * queues and workers. We use a custom lock so that we can readily
+ * probe lock state in constructions that check among alternative
+ * actions. The lock is normally only very briefly held, and
+ * sometimes treated as a spinlock, but other usages block to
+ * reduce overall contention in those cases where locked code
+ * bodies perform allocation/resizing.
*/
- public static final ForkJoinWorkerThreadFactory
- defaultForkJoinWorkerThreadFactory =
- new DefaultForkJoinWorkerThreadFactory();
-
- /**
- * Permission required for callers of methods that may start or
- * kill threads.
- */
- private static final RuntimePermission modifyThreadPermission =
- new RuntimePermission("modifyThread");
+ static final class Mutex extends AbstractQueuedSynchronizer {
+ public final boolean tryAcquire(int ignore) {
+ return compareAndSetState(0, 1);
+ }
+ public final boolean tryRelease(int ignore) {
+ setState(0);
+ return true;
+ }
+ public final void lock() { acquire(0); }
+ public final void unlock() { release(0); }
+ public final boolean isHeldExclusively() { return getState() == 1; }
+ public final Condition newCondition() { return new ConditionObject(); }
+ }
/**
- * If there is a security manager, makes sure caller has
- * permission to modify threads.
+ * Class for artificial tasks that are used to replace the target
+ * of local joins if they are removed from an interior queue slot
+ * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
+ * actually do anything beyond having a unique identity.
*/
- private static void checkPermission() {
- SecurityManager security = System.getSecurityManager();
- if (security != null)
- security.checkPermission(modifyThreadPermission);
+ static final class EmptyTask extends ForkJoinTask<Void> {
+ EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void x) {}
+ public final boolean exec() { return true; }
}
/**
- * Generator for assigning sequence numbers as pool names.
- */
- private static final AtomicInteger poolNumberGenerator =
- new AtomicInteger();
+ * Queues supporting work-stealing as well as external task
+ * submission. See above for main rationale and algorithms.
+ * Implementation relies heavily on "Unsafe" intrinsics
+ * and selective use of "volatile":
+ *
+ * Field "base" is the index (mod array.length) of the least valid
+ * queue slot, which is always the next position to steal (poll)
+ * from if nonempty. Reads and writes require volatile orderings
+ * but not CAS, because updates are only performed after slot
+ * CASes.
+ *
+ * Field "top" is the index (mod array.length) of the next queue
+ * slot to push to or pop from. It is written only by owner thread
+ * for push, or under lock for trySharedPush, and accessed by
+ * other threads only after reading (volatile) base. Both top and
+ * base are allowed to wrap around on overflow, but (top - base)
+ * (or more commonly -(base - top) to force volatile read of base
+ * before top) still estimates size.
+ *
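Because base and top are plain ints, the estimate tolerates overflow; a small worked example:

    // Wrapped indices still subtract correctly while the true size < 2^31.
    public final class WrapDemo {
        static int sizeEstimate(int base, int top) { return top - base; }
        public static void main(String[] args) {
            int base = Integer.MAX_VALUE - 1;
            int top  = base + 5;                          // wraps to a negative int
            System.out.println(sizeEstimate(base, top));  // prints 5
        }
    }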
+ * The array slots are read and written using the emulation of
+ * volatiles/atomics provided by Unsafe. Insertions must in
+ * general use putOrderedObject as a form of releasing store to
+ * ensure that all writes to the task object are ordered before
+ * its publication in the queue. (Although we can avoid one case
+ * of this when locked in trySharedPush.) All removals entail a
+ * CAS to null. The array is always a power of two. To ensure
+ * safety of Unsafe array operations, all accesses perform
+ * explicit null checks and implicit bounds checks via
+ * power-of-two masking.
+ *
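The recurring offset expression ((i & mask) << ASHIFT) + ABASE is just the byte address of slot i & mask, using the ABASE and ASHIFT values computed in the static initializer below; with ordinary array indexing (losing only the volatile access semantics) it reduces to:

    // Equivalent addressing without Unsafe: ABASE is the byte offset of
    // a[0] and ASHIFT is log2(bytes per element), so
    //   U.getObjectVolatile(a, ((i & mask) << ASHIFT) + ABASE)
    // reads the same slot as this plain access, minus the volatile guarantee.
    static <T> T slot(T[] a, int i) {
        return a[i & (a.length - 1)];   // requires power-of-two length
    }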
+ * In addition to basic queuing support, this class contains
+ * fields described elsewhere to control execution. It turns out
+ * to work better memory-layout-wise to include them in this
+ * class rather than a separate class.
+ *
+ * Performance on most platforms is very sensitive to placement of
+ * instances of both WorkQueues and their arrays -- we absolutely
+ * do not want multiple WorkQueue instances or multiple queue
+ * arrays sharing cache lines. (It would be best for queue objects
+ * and their arrays to share, but there is nothing available to
+ * help arrange that). Unfortunately, because they are recorded
+ * in a common array, WorkQueue instances are often moved to be
+ * adjacent by garbage collectors. To reduce impact, we use field
+ * padding that works OK on common platforms; this effectively
+ * trades off slightly slower average field access for the sake of
+ * avoiding really bad worst-case access. (Until better JVM
+ * support is in place, this padding is dependent on transient
+ * properties of JVM field layout rules.) We also take care in
+ * allocating, sizing and resizing the array. Non-shared queue
+ * arrays are initialized (via method growArray) by workers before
+ * use. Others are allocated on first use.
+ */
+ static final class WorkQueue {
+ /**
+ * Capacity of work-stealing queue array upon initialization.
+ * Must be a power of two; at least 4, but should be larger to
+ * reduce or eliminate cacheline sharing among queues.
+ * Currently, it is much larger, as a partial workaround for
+ * the fact that JVMs often place arrays in locations that
+ * share GC bookkeeping (especially cardmarks) such that
+ * per-write accesses encounter serious memory contention.
+ */
+ static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
- /**
- * Array holding all worker threads in the pool. Initialized upon
- * first use. Array size must be a power of two. Updates and
- * replacements are protected by workerLock, but it is always kept
- * in a consistent enough state to be randomly accessed without
- * locking by workers performing work-stealing.
- */
- public volatile ForkJoinWorkerThread[] workers;
+ /**
+ * Maximum size for queue arrays. Must be a power of two less
+ * than or equal to 1 << (31 - width of array entry) to ensure
+ * lack of wraparound of index calculations, but defined to a
+ * value a bit less than this to help users trap runaway
+ * programs before saturating systems.
+ */
+ static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
+
+ volatile long totalSteals; // cumulative number of steals
+ int seed; // for random scanning; initialize nonzero
+ volatile int eventCount; // encoded inactivation count; < 0 if inactive
+ int nextWait; // encoded record of next event waiter
+ int rescans; // remaining scans until block
+ int nsteals; // top-level task executions since last idle
+ final int mode; // lifo, fifo, or shared
+ int poolIndex; // index of this queue in pool (or 0)
+ int stealHint; // index of most recent known stealer
+ volatile int runState; // 1: locked, -1: terminate; else 0
+ volatile int base; // index of next slot for poll
+ int top; // index of next slot for push
+ ForkJoinTask<?>[] array; // the elements (initially unallocated)
+ final ForkJoinPool pool; // the containing pool (may be null)
+ final ForkJoinWorkerThread owner; // owning thread or null if shared
+ volatile Thread parker; // == owner during call to park; else null
+ ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
+ ForkJoinTask<?> currentSteal; // current non-local task being executed
+ // Heuristic padding to ameliorate unfortunate memory placements
+ Object p00, p01, p02, p03, p04, p05, p06, p07;
+ Object p08, p09, p0a, p0b, p0c, p0d, p0e;
+
+ WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) {
+ this.mode = mode;
+ this.pool = pool;
+ this.owner = owner;
+ // Place indices in the center of array (that is not yet allocated)
+ base = top = INITIAL_QUEUE_CAPACITY >>> 1;
+ }
- /**
- * Lock protecting access to workers.
- */
- private final ReentrantLock workerLock;
+ /**
+ * Returns the approximate number of tasks in the queue.
+ */
+ final int queueSize() {
+ int n = base - top; // non-owner callers must read base first
+ return (n >= 0) ? 0 : -n; // ignore transient negative
+ }
- /**
- * Condition for awaitTermination.
- */
- private final Condition termination;
+ /**
+ * Provides a more accurate estimate of whether this queue has
+ * any tasks than does queueSize, by checking whether a
+ * near-empty queue has at least one unclaimed task.
+ */
+ final boolean isEmpty() {
+ ForkJoinTask<?>[] a; int m, s;
+ int n = base - (s = top);
+ return (n >= 0 ||
+ (n == -1 &&
+ ((a = array) == null ||
+ (m = a.length - 1) < 0 ||
+ U.getObjectVolatile
+ (a, ((m & (s - 1)) << ASHIFT) + ABASE) == null)));
+ }
+
+ /**
+ * Pushes a task. Call only by owner in unshared queues.
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @throws RejectedExecutionException if array cannot be resized
+ */
+ final void push(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; ForkJoinPool p;
+ int s = top, m, n;
+ if ((a = array) != null) { // ignore if queue removed
+ U.putOrderedObject
+ (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task);
+ if ((n = (top = s + 1) - base) <= 2) {
+ if ((p = pool) != null)
+ p.signalWork();
+ }
+ else if (n >= m)
+ growArray(true);
+ }
+ }
+
+ /**
+ * Pushes a task if lock is free and array is either big
+ * enough or can be resized to be big enough.
+ *
+ * @param task the task. Caller must ensure non-null.
+ * @return true if submitted
+ */
+ final boolean trySharedPush(ForkJoinTask<?> task) {
+ boolean submitted = false;
+ if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) {
+ ForkJoinTask<?>[] a = array;
+ int s = top;
+ try {
+ if ((a != null && a.length > s + 1 - base) ||
+ (a = growArray(false)) != null) { // must presize
+ int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
+ U.putObject(a, (long)j, task); // don't need "ordered"
+ top = s + 1;
+ submitted = true;
+ }
+ } finally {
+ runState = 0; // unlock
+ }
+ }
+ return submitted;
+ }
+
+ /**
+ * Takes next task, if one exists, in LIFO order. Call only
+ * by owner in unshared queues. (We do not have a shared
+ * version of this method because it is never needed.)
+ */
+ final ForkJoinTask<?> pop() {
+ ForkJoinTask<?> t; int m;
+ ForkJoinTask<?>[] a = array;
+ if (a != null && (m = a.length - 1) >= 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ int j = ((m & s) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) == null)
+ break;
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ return t;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes a task in FIFO order if b is base of queue and a task
+ * can be claimed without contention. Specialized versions
+ * appear in ForkJoinPool methods scan and tryHelpStealer.
+ */
+ final ForkJoinTask<?> pollAt(int b) {
+ ForkJoinTask<?> t; ForkJoinTask<?>[] a;
+ if ((a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
+ base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in FIFO order.
+ */
+ final ForkJoinTask<?> poll() {
+ ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
+ while ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ return t;
+ }
+ }
+ else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Takes next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> nextLocalTask() {
+ return mode == 0 ? pop() : poll();
+ }
+
+ /**
+ * Returns next task, if one exists, in order specified by mode.
+ */
+ final ForkJoinTask<?> peek() {
+ ForkJoinTask<?>[] a = array; int m;
+ if (a == null || (m = a.length - 1) < 0)
+ return null;
+ int i = mode == 0 ? top - 1 : base;
+ int j = ((i & m) << ASHIFT) + ABASE;
+ return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ }
+
+ /**
+ * Pops the given task only if it is at the current top.
+ */
+ final boolean tryUnpush(ForkJoinTask<?> t) {
+ ForkJoinTask<?>[] a; int s;
+ if ((a = array) != null && (s = top) != base &&
+ U.compareAndSwapObject
+ (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+ top = s;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Polls the given task only if it is at the current base.
+ */
+ final boolean pollFor(ForkJoinTask<?> task) {
+ ForkJoinTask<?>[] a; int b;
+ if ((b = base) - top < 0 && (a = array) != null) {
+ int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ if (U.getObjectVolatile(a, j) == task && base == b &&
+ U.compareAndSwapObject(a, j, task, null)) {
+ base = b + 1;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * If present, removes from queue and executes the given task, or
+ * any other cancelled task. Returns (true) immediately on any CAS
+ * or consistency check failure so caller can retry.
+ *
+ * @return false if no progress can be made
+ */
+ final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
+ boolean removed = false, empty = true, progress = true;
+ ForkJoinTask<?>[] a; int m, s, b, n;
+ if ((a = array) != null && (m = a.length - 1) >= 0 &&
+ (n = (s = top) - (b = base)) > 0) {
+ for (ForkJoinTask<?> t;;) { // traverse from s to b
+ int j = ((--s & m) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t == null) // inconsistent length
+ break;
+ else if (t == task) {
+ if (s + 1 == top) { // pop
+ if (!U.compareAndSwapObject(a, j, task, null))
+ break;
+ top = s;
+ removed = true;
+ }
+ else if (base == b) // replace with proxy
+ removed = U.compareAndSwapObject(a, j, task,
+ new EmptyTask());
+ break;
+ }
+ else if (t.status >= 0)
+ empty = false;
+ else if (s + 1 == top) { // pop and throw away
+ if (U.compareAndSwapObject(a, j, t, null))
+ top = s;
+ break;
+ }
+ if (--n == 0) {
+ if (!empty && base == b)
+ progress = false;
+ break;
+ }
+ }
+ }
+ if (removed)
+ task.doExec();
+ return progress;
+ }
+
+ /**
+ * Initializes or doubles the capacity of array. Call either
+ * by owner or with lock held -- it is OK for base, but not
+ * top, to move while resizings are in progress.
+ *
+ * @param rejectOnFailure if true, throw exception if capacity
+ * exceeded (relayed ultimately to user); else return null.
+ */
+ final ForkJoinTask<?>[] growArray(boolean rejectOnFailure) {
+ ForkJoinTask<?>[] oldA = array;
+ int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
+ if (size <= MAXIMUM_QUEUE_CAPACITY) {
+ int oldMask, t, b;
+ ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
+ if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
+ (t = top) - (b = base) > 0) {
+ int mask = size - 1;
+ do {
+ ForkJoinTask<?> x;
+ int oldj = ((b & oldMask) << ASHIFT) + ABASE;
+ int j = ((b & mask) << ASHIFT) + ABASE;
+ x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
+ if (x != null &&
+ U.compareAndSwapObject(oldA, oldj, x, null))
+ U.putObjectVolatile(a, j, x);
+ } while (++b != t);
+ }
+ return a;
+ }
+ else if (!rejectOnFailure)
+ return null;
+ else
+ throw new RejectedExecutionException("Queue capacity exceeded");
+ }
+
+ /**
+ * Removes and cancels all known tasks, ignoring any exceptions.
+ */
+ final void cancelAll() {
+ ForkJoinTask.cancelIgnoringExceptions(currentJoin);
+ ForkJoinTask.cancelIgnoringExceptions(currentSteal);
+ for (ForkJoinTask<?> t; (t = poll()) != null; )
+ ForkJoinTask.cancelIgnoringExceptions(t);
+ }
+
+ /**
+ * Computes next value for random probes. Scans don't require
+ * a very high quality generator, but also not a crummy one.
+ * Marsaglia xor-shift is cheap and works well enough. Note:
+ * This is manually inlined in its usages in ForkJoinPool to
+ * avoid writes inside busy scan loops.
+ */
+ final int nextSeed() {
+ int r = seed;
+ r ^= r << 13;
+ r ^= r >>> 17;
+ return seed = r ^= r << 5;
+ }
+
+ // Execution methods
+
+ /**
+ * Removes and runs tasks until empty, using local mode
+ * ordering. Normally called only after checking for apparent
+ * non-emptiness.
+ */
+ final void runLocalTasks() {
+ // hoist checks from repeated pop/poll
+ ForkJoinTask<?>[] a; int m;
+ if ((a = array) != null && (m = a.length - 1) >= 0) {
+ if (mode == 0) {
+ for (int s; (s = top - 1) - base >= 0;) {
+ int j = ((m & s) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (U.compareAndSwapObject(a, j, t, null)) {
+ top = s;
+ t.doExec();
+ }
+ }
+ else
+ break;
+ }
+ }
+ else {
+ for (int b; (b = base) - top < 0;) {
+ int j = ((m & b) << ASHIFT) + ABASE;
+ ForkJoinTask<?> t =
+ (ForkJoinTask<?>)U.getObjectVolatile(a, j);
+ if (t != null) {
+ if (base == b &&
+ U.compareAndSwapObject(a, j, t, null)) {
+ base = b + 1;
+ t.doExec();
+ }
+ } else if (base == b) {
+ if (b + 1 == top)
+ break;
+ Thread.yield(); // wait for lagging update
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Executes a top-level task and any local tasks remaining
+ * after execution.
+ *
+ * @return true unless terminating
+ */
+ final boolean runTask(ForkJoinTask<?> t) {
+ boolean alive = true;
+ if (t != null) {
+ currentSteal = t;
+ t.doExec();
+ if (top != base) // conservative guard
+ runLocalTasks();
+ ++nsteals;
+ currentSteal = null;
+ }
+ else if (runState < 0) // terminating
+ alive = false;
+ return alive;
+ }
+
+ /**
+ * Executes a non-top-level (stolen) task.
+ */
+ final void runSubtask(ForkJoinTask<?> t) {
+ if (t != null) {
+ ForkJoinTask<?> ps = currentSteal;
+ currentSteal = t;
+ t.doExec();
+ currentSteal = ps;
+ }
+ }
+
+ /**
+ * Returns true if owned and not known to be blocked.
+ */
+ final boolean isApparentlyUnblocked() {
+ Thread wt; Thread.State s;
+ return (eventCount >= 0 &&
+ (wt = owner) != null &&
+ (s = wt.getState()) != Thread.State.BLOCKED &&
+ s != Thread.State.WAITING &&
+ s != Thread.State.TIMED_WAITING);
+ }
+
+ /**
+ * If this is owned and is not already interrupted, try to
+ * interrupt and/or unpark, ignoring exceptions.
+ */
+ final void interruptOwner() {
+ Thread wt, p;
+ if ((wt = owner) != null && !wt.isInterrupted()) {
+ try {
+ wt.interrupt();
+ } catch (SecurityException ignore) {
+ }
+ }
+ if ((p = parker) != null)
+ U.unpark(p);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long RUNSTATE;
+ private static final int ABASE;
+ private static final int ASHIFT;
+ static {
+ int s;
+ try {
+ U = getUnsafe();
+ Class<?> k = WorkQueue.class;
+ Class<?> ak = ForkJoinTask[].class;
+ RUNSTATE = U.objectFieldOffset
+ (k.getDeclaredField("runState"));
+ ABASE = U.arrayBaseOffset(ak);
+ s = U.arrayIndexScale(ak);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ if ((s & (s-1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+ }
+ }
/**
- * The uncaught exception handler used when any worker
- * abrupty terminates
- */
- private Thread.UncaughtExceptionHandler ueh;
+ * Per-thread records for threads that submit to pools. Currently
+ * holds only pseudo-random seed / index that is used to choose
+ * submission queues in method doSubmit. In the future, this may
+ * also incorporate a means to implement different task rejection
+ * and resubmission policies.
+ *
+ * Seeds for submitters and workers/workQueues work in basically
+ * the same way but are initialized and updated using slightly
+ * different mechanics. Both are initialized using the same
+ * approach as in class ThreadLocal, where successive values are
+ * unlikely to collide with previous values. This is done during
+ * registration for workers, but requires a separate AtomicInteger
+ * for submitters. Seeds are then randomly modified upon
+ * collisions using xorshifts, which requires a non-zero seed.
+ */
+ static final class Submitter {
+ int seed;
+ Submitter() {
+ int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT);
+ seed = (s == 0) ? 1 : s; // ensure non-zero
+ }
+ }
+
+ /** ThreadLocal class for Submitters */
+ static final class ThreadSubmitter extends ThreadLocal<Submitter> {
+ public Submitter initialValue() { return new Submitter(); }
+ }
+
+ // static fields (initialized in static initializer below)
/**
- * Creation factory for worker threads.
+ * Creates a new ForkJoinWorkerThread. This factory is used unless
+ * overridden in ForkJoinPool constructors.
*/
- private final ForkJoinWorkerThreadFactory factory;
+ public static final ForkJoinWorkerThreadFactory
+ defaultForkJoinWorkerThreadFactory;
/**
- * Head of stack of threads that were created to maintain
- * parallelism when other threads blocked, but have since
- * suspended when the parallelism level rose.
+ * Generator for assigning sequence numbers as pool names.
*/
- private volatile WaitQueueNode spareStack;
+ private static final AtomicInteger poolNumberGenerator;
/**
- * Sum of per-thread steal counts, updated only when threads are
- * idle or terminating.
+ * Generator for initial hashes/seeds for submitters. Accessed by
+ * Submitter class constructor.
*/
- private final AtomicLong stealCount;
+ static final AtomicInteger nextSubmitterSeed;
/**
- * Queue for external submissions.
+ * Permission required for callers of methods that may start or
+ * kill threads.
*/
- private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue;
+ private static final RuntimePermission modifyThreadPermission;
/**
- * Head of Treiber stack for barrier sync. See below for explanation
+ * Per-thread submission bookkeeping. Shared across all pools
+ * to reduce ThreadLocal pollution and because random motion
+ * to avoid contention in one pool is likely to hold for others.
*/
- private volatile WaitQueueNode syncStack;
+ private static final ThreadSubmitter submitters;
+
+ // static constants
/**
- * The count for event barrier
+ * The wakeup interval (in nanoseconds) for a worker waiting for a
+ * task when the pool is quiescent to instead try to shrink the
+ * number of workers. The exact value does not matter too
+ * much. It must be short enough to release resources during
+ * sustained periods of idleness, but not so short that threads
+ * are continually re-created.
*/
- private volatile long eventCount;
+ private static final long SHRINK_RATE =
+ 4L * 1000L * 1000L * 1000L; // 4 seconds
/**
- * Pool number, just for assigning useful names to worker threads
+ * The timeout value for attempted shrinkage, includes
+ * some slop to cope with system timer imprecision.
*/
- private final int poolNumber;
+ private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10);
/**
- * The maximum allowed pool size
+ * The maximum stolen->joining link depth allowed in method
+ * tryHelpStealer. Must be a power of two. This value also
+ * controls the maximum number of times to try to help join a task
+ * without any apparent progress or change in pool state before
+ * giving up and blocking (see awaitJoin). Depths for legitimate
+ * chains are unbounded, but we use a fixed constant to avoid
+ * (otherwise unchecked) cycles and to bound staleness of
+ * traversal parameters at the expense of sometimes blocking when
+ * we could be helping.
*/
- private volatile int maxPoolSize;
+ private static final int MAX_HELP = 32;
/**
- * The desired parallelism level, updated only under workerLock.
+ * Secondary time-based bound (in nanosecs) for helping attempts
+ * before trying compensated blocking in awaitJoin. Used in
+ * conjunction with MAX_HELP to reduce variance due to different
+ * polling rates associated with different helping options. The
+ * value should roughly approximate the time required to create
+ * and/or activate a worker thread.
*/
- private volatile int parallelism;
+ private static final long COMPENSATION_DELAY = 100L * 1000L; // 0.1 millisec
/**
- * True if use local fifo, not default lifo, for local polling
+ * Increment for seed generators. See class ThreadLocal for
+ * explanation.
*/
- private volatile boolean locallyFifo;
+ private static final int SEED_INCREMENT = 0x61c88647;
/**
- * Holds number of total (i.e., created and not yet terminated)
- * and running (i.e., not blocked on joins or other managed sync)
- * threads, packed into one int to ensure consistent snapshot when
- * making decisions about creating and suspending spare
- * threads. Updated only by CAS. Note: CASes in
- * updateRunningCount and preJoin running active count is in low
- * word, so need to be modified if this changes
- */
- private volatile int workerCounts;
+ * Bits and masks for control variables
+ *
+ * Field ctl is a long packed with:
+ * AC: Number of active running workers minus target parallelism (16 bits)
+ * TC: Number of total workers minus target parallelism (16 bits)
+ * ST: true if pool is terminating (1 bit)
+ * EC: the wait count of top waiting thread (15 bits)
+ * ID: poolIndex of top of Treiber stack of waiters (16 bits)
+ *
+ * When convenient, we can extract the upper 32 bits of counts and
+ * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
+ * (int)ctl. The ec field is never accessed alone, but always
+ * together with id and st. The offsets of counts by the target
+ * parallelism and the positioning of fields make it possible to
+ * perform the most common checks via sign tests of fields: When
+ * ac is negative, there are not enough active workers, when tc is
+ * negative, there are not enough total workers, and when e is
+ * negative, the pool is terminating. To deal with these possibly
+ * negative fields, we use casts in and out of "short" and/or
+ * signed shifts to maintain signedness.
+ *
+ * When a thread is queued (inactivated), its eventCount field is
+ * set negative, which is the only way to tell if a worker is
+ * prevented from executing tasks, even though it must continue to
+ * scan for them to avoid queuing races. Note however that
+ * eventCount updates lag releases so usage requires care.
+ *
+ * Field runState is an int packed with:
+ * SHUTDOWN: true if shutdown is enabled (1 bit)
+ * SEQ: a sequence number updated upon (de)registering workers (30 bits)
+ * INIT: set true after workQueues array construction (1 bit)
+ *
+ * The sequence number enables simple consistency checks:
+ * Staleness of read-only operations on the workQueues array can
+ * be checked by comparing runState before vs after the reads.
+ */
+
+ // bit positions/shifts for fields
+ private static final int AC_SHIFT = 48;
+ private static final int TC_SHIFT = 32;
+ private static final int ST_SHIFT = 31;
+ private static final int EC_SHIFT = 16;
+
+ // bounds
+ private static final int SMASK = 0xffff; // short bits
+ private static final int MAX_CAP = 0x7fff; // max #workers - 1
+ private static final int SQMASK = 0xfffe; // even short bits
+ private static final int SHORT_SIGN = 1 << 15;
+ private static final int INT_SIGN = 1 << 31;
+
+ // masks
+ private static final long STOP_BIT = 0x0001L << ST_SHIFT;
+ private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
+ private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
+
+ // units for incrementing and decrementing
+ private static final long TC_UNIT = 1L << TC_SHIFT;
+ private static final long AC_UNIT = 1L << AC_SHIFT;
+
+ // masks and units for dealing with u = (int)(ctl >>> 32)
+ private static final int UAC_SHIFT = AC_SHIFT - 32;
+ private static final int UTC_SHIFT = TC_SHIFT - 32;
+ private static final int UAC_MASK = SMASK << UAC_SHIFT;
+ private static final int UTC_MASK = SMASK << UTC_SHIFT;
+ private static final int UAC_UNIT = 1 << UAC_SHIFT;
+ private static final int UTC_UNIT = 1 << UTC_SHIFT;
+
+ // masks and units for dealing with e = (int)ctl
+ private static final int E_MASK = 0x7fffffff; // no STOP_BIT
+ private static final int E_SEQ = 1 << EC_SHIFT;
+
+ // runState bits
+ private static final int SHUTDOWN = 1 << 31;
+
+ // access mode for WorkQueue
+ static final int LIFO_QUEUE = 0;
+ static final int FIFO_QUEUE = 1;
+ static final int SHARED_QUEUE = -1;
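// A hedged decoding sketch, not part of this patch, mirroring the
// expressions used by getPoolSize(), getActiveThreadCount(), and
// tryTerminate(): storing AC and TC offset by -parallelism turns the
// common "too few" checks into simple sign tests.
class CtlSketch {
    static final int AC_SHIFT = 48, TC_SHIFT = 32;    // as above
    static void decode(long ctl, int parallelism) {
        int ac = (int)(ctl >> AC_SHIFT);    // active minus parallelism (signed)
        int tc = (short)(ctl >>> TC_SHIFT); // total minus parallelism (signed)
        int e = (int)ctl;                   // ST|EC|ID; negative => terminating
        System.out.println("active=" + (parallelism + ac) +
                           " total=" + (parallelism + tc) +
                           " terminating=" + (e < 0));
    }
    public static void main(String[] args) {
        long np = -8L;                      // fresh pool, parallelism 8
        long ctl = ((np << AC_SHIFT) & (0xffffL << AC_SHIFT)) |
                   ((np << TC_SHIFT) & (0xffffL << TC_SHIFT));
        decode(ctl, 8);                     // active=0 total=0 terminating=false
    }
}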
+
+ // Instance fields
- private static int totalCountOf(int s) { return s >>> 16; }
- private static int runningCountOf(int s) { return s & shortMask; }
- private static int workerCountsFor(int t, int r) { return (t << 16) + r; }
+ /*
+ * Field layout order in this class tends to matter more than one
+ * would like. Runtime layout order is only loosely related to
+ * declaration order and may differ across JVMs, but the following
+ * empirically works OK on current JVMs.
+ */
+
+ volatile long ctl; // main pool control
+ final int parallelism; // parallelism level
+ final int localMode; // per-worker scheduling mode
+ final int submitMask; // submit queue index bound
+ int nextSeed; // for initializing worker seeds
+ volatile int runState; // shutdown status and seq
+ WorkQueue[] workQueues; // main registry
+ final Mutex lock; // for registration
+ final Condition termination; // for awaitTermination
+ final ForkJoinWorkerThreadFactory factory; // factory for new workers
+ final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
+ final AtomicLong stealCount; // collect counts when terminated
+ final AtomicInteger nextWorkerNumber; // to create worker name string
+ final String workerNamePrefix; // to create worker name string
+
+ // Creating, registering, and deregistering workers
+
+ /**
+ * Tries to create and start a worker.
+ */
+ private void addWorker() {
+ Throwable ex = null;
+ ForkJoinWorkerThread wt = null;
+ try {
+ if ((wt = factory.newThread(this)) != null) {
+ wt.start();
+ return;
+ }
+ } catch (Throwable e) {
+ ex = e;
+ }
+ deregisterWorker(wt, ex); // adjust counts etc on failure
+ }
/**
- * Add delta (which may be negative) to running count. This must
- * be called before (with negative arg) and after (with positive)
- * any managed synchronization (i.e., mainly, joins)
- * @param delta the number to add
+ * Callback from ForkJoinWorkerThread constructor to assign a
+ * public name. This must be separate from registerWorker because
+ * it is called during the "super" constructor call in
+ * ForkJoinWorkerThread.
*/
- final void updateRunningCount(int delta) {
- int s;
- do;while (!casWorkerCounts(s = workerCounts, s + delta));
+ final String nextWorkerName() {
+ return workerNamePrefix.concat
+ (Integer.toString(nextWorkerNumber.addAndGet(1)));
}
/**
- * Add delta (which may be negative) to both total and running
- * count. This must be called upon creation and termination of
- * worker threads.
- * @param delta the number to add
+ * Callback from ForkJoinWorkerThread constructor to establish its
+ * poolIndex and record its WorkQueue. To avoid scanning bias due
+ * to packing entries in front of the workQueues array, we treat
+ * the array as a simple power-of-two hash table using per-thread
+ * seed as hash, expanding as needed.
+ *
+ * @param w the worker's queue
*/
- private void updateWorkerCount(int delta) {
- int d = delta + (delta << 16); // add to both lo and hi parts
- int s;
- do;while (!casWorkerCounts(s = workerCounts, s + d));
+ final void registerWorker(WorkQueue w) {
+ Mutex lock = this.lock;
+ lock.lock();
+ try {
+ WorkQueue[] ws = workQueues;
+ if (w != null && ws != null) { // skip on shutdown/failure
+ int rs, n;
+ while ((n = ws.length) < // ensure can hold total
+ (parallelism + (short)(ctl >>> TC_SHIFT) << 1))
+ workQueues = ws = Arrays.copyOf(ws, n << 1);
+ int m = n - 1;
+ int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence
+ w.seed = (s == 0) ? 1 : s; // ensure non-zero seed
+ int r = (s << 1) | 1; // use odd-numbered indices
+ while (ws[r &= m] != null) // step by approx half size
+ r += ((n >>> 1) & SQMASK) + 2;
+ w.eventCount = w.poolIndex = r; // establish before recording
+ ws[r] = w; // also update seq
+ runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN);
+ }
+ } finally {
+ lock.unlock();
+ }
}
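// An illustrative sketch, not part of this patch, of registerWorker's
// probe sequence: starting at an odd index and stepping by
// ((n >>> 1) & SQMASK) + 2 keeps the index odd, so workers land only on
// odd slots and never collide with the even-indexed submission queues.
class ProbeSketch {
    public static void main(String[] args) {
        int n = 8, m = n - 1, step = ((n >>> 1) & 0xfffe) + 2; // 6
        int r = 3;                                             // odd start
        for (int k = 0; k < 4; ++k, r += step)
            System.out.print((r & m) + " ");                   // 3 1 7 5
    }
}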
/**
- * Lifecycle control. High word contains runState, low word
- * contains the number of workers that are (probably) executing
- * tasks. This value is atomically incremented before a worker
- * gets a task to run, and decremented when worker has no tasks
- * and cannot find any. These two fields are bundled together to
- * support correct termination triggering. Note: activeCount
- * CAS'es cheat by assuming active count is in low word, so need
- * to be modified if this changes
- */
- private volatile int runControl;
-
- // RunState values. Order among values matters
- private static final int RUNNING = 0;
- private static final int SHUTDOWN = 1;
- private static final int TERMINATING = 2;
- private static final int TERMINATED = 3;
+ * Final callback from terminating worker, as well as upon failure
+ * to construct or start a worker in addWorker. Removes record of
+ * worker from array, and adjusts counts. If pool is shutting
+ * down, tries to complete termination.
+ *
+ * @param wt the worker thread or null if addWorker failed
+ * @param ex the exception causing failure, or null if none
+ */
+ final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
+ Mutex lock = this.lock;
+ WorkQueue w = null;
+ if (wt != null && (w = wt.workQueue) != null) {
+ w.runState = -1; // ensure runState is set
+ stealCount.getAndAdd(w.totalSteals + w.nsteals);
+ int idx = w.poolIndex;
+ lock.lock();
+ try { // remove record from array
+ WorkQueue[] ws = workQueues;
+ if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
+ ws[idx] = null;
+ } finally {
+ lock.unlock();
+ }
+ }
- private static int runStateOf(int c) { return c >>> 16; }
- private static int activeCountOf(int c) { return c & shortMask; }
- private static int runControlFor(int r, int a) { return (r << 16) + a; }
+ long c; // adjust ctl counts
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
+ ((c - TC_UNIT) & TC_MASK) |
+ (c & ~(AC_MASK|TC_MASK)))));
+
+ if (!tryTerminate(false, false) && w != null) {
+ w.cancelAll(); // cancel remaining tasks
+ if (w.array != null) // suppress signal if never ran
+ signalWork(); // wake up or create replacement
+ if (ex == null) // help clean refs on way out
+ ForkJoinTask.helpExpungeStaleExceptions();
+ }
- /**
- * Try incrementing active count; fail on contention. Called by
- * workers before/during executing tasks.
- * @return true on success;
- */
- final boolean tryIncrementActiveCount() {
- int c = runControl;
- return casRunControl(c, c+1);
+ if (ex != null) // rethrow
+ U.throwException(ex);
}
+
+ // Submissions
+
/**
- * Try decrementing active count; fail on contention.
- * Possibly trigger termination on success
- * Called by workers when they can't find tasks.
- * @return true on success
- */
- final boolean tryDecrementActiveCount() {
- int c = runControl;
- int nextc = c - 1;
- if (!casRunControl(c, nextc))
- return false;
- if (canTerminateOnShutdown(nextc))
- terminateOnShutdown();
- return true;
+ * Unless shutting down, adds the given task to a submission queue
+ * at submitter's current queue index (modulo submission
+ * range). If no queue exists at the index, one is created. If
+ * the queue is busy, another index is randomly chosen. The
+ * submitMask bounds the effective number of queues to the
+ * (nearest power of two for) parallelism level.
+ *
+ * @param task the task. Caller must ensure non-null.
+ */
+ private void doSubmit(ForkJoinTask<?> task) {
+ Submitter s = submitters.get();
+ for (int r = s.seed, m = submitMask;;) {
+ WorkQueue[] ws; WorkQueue q;
+ int k = r & m & SQMASK; // use only even indices
+ if (runState < 0 || (ws = workQueues) == null || ws.length <= k)
+ throw new RejectedExecutionException(); // shutting down
+ else if ((q = ws[k]) == null) { // create new queue
+ WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE);
+ Mutex lock = this.lock; // construct outside lock
+ lock.lock();
+ try { // recheck under lock
+ int rs = runState; // to update seq
+ if (ws == workQueues && ws[k] == null) {
+ ws[k] = nq;
+ runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN));
+ }
+ } finally {
+ lock.unlock();
+ }
+ }
+ else if (q.trySharedPush(task)) {
+ signalWork();
+ return;
+ }
+ else if (m > 1) { // move to a different index
+ r ^= r << 13; // same xorshift as WorkQueues
+ r ^= r >>> 17;
+ s.seed = r ^= r << 5;
+ }
+ else
+ Thread.yield(); // yield if no alternatives
+ }
}
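// A hedged sketch, not part of this patch: masking with SQMASK clears
// bit 0, so doSubmit hashes submitters only onto even workQueues slots,
// complementing the odd worker slots chosen in registerWorker above.
class SubmitIndexSketch {
    public static void main(String[] args) {
        int m = 15;                                  // submitMask for parallelism 8
        for (int r : new int[] { 7, 12, 255 })
            System.out.println(r & m & 0xfffe);      // 6, 12, 14: always even
    }
}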
+ // Maintaining ctl counts
+
/**
- * Return true if argument represents zero active count and
- * nonzero runstate, which is the triggering condition for
- * terminating on shutdown.
+ * Increments active count; mainly called upon return from blocking.
*/
- private static boolean canTerminateOnShutdown(int c) {
- return ((c & -c) >>> 16) != 0; // i.e. least bit is nonzero runState bit
+ final void incrementActiveCount() {
+ long c;
+ do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
}
/**
- * Transition run state to at least the given state. Return true
- * if not already at least given state.
+ * Tries to activate or create a worker if too few are active.
*/
- private boolean transitionRunStateTo(int state) {
- for (;;) {
- int c = runControl;
- if (runStateOf(c) >= state)
- return false;
- if (casRunControl(c, runControlFor(state, activeCountOf(c))))
- return true;
+ final void signalWork() {
+ long c; int u;
+ while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active
+ WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p;
+ if ((e = (int)c) > 0) { // at least one waiting
+ if (ws != null && (i = e & SMASK) < ws.length &&
+ (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
+ long nc = (((long)(w.nextWait & E_MASK)) |
+ ((long)(u + UAC_UNIT) << 32));
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p); // activate and release
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total
+ long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
+ ((u + UAC_UNIT) & UAC_MASK)) << 32;
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ addWorker();
+ break;
+ }
+ }
+ else
+ break;
}
}
- /**
- * Controls whether to add spares to maintain parallelism
- */
- private volatile boolean maintainsParallelism;
- // Constructors
+ // Scanning for tasks
/**
- * Creates a ForkJoinPool with a pool size equal to the number of
- * processors available on the system and using the default
- * ForkJoinWorkerThreadFactory,
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
*/
- public ForkJoinPool() {
- this(Runtime.getRuntime().availableProcessors(),
- defaultForkJoinWorkerThreadFactory);
+ final void runWorker(WorkQueue w) {
+ w.growArray(false); // initialize queue array in this thread
+ do {} while (w.runTask(scan(w)));
}
/**
- * Creates a ForkJoinPool with the indicated parallelism level
- * threads, and using the default ForkJoinWorkerThreadFactory,
- * @param parallelism the number of worker threads
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(int parallelism) {
- this(parallelism, defaultForkJoinWorkerThreadFactory);
+ * Scans for and, if found, returns one task, else possibly
+ * inactivates the worker. This method operates on single reads of
+ * volatile state and is designed to be re-invoked continuously,
+ * in part because it returns upon detecting inconsistencies,
+ * contention, or state changes that indicate possible success on
+ * re-invocation.
+ *
+ * The scan searches for tasks across a random permutation of
+ * queues (starting at a random index and stepping by a random
+ * relative prime, checking each at least once). The scan
+ * terminates upon either finding a non-empty queue, or completing
+ * the sweep. If the worker is not inactivated, it takes and
+ * returns a task from this queue. On failure to find a task, we
+ * take one of the following actions, after which the caller will
+ * retry calling this method unless terminated.
+ *
+ * * If pool is terminating, terminate the worker.
+ *
+ * * If not a complete sweep, try to release a waiting worker. If
+ * the scan terminated because the worker is inactivated, then the
+ * released worker will often be the calling worker, and it can
+ * succeed obtaining a task on the next call. Or maybe it is
+ * another worker, but with same net effect. Releasing in other
+ * cases as well ensures that we have enough workers running.
+ *
+ * * If not already enqueued, try to inactivate and enqueue the
+ * worker on wait queue. Or, if inactivating has caused the pool
+ * to be quiescent, relay to idleAwaitWork to check for
+ * termination and possibly shrink pool.
+ *
+ * * If already inactive, and the caller has run a task since the
+ * last empty scan, return (to allow rescan) unless others are
+ * also inactivated. Field WorkQueue.rescans counts down on each
+ * scan to ensure eventual inactivation and blocking.
+ *
+ * * If already enqueued and none of the above apply, park
+ * awaiting signal.
+ *
+ * @param w the worker (via its WorkQueue)
+ * @return a task, or null if none found
+ */
+ private final ForkJoinTask<?> scan(WorkQueue w) {
+ WorkQueue[] ws; // first update random seed
+ int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
+ int rs = runState, m; // volatile read order matters
+ if ((ws = workQueues) != null && (m = ws.length - 1) > 0) {
+ int ec = w.eventCount; // ec is negative if inactive
+ int step = (r >>> 16) | 1; // relative prime
+ for (int j = (m + 1) << 2; ; r += step) {
+ WorkQueue q; ForkJoinTask<?> t; ForkJoinTask<?>[] a; int b;
+ if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 &&
+ (a = q.array) != null) { // probably nonempty
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (q.base == b && ec >= 0 && t != null &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ q.base = b + 1; // specialization of pollAt
+ return t;
+ }
+ else if ((t != null || b + 1 != q.top) &&
+ (ec < 0 || j <= m)) {
+ rs = 0; // mark scan as incomplete
+ break; // caller can retry after release
+ }
+ }
+ if (--j < 0)
+ break;
+ }
+ long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns;
+ if (e < 0) // decode ctl on empty scan
+ w.runState = -1; // pool is terminating
+ else if (rs == 0 || rs != runState) { // incomplete scan
+ WorkQueue v; Thread p; // try to release a waiter
+ if (e > 0 && a < 0 && w.eventCount == ec &&
+ (v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) {
+ long nc = ((long)(v.nextWait & E_MASK) |
+ ((c + AC_UNIT) & (AC_MASK|TC_MASK)));
+ if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) {
+ v.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = v.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
+ else if (ec >= 0) { // try to enqueue/inactivate
+ long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
+ w.nextWait = e;
+ w.eventCount = ec | INT_SIGN; // mark as inactive
+ if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
+ w.eventCount = ec; // unmark on CAS failure
+ else {
+ if ((ns = w.nsteals) != 0) {
+ w.nsteals = 0; // set rescans if ran task
+ w.rescans = (a > 0) ? 0 : a + parallelism;
+ w.totalSteals += ns;
+ }
+ if (a == 1 - parallelism) // quiescent
+ idleAwaitWork(w, nc, c);
+ }
+ }
+ else if (w.eventCount < 0) { // already queued
+ if ((nr = w.rescans) > 0) { // continue rescanning
+ int ac = a + parallelism;
+ if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0)
+ Thread.yield(); // yield before block
+ }
+ else {
+ Thread.interrupted(); // clear status
+ Thread wt = Thread.currentThread();
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt; // emulate LockSupport.park
+ if (w.eventCount < 0) // recheck
+ U.park(false, 0L);
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ }
+ }
+ }
+ return null;
}
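// A standalone sketch, not part of this patch, of scan()'s traversal:
// an odd step is relatively prime to any power-of-two table length, so
// r, r + step, r + 2*step, ... taken mod (m + 1) visits every slot.
class ScanStepSketch {
    public static void main(String[] args) {
        int m = 7, r = 0x12345678;
        int step = (r >>> 16) | 1;                   // forced odd, as in scan()
        java.util.BitSet seen = new java.util.BitSet(m + 1);
        for (int j = 0; j <= m; ++j, r += step)
            seen.set(r & m);
        System.out.println(seen.cardinality());      // 8: full coverage
    }
}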
/**
- * Creates a ForkJoinPool with parallelism equal to the number of
- * processors available on the system and using the given
- * ForkJoinWorkerThreadFactory,
- * @param factory the factory for creating new threads
- * @throws NullPointerException if factory is null
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(ForkJoinWorkerThreadFactory factory) {
- this(Runtime.getRuntime().availableProcessors(), factory);
+ * If inactivating worker w has caused the pool to become
+ * quiescent, checks for pool termination, and, so long as this is
+ * not the only worker, waits for event for up to SHRINK_RATE
+ * nanosecs. On timeout, if ctl has not changed, terminates the
+ * worker, which will in turn wake up another worker to possibly
+ * repeat this process.
+ *
+ * @param w the calling worker
+ * @param currentCtl the ctl value triggering possible quiescence
+ * @param prevCtl the ctl value to restore if thread is terminated
+ */
+ private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
+ if (w.eventCount < 0 && !tryTerminate(false, false) &&
+ (int)prevCtl != 0 && ctl == currentCtl) {
+ Thread wt = Thread.currentThread();
+ Thread.yield(); // yield before block
+ while (ctl == currentCtl) {
+ long startTime = System.nanoTime();
+ Thread.interrupted(); // timed variant of version in scan()
+ U.putObject(wt, PARKBLOCKER, this);
+ w.parker = wt;
+ if (ctl == currentCtl)
+ U.park(false, SHRINK_RATE);
+ w.parker = null;
+ U.putObject(wt, PARKBLOCKER, null);
+ if (ctl != currentCtl)
+ break;
+ if (System.nanoTime() - startTime >= SHRINK_TIMEOUT &&
+ U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
+ w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
+ w.runState = -1; // shrink
+ break;
+ }
+ }
+ }
}
/**
- * Creates a ForkJoinPool with the given parallelism and factory.
+ * Tries to locate and execute tasks for a stealer of the given
+ * task, or in turn one of its stealers. Traces currentSteal ->
+ * currentJoin links looking for a thread working on a descendant
+ * of the given task and with a non-empty queue to steal back and
+ * execute tasks from. The first call to this method upon a
+ * waiting join will often entail scanning/search (which is OK
+ * because the joiner has nothing better to do), but this method
+ * leaves hints in workers to speed up subsequent calls. The
+ * implementation is very branchy to cope with potential
+ * inconsistencies or loops encountering chains that are stale,
+ * unknown, or so long that they are likely cyclic. All of these
+ * cases are dealt with by just retrying by caller.
*
- * @param parallelism the targeted number of worker threads
- * @param factory the factory for creating new threads
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero, or greater than implementation limit.
- * @throws NullPointerException if factory is null
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory) {
- if (parallelism <= 0 || parallelism > MAX_THREADS)
- throw new IllegalArgumentException();
- if (factory == null)
- throw new NullPointerException();
- checkPermission();
- this.factory = factory;
- this.parallelism = parallelism;
- this.maxPoolSize = MAX_THREADS;
- this.maintainsParallelism = true;
- this.poolNumber = poolNumberGenerator.incrementAndGet();
- this.workerLock = new ReentrantLock();
- this.termination = workerLock.newCondition();
- this.stealCount = new AtomicLong();
- this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>();
- // worker array and workers are lazily constructed
- }
+ * @param joiner the joining worker
+ * @param task the task to join
+ * @return true if found or ran a task (and so is immediately retryable)
+ */
+ private boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
+ WorkQueue[] ws;
+ int m, depth = MAX_HELP; // remaining chain depth
+ boolean progress = false;
+ if ((ws = workQueues) != null && (m = ws.length - 1) > 0 &&
+ task.status >= 0) {
+ ForkJoinTask<?> subtask = task; // current target
+ outer: for (WorkQueue j = joiner;;) {
+ WorkQueue stealer = null; // find stealer of subtask
+ WorkQueue v = ws[j.stealHint & m]; // try hint
+ if (v != null && v.currentSteal == subtask)
+ stealer = v;
+ else { // scan
+ for (int i = 1; i <= m; i += 2) {
+ if ((v = ws[i]) != null && v.currentSteal == subtask &&
+ v != joiner) {
+ stealer = v;
+ j.stealHint = i; // save hint
+ break;
+ }
+ }
+ if (stealer == null)
+ break;
+ }
- /**
- * Create new worker using factory.
- * @param index the index to assign worker
- * @return new worker, or null if factory failed
- */
- private ForkJoinWorkerThread createWorker(int index) {
- Thread.UncaughtExceptionHandler h = ueh;
- ForkJoinWorkerThread w = factory.newThread(this);
- if (w != null) {
- w.poolIndex = index;
- w.setDaemon(true);
- w.setAsyncMode(locallyFifo);
- w.setName("ForkJoinPool-" + poolNumber + "-worker-" + index);
- if (h != null)
- w.setUncaughtExceptionHandler(h);
+ for (WorkQueue q = stealer;;) { // try to help stealer
+ ForkJoinTask[] a; ForkJoinTask<?> t; int b;
+ if (task.status < 0)
+ break outer;
+ if ((b = q.base) - q.top < 0 && (a = q.array) != null) {
+ progress = true;
+ int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
+ t = (ForkJoinTask<?>)U.getObjectVolatile(a, i);
+ if (subtask.status < 0) // must recheck before taking
+ break outer;
+ if (t != null &&
+ q.base == b &&
+ U.compareAndSwapObject(a, i, t, null)) {
+ q.base = b + 1;
+ joiner.runSubtask(t);
+ }
+ else if (q.base == b)
+ break outer; // possibly stalled
+ }
+ else { // descend
+ ForkJoinTask<?> next = stealer.currentJoin;
+ if (--depth <= 0 || subtask.status < 0 ||
+ next == null || next == subtask)
+ break outer; // stale, dead-end, or cyclic
+ subtask = next;
+ j = stealer;
+ break;
+ }
+ }
+ }
}
- return w;
+ return progress;
}
/**
- * Return a good size for worker array given pool size.
- * Currently requires size to be a power of two.
+ * If task is at base of some steal queue, steals and executes it.
+ *
+ * @param joiner the joining worker
+ * @param task the task
*/
- private static int arraySizeFor(int ps) {
- return ps <= 1? 1 : (1 << (32 - Integer.numberOfLeadingZeros(ps-1)));
+ private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask<?> task) {
+ WorkQueue[] ws;
+ if ((ws = workQueues) != null) {
+ for (int j = 1; j < ws.length && task.status >= 0; j += 2) {
+ WorkQueue q = ws[j];
+ if (q != null && q.pollFor(task)) {
+ joiner.runSubtask(task);
+ break;
+ }
+ }
+ }
}
- public static ForkJoinWorkerThread[] copyOfWorkers(ForkJoinWorkerThread[] original, int newLength) {
- ForkJoinWorkerThread[] copy = new ForkJoinWorkerThread[newLength];
- System.arraycopy(original, 0, copy, 0, Math.min(newLength, original.length));
- return copy;
+ /**
+ * Tries to decrement active count (sometimes implicitly) and
+ * possibly release or create a compensating worker in preparation
+ * for blocking. Fails on contention or termination. Otherwise,
+ * adds a new thread if no idle workers are available and either
+ * the pool would become completely starved, or all of: it is at
+ * least half starved, fewer than 50% spares exist, and at least
+ * one task is apparently available.
+ * check requires a full scan, it is worthwhile in reducing false
+ * alarms.
+ *
+ * @param task if non-null, a task being waited for
+ * @param blocker if non-null, a blocker being waited for
+ * @return true if the caller can block, else should recheck and retry
+ */
+ final boolean tryCompensate(ForkJoinTask<?> task, ManagedBlocker blocker) {
+ int pc = parallelism, e;
+ long c = ctl;
+ WorkQueue[] ws = workQueues;
+ if ((e = (int)c) >= 0 && ws != null) {
+ int u, a, ac, hc;
+ int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc;
+ boolean replace = false;
+ if ((a = u >> UAC_SHIFT) <= 0) {
+ if ((ac = a + pc) <= 1)
+ replace = true;
+ else if ((e > 0 || (task != null &&
+ ac <= (hc = pc >>> 1) && tc < pc + hc))) {
+ WorkQueue w;
+ for (int j = 0; j < ws.length; ++j) {
+ if ((w = ws[j]) != null && !w.isEmpty()) {
+ replace = true;
+ break; // in compensation range and tasks available
+ }
+ }
+ }
+ }
+ if ((task == null || task.status >= 0) && // recheck need to block
+ (blocker == null || !blocker.isReleasable()) && ctl == c) {
+ if (!replace) { // no compensation
+ long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc))
+ return true;
+ }
+ else if (e != 0) { // release an idle worker
+ WorkQueue w; Thread p; int i;
+ if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ (c & (AC_MASK|TC_MASK)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, c, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ return true;
+ }
+ }
+ }
+ else if (tc < MAX_CAP) { // create replacement
+ long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
+ if (U.compareAndSwapLong(this, CTL, c, nc)) {
+ addWorker();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
}
/**
- * Create or resize array if necessary to hold newLength.
- * Call only under exclusion or lock
- * @return the array
- */
- private ForkJoinWorkerThread[] ensureWorkerArrayCapacity(int newLength) {
- ForkJoinWorkerThread[] ws = workers;
- if (ws == null)
- return workers = new ForkJoinWorkerThread[arraySizeFor(newLength)];
- else if (newLength > ws.length)
- return workers = copyOfWorkers(ws, arraySizeFor(newLength));
- else
- return ws;
+ * Helps and/or blocks until the given task is done.
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
+ */
+ final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
+ ForkJoinTask<?> prevJoin = joiner.currentJoin;
+ joiner.currentJoin = task;
+ long startTime = 0L;
+ for (int k = 0, s; ; ++k) {
+ if ((joiner.isEmpty() ? // try to help
+ !tryHelpStealer(joiner, task) :
+ !joiner.tryRemoveAndExec(task))) {
+ if (k == 0) {
+ startTime = System.nanoTime();
+ tryPollForAndExec(joiner, task); // check uncommon case
+ }
+ else if ((k & (MAX_HELP - 1)) == 0 &&
+ System.nanoTime() - startTime >= COMPENSATION_DELAY &&
+ tryCompensate(task, null)) {
+ if (task.trySetSignal() && task.status >= 0) {
+ synchronized (task) {
+ if (task.status >= 0) {
+ try { // see ForkJoinTask
+ task.wait(); // for explanation
+ } catch (InterruptedException ie) {
+ }
+ }
+ else
+ task.notifyAll();
+ }
+ }
+ long c; // re-activate
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ }
+ if ((s = task.status) < 0) {
+ joiner.currentJoin = prevJoin;
+ return s;
+ }
+ else if ((k & (MAX_HELP - 1)) == MAX_HELP >>> 1)
+ Thread.yield(); // for politeness
+ }
}
/**
- * Try to shrink workers into smaller array after one or more terminate
+ * Stripped-down variant of awaitJoin used by timed joins. Tries
+ * to help join only while there is continuous progress. (Caller
+ * will then enter a timed wait.)
+ *
+ * @param joiner the joining worker
+ * @param task the task
+ * @return task status on exit
*/
- private void tryShrinkWorkerArray() {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- int len = ws.length;
- int last = len - 1;
- while (last >= 0 && ws[last] == null)
- --last;
- int newLength = arraySizeFor(last+1);
- if (newLength < len)
- workers = copyOfWorkers(ws, newLength);
+ final int helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
+ int s;
+ while ((s = task.status) >= 0 &&
+ (joiner.isEmpty() ?
+ tryHelpStealer(joiner, task) :
+ joiner.tryRemoveAndExec(task)))
+ ;
+ return s;
+ }
+
+ /**
+ * Returns a (probably) non-empty steal queue, if one is found
+ * during a random, then cyclic scan, else null. This method must
+ * be retried by caller if, by the time it tries to use the queue,
+ * it is empty.
+ */
+ private WorkQueue findNonEmptyStealQueue(WorkQueue w) {
+ // Similar to loop in scan(), but ignoring submissions
+ int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
+ int step = (r >>> 16) | 1;
+ for (WorkQueue[] ws;;) {
+ int rs = runState, m;
+ if ((ws = workQueues) == null || (m = ws.length - 1) < 1)
+ return null;
+ for (int j = (m + 1) << 2; ; r += step) {
+ WorkQueue q = ws[((r << 1) | 1) & m];
+ if (q != null && !q.isEmpty())
+ return q;
+ else if (--j < 0) {
+ if (runState == rs)
+ return null;
+ break;
+ }
+ }
}
}
/**
- * Initialize workers if necessary
- */
- final void ensureWorkerInitialization() {
- ForkJoinWorkerThread[] ws = workers;
- if (ws == null) {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ws = workers;
- if (ws == null) {
- int ps = parallelism;
- ws = ensureWorkerArrayCapacity(ps);
- for (int i = 0; i < ps; ++i) {
- ForkJoinWorkerThread w = createWorker(i);
- if (w != null) {
- ws[i] = w;
- w.start();
- updateWorkerCount(1);
- }
- }
+ * Runs tasks until {@code isQuiescent()}. We piggyback on
+ * active count ctl maintenance, but rather than blocking
+ * when tasks cannot be found, we rescan until all others cannot
+ * find tasks either.
+ */
+ final void helpQuiescePool(WorkQueue w) {
+ for (boolean active = true;;) {
+ if (w.base - w.top < 0)
+ w.runLocalTasks(); // exhaust local queue
+ WorkQueue q = findNonEmptyStealQueue(w);
+ if (q != null) {
+ ForkJoinTask<?> t; int b;
+ if (!active) { // re-establish active count
+ long c;
+ active = true;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ }
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
+ w.runSubtask(t);
+ }
+ else {
+ long c;
+ if (active) { // decrement active count without queuing
+ active = false;
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c -= AC_UNIT));
+ }
+ else
+ c = ctl; // re-increment on exit
+ if ((int)(c >> AC_SHIFT) + parallelism == 0) {
+ do {} while (!U.compareAndSwapLong
+ (this, CTL, c = ctl, c + AC_UNIT));
+ break;
}
- } finally {
- lock.unlock();
}
}
}
/**
- * Worker creation and startup for threads added via setParallelism.
+ * Gets and removes a local or stolen task for the given worker.
+ *
+ * @return a task, if available
+ */
+ final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
+ for (ForkJoinTask<?> t;;) {
+ WorkQueue q; int b;
+ if ((t = w.nextLocalTask()) != null)
+ return t;
+ if ((q = findNonEmptyStealQueue(w)) == null)
+ return null;
+ if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null)
+ return t;
+ }
+ }
+
+ /**
+ * Returns the approximate (non-atomic) number of idle threads per
+ * active thread to offset steal queue size for method
+ * ForkJoinTask.getSurplusQueuedTaskCount().
*/
- private void createAndStartAddedWorkers() {
- resumeAllSpares(); // Allow spares to convert to nonspare
- int ps = parallelism;
- ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(ps);
- int len = ws.length;
- // Sweep through slots, to keep lowest indices most populated
- int k = 0;
- while (k < len) {
- if (ws[k] != null) {
- ++k;
- continue;
+ final int idlePerActive() {
+ // Approximate at powers of two for small values, saturate past 4
+ int p = parallelism;
+ int a = p + (int)(ctl >> AC_SHIFT);
+ return (a > (p >>>= 1) ? 0 :
+ a > (p >>>= 1) ? 1 :
+ a > (p >>>= 1) ? 2 :
+ a > (p >>>= 1) ? 4 :
+ 8);
+ }
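// A hedged tabulation, not part of this patch, of idlePerActive() for
// parallelism 8: the idle-per-active estimate is rounded to powers of
// two and saturates at 8 as the active count falls.
class IdlePerActiveSketch {
    static int idlePerActive(int p, int a) {
        return (a > (p >>>= 1) ? 0 :
                a > (p >>>= 1) ? 1 :
                a > (p >>>= 1) ? 2 :
                a > (p >>>= 1) ? 4 :
                8);
    }
    public static void main(String[] args) {
        for (int a = 8; a >= 0; --a)    // 8..5 -> 0, 4..3 -> 1, 2 -> 2, 1 -> 4, 0 -> 8
            System.out.println(a + " active -> " + idlePerActive(8, a));
    }
}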
+
+ // Termination
+
+ /**
+ * Possibly initiates and/or completes termination. The caller
+ * triggering termination runs three passes through workQueues:
+ * (0) Setting termination status, followed by wakeups of queued
+ * workers; (1) cancelling all tasks; (2) interrupting lagging
+ * threads (likely in external tasks, but possibly also blocked in
+ * joins). Each pass repeats previous steps because of potential
+ * lagging thread creation.
+ *
+ * @param now if true, unconditionally terminate, else only
+ * if no work and no active workers
+ * @param enable if true, enable shutdown when next possible
+ * @return true if now terminating or terminated
+ */
+ private boolean tryTerminate(boolean now, boolean enable) {
+ Mutex lock = this.lock;
+ for (long c;;) {
+ if (((c = ctl) & STOP_BIT) != 0) { // already terminating
+ if ((short)(c >>> TC_SHIFT) == -parallelism) {
+ lock.lock(); // don't need try/finally
+ termination.signalAll(); // signal when 0 workers
+ lock.unlock();
+ }
+ return true;
}
- int s = workerCounts;
- int tc = totalCountOf(s);
- int rc = runningCountOf(s);
- if (rc >= ps || tc >= ps)
- break;
- if (casWorkerCounts (s, workerCountsFor(tc+1, rc+1))) {
- ForkJoinWorkerThread w = createWorker(k);
- if (w != null) {
- ws[k++] = w;
- w.start();
+ if (runState >= 0) { // not yet enabled
+ if (!enable)
+ return false;
+ lock.lock();
+ runState |= SHUTDOWN;
+ lock.unlock();
+ }
+ if (!now) { // check if idle & no tasks
+ if ((int)(c >> AC_SHIFT) != -parallelism ||
+ hasQueuedSubmissions())
+ return false;
+ // Check for unqueued inactive workers. One pass suffices.
+ WorkQueue[] ws = workQueues; WorkQueue w;
+ if (ws != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.eventCount >= 0)
+ return false;
+ }
}
- else {
- updateWorkerCount(-1); // back out on failed creation
- break;
+ }
+ if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
+ for (int pass = 0; pass < 3; ++pass) {
+ WorkQueue[] ws = workQueues;
+ if (ws != null) {
+ WorkQueue w;
+ int n = ws.length;
+ for (int i = 0; i < n; ++i) {
+ if ((w = ws[i]) != null) {
+ w.runState = -1;
+ if (pass > 0) {
+ w.cancelAll();
+ if (pass > 1)
+ w.interruptOwner();
+ }
+ }
+ }
+ // Wake up workers parked on event queue
+ int i, e; long cc; Thread p;
+ while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
+ (i = e & SMASK) < n &&
+ (w = ws[i]) != null) {
+ long nc = ((long)(w.nextWait & E_MASK) |
+ ((cc + AC_UNIT) & AC_MASK) |
+ (cc & (TC_MASK|STOP_BIT)));
+ if (w.eventCount == (e | INT_SIGN) &&
+ U.compareAndSwapLong(this, CTL, cc, nc)) {
+ w.eventCount = (e + E_SEQ) & E_MASK;
+ w.runState = -1;
+ if ((p = w.parker) != null)
+ U.unpark(p);
+ }
+ }
+ }
}
}
}
}
- // Execution methods
+ // Exported methods
+
+ // Constructors
+
+ /**
+ * Creates a {@code ForkJoinPool} with parallelism equal to {@link
+ * java.lang.Runtime#availableProcessors}, using the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool() {
+ this(Runtime.getRuntime().availableProcessors(),
+ defaultForkJoinWorkerThreadFactory, null, false);
+ }
+
+ /**
+ * Creates a {@code ForkJoinPool} with the indicated parallelism
+ * level, the {@linkplain
+ * #defaultForkJoinWorkerThreadFactory default thread factory},
+ * no UncaughtExceptionHandler, and non-async LIFO processing mode.
+ *
+ * @param parallelism the parallelism level
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
+ */
+ public ForkJoinPool(int parallelism) {
+ this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
+ }
/**
- * Common code for execute, invoke and submit
+ * Creates a {@code ForkJoinPool} with the given parameters.
+ *
+ * @param parallelism the parallelism level. For default value,
+ * use {@link java.lang.Runtime#availableProcessors}.
+ * @param factory the factory for creating new threads. For default value,
+ * use {@link #defaultForkJoinWorkerThreadFactory}.
+ * @param handler the handler for internal worker threads that
+ * terminate due to unrecoverable errors encountered while executing
+ * tasks. For default value, use {@code null}.
+ * @param asyncMode if true,
+ * establishes local first-in-first-out scheduling mode for forked
+ * tasks that are never joined. This mode may be more appropriate
+ * than default locally stack-based mode in applications in which
+ * worker threads only process event-style asynchronous tasks.
+ * For default value, use {@code false}.
+ * @throws IllegalArgumentException if parallelism less than or
+ * equal to zero, or greater than implementation limit
+ * @throws NullPointerException if the factory is null
+ * @throws SecurityException if a security manager exists and
+ * the caller is not permitted to modify threads
+ * because it does not hold {@link
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
- private <T> void doSubmit(ForkJoinTask<T> task) {
- if (isShutdown())
- throw new RejectedExecutionException();
- if (workers == null)
- ensureWorkerInitialization();
- submissionQueue.offer(task);
- signalIdleWorkers();
+ public ForkJoinPool(int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ Thread.UncaughtExceptionHandler handler,
+ boolean asyncMode) {
+ checkPermission();
+ if (factory == null)
+ throw new NullPointerException();
+ if (parallelism <= 0 || parallelism > MAX_CAP)
+ throw new IllegalArgumentException();
+ this.parallelism = parallelism;
+ this.factory = factory;
+ this.ueh = handler;
+ this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE;
+ long np = (long)(-parallelism); // offset ctl counts
+ this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+ // Use nearest power of 2 for workQueues size. See Hacker's Delight, sec 3.2.
+ int n = parallelism - 1;
+ n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
+ int size = (n + 1) << 1; // #slots = 2*#workers
+ this.submitMask = size - 1; // room for max # of submit queues
+ this.workQueues = new WorkQueue[size];
+ this.termination = (this.lock = new Mutex()).newCondition();
+ this.stealCount = new AtomicLong();
+ this.nextWorkerNumber = new AtomicInteger();
+ int pn = poolNumberGenerator.incrementAndGet();
+ StringBuilder sb = new StringBuilder("ForkJoinPool-");
+ sb.append(Integer.toString(pn));
+ sb.append("-worker-");
+ this.workerNamePrefix = sb.toString();
+ lock.lock();
+ this.runState = 1; // set init flag
+ lock.unlock();
}
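// A standalone sketch, not part of this patch, of the bit-smearing trick
// used just above (Hacker's Delight, sec 3.2): round the parallelism
// level up to a power of two, then double it for the table size.
class TableSizeSketch {
    public static void main(String[] args) {
        int parallelism = 6;
        int n = parallelism - 1;                      // 5
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        System.out.println((n + 1) << 1);             // 16 = 2 * next-pow2(6)
    }
}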
+ // Execution methods
+
/**
- * Performs the given task; returning its result upon completion
+ * Performs the given task, returning its result upon completion.
+ * If the computation encounters an unchecked Exception or Error,
+ * it is rethrown as the outcome of this invocation. Rethrown
+ * exceptions behave in the same way as regular exceptions, but,
+ * when possible, contain stack traces (as displayed for example
+ * using {@code ex.printStackTrace()}) of both the current thread
+ * as well as the thread actually encountering the exception;
+ * minimally only the latter.
+ *
* @param task the task
* @return the task's result
- * @throws NullPointerException if task is null
- * @throws RejectedExecutionException if pool is shut down
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
*/
public <T> T invoke(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
doSubmit(task);
return task.join();
}
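// A minimal usage sketch, not part of this patch; it assumes the
// RecursiveTask class shipped alongside this file is on the classpath.
class SumTask extends RecursiveTask<Long> {
    final long[] a; final int lo, hi;
    SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
    protected Long compute() {
        if (hi - lo <= 1024) {                        // small slice: sum directly
            long s = 0;
            for (int i = lo; i < hi; ++i) s += a[i];
            return s;
        }
        int mid = (lo + hi) >>> 1;
        SumTask left = new SumTask(a, lo, mid);
        left.fork();                                  // expose half for stealing
        long right = new SumTask(a, mid, hi).compute();
        return right + left.join();
    }
}
// long total = new ForkJoinPool().invoke(new SumTask(data, 0, data.length));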
/**
* Arranges for (asynchronous) execution of the given task.
+ *
* @param task the task
- * @throws NullPointerException if task is null
- * @throws RejectedExecutionException if pool is shut down
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
*/
- public <T> void execute(ForkJoinTask<T> task) {
+ public void execute(ForkJoinTask<?> task) {
+ if (task == null)
+ throw new NullPointerException();
doSubmit(task);
}
// AbstractExecutorService methods
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public void execute(Runnable task) {
- doSubmit(new AdaptedRunnable<Void>(task, null));
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
+ doSubmit(job);
+ }
+
+ /**
+ * Submits a ForkJoinTask for execution.
+ *
+ * @param task the task to submit
+ * @return the task
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
+ public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
+ if (task == null)
+ throw new NullPointerException();
+ doSubmit(task);
+ return task;
}
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public <T> ForkJoinTask<T> submit(Callable<T> task) {
- ForkJoinTask<T> job = new AdaptedCallable<T>(task);
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
doSubmit(job);
return job;
}
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public <T> ForkJoinTask<T> submit(Runnable task, T result) {
- ForkJoinTask<T> job = new AdaptedRunnable<T>(task, result);
+ ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
doSubmit(job);
return job;
}
+ /**
+ * @throws NullPointerException if the task is null
+ * @throws RejectedExecutionException if the task cannot be
+ * scheduled for execution
+ */
public ForkJoinTask<?> submit(Runnable task) {
- ForkJoinTask<Void> job = new AdaptedRunnable<Void>(task, null);
+ if (task == null)
+ throw new NullPointerException();
+ ForkJoinTask<?> job;
+ if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+ job = (ForkJoinTask<?>) task;
+ else
+ job = new ForkJoinTask.AdaptedRunnableAction(task);
doSubmit(job);
return job;
}
/**
- * Adaptor for Runnables. This implements RunnableFuture
- * to be compliant with AbstractExecutorService constraints
+ * @throws NullPointerException {@inheritDoc}
+ * @throws RejectedExecutionException {@inheritDoc}
*/
- static final class AdaptedRunnable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Runnable runnable;
- final T resultOnCompletion;
- T result;
- AdaptedRunnable(Runnable runnable, T result) {
- if (runnable == null) throw new NullPointerException();
- this.runnable = runnable;
- this.resultOnCompletion = result;
- }
- public T getRawResult() { return result; }
- public void setRawResult(T v) { result = v; }
- public boolean exec() {
- runnable.run();
- result = resultOnCompletion;
- return true;
- }
- public void run() { invoke(); }
- }
-
- /**
- * Adaptor for Callables
- */
- static final class AdaptedCallable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Callable<T> callable;
- T result;
- AdaptedCallable(Callable<T> callable) {
- if (callable == null) throw new NullPointerException();
- this.callable = callable;
- }
- public T getRawResult() { return result; }
- public void setRawResult(T v) { result = v; }
- public boolean exec() {
- try {
- result = callable.call();
- return true;
- } catch (Error err) {
- throw err;
- } catch (RuntimeException rex) {
- throw rex;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- public void run() { invoke(); }
- }
-
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
- ArrayList<ForkJoinTask<T>> ts =
- new ArrayList<ForkJoinTask<T>>(tasks.size());
- for (Callable<T> c : tasks)
- ts.add(new AdaptedCallable<T>(c));
- invoke(new InvokeAll<T>(ts));
- return (List<Future<T>>)(List)ts;
- }
-
- static final class InvokeAll<T> extends RecursiveAction {
- final ArrayList<ForkJoinTask<T>> tasks;
- InvokeAll(ArrayList<ForkJoinTask<T>> tasks) { this.tasks = tasks; }
- public void compute() {
- try { invokeAll(tasks); } catch(Exception ignore) {}
+ // In previous versions of this class, this method constructed
+ // a task to run ForkJoinTask.invokeAll, but now external
+ // invocation of multiple tasks is at least as efficient.
+ List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
+ // Workaround needed because method wasn't declared with
+ // wildcards in return type but should have been.
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ List<Future<T>> futures = (List<Future<T>>) (List) fs;
+
+ boolean done = false;
+ try {
+ for (Callable<T> t : tasks) {
+ ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
+ doSubmit(f);
+ fs.add(f);
+ }
+ for (ForkJoinTask<T> f : fs)
+ f.quietlyJoin();
+ done = true;
+ return futures;
+ } finally {
+ if (!done)
+ for (ForkJoinTask<T> f : fs)
+ f.cancel(false);
}
}
- // Configuration and status settings and queries
-
/**
- * Returns the factory used for constructing new workers
+ * Returns the factory used for constructing new workers.
*
* @return the factory used for constructing new workers
*/
@@ -674,92 +2329,17 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
/**
* Returns the handler for internal worker threads that terminate
* due to unrecoverable errors encountered while executing tasks.
- * @return the handler, or null if none
- */
- public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
- Thread.UncaughtExceptionHandler h;
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- h = ueh;
- } finally {
- lock.unlock();
- }
- return h;
- }
-
- /**
- * Sets the handler for internal worker threads that terminate due
- * to unrecoverable errors encountered while executing tasks.
- * Unless set, the current default or ThreadGroup handler is used
- * as handler.
*
- * @param h the new handler
- * @return the old handler, or null if none
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * @return the handler, or {@code null} if none
*/
- public Thread.UncaughtExceptionHandler
- setUncaughtExceptionHandler(Thread.UncaughtExceptionHandler h) {
- checkPermission();
- Thread.UncaughtExceptionHandler old = null;
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- old = ueh;
- ueh = h;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null)
- w.setUncaughtExceptionHandler(h);
- }
- }
- } finally {
- lock.unlock();
- }
- return old;
- }
-
-
- /**
- * Sets the target parallelism level of this pool.
- * @param parallelism the target parallelism
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero or greater than maximum size bounds.
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
- */
- public void setParallelism(int parallelism) {
- checkPermission();
- if (parallelism <= 0 || parallelism > maxPoolSize)
- throw new IllegalArgumentException();
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- if (!isTerminating()) {
- int p = this.parallelism;
- this.parallelism = parallelism;
- if (parallelism > p)
- createAndStartAddedWorkers();
- else
- trimSpares();
- }
- } finally {
- lock.unlock();
- }
- signalIdleWorkers();
+ public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
+ return ueh;
}
/**
- * Returns the targeted number of worker threads in this pool.
+ * Returns the targeted parallelism level of this pool.
*
- * @return the targeted number of worker threads in this pool
+ * @return the targeted parallelism level of this pool
*/
public int getParallelism() {
return parallelism;
@@ -767,141 +2347,71 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
/**
* Returns the number of worker threads that have started but not
- * yet terminated. This result returned by this method may differ
- * from <code>getParallelism</code> when threads are created to
+ * yet terminated. The result returned by this method may differ
+ * from {@link #getParallelism} when threads are created to
* maintain parallelism when others are cooperatively blocked.
*
* @return the number of worker threads
*/
public int getPoolSize() {
- return totalCountOf(workerCounts);
+ return parallelism + (short)(ctl >>> TC_SHIFT);
}
/**
- * Returns the maximum number of threads allowed to exist in the
- * pool, even if there are insufficient unblocked running threads.
- * @return the maximum
- */
- public int getMaximumPoolSize() {
- return maxPoolSize;
- }
-
- /**
- * Sets the maximum number of threads allowed to exist in the
- * pool, even if there are insufficient unblocked running threads.
- * Setting this value has no effect on current pool size. It
- * controls construction of new threads.
- * @throws IllegalArgumentException if negative or greater than
- * internal implementation limit.
- */
- public void setMaximumPoolSize(int newMax) {
- if (newMax < 0 || newMax > MAX_THREADS)
- throw new IllegalArgumentException();
- maxPoolSize = newMax;
- }
-
-
- /**
- * Returns true if this pool dynamically maintains its target
- * parallelism level. If false, new threads are added only to
- * avoid possible starvation.
- * This setting is by default true;
- * @return true if maintains parallelism
- */
- public boolean getMaintainsParallelism() {
- return maintainsParallelism;
- }
-
- /**
- * Sets whether this pool dynamically maintains its target
- * parallelism level. If false, new threads are added only to
- * avoid possible starvation.
- * @param enable true to maintain parallelism
- */
- public void setMaintainsParallelism(boolean enable) {
- maintainsParallelism = enable;
- }
-
- /**
- * Establishes local first-in-first-out scheduling mode for forked
- * tasks that are never joined. This mode may be more appropriate
- * than default locally stack-based mode in applications in which
- * worker threads only process asynchronous tasks. This method is
- * designed to be invoked only when pool is quiescent, and
- * typically only before any tasks are submitted. The effects of
- * invocations at other times may be unpredictable.
- *
- * @param async if true, use locally FIFO scheduling
- * @return the previous mode.
- */
- public boolean setAsyncMode(boolean async) {
- boolean oldMode = locallyFifo;
- locallyFifo = async;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.setAsyncMode(async);
- }
- }
- return oldMode;
- }
-
- /**
- * Returns true if this pool uses local first-in-first-out
+ * Returns {@code true} if this pool uses local first-in-first-out
* scheduling mode for forked tasks that are never joined.
*
- * @return true if this pool uses async mode.
+ * @return {@code true} if this pool uses async mode
*/
public boolean getAsyncMode() {
- return locallyFifo;
+ return localMode != 0;
}
/**
* Returns an estimate of the number of worker threads that are
* not blocked waiting to join tasks or for other managed
- * synchronization.
+ * synchronization. This method may overestimate the
+ * number of running threads.
*
* @return the number of worker threads
*/
public int getRunningThreadCount() {
- return runningCountOf(workerCounts);
+ int rc = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ return rc;
}
/**
* Returns an estimate of the number of threads that are currently
* stealing or executing tasks. This method may overestimate the
* number of active threads.
- * @return the number of active threads.
+ *
+ * @return the number of active threads
*/
public int getActiveThreadCount() {
- return activeCountOf(runControl);
- }
-
- /**
- * Returns an estimate of the number of threads that are currently
- * idle waiting for tasks. This method may underestimate the
- * number of idle threads.
- * @return the number of idle threads.
- */
- final int getIdleThreadCount() {
- int c = runningCountOf(workerCounts) - activeCountOf(runControl);
- return (c <= 0)? 0 : c;
+ int r = parallelism + (int)(ctl >> AC_SHIFT);
+ return (r <= 0) ? 0 : r; // suppress momentarily negative values
}
/**
- * Returns true if all worker threads are currently idle. An idle
- * worker is one that cannot obtain a task to execute because none
- * are available to steal from other threads, and there are no
- * pending submissions to the pool. This method is conservative:
- * It might not return true immediately upon idleness of all
- * threads, but will eventually become true if threads remain
- * inactive.
- * @return true if all threads are currently idle
+ * Returns {@code true} if all worker threads are currently idle.
+ * An idle worker is one that cannot obtain a task to execute
+ * because none are available to steal from other threads, and
+ * there are no pending submissions to the pool. This method is
+ * conservative; it might not return {@code true} immediately upon
+ * idleness of all threads, but will eventually become true if
+ * threads remain inactive.
+ *
+ * @return {@code true} if all threads are currently idle
*/
public boolean isQuiescent() {
- return activeCountOf(runControl) == 0;
+ return (int)(ctl >> AC_SHIFT) + parallelism == 0;
}
/**
@@ -909,23 +2419,22 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* one thread's work queue by another. The reported value
* underestimates the actual total number of steals when the pool
* is not quiescent. This value may be useful for monitoring and
- * tuning fork/join programs: In general, steal counts should be
+ * tuning fork/join programs: in general, steal counts should be
* high enough to keep threads busy, but low enough to avoid
* overhead and contention across threads.
- * @return the number of steals.
+ *
+ * @return the number of steals
*/
public long getStealCount() {
- return stealCount.get();
- }
-
- /**
- * Accumulate steal count from a worker. Call only
- * when worker known to be idle.
- */
- private void updateStealCount(ForkJoinWorkerThread w) {
- int sc = w.getAndClearStealCount();
- if (sc != 0)
- stealCount.addAndGet(sc);
+ long count = stealCount.get();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.totalSteals;
+ }
+ }
+ return count;
}
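These counters are meant for coarse monitoring and tuning rather than synchronization; a sketch of a periodic sampler built on the accessors in this class (names illustrative):

    // Illustrative monitor; `pool` is a hypothetical ForkJoinPool instance.
    void sample(ForkJoinPool pool) {
        System.out.println("steals=" + pool.getStealCount()
                           + " queued=" + pool.getQueuedTaskCount()
                           + " running=" + pool.getRunningThreadCount());
    }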
/**
@@ -935,77 +2444,106 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* an approximation, obtained by iterating across all threads in
* the pool. This method may be useful for tuning task
* granularities.
- * @return the number of queued tasks.
+ *
+ * @return the number of queued tasks
*/
public long getQueuedTaskCount() {
long count = 0;
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- count += t.getQueueSize();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 1; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
}
}
return count;
}
/**
- * Returns an estimate of the number tasks submitted to this pool
- * that have not yet begun executing. This method takes time
- * proportional to the number of submissions.
- * @return the number of queued submissions.
+ * Returns an estimate of the number of tasks submitted to this
+ * pool that have not yet begun executing. This method may take
+ * time proportional to the number of submissions.
+ *
+ * @return the number of queued submissions
*/
public int getQueuedSubmissionCount() {
- return submissionQueue.size();
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null)
+ count += w.queueSize();
+ }
+ }
+ return count;
}
/**
- * Returns true if there are any tasks submitted to this pool
- * that have not yet begun executing.
- * @return <code>true</code> if there are any queued submissions.
+ * Returns {@code true} if there are any tasks submitted to this
+ * pool that have not yet begun executing.
+ *
+ * @return {@code true} if there are any queued submissions
*/
public boolean hasQueuedSubmissions() {
- return !submissionQueue.isEmpty();
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && !w.isEmpty())
+ return true;
+ }
+ }
+ return false;
}
/**
* Removes and returns the next unexecuted submission if one is
* available. This method may be useful in extensions to this
* class that re-assign work in systems with multiple pools.
- * @return the next submission, or null if none
+ *
+ * @return the next submission, or {@code null} if none
*/
protected ForkJoinTask<?> pollSubmission() {
- return submissionQueue.poll();
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; i += 2) {
+ if ((w = ws[i]) != null && (t = w.poll()) != null)
+ return t;
+ }
+ }
+ return null;
}
/**
* Removes all available unexecuted submitted and forked tasks
* from scheduling queues and adds them to the given collection,
* without altering their execution status. These may include
- * artifically generated or wrapped tasks. This method id designed
- * to be invoked only when the pool is known to be
+ * artificially generated or wrapped tasks. This method is
+ * designed to be invoked only when the pool is known to be
* quiescent. Invocations at other times may not remove all
* tasks. A failure encountered while attempting to add elements
- * to collection <tt>c</tt> may result in elements being in
+ * to collection {@code c} may result in elements being in
* neither, either or both collections when the associated
* exception is thrown. The behavior of this operation is
* undefined if the specified collection is modified while the
* operation is in progress.
+ *
* @param c the collection to transfer elements into
* @return the number of elements transferred
*/
- protected int drainTasksTo(Collection<ForkJoinTask<?>> c) {
- int n = submissionQueue.drainTo(c);
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
+ protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
+ int count = 0;
+ WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
+ if ((ws = workQueues) != null) {
for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null)
- n += w.drainTasksTo(c);
+ if ((w = ws[i]) != null) {
+ while ((t = w.poll()) != null) {
+ c.add(t);
+ ++count;
+ }
+ }
}
}
- return n;
+ return count;
}
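Since {@code drainTasksTo} is protected, the work re-assignment use case from the javadoc lives in a subclass; a sketch under that assumption:

    // Inside a hypothetical ForkJoinPool subclass, with the pool quiescent:
    List<ForkJoinTask<?>> pending = new ArrayList<ForkJoinTask<?>>();
    int moved = drainTasksTo(pending);  // tasks keep their execution status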
/**
@@ -1016,101 +2554,124 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
* @return a string identifying this pool, as well as its state
*/
public String toString() {
- int ps = parallelism;
- int wc = workerCounts;
- int rc = runControl;
- long st = getStealCount();
- long qt = getQueuedTaskCount();
- long qs = getQueuedSubmissionCount();
+ // Use a single pass through workQueues to collect counts
+ long qt = 0L, qs = 0L; int rc = 0;
+ long st = stealCount.get();
+ long c = ctl;
+ WorkQueue[] ws; WorkQueue w;
+ if ((ws = workQueues) != null) {
+ for (int i = 0; i < ws.length; ++i) {
+ if ((w = ws[i]) != null) {
+ int size = w.queueSize();
+ if ((i & 1) == 0)
+ qs += size;
+ else {
+ qt += size;
+ st += w.totalSteals;
+ if (w.isApparentlyUnblocked())
+ ++rc;
+ }
+ }
+ }
+ }
+ int pc = parallelism;
+ int tc = pc + (short)(c >>> TC_SHIFT);
+ int ac = pc + (int)(c >> AC_SHIFT);
+ if (ac < 0) // ignore transient negative
+ ac = 0;
+ String level;
+ if ((c & STOP_BIT) != 0)
+ level = (tc == 0) ? "Terminated" : "Terminating";
+ else
+ level = runState < 0 ? "Shutting down" : "Running";
return super.toString() +
- "[" + runStateToString(runStateOf(rc)) +
- ", parallelism = " + ps +
- ", size = " + totalCountOf(wc) +
- ", active = " + activeCountOf(rc) +
- ", running = " + runningCountOf(wc) +
+ "[" + level +
+ ", parallelism = " + pc +
+ ", size = " + tc +
+ ", active = " + ac +
+ ", running = " + rc +
", steals = " + st +
", tasks = " + qt +
", submissions = " + qs +
"]";
}
- private static String runStateToString(int rs) {
- switch(rs) {
- case RUNNING: return "Running";
- case SHUTDOWN: return "Shutting down";
- case TERMINATING: return "Terminating";
- case TERMINATED: return "Terminated";
- default: throw new Error("Unknown run state");
- }
- }
-
- // lifecycle control
-
/**
* Initiates an orderly shutdown in which previously submitted
* tasks are executed, but no new tasks will be accepted.
* Invocation has no additional effect if already shut down.
* Tasks that are in the process of being submitted concurrently
* during the course of this method may or may not be rejected.
+ *
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public void shutdown() {
checkPermission();
- transitionRunStateTo(SHUTDOWN);
- if (canTerminateOnShutdown(runControl))
- terminateOnShutdown();
+ tryTerminate(false, true);
}
/**
- * Attempts to stop all actively executing tasks, and cancels all
- * waiting tasks. Tasks that are in the process of being
- * submitted or executed concurrently during the course of this
- * method may or may not be rejected. Unlike some other executors,
- * this method cancels rather than collects non-executed tasks
- * upon termination, so always returns an empty list. However, you
- * can use method <code>drainTasksTo</code> before invoking this
- * method to transfer unexecuted tasks to another collection.
+ * Attempts to cancel and/or stop all tasks, and reject all
+ * subsequently submitted tasks. Tasks that are in the process of
+ * being submitted or executed concurrently during the course of
+ * this method may or may not be rejected. This method cancels
+ * both existing and unexecuted tasks, in order to permit
+ * termination in the presence of task dependencies. So the method
+ * always returns an empty list (unlike the case for some other
+ * Executors).
+ *
* @return an empty list
* @throws SecurityException if a security manager exists and
* the caller is not permitted to modify threads
* because it does not hold {@link
- * java.lang.RuntimePermission}<code>("modifyThread")</code>,
+ * java.lang.RuntimePermission}{@code ("modifyThread")}
*/
public List<Runnable> shutdownNow() {
checkPermission();
- terminate();
+ tryTerminate(true, true);
return Collections.emptyList();
}
/**
- * Returns <code>true</code> if all tasks have completed following shut down.
+ * Returns {@code true} if all tasks have completed following shut down.
*
- * @return <code>true</code> if all tasks have completed following shut down
+ * @return {@code true} if all tasks have completed following shut down
*/
public boolean isTerminated() {
- return runStateOf(runControl) == TERMINATED;
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) == -parallelism);
}
/**
- * Returns <code>true</code> if the process of termination has
- * commenced but possibly not yet completed.
+ * Returns {@code true} if the process of termination has
+ * commenced but not yet completed. This method may be useful for
+ * debugging. A return of {@code true} reported a sufficient
+ * period after shutdown may indicate that submitted tasks have
+ * ignored or suppressed interruption, or are waiting for IO,
+ * causing this executor not to properly terminate. (See the
+ * advisory notes for class {@link ForkJoinTask} stating that
+ * tasks should not normally entail blocking operations. But if
+ * they do, they must abort them on interrupt.)
*
- * @return <code>true</code> if terminating
+ * @return {@code true} if terminating but not yet terminated
*/
public boolean isTerminating() {
- return runStateOf(runControl) >= TERMINATING;
+ long c = ctl;
+ return ((c & STOP_BIT) != 0L &&
+ (short)(c >>> TC_SHIFT) != -parallelism);
}
/**
- * Returns <code>true</code> if this pool has been shut down.
+ * Returns {@code true} if this pool has been shut down.
*
- * @return <code>true</code> if this pool has been shut down
+ * @return {@code true} if this pool has been shut down
*/
public boolean isShutdown() {
- return runStateOf(runControl) >= SHUTDOWN;
+ return runState < 0;
}
/**
@@ -1120,14 +2681,14 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
*
* @param timeout the maximum time to wait
* @param unit the time unit of the timeout argument
- * @return <code>true</code> if this executor terminated and
- * <code>false</code> if the timeout elapsed before termination
+ * @return {@code true} if this executor terminated and
+ * {@code false} if the timeout elapsed before termination
* @throws InterruptedException if interrupted while waiting
*/
public boolean awaitTermination(long timeout, TimeUnit unit)
throws InterruptedException {
long nanos = unit.toNanos(timeout);
- final ReentrantLock lock = this.workerLock;
+ final Mutex lock = this.lock;
lock.lock();
try {
for (;;) {
@@ -1142,729 +2703,189 @@ public class ForkJoinPool /*extends AbstractExecutorService*/ {
}
}
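Together, the lifecycle methods above support the usual orderly-shutdown idiom (sketch; {@code pool} is a hypothetical instance):

    pool.shutdown();                                  // stop accepting new work
    if (!pool.awaitTermination(60, TimeUnit.SECONDS))
        pool.shutdownNow();                           // cancel whatever remains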
- // Shutdown and termination support
-
- /**
- * Callback from terminating worker. Null out the corresponding
- * workers slot, and if terminating, try to terminate, else try to
- * shrink workers array.
- * @param w the worker
- */
- final void workerTerminated(ForkJoinWorkerThread w) {
- updateStealCount(w);
- updateWorkerCount(-1);
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- int idx = w.poolIndex;
- if (idx >= 0 && idx < ws.length && ws[idx] == w)
- ws[idx] = null;
- if (totalCountOf(workerCounts) == 0) {
- terminate(); // no-op if already terminating
- transitionRunStateTo(TERMINATED);
- termination.signalAll();
- }
- else if (!isTerminating()) {
- tryShrinkWorkerArray();
- tryResumeSpare(true); // allow replacement
- }
- }
- } finally {
- lock.unlock();
- }
- signalIdleWorkers();
- }
-
- /**
- * Initiate termination.
- */
- private void terminate() {
- if (transitionRunStateTo(TERMINATING)) {
- stopAllWorkers();
- resumeAllSpares();
- signalIdleWorkers();
- cancelQueuedSubmissions();
- cancelQueuedWorkerTasks();
- interruptUnterminatedWorkers();
- signalIdleWorkers(); // resignal after interrupt
- }
- }
-
- /**
- * Possibly terminates when in shutdown state.
- */
- private void terminateOnShutdown() {
- if (!hasQueuedSubmissions() && canTerminateOnShutdown(runControl))
- terminate();
- }
-
- /**
- * Clear out and cancel submissions
- */
- private void cancelQueuedSubmissions() {
- ForkJoinTask<?> task;
- while ((task = pollSubmission()) != null)
- task.cancel(false);
- }
-
- /**
- * Clean out worker queues.
- */
- private void cancelQueuedWorkerTasks() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.cancelTasks();
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Set each worker's status to terminating. Requires lock to avoid
- * conflicts with add/remove
- */
- private void stopAllWorkers() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null)
- t.shutdownNow();
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Interrupt all unterminated workers. This is not required for
- * the sake of internal control, but may help unstick user code during
- * shutdown.
- */
- private void interruptUnterminatedWorkers() {
- final ReentrantLock lock = this.workerLock;
- lock.lock();
- try {
- ForkJoinWorkerThread[] ws = workers;
- if (ws != null) {
- for (int i = 0; i < ws.length; ++i) {
- ForkJoinWorkerThread t = ws[i];
- if (t != null && !t.isTerminated()) {
- try {
- t.interrupt();
- } catch (SecurityException ignore) {
- }
- }
- }
- }
- } finally {
- lock.unlock();
- }
- }
-
-
- /*
- * Nodes for event barrier to manage idle threads. Queue nodes
- * are basic Treiber stack nodes, also used for spare stack.
- *
- * The event barrier has an event count and a wait queue (actually
- * a Treiber stack). Workers are enabled to look for work when
- * the eventCount is incremented. If they fail to find work, they
- * may wait for next count. Upon release, threads help others wake
- * up.
- *
- * Synchronization events occur only in enough contexts to
- * maintain overall liveness:
- *
- * - Submission of a new task to the pool
- * - Resizes or other changes to the workers array
- * - pool termination
- * - A worker pushing a task on an empty queue
- *
- * The case of pushing a task occurs often enough, and is heavy
- * enough compared to simple stack pushes, to require special
- * handling: Method signalWork returns without advancing count if
- * the queue appears to be empty. This would ordinarily result in
- * races causing some queued waiters not to be woken up. To avoid
- * this, the first worker enqueued in method sync (see
- * syncIsReleasable) rescans for tasks after being enqueued, and
- * helps signal if any are found. This works well because the
- * worker has nothing better to do, and so might as well help
- * alleviate the overhead and contention on the threads actually
- * doing work. Also, since event counts increments on task
- * availability exist to maintain liveness (rather than to force
- * refreshes etc), it is OK for callers to exit early if
- * contending with another signaller.
- */
- static final class WaitQueueNode {
- WaitQueueNode next; // only written before enqueued
- volatile ForkJoinWorkerThread thread; // nulled to cancel wait
- final long count; // unused for spare stack
-
- WaitQueueNode(long c, ForkJoinWorkerThread w) {
- count = c;
- thread = w;
- }
-
- /**
- * Wakes up waiter, returning false if known to already be awake.
- */
- boolean signal() {
- ForkJoinWorkerThread t = thread;
- if (t == null)
- return false;
- thread = null;
- LockSupport.unpark(t);
- return true;
- }
-
- /**
- * Await release on sync
- */
- void awaitSyncRelease(ForkJoinPool p) {
- while (thread != null && !p.syncIsReleasable(this))
- LockSupport.park(this);
- }
-
- /**
- * Await resumption as spare
- */
- void awaitSpareRelease() {
- while (thread != null) {
- if (!Thread.interrupted())
- LockSupport.park(this);
- }
- }
- }
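Both the event barrier and the spare queue in this removed block are Treiber stacks: LIFO lists whose head is swapped by CAS. A generic, self-contained sketch of that pattern (illustrative code, not the pool's own implementation):

    final class TreiberStack<T> {
        static final class Node<T> {
            final T item;
            Node<T> next;            // written before the CAS publishes the node
            Node(T item) { this.item = item; }
        }
        final java.util.concurrent.atomic.AtomicReference<Node<T>> top =
            new java.util.concurrent.atomic.AtomicReference<Node<T>>();
        void push(T item) {
            Node<T> n = new Node<T>(item);
            do { n.next = top.get(); } while (!top.compareAndSet(n.next, n));
        }
        T pop() {
            for (;;) {
                Node<T> t = top.get();
                if (t == null)
                    return null;     // stack empty
                if (top.compareAndSet(t, t.next))
                    return t.item;
            }
        }
    }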
-
- /**
- * Ensures that no thread is waiting for count to advance from the
- * current value of eventCount read on entry to this method, by
- * releasing waiting threads if necessary.
- * @return the count
- */
- final long ensureSync() {
- long c = eventCount;
- WaitQueueNode q;
- while ((q = syncStack) != null && q.count < c) {
- if (casBarrierStack(q, null)) {
- do {
- q.signal();
- } while ((q = q.next) != null);
- break;
- }
- }
- return c;
- }
-
- /**
- * Increments event count and releases waiting threads.
- */
- private void signalIdleWorkers() {
- long c;
- do;while (!casEventCount(c = eventCount, c+1));
- ensureSync();
- }
-
- /**
- * Signal threads waiting to poll a task. Because method sync
- * rechecks availability, it is OK to only proceed if queue
- * appears to be non-empty, and OK to skip under contention to
- * increment count (since some other thread succeeded).
- */
- final void signalWork() {
- long c;
- WaitQueueNode q;
- if (syncStack != null &&
- casEventCount(c = eventCount, c+1) &&
- (((q = syncStack) != null && q.count <= c) &&
- (!casBarrierStack(q, q.next) || !q.signal())))
- ensureSync();
- }
-
- /**
- * Waits until event count advances from last value held by
- * caller, or if excess threads, caller is resumed as spare, or
- * caller or pool is terminating. Updates caller's event on exit.
- * @param w the calling worker thread
- */
- final void sync(ForkJoinWorkerThread w) {
- updateStealCount(w); // Transfer w's count while it is idle
-
- while (!w.isShutdown() && !isTerminating() && !suspendIfSpare(w)) {
- long prev = w.lastEventCount;
- WaitQueueNode node = null;
- WaitQueueNode h;
- while (eventCount == prev &&
- ((h = syncStack) == null || h.count == prev)) {
- if (node == null)
- node = new WaitQueueNode(prev, w);
- if (casBarrierStack(node.next = h, node)) {
- node.awaitSyncRelease(this);
- break;
- }
- }
- long ec = ensureSync();
- if (ec != prev) {
- w.lastEventCount = ec;
- break;
- }
- }
- }
-
- /**
- * Returns true if worker waiting on sync can proceed:
- * - on signal (thread == null)
- * - on event count advance (winning race to notify vs signaller)
- * - on Interrupt
- * - if the first queued node, we find work available
- * If node was not signalled and event count not advanced on exit,
- * then we also help advance event count.
- * @return true if node can be released
- */
- final boolean syncIsReleasable(WaitQueueNode node) {
- long prev = node.count;
- if (!Thread.interrupted() && node.thread != null &&
- (node.next != null ||
- !ForkJoinWorkerThread.hasQueuedTasks(workers)) &&
- eventCount == prev)
- return false;
- if (node.thread != null) {
- node.thread = null;
- long ec = eventCount;
- if (prev <= ec) // help signal
- casEventCount(ec, ec+1);
- }
- return true;
- }
-
- /**
- * Returns true if a new sync event occurred since last call to
- * sync or this method, if so, updating caller's count.
- */
- final boolean hasNewSyncEvent(ForkJoinWorkerThread w) {
- long lc = w.lastEventCount;
- long ec = ensureSync();
- if (ec == lc)
- return false;
- w.lastEventCount = ec;
- return true;
- }
-
- // Parallelism maintenance
-
- /**
- * Decrement running count; if too low, add spare.
- *
- * Conceptually, all we need to do here is add or resume a
- * spare thread when one is about to block (and remove or
- * suspend it later when unblocked -- see suspendIfSpare).
- * However, implementing this idea requires coping with
- * several problems: We have imperfect information about the
- * states of threads. Some count updates can and usually do
- * lag run state changes, despite arrangements to keep them
- * accurate (for example, when possible, updating counts
- * before signalling or resuming), especially when running on
- * dynamic JVMs that don't optimize the infrequent paths that
- * update counts. Generating too many threads can make these
- * problems become worse, because excess threads are more
- * likely to be context-switched with others, slowing them all
- * down, especially if there is no work available, so all are
- * busy scanning or idling. Also, excess spare threads can
- * only be suspended or removed when they are idle, not
- * immediately when they aren't needed. So adding threads will
- * raise parallelism level for longer than necessary. Also,
- * FJ applications often encounter highly transient peaks when
- * many threads are blocked joining, but for less time than it
- * takes to create or resume spares.
- *
- * @param joinMe if non-null, return early if done
- * @param maintainParallelism if true, try to stay within
- * target counts, else create only to avoid starvation
- * @return true if joinMe known to be done
- */
- final boolean preJoin(ForkJoinTask<?> joinMe, boolean maintainParallelism) {
- maintainParallelism &= maintainsParallelism; // override
- boolean dec = false; // true when running count decremented
- while (spareStack == null || !tryResumeSpare(dec)) {
- int counts = workerCounts;
- if (dec || (dec = casWorkerCounts(counts, --counts))) { // CAS cheat
- if (!needSpare(counts, maintainParallelism))
- break;
- if (joinMe.status < 0)
- return true;
- if (tryAddSpare(counts))
- break;
- }
- }
- return false;
- }
-
- /**
- * Same idea as preJoin
- */
- final boolean preBlock(ManagedBlocker blocker, boolean maintainParallelism){
- maintainParallelism &= maintainsParallelism;
- boolean dec = false;
- while (spareStack == null || !tryResumeSpare(dec)) {
- int counts = workerCounts;
- if (dec || (dec = casWorkerCounts(counts, --counts))) {
- if (!needSpare(counts, maintainParallelism))
- break;
- if (blocker.isReleasable())
- return true;
- if (tryAddSpare(counts))
- break;
- }
- }
- return false;
- }
-
- /**
- * Returns true if a spare thread appears to be needed. If
- * maintaining parallelism, returns true when the deficit in
- * running threads is more than the surplus of total threads, and
- * there is apparently some work to do. This self-limiting rule
- * means that the more threads that have already been added, the
- * less parallelism we will tolerate before adding another.
- * @param counts current worker counts
- * @param maintainParallelism try to maintain parallelism
- */
- private boolean needSpare(int counts, boolean maintainParallelism) {
- int ps = parallelism;
- int rc = runningCountOf(counts);
- int tc = totalCountOf(counts);
- int runningDeficit = ps - rc;
- int totalSurplus = tc - ps;
- return (tc < maxPoolSize &&
- (rc == 0 || totalSurplus < 0 ||
- (maintainParallelism &&
- runningDeficit > totalSurplus &&
- ForkJoinWorkerThread.hasQueuedTasks(workers))));
- }
-
- /**
- * Add a spare worker if lock available and no more than the
- * expected number of threads exist
- * @return true if successful
- */
- private boolean tryAddSpare(int expectedCounts) {
- final ReentrantLock lock = this.workerLock;
- int expectedRunning = runningCountOf(expectedCounts);
- int expectedTotal = totalCountOf(expectedCounts);
- boolean success = false;
- boolean locked = false;
- // confirm counts while locking; CAS after obtaining lock
- try {
- for (;;) {
- int s = workerCounts;
- int tc = totalCountOf(s);
- int rc = runningCountOf(s);
- if (rc > expectedRunning || tc > expectedTotal)
- break;
- if (!locked && !(locked = lock.tryLock()))
- break;
- if (casWorkerCounts(s, workerCountsFor(tc+1, rc+1))) {
- createAndStartSpare(tc);
- success = true;
- break;
- }
- }
- } finally {
- if (locked)
- lock.unlock();
- }
- return success;
- }
-
- /**
- * Add the kth spare worker. On entry, pool counts are already
- * adjusted to reflect addition.
- */
- private void createAndStartSpare(int k) {
- ForkJoinWorkerThread w = null;
- ForkJoinWorkerThread[] ws = ensureWorkerArrayCapacity(k + 1);
- int len = ws.length;
- // Probably, we can place at slot k. If not, find empty slot
- if (k < len && ws[k] != null) {
- for (k = 0; k < len && ws[k] != null; ++k)
- ;
- }
- if (k < len && !isTerminating() && (w = createWorker(k)) != null) {
- ws[k] = w;
- w.start();
- }
- else
- updateWorkerCount(-1); // adjust on failure
- signalIdleWorkers();
- }
-
- /**
- * Suspend calling thread w if there are excess threads. Called
- * only from sync. Spares are enqueued in a Treiber stack
- * using the same WaitQueueNodes as barriers. They are resumed
- * mainly in preJoin, but are also woken on pool events that
- * require all threads to check run state.
- * @param w the caller
- */
- private boolean suspendIfSpare(ForkJoinWorkerThread w) {
- WaitQueueNode node = null;
- int s;
- while (parallelism < runningCountOf(s = workerCounts)) {
- if (node == null)
- node = new WaitQueueNode(0, w);
- if (casWorkerCounts(s, s-1)) { // representation-dependent
- // push onto stack
- do;while (!casSpareStack(node.next = spareStack, node));
- // block until released by resumeSpare
- node.awaitSpareRelease();
- return true;
- }
- }
- return false;
- }
-
- /**
- * Try to pop and resume a spare thread.
- * @param updateCount if true, increment running count on success
- * @return true if successful
- */
- private boolean tryResumeSpare(boolean updateCount) {
- WaitQueueNode q;
- while ((q = spareStack) != null) {
- if (casSpareStack(q, q.next)) {
- if (updateCount)
- updateRunningCount(1);
- q.signal();
- return true;
- }
- }
- return false;
- }
-
- /**
- * Pop and resume all spare threads. Same idea as ensureSync.
- * @return true if any spares released
- */
- private boolean resumeAllSpares() {
- WaitQueueNode q;
- while ( (q = spareStack) != null) {
- if (casSpareStack(q, null)) {
- do {
- updateRunningCount(1);
- q.signal();
- } while ((q = q.next) != null);
- return true;
- }
- }
- return false;
- }
-
- /**
- * Pop and shutdown excessive spare threads. Call only while
- * holding lock. This is not guaranteed to eliminate all excess
- * threads, only those suspended as spares, which are the ones
- * unlikely to be needed in the future.
- */
- private void trimSpares() {
- int surplus = totalCountOf(workerCounts) - parallelism;
- WaitQueueNode q;
- while (surplus > 0 && (q = spareStack) != null) {
- if (casSpareStack(q, null)) {
- do {
- updateRunningCount(1);
- ForkJoinWorkerThread w = q.thread;
- if (w != null && surplus > 0 &&
- runningCountOf(workerCounts) > 0 && w.shutdown())
- --surplus;
- q.signal();
- } while ((q = q.next) != null);
- }
- }
- }
-
/**
* Interface for extending managed parallelism for tasks running
- * in ForkJoinPools. A ManagedBlocker provides two methods.
- * Method <code>isReleasable</code> must return true if blocking is not
- * necessary. Method <code>block</code> blocks the current thread
- * if necessary (perhaps internally invoking isReleasable before
- * actually blocking.).
+ * in {@link ForkJoinPool}s.
+ *
+ * <p>A {@code ManagedBlocker} provides two methods. Method
+ * {@code isReleasable} must return {@code true} if blocking is
+ * not necessary. Method {@code block} blocks the current thread
+ * if necessary (perhaps internally invoking {@code isReleasable}
+ * before actually blocking). These actions are performed by any
+ * thread invoking {@link ForkJoinPool#managedBlock}. The
+ * unusual methods in this API accommodate synchronizers that may,
+ * but don't usually, block for long periods. Similarly, they
+ * allow more efficient internal handling of cases in which
+ * additional workers may be, but usually are not, needed to
+ * ensure sufficient parallelism. Toward this end,
+ * implementations of method {@code isReleasable} must be amenable
+ * to repeated invocation.
+ *
* <p>For example, here is a ManagedBlocker based on a
* ReentrantLock:
- * <pre>
- * class ManagedLocker implements ManagedBlocker {
- * final ReentrantLock lock;
- * boolean hasLock = false;
- * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
- * public boolean block() {
- * if (!hasLock)
- * lock.lock();
- * return true;
- * }
- * public boolean isReleasable() {
- * return hasLock || (hasLock = lock.tryLock());
- * }
+ * <pre> {@code
+ * class ManagedLocker implements ManagedBlocker {
+ * final ReentrantLock lock;
+ * boolean hasLock = false;
+ * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+ * public boolean block() {
+ * if (!hasLock)
+ * lock.lock();
+ * return true;
+ * }
+ * public boolean isReleasable() {
+ * return hasLock || (hasLock = lock.tryLock());
+ * }
+ * }}</pre>
+ *
+ * <p>Here is a class that possibly blocks waiting for an
+ * item on a given queue:
+ * <pre> {@code
+ * class QueueTaker<E> implements ManagedBlocker {
+ * final BlockingQueue<E> queue;
+ * volatile E item = null;
+ * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
+ * public boolean block() throws InterruptedException {
+ * if (item == null)
+ * item = queue.take();
+ * return true;
* }
- * </pre>
+ * public boolean isReleasable() {
+ * return item != null || (item = queue.poll()) != null;
+ * }
+ * public E getItem() { // call after pool.managedBlock completes
+ * return item;
+ * }
+ * }}</pre>
*/
public static interface ManagedBlocker {
/**
* Possibly blocks the current thread, for example waiting for
* a lock or condition.
- * @return true if no additional blocking is necessary (i.e.,
- * if isReleasable would return true).
+ *
+ * @return {@code true} if no additional blocking is necessary
+ * (i.e., if isReleasable would return true)
* @throws InterruptedException if interrupted while waiting
- * (the method is not required to do so, but is allowe to).
+ * (the method is not required to do so, but is allowed to)
*/
boolean block() throws InterruptedException;
/**
- * Returns true if blocking is unnecessary.
+ * Returns {@code true} if blocking is unnecessary.
*/
boolean isReleasable();
}
/**
* Blocks in accord with the given blocker. If the current thread
- * is a ForkJoinWorkerThread, this method possibly arranges for a
- * spare thread to be activated if necessary to ensure parallelism
- * while the current thread is blocked. If
- * <code>maintainParallelism</code> is true and the pool supports
- * it ({@link #getMaintainsParallelism}), this method attempts to
- * maintain the pool's nominal parallelism. Otherwise if activates
- * a thread only if necessary to avoid complete starvation. This
- * option may be preferable when blockages use timeouts, or are
- * almost always brief.
- *
- * <p> If the caller is not a ForkJoinTask, this method is behaviorally
- * equivalent to
- * <pre>
- * while (!blocker.isReleasable())
- * if (blocker.block())
- * return;
- * </pre>
- * If the caller is a ForkJoinTask, then the pool may first
- * be expanded to ensure parallelism, and later adjusted.
+ * is a {@link ForkJoinWorkerThread}, this method possibly
+ * arranges for a spare thread to be activated if necessary to
+ * ensure sufficient parallelism while the current thread is blocked.
+ *
+ * <p>If the caller is not a {@link ForkJoinTask}, this method is
+ * behaviorally equivalent to
+ * <pre> {@code
+ * while (!blocker.isReleasable())
+ * if (blocker.block())
+ * return;
+ * }</pre>
+ *
+ * If the caller is a {@code ForkJoinTask}, then the pool may
+ * first be expanded to ensure parallelism, and later adjusted.
*
* @param blocker the blocker
- * @param maintainParallelism if true and supported by this pool,
- * attempt to maintain the pool's nominal parallelism; otherwise
- * activate a thread only if necessary to avoid complete
- * starvation.
- * @throws InterruptedException if blocker.block did so.
- */
- public static void managedBlock(ManagedBlocker blocker,
- boolean maintainParallelism)
+ * @throws InterruptedException if blocker.block did so
+ */
+ public static void managedBlock(ManagedBlocker blocker)
throws InterruptedException {
Thread t = Thread.currentThread();
- ForkJoinPool pool = (t instanceof ForkJoinWorkerThread?
- ((ForkJoinWorkerThread)t).pool : null);
- if (!blocker.isReleasable()) {
- try {
- if (pool == null ||
- !pool.preBlock(blocker, maintainParallelism))
- awaitBlocker(blocker);
- } finally {
- if (pool != null)
- pool.updateRunningCount(1);
+ ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread)t).pool : null);
+ while (!blocker.isReleasable()) {
+ if (p == null || p.tryCompensate(null, blocker)) {
+ try {
+ do {} while (!blocker.isReleasable() && !blocker.block());
+ } finally {
+ if (p != null)
+ p.incrementActiveCount();
+ }
+ break;
}
}
}
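Tying this to the {@code QueueTaker} example in the {@code ManagedBlocker} javadoc above, a caller in a context that propagates InterruptedException would drive it like so (sketch; {@code queue} is a hypothetical BlockingQueue<String>):

    QueueTaker<String> taker = new QueueTaker<String>(queue);
    ForkJoinPool.managedBlock(taker);  // may compensate with a spare worker
    String item = taker.getItem();     // valid once managedBlock returns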
- private static void awaitBlocker(ManagedBlocker blocker)
- throws InterruptedException {
- do;while (!blocker.isReleasable() && !blocker.block());
- }
-
- // AbstractExecutorService overrides
+ // AbstractExecutorService overrides. These rely on undocumented
+ // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
+ // implement RunnableFuture.
protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
- return new AdaptedRunnable(runnable, value);
+ return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
}
protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
- return new AdaptedCallable(callable);
+ return new ForkJoinTask.AdaptedCallable<T>(callable);
}
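The same adapters are publicly reachable through {@code ForkJoinTask.adapt}; a sketch of wrapping a plain {@code Callable} for this pool ({@code pool} is a hypothetical instance):

    ForkJoinTask<String> task = ForkJoinTask.adapt(new Callable<String>() {
        public String call() { return "done"; }
    });
    String result = pool.invoke(task);  // run and join in the caller's thread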
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long CTL;
+ private static final long PARKBLOCKER;
+ private static final int ABASE;
+ private static final int ASHIFT;
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ static {
+ poolNumberGenerator = new AtomicInteger();
+ nextSubmitterSeed = new AtomicInteger(0x55555555);
+ modifyThreadPermission = new RuntimePermission("modifyThread");
+ defaultForkJoinWorkerThreadFactory =
+ new DefaultForkJoinWorkerThreadFactory();
+ submitters = new ThreadSubmitter();
+ int s;
try {
- return Unsafe.getUnsafe();
+ U = getUnsafe();
+ Class<?> k = ForkJoinPool.class;
+ Class<?> ak = ForkJoinTask[].class;
+ CTL = U.objectFieldOffset
+ (k.getDeclaredField("ctl"));
+ Class<?> tk = Thread.class;
+ PARKBLOCKER = U.objectFieldOffset
+ (tk.getDeclaredField("parkBlocker"));
+ ABASE = U.arrayBaseOffset(ak);
+ s = U.arrayIndexScale(ak);
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ if ((s & (s-1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (ForkJoinPool.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long eventCountOffset;
- static final long workerCountsOffset;
- static final long runControlOffset;
- static final long syncStackOffset;
- static final long spareStackOffset;
-
- static {
- try {
- _unsafe = getUnsafe();
- eventCountOffset = fieldOffset("eventCount");
- workerCountsOffset = fieldOffset("workerCounts");
- runControlOffset = fieldOffset("runControl");
- syncStackOffset = fieldOffset("syncStack");
- spareStackOffset = fieldOffset("spareStack");
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
-
- private boolean casEventCount(long cmp, long val) {
- return _unsafe.compareAndSwapLong(this, eventCountOffset, cmp, val);
- }
- private boolean casWorkerCounts(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, workerCountsOffset, cmp, val);
- }
- private boolean casRunControl(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, runControlOffset, cmp, val);
- }
- private boolean casSpareStack(WaitQueueNode cmp, WaitQueueNode val) {
- return _unsafe.compareAndSwapObject(this, spareStackOffset, cmp, val);
- }
- private boolean casBarrierStack(WaitQueueNode cmp, WaitQueueNode val) {
- return _unsafe.compareAndSwapObject(this, syncStackOffset, cmp, val);
- }
}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
index dc1a6bcccc..344f6887a6 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
@@ -1,470 +1,597 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
import java.io.Serializable;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+import java.util.Collection;
+import java.util.List;
+import java.util.RandomAccess;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+//import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.lang.reflect.Constructor;
/**
- * Abstract base class for tasks that run within a {@link
- * ForkJoinPool}. A ForkJoinTask is a thread-like entity that is much
+ * Abstract base class for tasks that run within a {@link ForkJoinPool}.
+ * A {@code ForkJoinTask} is a thread-like entity that is much
* lighter weight than a normal thread. Huge numbers of tasks and
* subtasks may be hosted by a small number of actual threads in a
* ForkJoinPool, at the price of some usage limitations.
*
- * <p> A "main" ForkJoinTask begins execution when submitted to a
- * {@link ForkJoinPool}. Once started, it will usually in turn start
- * other subtasks. As indicated by the name of this class, many
- * programs using ForkJoinTasks employ only methods <code>fork</code>
- * and <code>join</code>, or derivatives such as
- * <code>invokeAll</code>. However, this class also provides a number
- * of other methods that can come into play in advanced usages, as
- * well as extension mechanics that allow support of new forms of
- * fork/join processing.
+ * <p>A "main" {@code ForkJoinTask} begins execution when submitted
+ * to a {@link ForkJoinPool}. Once started, it will usually in turn
+ * start other subtasks. As indicated by the name of this class,
+ * many programs using {@code ForkJoinTask} employ only methods
+ * {@link #fork} and {@link #join}, or derivatives such as {@link
+ * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
+ * provides a number of other methods that can come into play in
+ * advanced usages, as well as extension mechanics that allow
+ * support of new forms of fork/join processing.
*
- * <p>A ForkJoinTask is a lightweight form of {@link Future}. The
- * efficiency of ForkJoinTasks stems from a set of restrictions (that
- * are only partially statically enforceable) reflecting their
- * intended use as computational tasks calculating pure functions or
- * operating on purely isolated objects. The primary coordination
- * mechanisms are {@link #fork}, that arranges asynchronous execution,
- * and {@link #join}, that doesn't proceed until the task's result has
- * been computed. Computations should avoid <code>synchronized</code>
- * methods or blocks, and should minimize other blocking
- * synchronization apart from joining other tasks or using
- * synchronizers such as Phasers that are advertised to cooperate with
- * fork/join scheduling. Tasks should also not perform blocking IO,
- * and should ideally access variables that are completely independent
- * of those accessed by other running tasks. Minor breaches of these
- * restrictions, for example using shared output streams, may be
- * tolerable in practice, but frequent use may result in poor
- * performance, and the potential to indefinitely stall if the number
- * of threads not waiting for IO or other external synchronization
- * becomes exhausted. This usage restriction is in part enforced by
- * not permitting checked exceptions such as <code>IOExceptions</code>
- * to be thrown. However, computations may still encounter unchecked
- * exceptions, that are rethrown to callers attempting join
- * them. These exceptions may additionally include
- * RejectedExecutionExceptions stemming from internal resource
- * exhaustion such as failure to allocate internal task queues.
+ * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
+ * The efficiency of {@code ForkJoinTask}s stems from a set of
+ * restrictions (that are only partially statically enforceable)
+ * reflecting their main use as computational tasks calculating pure
+ * functions or operating on purely isolated objects. The primary
+ * coordination mechanisms are {@link #fork}, that arranges
+ * asynchronous execution, and {@link #join}, that doesn't proceed
+ * until the task's result has been computed. Computations should
+ * ideally avoid {@code synchronized} methods or blocks, and should
+ * minimize other blocking synchronization apart from joining other
+ * tasks or using synchronizers such as Phasers that are advertised to
+ * cooperate with fork/join scheduling. Subdividable tasks should also
+ * not perform blocking IO, and should ideally access variables that
+ * are completely independent of those accessed by other running
+ * tasks. These guidelines are loosely enforced by not permitting
+ * checked exceptions such as {@code IOExceptions} to be
+ * thrown. However, computations may still encounter unchecked
+ * exceptions, that are rethrown to callers attempting to join
+ * them. These exceptions may additionally include {@link
+ * RejectedExecutionException} stemming from internal resource
+ * exhaustion, such as failure to allocate internal task
+ * queues. Rethrown exceptions behave in the same way as regular
+ * exceptions, but, when possible, contain stack traces (as displayed
+ * for example using {@code ex.printStackTrace()}) of both the thread
+ * that initiated the computation as well as the thread actually
+ * encountering the exception; minimally only the latter.
+ *
+ * <p>It is possible to define and use ForkJoinTasks that may block,
+ * but doing so requires three further considerations: (1) Completion
+ * of few if any <em>other</em> tasks should be dependent on a task
+ * that blocks on external synchronization or IO. Event-style async
+ * tasks that are never joined often fall into this category. (2) To
+ * minimize resource impact, tasks should be small; ideally performing
+ * only the (possibly) blocking action. (3) Unless the {@link
+ * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
+ * blocked tasks is known to be less than the pool's {@link
+ * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
+ * enough threads will be available to ensure progress or good
+ * performance.
*
* <p>The primary method for awaiting completion and extracting
* results of a task is {@link #join}, but there are several variants:
* The {@link Future#get} methods support interruptible and/or timed
- * waits for completion and report results using <code>Future</code>
- * conventions. Method {@link #helpJoin} enables callers to actively
- * execute other tasks while awaiting joins, which is sometimes more
- * efficient but only applies when all subtasks are known to be
- * strictly tree-structured. Method {@link #invoke} is semantically
- * equivalent to <code>fork(); join()</code> but always attempts to
- * begin execution in the current thread. The "<em>quiet</em>" forms
- * of these methods do not extract results or report exceptions. These
+ * waits for completion and report results using {@code Future}
+ * conventions. Method {@link #invoke} is semantically
+ * equivalent to {@code fork(); join()} but always attempts to begin
+ * execution in the current thread. The "<em>quiet</em>" forms of
+ * these methods do not extract results or report exceptions. These
* may be useful when a set of tasks are being executed, and you need
* to delay processing of results or exceptions until all complete.
- * Method <code>invokeAll</code> (available in multiple versions)
+ * Method {@code invokeAll} (available in multiple versions)
* performs the most common form of parallel invocation: forking a set
* of tasks and joining them all.
*
- * <p> The ForkJoinTask class is not usually directly subclassed.
+ * <p>In the most typical usages, a fork-join pair act like a call
+ * (fork) and return (join) from a parallel recursive function. As is
+ * the case with other forms of recursive calls, returns (joins)
+ * should be performed innermost-first. For example, {@code a.fork();
+ * b.fork(); b.join(); a.join();} is likely to be substantially more
+ * efficient than joining {@code a} before {@code b}.
+ *
+ * <p>The execution status of tasks may be queried at several levels
+ * of detail: {@link #isDone} is true if a task completed in any way
+ * (including the case where a task was cancelled without executing);
+ * {@link #isCompletedNormally} is true if a task completed without
+ * cancellation or encountering an exception; {@link #isCancelled} is
+ * true if the task was cancelled (in which case {@link #getException}
+ * returns a {@link java.util.concurrent.CancellationException}); and
+ * {@link #isCompletedAbnormally} is true if a task was either
+ * cancelled or encountered an exception, in which case {@link
+ * #getException} will return either the encountered exception or
+ * {@link java.util.concurrent.CancellationException}.
+ *
+ * <p>The ForkJoinTask class is not usually directly subclassed.
* Instead, you subclass one of the abstract classes that support a
- * particular style of fork/join processing. Normally, a concrete
+ * particular style of fork/join processing, typically {@link
+ * RecursiveAction} for computations that do not return results, or
+ * {@link RecursiveTask} for those that do. Normally, a concrete
* ForkJoinTask subclass declares fields comprising its parameters,
- * established in a constructor, and then defines a <code>compute</code>
+ * established in a constructor, and then defines a {@code compute}
* method that somehow uses the control methods supplied by this base
- * class. While these methods have <code>public</code> access (to allow
- * instances of different task subclasses to call each others
+ * class. While these methods have {@code public} access (to allow
+ * instances of different task subclasses to call each other's
* methods), some of them may only be called from within other
- * ForkJoinTasks. Attempts to invoke them in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * ForkJoinTasks (as may be determined using method {@link
+ * #inForkJoinPool}). Attempts to invoke them in other contexts
+ * result in exceptions or errors, possibly including
+ * {@code ClassCastException}.
*
- * <p>Most base support methods are <code>final</code> because their
- * implementations are intrinsically tied to the underlying
- * lightweight task scheduling framework, and so cannot be overridden.
- * Developers creating new basic styles of fork/join processing should
- * minimally implement <code>protected</code> methods
- * <code>exec</code>, <code>setRawResult</code>, and
- * <code>getRawResult</code>, while also introducing an abstract
- * computational method that can be implemented in its subclasses,
- * possibly relying on other <code>protected</code> methods provided
- * by this class.
+ * <p>Method {@link #join} and its variants are appropriate for use
+ * only when completion dependencies are acyclic; that is, the
+ * parallel computation can be described as a directed acyclic graph
+ * (DAG). Otherwise, executions may encounter a form of deadlock as
+ * tasks cyclically wait for each other. However, this framework
+ * supports other methods and techniques (for example the use of
+ * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
+ * may be of use in constructing custom subclasses for problems that
+ * are not statically structured as DAGs. To support such usages a
+ * ForkJoinTask may be atomically <em>marked</em> using {@link
+ * #markForkJoinTask} and checked for marking using {@link
+ * #isMarkedForkJoinTask}. The ForkJoinTask implementation does not
+ * use these {@code protected} methods or marks for any purpose, but
+ * they may be of use in the construction of specialized subclasses.
+ * For example, parallel graph traversals can use the supplied methods
+ * to avoid revisiting nodes/tasks that have already been processed.
+ * Also, completion based designs can use them to record that one
+ * subtask has completed. (Method names for marking are bulky in part
+ * to encourage definition of methods that reflect their usage
+ * patterns.)
+ *
+ * <p>Most base support methods are {@code final}, to prevent
+ * overriding of implementations that are intrinsically tied to the
+ * underlying lightweight task scheduling framework. Developers
+ * creating new basic styles of fork/join processing should minimally
+ * implement {@code protected} methods {@link #exec}, {@link
+ * #setRawResult}, and {@link #getRawResult}, while also introducing
+ * an abstract computational method that can be implemented in its
+ * subclasses, possibly relying on other {@code protected} methods
+ * provided by this class.
*
* <p>ForkJoinTasks should perform relatively small amounts of
- * computations, othewise splitting into smaller tasks. As a very
- * rough rule of thumb, a task should perform more than 100 and less
- * than 10000 basic computational steps. If tasks are too big, then
- * parellelism cannot improve throughput. If too small, then memory
- * and internal task maintenance overhead may overwhelm processing.
+ * computation. Large tasks should be split into smaller subtasks,
+ * usually via recursive decomposition. As a very rough rule of thumb,
+ * a task should perform more than 100 and less than 10000 basic
+ * computational steps, and should avoid indefinite looping. If tasks
+ * are too big, then parallelism cannot improve throughput. If too
+ * small, then memory and internal task maintenance overhead may
+ * overwhelm processing.
+ *
+ * <p>This class provides {@code adapt} methods for {@link Runnable}
+ * and {@link Callable}, that may be of use when mixing execution of
+ * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
+ * of this form, consider using a pool constructed in <em>asyncMode</em>.
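+ *
+ * <p>Illustratively (with {@code pool} an existing {@link
+ * ForkJoinPool} and {@code io} an assumed object):
+ * <pre> {@code
+ * ForkJoinTask<?> t = ForkJoinTask.adapt(new Runnable() {
+ *   public void run() { io.flush(); }
+ * });
+ * pool.execute(t);}</pre>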
*
- * <p>ForkJoinTasks are <code>Serializable</code>, which enables them
- * to be used in extensions such as remote execution frameworks. It is
- * in general sensible to serialize tasks only before or after, but
- * not during execution. Serialization is not relied on during
- * execution itself.
+ * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
+ * used in extensions such as remote execution frameworks. It is
+ * sensible to serialize tasks only before or after, but not during,
+ * execution. Serialization is not relied on during execution itself.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
- /**
- * Run control status bits packed into a single int to minimize
- * footprint and to ensure atomicity (via CAS). Status is
- * initially zero, and takes on nonnegative values until
- * completed, upon which status holds COMPLETED. CANCELLED, or
- * EXCEPTIONAL, which use the top 3 bits. Tasks undergoing
- * blocking waits by other threads have SIGNAL_MASK bits set --
- * bit 15 for external (nonFJ) waits, and the rest a count of
- * waiting FJ threads. (This representation relies on
- * ForkJoinPool max thread limits). Completion of a stolen task
- * with SIGNAL_MASK bits set awakens waiter via notifyAll. Even
- * though suboptimal for some purposes, we use basic builtin
- * wait/notify to take advantage of "monitor inflation" in JVMs
- * that we would otherwise need to emulate to avoid adding further
- * per-task bookkeeping overhead. Note that bits 16-28 are
- * currently unused. Also value 0x80000000 is available as spare
- * completion value.
+ /*
+ * See the internal documentation of class ForkJoinPool for a
+ * general implementation overview. ForkJoinTasks are mainly
+ * responsible for maintaining their "status" field amidst relays
+ * to methods in ForkJoinWorkerThread and ForkJoinPool.
+ *
+ * The methods of this class are more-or-less layered into
+ * (1) basic status maintenance
+ * (2) execution and awaiting completion
+ * (3) user-level methods that additionally report results.
+ * This is sometimes hard to see because this file orders exported
+ * methods in a way that flows well in javadocs.
*/
- volatile int status; // accessed directy by pool and workers
- static final int COMPLETION_MASK = 0xe0000000;
- static final int NORMAL = 0xe0000000; // == mask
- static final int CANCELLED = 0xc0000000;
- static final int EXCEPTIONAL = 0xa0000000;
- static final int SIGNAL_MASK = 0x0000ffff;
- static final int INTERNAL_SIGNAL_MASK = 0x00007fff;
- static final int EXTERNAL_SIGNAL = 0x00008000; // top bit of low word
-
- /**
- * Table of exceptions thrown by tasks, to enable reporting by
- * callers. Because exceptions are rare, we don't directly keep
- * them with task objects, but instead us a weak ref table. Note
- * that cancellation exceptions don't appear in the table, but are
- * instead recorded as status values.
- * Todo: Use ConcurrentReferenceHashMap
+ /*
+ * The status field holds run control status bits packed into a
+ * single int to minimize footprint and to ensure atomicity (via
+ * CAS). Status is initially zero, and takes on nonnegative
+ * values until completed, upon which status (anded with
+ * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
+ * undergoing blocking waits by other threads have the SIGNAL bit
+ * set. Completion of a stolen task with SIGNAL set awakens any
+ * waiters via notifyAll. Even though suboptimal for some
+ * purposes, we use basic builtin wait/notify to take advantage of
+ * "monitor inflation" in JVMs that we would otherwise need to
+ * emulate to avoid adding further per-task bookkeeping overhead.
+ * We want these monitors to be "fat", i.e., not use biasing or
+ * thin-lock techniques, so use some odd coding idioms that tend
+ * to avoid them, mainly by arranging that every synchronized
+ * block performs a wait, notifyAll or both.
*/
- static final Map<ForkJoinTask<?>, Throwable> exceptionMap =
- Collections.synchronizedMap
- (new WeakHashMap<ForkJoinTask<?>, Throwable>());
- // within-package utilities
+ /** The run status of this task */
+ volatile int status; // accessed directly by pool and workers
+ static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
+ static final int NORMAL = 0xf0000000; // must be negative
+ static final int CANCELLED = 0xc0000000; // must be < NORMAL
+ static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
+ static final int SIGNAL = 0x00000001;
+ static final int MARKED = 0x00000002;
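+
+ // Thus a task is done iff status < 0; its completion kind is then
+ // (status & DONE_MASK): NORMAL, CANCELLED, or EXCEPTIONAL. SIGNAL
+ // and MARKED occupy low bits and may be set while still running.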
/**
- * Get current worker thread, or null if not a worker thread
- */
- static ForkJoinWorkerThread getWorker() {
- Thread t = Thread.currentThread();
- return ((t instanceof ForkJoinWorkerThread)?
- (ForkJoinWorkerThread)t : null);
- }
-
- final boolean casStatus(int cmp, int val) {
- return _unsafe.compareAndSwapInt(this, statusOffset, cmp, val);
- }
-
- /**
- * Workaround for not being able to rethrow unchecked exceptions.
- */
- static void rethrowException(Throwable ex) {
- if (ex != null)
- _unsafe.throwException(ex);
- }
-
- // Setting completion status
-
- /**
- * Mark completion and wake up threads waiting to join this task.
+ * Marks completion and wakes up threads waiting to join this
+ * task. A specialization for NORMAL completion is in method
+ * doExec.
+ *
* @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+ * @return completion status on exit
*/
- final void setCompletion(int completion) {
- ForkJoinPool pool = getPool();
- if (pool != null) {
- int s; // Clear signal bits while setting completion status
- do;while ((s = status) >= 0 && !casStatus(s, completion));
-
- if ((s & SIGNAL_MASK) != 0) {
- if ((s &= INTERNAL_SIGNAL_MASK) != 0)
- pool.updateRunningCount(s);
- synchronized(this) { notifyAll(); }
+ private int setCompletion(int completion) {
+ for (int s;;) {
+ if ((s = status) < 0)
+ return s;
+ if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
+ if ((s & SIGNAL) != 0)
+ synchronized (this) { notifyAll(); }
+ return completion;
}
}
- else
- externallySetCompletion(completion);
- }
-
- /**
- * Version of setCompletion for non-FJ threads. Leaves signal
- * bits for unblocked threads to adjust, and always notifies.
- */
- private void externallySetCompletion(int completion) {
- int s;
- do;while ((s = status) >= 0 &&
- !casStatus(s, (s & SIGNAL_MASK) | completion));
- synchronized(this) { notifyAll(); }
- }
-
- /**
- * Sets status to indicate normal completion
- */
- final void setNormalCompletion() {
- // Try typical fast case -- single CAS, no signal, not already done.
- // Manually expand casStatus to improve chances of inlining it
- if (!_unsafe.compareAndSwapInt(this, statusOffset, 0, NORMAL))
- setCompletion(NORMAL);
- }
-
- // internal waiting and notification
-
- /**
- * Performs the actual monitor wait for awaitDone
- */
- private void doAwaitDone() {
- // Minimize lock bias and in/de-flation effects by maximizing
- // chances of waiting inside sync
- try {
- while (status >= 0)
- synchronized(this) { if (status >= 0) wait(); }
- } catch (InterruptedException ie) {
- onInterruptedWait();
- }
}
/**
- * Performs the actual monitor wait for awaitDone
+ * Primary execution method for stolen tasks. Unless done, calls
+ * exec and records status if completed, but doesn't wait for
+ * completion otherwise.
+ *
+ * @return status on exit from this method
*/
- private void doAwaitDone(long startTime, long nanos) {
- synchronized(this) {
+ final int doExec() {
+ int s; boolean completed;
+ if ((s = status) >= 0) {
try {
- while (status >= 0) {
- long nt = nanos - System.nanoTime() - startTime;
- if (nt <= 0)
- break;
- wait(nt / 1000000, (int)(nt % 1000000));
+ completed = exec();
+ } catch (Throwable rex) {
+ return setExceptionalCompletion(rex);
+ }
+ while ((s = status) >= 0 && completed) {
+ if (U.compareAndSwapInt(this, STATUS, s, s | NORMAL)) {
+ if ((s & SIGNAL) != 0)
+ synchronized (this) { notifyAll(); }
+ return NORMAL;
}
- } catch (InterruptedException ie) {
- onInterruptedWait();
}
}
+ return s;
}
- // Awaiting completion
+ /**
+ * Tries to set SIGNAL status. Used by ForkJoinPool. Other
+ * variants are directly incorporated into externalAwaitDone etc.
+ *
+ * @return true if successful
+ */
+ final boolean trySetSignal() {
+ int s;
+ return U.compareAndSwapInt(this, STATUS, s = status, s | SIGNAL);
+ }
/**
- * Sets status to indicate there is joiner, then waits for join,
- * surrounded with pool notifications.
- * @return status upon exit
+ * Blocks a non-worker-thread until completion.
+ * @return status upon completion
*/
- private int awaitDone(ForkJoinWorkerThread w, boolean maintainParallelism) {
- ForkJoinPool pool = w == null? null : w.pool;
+ private int externalAwaitDone() {
+ boolean interrupted = false;
int s;
while ((s = status) >= 0) {
- if (casStatus(s, pool == null? s|EXTERNAL_SIGNAL : s+1)) {
- if (pool == null || !pool.preJoin(this, maintainParallelism))
- doAwaitDone();
- if (((s = status) & INTERNAL_SIGNAL_MASK) != 0)
- adjustPoolCountsOnUnblock(pool);
- break;
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
}
}
+ if (interrupted)
+ Thread.currentThread().interrupt();
return s;
}
/**
- * Timed version of awaitDone
- * @return status upon exit
+ * Blocks a non-worker-thread until completion or interruption.
*/
- private int awaitDone(ForkJoinWorkerThread w, long nanos) {
- ForkJoinPool pool = w == null? null : w.pool;
+ private int externalInterruptibleAwaitDone() throws InterruptedException {
int s;
+ if (Thread.interrupted())
+ throw new InterruptedException();
while ((s = status) >= 0) {
- if (casStatus(s, pool == null? s|EXTERNAL_SIGNAL : s+1)) {
- long startTime = System.nanoTime();
- if (pool == null || !pool.preJoin(this, false))
- doAwaitDone(startTime, nanos);
- if ((s = status) >= 0) {
- adjustPoolCountsOnCancelledWait(pool);
- s = status;
+ if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0)
+ wait();
+ else
+ notifyAll();
}
- if (s < 0 && (s & INTERNAL_SIGNAL_MASK) != 0)
- adjustPoolCountsOnUnblock(pool);
- break;
}
}
return s;
}
- /**
- * Notify pool that thread is unblocked. Called by signalled
- * threads when woken by non-FJ threads (which is atypical).
- */
- private void adjustPoolCountsOnUnblock(ForkJoinPool pool) {
- int s;
- do;while ((s = status) < 0 && !casStatus(s, s & COMPLETION_MASK));
- if (pool != null && (s &= INTERNAL_SIGNAL_MASK) != 0)
- pool.updateRunningCount(s);
- }
/**
- * Notify pool to adjust counts on cancelled or timed out wait
+ * Implementation for join, get, quietlyJoin. Directly handles
+ * only cases of already-completed, external wait, and
+ * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
+ *
+ * @return status upon completion
*/
- private void adjustPoolCountsOnCancelledWait(ForkJoinPool pool) {
- if (pool != null) {
- int s;
- while ((s = status) >= 0 && (s & INTERNAL_SIGNAL_MASK) != 0) {
- if (casStatus(s, s - 1)) {
- pool.updateRunningCount(1);
- break;
- }
+ private int doJoin() {
+ int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
+ if ((s = status) >= 0) {
+ if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
+ if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue).
+ tryUnpush(this) || (s = doExec()) >= 0)
+ s = wt.pool.awaitJoin(w, this);
}
+ else
+ s = externalAwaitDone();
}
+ return s;
}
/**
- * Handle interruptions during waits.
+ * Implementation for invoke, quietlyInvoke.
+ *
+ * @return status upon completion
*/
- private void onInterruptedWait() {
- ForkJoinWorkerThread w = getWorker();
- if (w == null)
- Thread.currentThread().interrupt(); // re-interrupt
- else if (w.isTerminating())
- cancelIgnoringExceptions();
- // else if FJworker, ignore interrupt
+ private int doInvoke() {
+ int s; Thread t; ForkJoinWorkerThread wt;
+ if ((s = doExec()) >= 0) {
+ if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
+ s = (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue,
+ this);
+ else
+ s = externalAwaitDone();
+ }
+ return s;
}
- // Recording and reporting exceptions
+ // Exception table support
- private void setDoneExceptionally(Throwable rex) {
- exceptionMap.put(this, rex);
- setCompletion(EXCEPTIONAL);
- }
+ /**
+ * Table of exceptions thrown by tasks, to enable reporting by
+ * callers. Because exceptions are rare, we don't directly keep
+ * them with task objects, but instead use a weak ref table. Note
+ * that cancellation exceptions don't appear in the table, but are
+ * instead recorded as status values.
+ *
+ * Note: These statics are initialized below in a static block.
+ */
+ private static final ExceptionNode[] exceptionTable;
+ private static final ReentrantLock exceptionTableLock;
+ private static final ReferenceQueue<Object> exceptionTableRefQueue;
/**
- * Throws the exception associated with status s;
- * @throws the exception
+ * Fixed capacity for exceptionTable.
*/
- private void reportException(int s) {
- if ((s &= COMPLETION_MASK) < NORMAL) {
- if (s == CANCELLED)
- throw new CancellationException();
- else
- rethrowException(exceptionMap.get(this));
+ private static final int EXCEPTION_MAP_CAPACITY = 32;
+
+ /**
+ * Key-value nodes for exception table. The chained hash table
+ * uses identity comparisons, full locking, and weak references
+ * for keys. The table has a fixed capacity because it only
+ * maintains task exceptions long enough for joiners to access
+ * them, so should never become very large for sustained
+ * periods. However, since we do not know when the last joiner
+ * completes, we must use weak references and expunge them. We do
+ * so on each operation (hence full locking). Also, some thread in
+ * any ForkJoinPool will call helpExpungeStaleExceptions when its
+ * pool becomes isQuiescent.
+ */
+ static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
+ final Throwable ex;
+ ExceptionNode next;
+ final long thrower; // use id not ref to avoid weak cycles
+ ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
+ super(task, exceptionTableRefQueue);
+ this.ex = ex;
+ this.next = next;
+ this.thrower = Thread.currentThread().getId();
}
}
/**
- * Returns result or throws exception using j.u.c.Future conventions
- * Only call when isDone known to be true.
+ * Records exception and sets exceptional completion.
+ *
+ * @return status on exit
*/
- private V reportFutureResult()
- throws ExecutionException, InterruptedException {
- int s = status & COMPLETION_MASK;
- if (s < NORMAL) {
- Throwable ex;
- if (s == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
- throw new ExecutionException(ex);
- if (Thread.interrupted())
- throw new InterruptedException();
+ private int setExceptionalCompletion(Throwable ex) {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ for (ExceptionNode e = t[i]; ; e = e.next) {
+ if (e == null) {
+ t[i] = new ExceptionNode(this, ex, t[i]);
+ break;
+ }
+ if (e.get() == this) // already present
+ break;
+ }
+ } finally {
+ lock.unlock();
}
- return getRawResult();
+ return setCompletion(EXCEPTIONAL);
}
/**
- * Returns result or throws exception using j.u.c.Future conventions
- * with timeouts
+ * Cancels, ignoring any exceptions thrown by cancel. Used during
+ * worker and pool shutdown. Cancel is spec'ed not to throw any
+ * exceptions, but if it does anyway, we have no recourse during
+ * shutdown, so guard against this case.
*/
- private V reportTimedFutureResult()
- throws InterruptedException, ExecutionException, TimeoutException {
- Throwable ex;
- int s = status & COMPLETION_MASK;
- if (s == NORMAL)
- return getRawResult();
- if (s == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
- throw new ExecutionException(ex);
- if (Thread.interrupted())
- throw new InterruptedException();
- throw new TimeoutException();
+ static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
+ if (t != null && t.status >= 0) {
+ try {
+ t.cancel(false);
+ } catch (Throwable ignore) {
+ }
+ }
}
- // internal execution methods
-
/**
- * Calls exec, recording completion, and rethrowing exception if
- * encountered. Caller should normally check status before calling
- * @return true if completed normally
+ * Removes exception node and clears status.
*/
- private boolean tryExec() {
- try { // try block must contain only call to exec
- if (!exec())
- return false;
- } catch (Throwable rex) {
- setDoneExceptionally(rex);
- rethrowException(rex);
- return false; // not reached
+ private void clearExceptionalCompletion() {
+ int h = System.identityHashCode(this);
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ ExceptionNode[] t = exceptionTable;
+ int i = h & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e.get() == this) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ expungeStaleExceptions();
+ status = 0;
+ } finally {
+ lock.unlock();
}
- setNormalCompletion();
- return true;
}
/**
- * Main execution method used by worker threads. Invokes
- * base computation unless already complete
+ * Returns a rethrowable exception for the given task, if
+ * available. To provide accurate stack traces, if the exception
+ * was not thrown by the current thread, we try to create a new
+ * exception of the same type as the one thrown, but with the
+ * recorded exception as its cause. If there is no such
+ * constructor, we instead try to use a no-arg constructor,
+ * followed by initCause, to the same effect. If none of these
+ * apply, or any fail due to other exceptions, we return the
+ * recorded exception, which is still correct, although it may
+ * contain a misleading stack trace.
+ *
+ * @return the exception, or null if none
*/
- final void quietlyExec() {
- if (status >= 0) {
+ private Throwable getThrowableException() {
+ if ((status & DONE_MASK) != EXCEPTIONAL)
+ return null;
+ int h = System.identityHashCode(this);
+ ExceptionNode e;
+ final ReentrantLock lock = exceptionTableLock;
+ lock.lock();
+ try {
+ expungeStaleExceptions();
+ ExceptionNode[] t = exceptionTable;
+ e = t[h & (t.length - 1)];
+ while (e != null && e.get() != this)
+ e = e.next;
+ } finally {
+ lock.unlock();
+ }
+ Throwable ex;
+ if (e == null || (ex = e.ex) == null)
+ return null;
+ if (e.thrower != Thread.currentThread().getId()) {
+ Class<? extends Throwable> ec = ex.getClass();
try {
- if (!exec())
- return;
- } catch(Throwable rex) {
- setDoneExceptionally(rex);
- return;
+ Constructor<?> noArgCtor = null;
+ Constructor<?>[] cs = ec.getConstructors();// public ctors only
+ for (int i = 0; i < cs.length; ++i) {
+ Constructor<?> c = cs[i];
+ Class<?>[] ps = c.getParameterTypes();
+ if (ps.length == 0)
+ noArgCtor = c;
+ else if (ps.length == 1 && ps[0] == Throwable.class)
+ return (Throwable)(c.newInstance(ex));
+ }
+ if (noArgCtor != null) {
+ Throwable wx = (Throwable)(noArgCtor.newInstance());
+ wx.initCause(ex);
+ return wx;
+ }
+ } catch (Exception ignore) {
}
- setNormalCompletion();
}
+ return ex;
}
/**
- * Calls exec, recording but not rethrowing exception
- * Caller should normally check status before calling
- * @return true if completed normally
+ * Polls stale refs and removes them. Call only while holding lock.
*/
- private boolean tryQuietlyInvoke() {
- try {
- if (!exec())
- return false;
- } catch (Throwable rex) {
- setDoneExceptionally(rex);
- return false;
+ private static void expungeStaleExceptions() {
+ for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
+ if (x instanceof ExceptionNode) {
+ ForkJoinTask<?> key = ((ExceptionNode)x).get();
+ ExceptionNode[] t = exceptionTable;
+ int i = System.identityHashCode(key) & (t.length - 1);
+ ExceptionNode e = t[i];
+ ExceptionNode pred = null;
+ while (e != null) {
+ ExceptionNode next = e.next;
+ if (e == x) {
+ if (pred == null)
+ t[i] = next;
+ else
+ pred.next = next;
+ break;
+ }
+ pred = e;
+ e = next;
+ }
+ }
}
- setNormalCompletion();
- return true;
}
/**
- * Cancel, ignoring any exceptions it throws
+ * If the lock is available, polls stale refs and removes them.
+ * Called from ForkJoinPool when pools become quiescent.
*/
- final void cancelIgnoringExceptions() {
- try {
- cancel(false);
- } catch(Throwable ignore) {
+ static final void helpExpungeStaleExceptions() {
+ final ReentrantLock lock = exceptionTableLock;
+ if (lock.tryLock()) {
+ try {
+ expungeStaleExceptions();
+ } finally {
+ lock.unlock();
+ }
}
}
/**
- * Main implementation of helpJoin
+ * Throws exception, if any, associated with the given status.
*/
- private int busyJoin(ForkJoinWorkerThread w) {
- int s;
- ForkJoinTask<?> t;
- while ((s = status) >= 0 && (t = w.scanWhileJoining(this)) != null)
- t.quietlyExec();
- return (s >= 0)? awaitDone(w, false) : s; // block if no work
+ private void reportException(int s) {
+ Throwable ex = ((s == CANCELLED) ? new CancellationException() :
+ (s == EXCEPTIONAL) ? getThrowableException() :
+ null);
+ if (ex != null)
+ U.throwException(ex);
}
// public methods
@@ -472,70 +599,111 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
/**
* Arranges to asynchronously execute this task. While it is not
* necessarily enforced, it is a usage error to fork a task more
- * than once unless it has completed and been reinitialized. This
- * method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * than once unless it has completed and been reinitialized.
+ * Subsequent modifications to the state of this task or any data
+ * it operates on are not necessarily consistently observable by
+ * any thread other than the one executing it unless preceded by a
+ * call to {@link #join} or related methods, or a call to {@link
+ * #isDone} returning {@code true}.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
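+ *
+ * <p>The common idiom (illustrative; {@code left} and {@code
+ * right} are assumed subtasks) is:
+ * <pre> {@code
+ * right.fork();   // push right for possible stealing
+ * left.invoke();  // compute left directly
+ * right.join();   // wait for, or help complete, right}</pre>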
+ *
+ * @return {@code this}, to simplify usage
*/
- public final void fork() {
- ((ForkJoinWorkerThread)(Thread.currentThread())).pushTask(this);
+ public final ForkJoinTask<V> fork() {
+ ((ForkJoinWorkerThread)Thread.currentThread()).workQueue.push(this);
+ return this;
}
/**
- * Returns the result of the computation when it is ready.
- * This method differs from <code>get</code> in that abnormal
- * completion results in RuntimeExceptions or Errors, not
- * ExecutionExceptions.
+ * Returns the result of the computation when it {@link #isDone is
+ * done}. This method differs from {@link #get()} in that
+ * abnormal completion results in {@code RuntimeException} or
+ * {@code Error}, not {@code ExecutionException}, and that
+ * interrupts of the calling thread do <em>not</em> cause the
+ * method to abruptly return by throwing {@code
+ * InterruptedException}.
*
* @return the computed result
*/
public final V join() {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryExec())
- reportException(awaitDone(w, true));
+ int s;
+ if ((s = doJoin() & DONE_MASK) != NORMAL)
+ reportException(s);
return getRawResult();
}
/**
* Commences performing this task, awaits its completion if
- * necessary, and return its result.
- * @throws Throwable (a RuntimeException, Error, or unchecked
- * exception) if the underlying computation did so.
+ * necessary, and returns its result, or throws an (unchecked)
+ * {@code RuntimeException} or {@code Error} if the underlying
+ * computation did so.
+ *
* @return the computed result
*/
public final V invoke() {
- if (status >= 0 && tryExec())
- return getRawResult();
- else
- return join();
+ int s;
+ if ((s = doInvoke() & DONE_MASK) != NORMAL)
+ reportException(s);
+ return getRawResult();
}
/**
- * Forks both tasks, returning when <code>isDone</code> holds for
- * both of them or an exception is encountered. This method may be
- * invoked only from within ForkJoinTask computations. Attempts to
- * invoke in other contexts result in exceptions or errors
- * possibly including ClassCastException.
- * @param t1 one task
- * @param t2 the other task
- * @throws NullPointerException if t1 or t2 are null
- * @throws RuntimeException or Error if either task did so.
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, the
+ * other may be cancelled. However, the execution status of
+ * individual tasks is not guaranteed upon exceptional return. The
+ * status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @param t1 the first task
+ * @param t2 the second task
+ * @throws NullPointerException if any task is null
*/
- public static void invokeAll(ForkJoinTask<?>t1, ForkJoinTask<?> t2) {
+ public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
+ int s1, s2;
t2.fork();
- t1.invoke();
- t2.join();
+ if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
+ t1.reportException(s1);
+ if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
+ t2.reportException(s2);
}
/**
- * Forks the given tasks, returning when <code>isDone</code> holds
- * for all of them. If any task encounters an exception, others
- * may be cancelled. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including ClassCastException.
- * @param tasks the array of tasks
- * @throws NullPointerException if tasks or any element are null.
- * @throws RuntimeException or Error if any task did so.
+ * Forks the given tasks, returning when {@code isDone} holds for
+ * each task or an (unchecked) exception is encountered, in which
+ * case the exception is rethrown. If more than one task
+ * encounters an exception, then this method throws any one of
+ * these exceptions. If any task encounters an exception, others
+ * may be cancelled. However, the execution status of individual
+ * tasks is not guaranteed upon exceptional return. The status of
+ * each task may be obtained using {@link #getException()} and
+ * related methods to check if they have been cancelled, completed
+ * normally or exceptionally, or left unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @param tasks the tasks
+ * @throws NullPointerException if any task is null
*/
public static void invokeAll(ForkJoinTask<?>... tasks) {
Throwable ex = null;
@@ -548,46 +716,53 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
else if (i != 0)
t.fork();
- else {
- t.quietlyInvoke();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = tasks[i];
if (t != null) {
if (ex != null)
t.cancel(false);
- else {
- t.quietlyJoin();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
}
}
if (ex != null)
- rethrowException(ex);
+ U.throwException(ex);
}
/**
- * Forks all tasks in the collection, returning when
- * <code>isDone</code> holds for all of them. If any task
- * encounters an exception, others may be cancelled. This method
- * may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts resul!t in
- * exceptions or errors possibly including ClassCastException.
+ * Forks all tasks in the specified collection, returning when
+ * {@code isDone} holds for each task or an (unchecked) exception
+ * is encountered, in which case the exception is rethrown. If
+ * more than one task encounters an exception, then this method
+ * throws any one of these exceptions. If any task encounters an
+ * exception, others may be cancelled. However, the execution
+ * status of individual tasks is not guaranteed upon exceptional
+ * return. The status of each task may be obtained using {@link
+ * #getException()} and related methods to check if they have been
+ * cancelled, completed normally or exceptionally, or left
+ * unprocessed.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @param tasks the collection of tasks
- * @throws NullPointerException if tasks or any element are null.
- * @throws RuntimeException or Error if any task did so.
+ * @return the tasks argument, to simplify usage
+ * @throws NullPointerException if tasks or any element is null
*/
- public static void invokeAll(Collection<? extends ForkJoinTask<?>> tasks) {
- if (!(tasks instanceof List)) {
- invokeAll(tasks.toArray(new ForkJoinTask[tasks.size()]));
- return;
+ public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
+ if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
+ invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
+ return tasks;
}
+ @SuppressWarnings("unchecked")
List<? extends ForkJoinTask<?>> ts =
- (List<? extends ForkJoinTask<?>>)tasks;
+ (List<? extends ForkJoinTask<?>>) tasks;
Throwable ex = null;
int last = ts.size() - 1;
for (int i = last; i >= 0; --i) {
@@ -598,253 +773,326 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
else if (i != 0)
t.fork();
- else {
- t.quietlyInvoke();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doInvoke() < NORMAL && ex == null)
+ ex = t.getException();
}
for (int i = 1; i <= last; ++i) {
ForkJoinTask<?> t = ts.get(i);
if (t != null) {
if (ex != null)
t.cancel(false);
- else {
- t.quietlyJoin();
- if (ex == null)
- ex = t.getException();
- }
+ else if (t.doJoin() < NORMAL)
+ ex = t.getException();
}
}
if (ex != null)
- rethrowException(ex);
+ U.throwException(ex);
+ return tasks;
}
/**
- * Returns true if the computation performed by this task has
- * completed (or has been cancelled).
- * @return true if this computation has completed
+ * Attempts to cancel execution of this task. This attempt will
+ * fail if the task has already completed or could not be
+ * cancelled for some other reason. If successful, and this task
+ * has not started when {@code cancel} is called, execution of
+ * this task is suppressed. After this method returns
+ * successfully, unless there is an intervening call to {@link
+ * #reinitialize}, subsequent calls to {@link #isCancelled},
+ * {@link #isDone}, and {@code cancel} will return {@code true}
+ * and calls to {@link #join} and related methods will result in
+ * {@code CancellationException}.
+ *
+ * <p>This method may be overridden in subclasses, but if so, must
+ * still ensure that these properties hold. In particular, the
+ * {@code cancel} method itself must not throw exceptions.
+ *
+ * <p>This method is designed to be invoked by <em>other</em>
+ * tasks. To terminate the current task, you can just return or
+ * throw an unchecked exception from its computation method, or
+ * invoke {@link #completeExceptionally}.
+ *
+ * @param mayInterruptIfRunning this value has no effect in the
+ * default implementation because interrupts are not used to
+ * control cancellation.
+ *
+ * @return {@code true} if this task is now cancelled
*/
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
+ }
+
public final boolean isDone() {
return status < 0;
}
- /**
- * Returns true if this task was cancelled.
- * @return true if this task was cancelled
- */
public final boolean isCancelled() {
- return (status & COMPLETION_MASK) == CANCELLED;
+ return (status & DONE_MASK) == CANCELLED;
}
/**
- * Asserts that the results of this task's computation will not be
- * used. If a cancellation occurs before atempting to execute this
- * task, then execution will be suppressed, <code>isCancelled</code>
- * will report true, and <code>join</code> will result in a
- * <code>CancellationException</code> being thrown. Otherwise, when
- * cancellation races with completion, there are no guarantees
- * about whether <code>isCancelled</code> will report true, whether
- * <code>join</code> will return normally or via an exception, or
- * whether these behaviors will remain consistent upon repeated
- * invocation.
- *
- * <p>This method may be overridden in subclasses, but if so, must
- * still ensure that these minimal properties hold. In particular,
- * the cancel method itself must not throw exceptions.
- *
- * <p> This method is designed to be invoked by <em>other</em>
- * tasks. To terminate the current task, you can just return or
- * throw an unchecked exception from its computation method, or
- * invoke <code>completeExceptionally</code>.
- *
- * @param mayInterruptIfRunning this value is ignored in the
- * default implementation because tasks are not in general
- * cancelled via interruption.
+ * Returns {@code true} if this task threw an exception or was cancelled.
*
- * @return true if this task is now cancelled
+ * @return {@code true} if this task threw an exception or was cancelled
*/
- public boolean cancel(boolean mayInterruptIfRunning) {
- setCompletion(CANCELLED);
- return (status & COMPLETION_MASK) == CANCELLED;
+ public final boolean isCompletedAbnormally() {
+ return status < NORMAL;
}
/**
- * Returns true if this task threw an exception or was cancelled
- * @return true if this task threw an exception or was cancelled
+ * Returns {@code true} if this task completed without throwing an
+ * exception and was not cancelled.
+ *
+ * @return {@code true} if this task completed without throwing an
+ * exception and was not cancelled
*/
- public final boolean isCompletedAbnormally() {
- return (status & COMPLETION_MASK) < NORMAL;
+ public final boolean isCompletedNormally() {
+ return (status & DONE_MASK) == NORMAL;
}
/**
* Returns the exception thrown by the base computation, or a
- * CancellationException if cancelled, or null if none or if the
- * method has not yet completed.
- * @return the exception, or null if none
+ * {@code CancellationException} if cancelled, or {@code null} if
+ * none or if the method has not yet completed.
+ *
+ * @return the exception, or {@code null} if none
*/
public final Throwable getException() {
- int s = status & COMPLETION_MASK;
- if (s >= NORMAL)
- return null;
- if (s == CANCELLED)
- return new CancellationException();
- return exceptionMap.get(this);
+ int s = status & DONE_MASK;
+ return ((s >= NORMAL) ? null :
+ (s == CANCELLED) ? new CancellationException() :
+ getThrowableException());
}
/**
* Completes this task abnormally, and if not already aborted or
* cancelled, causes it to throw the given exception upon
- * <code>join</code> and related operations. This method may be used
+ * {@code join} and related operations. This method may be used
* to induce exceptions in asynchronous tasks, or to force
* completion of tasks that would not otherwise complete. Its use
- * in other situations is likely to be wrong. This method is
- * overridable, but overridden versions must invoke <code>super</code>
+ * in other situations is discouraged. This method is
+ * overridable, but overridden versions must invoke {@code super}
* implementation to maintain guarantees.
*
- * @param ex the exception to throw. If this exception is
- * not a RuntimeException or Error, the actual exception thrown
- * will be a RuntimeException with cause ex.
+ * @param ex the exception to throw. If this exception is not a
+ * {@code RuntimeException} or {@code Error}, the actual exception
+ * thrown will be a {@code RuntimeException} with cause {@code ex}.
*/
public void completeExceptionally(Throwable ex) {
- setDoneExceptionally((ex instanceof RuntimeException) ||
- (ex instanceof Error)? ex :
- new RuntimeException(ex));
+ setExceptionalCompletion((ex instanceof RuntimeException) ||
+ (ex instanceof Error) ? ex :
+ new RuntimeException(ex));
}
/**
* Completes this task, and if not already aborted or cancelled,
- * returning a <code>null</code> result upon <code>join</code> and related
- * operations. This method may be used to provide results for
- * asynchronous tasks, or to provide alternative handling for
- * tasks that would not otherwise complete normally. Its use in
- * other situations is likely to be wrong. This method is
- * overridable, but overridden versions must invoke <code>super</code>
- * implementation to maintain guarantees.
+ * returning the given value as the result of subsequent
+ * invocations of {@code join} and related operations. This method
+ * may be used to provide results for asynchronous tasks, or to
+ * provide alternative handling for tasks that would not otherwise
+ * complete normally. Its use in other situations is
+ * discouraged. This method is overridable, but overridden
+ * versions must invoke {@code super} implementation to maintain
+ * guarantees.
*
- * @param value the result value for this task.
+ * @param value the result value for this task
*/
public void complete(V value) {
try {
setRawResult(value);
- } catch(Throwable rex) {
- setDoneExceptionally(rex);
+ } catch (Throwable rex) {
+ setExceptionalCompletion(rex);
return;
}
- setNormalCompletion();
- }
-
- public final V get() throws InterruptedException, ExecutionException {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, true);
- return reportFutureResult();
- }
-
- public final V get(long timeout, TimeUnit unit)
- throws InterruptedException, ExecutionException, TimeoutException {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || status < 0 || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, unit.toNanos(timeout));
- return reportTimedFutureResult();
+ setCompletion(NORMAL);
}
/**
- * Possibly executes other tasks until this task is ready, then
- * returns the result of the computation. This method may be more
- * efficient than <code>join</code>, but is only applicable when
- * there are no potemtial dependencies between continuation of the
- * current task and that of any other task that might be executed
- * while helping. (This usually holds for pure divide-and-conquer
- * tasks). This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * resul!t in exceptions or errors possibly including ClassCastException.
+ * Waits if necessary for the computation to complete, and then
+ * retrieves its result.
+ *
* @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
*/
- public final V helpJoin() {
- ForkJoinWorkerThread w = (ForkJoinWorkerThread)(Thread.currentThread());
- if (status < 0 || !w.unpushTask(this) || !tryExec())
- reportException(busyJoin(w));
+ public final V get() throws InterruptedException, ExecutionException {
+ int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
+ doJoin() : externalInterruptibleAwaitDone();
+ Throwable ex;
+ if ((s &= DONE_MASK) == CANCELLED)
+ throw new CancellationException();
+ if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
return getRawResult();
}
/**
- * Possibly executes other tasks until this task is ready. This
- * method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts resul!t in
- * exceptions or errors possibly including ClassCastException.
+ * Waits if necessary for at most the given time for the computation
+ * to complete, and then retrieves its result, if available.
+ *
+ * @param timeout the maximum time to wait
+ * @param unit the time unit of the timeout argument
+ * @return the computed result
+ * @throws CancellationException if the computation was cancelled
+ * @throws ExecutionException if the computation threw an
+ * exception
+ * @throws InterruptedException if the current thread is not a
+ * member of a ForkJoinPool and was interrupted while waiting
+ * @throws TimeoutException if the wait timed out
*/
- public final void quietlyHelpJoin() {
- if (status >= 0) {
- ForkJoinWorkerThread w =
- (ForkJoinWorkerThread)(Thread.currentThread());
- if (!w.unpushTask(this) || !tryQuietlyInvoke())
- busyJoin(w);
+ public final V get(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ if (Thread.interrupted())
+ throw new InterruptedException();
+ // Messy in part because we measure in nanosecs, but wait in millisecs
+ int s; long ns, ms;
+ if ((s = status) >= 0 && (ns = unit.toNanos(timeout)) > 0L) {
+ long deadline = System.nanoTime() + ns;
+ ForkJoinPool p = null;
+ ForkJoinPool.WorkQueue w = null;
+ Thread t = Thread.currentThread();
+ if (t instanceof ForkJoinWorkerThread) {
+ ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
+ p = wt.pool;
+ w = wt.workQueue;
+ s = p.helpJoinOnce(w, this); // no retries on failure
+ }
+ boolean canBlock = false;
+ boolean interrupted = false;
+ try {
+ while ((s = status) >= 0) {
+ if (w != null && w.runState < 0)
+ cancelIgnoringExceptions(this);
+ else if (!canBlock) {
+ if (p == null || p.tryCompensate(this, null))
+ canBlock = true;
+ }
+ else {
+ if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
+ U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
+ synchronized (this) {
+ if (status >= 0) {
+ try {
+ wait(ms);
+ } catch (InterruptedException ie) {
+ if (p == null)
+ interrupted = true;
+ }
+ }
+ else
+ notifyAll();
+ }
+ }
+ if ((s = status) < 0 || interrupted ||
+ (ns = deadline - System.nanoTime()) <= 0L)
+ break;
+ }
+ }
+ } finally {
+ if (p != null && canBlock)
+ p.incrementActiveCount();
+ }
+ if (interrupted)
+ throw new InterruptedException();
+ }
+ if ((s &= DONE_MASK) != NORMAL) {
+ Throwable ex;
+ if (s == CANCELLED)
+ throw new CancellationException();
+ if (s != EXCEPTIONAL)
+ throw new TimeoutException();
+ if ((ex = getThrowableException()) != null)
+ throw new ExecutionException(ex);
}
+ return getRawResult();
}
/**
- * Joins this task, without returning its result or throwing an
+ * Joins this task, without returning its result or throwing its
* exception. This method may be useful when processing
* collections of tasks when some have been cancelled or otherwise
* known to have aborted.
*/
public final void quietlyJoin() {
- if (status >= 0) {
- ForkJoinWorkerThread w = getWorker();
- if (w == null || !w.unpushTask(this) || !tryQuietlyInvoke())
- awaitDone(w, true);
- }
+ doJoin();
}
/**
* Commences performing this task and awaits its completion if
- * necessary, without returning its result or throwing an
- * exception. This method may be useful when processing
- * collections of tasks when some have been cancelled or otherwise
- * known to have aborted.
+ * necessary, without returning its result or throwing its
+ * exception.
*/
public final void quietlyInvoke() {
- if (status >= 0 && !tryQuietlyInvoke())
- quietlyJoin();
+ doInvoke();
}
/**
* Possibly executes tasks until the pool hosting the current task
- * {@link ForkJoinPool#isQuiescent}. This method may be of use in
- * designs in which many tasks are forked, but none are explicitly
- * joined, instead executing them until all are processed.
+ * {@link ForkJoinPool#isQuiescent is quiescent}. This method may
+ * be of use in designs in which many tasks are forked, but none
+ * are explicitly joined, instead executing them until all are
+ * processed.
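+ *
+ * <p>A sketch of such a design ({@code ItemTask} and {@code items}
+ * are illustrative assumptions):
+ * <pre> {@code
+ * for (Item item : items)
+ *   new ItemTask(item).fork(); // no individual joins
+ * helpQuiesce(); // execute and steal tasks until all are processed}</pre>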
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*/
public static void helpQuiesce() {
- ((ForkJoinWorkerThread)(Thread.currentThread())).
- helpQuiescePool();
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ wt.pool.helpQuiescePool(wt.workQueue);
}
/**
* Resets the internal bookkeeping state of this task, allowing a
- * subsequent <code>fork</code>. This method allows repeated reuse of
+ * subsequent {@code fork}. This method allows repeated reuse of
* this task, but only if reuse occurs when this task has either
* never been forked, or has been forked, then completed and all
* outstanding joins of this task have also completed. Effects
- * under any other usage conditions are not guaranteed, and are
- * almost surely wrong. This method may be useful when executing
+ * under any other usage conditions are not guaranteed.
+ * This method may be useful when executing
* pre-constructed trees of subtasks in loops.
+ *
+ * <p>Upon completion of this method, {@code isDone()} reports
+ * {@code false}, and {@code getException()} reports {@code
+ * null}. However, the value returned by {@code getRawResult} is
+ * unaffected. To clear this value, you can invoke {@code
+ * setRawResult(null)}.
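+ *
+ * <p>For example (an illustrative sketch; {@code setInput} and
+ * {@code use} are hypothetical):
+ * <pre> {@code
+ * for (Input in : inputs) {
+ *   task.setInput(in);
+ *   task.invoke();
+ *   use(task.getRawResult());
+ *   task.reinitialize();
+ * }}</pre>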
*/
public void reinitialize() {
- if ((status & COMPLETION_MASK) == EXCEPTIONAL)
- exceptionMap.remove(this);
- status = 0;
+ if ((status & DONE_MASK) == EXCEPTIONAL)
+ clearExceptionalCompletion();
+ else
+ status = 0;
}
/**
* Returns the pool hosting the current task execution, or null
- * if this task is executing outside of any pool.
- * @return the pool, or null if none.
+ * if this task is executing outside of any ForkJoinPool.
+ *
+ * @see #inForkJoinPool
+ * @return the pool, or {@code null} if none
*/
public static ForkJoinPool getPool() {
Thread t = Thread.currentThread();
- return ((t instanceof ForkJoinWorkerThread)?
- ((ForkJoinWorkerThread)t).pool : null);
+ return (t instanceof ForkJoinWorkerThread) ?
+ ((ForkJoinWorkerThread) t).pool : null;
+ }
+
+ /**
+ * Returns {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation.
+ *
+ * @return {@code true} if the current thread is a {@link
+ * ForkJoinWorkerThread} executing as a ForkJoinPool computation,
+ * or {@code false} otherwise
+ */
+ public static boolean inForkJoinPool() {
+ return Thread.currentThread() instanceof ForkJoinWorkerThread;
}
/**
@@ -853,13 +1101,19 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* by the current thread, and has not commenced executing in
* another thread. This method may be useful when arranging
* alternative local processing of tasks that could have been, but
- * were not, stolen. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including ClassCastException.
- * @return true if unforked
+ * were not, stolen.
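+ *
+ * <p>For example (illustrative):
+ * <pre> {@code
+ * t.fork();
+ * // ... other processing ...
+ * if (t.tryUnfork())
+ *   t.invoke(); // not taken; process locally after all
+ * else
+ *   t.join();   // already stolen; wait for or help it}</pre>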
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @return {@code true} if unforked
*/
public boolean tryUnfork() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).unpushTask(this);
+ return ((ForkJoinWorkerThread)Thread.currentThread())
+ .workQueue.tryUnpush(this);
}
/**
@@ -867,15 +1121,22 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* forked by the current worker thread but not yet executed. This
* value may be useful for heuristic decisions about whether to
* fork other tasks.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @return the number of tasks
*/
public static int getQueuedTaskCount() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).
- getQueueSize();
+ return ((ForkJoinWorkerThread) Thread.currentThread())
+ .workQueue.queueSize();
}
/**
- * Returns a estimate of how many more locally queued tasks are
+ * Returns an estimate of how many more locally queued tasks are
* held by the current worker thread than there are other worker
* threads that might steal them. This value may be useful for
* heuristic decisions about whether to fork other tasks. In many
@@ -883,23 +1144,74 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* aim to maintain a small constant surplus (for example, 3) of
* tasks, and to process computations locally if this threshold is
* exceeded.
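+ *
+ * <p>In sketch form (the threshold value and {@code subtask} are
+ * illustrative):
+ * <pre> {@code
+ * if (getSurplusQueuedTaskCount() <= 3)
+ *   subtask.fork();   // keep work available for idle threads
+ * else
+ *   subtask.invoke(); // plenty queued; process locally}</pre>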
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
* @return the surplus number of tasks, which may be negative
*/
public static int getSurplusQueuedTaskCount() {
- return ((ForkJoinWorkerThread)(Thread.currentThread()))
- .getEstimatedSurplusTaskCount();
+ /*
+ * The aim of this method is to return a cheap heuristic guide
+ * for task partitioning when programmers, frameworks, tools,
+ * or languages have little or no idea about task granularity.
+ * In essence by offering this method, we ask users only about
+ * tradeoffs in overhead vs expected throughput and its
+ * variance, rather than how finely to partition tasks.
+ *
+ * In a steady state strict (tree-structured) computation,
+ * each thread makes available for stealing enough tasks for
+ * other threads to remain active. Inductively, if all threads
+ * play by the same rules, each thread should make available
+ * only a constant number of tasks.
+ *
+ * The minimum useful constant is just 1. But using a value of
+ * 1 would require immediate replenishment upon each steal to
+ * maintain enough tasks, which is infeasible. Further,
+ * partitionings/granularities of offered tasks should
+ * minimize steal rates, which in general means that threads
+ * nearer the top of computation tree should generate more
+ * than those nearer the bottom. In perfect steady state, each
+ * thread is at approximately the same level of computation
+ * tree. However, producing extra tasks amortizes the
+ * uncertainty of progress and diffusion assumptions.
+ *
+ * So, users will want to use values larger, but not much
+ * larger than 1 to both smooth over transient shortages and
+ * hedge against uneven progress; as traded off against the
+ * cost of extra task overhead. We leave the user to pick a
+ * threshold value to compare with the results of this call to
+ * guide decisions, but recommend values such as 3.
+ *
+ * When all threads are active, it is on average OK to
+ * estimate surplus strictly locally. In steady-state, if one
+ * thread is maintaining say 2 surplus tasks, then so are
+ * others. So we can just use estimated queue length.
+ * However, this strategy alone leads to serious mis-estimates
+ * in some non-steady-state conditions (ramp-up, ramp-down,
+ * other stalls). We can detect many of these by further
+ * considering the number of "idle" threads, that are known to
+ * have zero queued tasks, so compensate by a factor of
+ * (#idle/#active) threads.
+ */
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ return wt.workQueue.queueSize() - wt.pool.idlePerActive();
}
// Extension methods
/**
- * Returns the result that would be returned by <code>join</code>,
- * even if this task completed abnormally, or null if this task is
- * not known to have been completed. This method is designed to
- * aid debugging, as well as to support extensions. Its use in any
- * other context is discouraged.
+ * Returns the result that would be returned by {@link #join}, even
+ * if this task completed abnormally, or {@code null} if this task
+ * is not known to have been completed. This method is designed
+ * to aid debugging, as well as to support extensions. Its use in
+ * any other context is discouraged.
*
- * @return the result, or null if not completed.
+ * @return the result, or {@code null} if not completed
*/
public abstract V getRawResult();
@@ -918,42 +1230,52 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* called otherwise. The return value controls whether this task
* is considered to be done normally. It may return false in
* asynchronous actions that require explicit invocations of
- * <code>complete</code> to become joinable. It may throw exceptions
- * to indicate abnormal exit.
- * @return true if completed normally
- * @throws Error or RuntimeException if encountered during computation
+ * {@link #complete} to become joinable. It may also throw an
+ * (unchecked) exception to indicate abnormal exit.
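+ *
+ * <p>An asynchronous action might be sketched (with a wholly
+ * hypothetical callback API) as:
+ * <pre> {@code
+ * protected boolean exec() {
+ *   startRequest(new Callback<V>() {
+ *     public void onDone(V v) { complete(v); }
+ *   });
+ *   return false; // completion is later signalled via complete
+ * }}</pre>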
+ *
+ * @return {@code true} if completed normally
*/
protected abstract boolean exec();
/**
- * Returns, but does not unschedule or execute, the task queued by
- * the current thread but not yet executed, if one is
+ * Returns, but does not unschedule or execute, a task queued by
+ * the current thread but not yet executed, if one is immediately
* available. There is no guarantee that this task will actually
- * be polled or executed next. This method is designed primarily
- * to support extensions, and is unlikely to be useful otherwise.
- * This method may be invoked only from within ForkJoinTask
- * computations. Attempts to invoke in other contexts result in
- * exceptions or errors possibly including ClassCastException.
+ * be polled or executed next. Conversely, this method may return
+ * null even if a task exists but cannot be accessed without
+ * contention with other threads. This method is designed
+ * primarily to support extensions, and is unlikely to be useful
+ * otherwise.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*
- * @return the next task, or null if none are available
+ * @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> peekNextLocalTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).peekTask();
+ return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek();
}
/**
* Unschedules and returns, without executing, the next task
* queued by the current thread but not yet executed. This method
* is designed primarily to support extensions, and is unlikely to
- * be useful otherwise. This method may be invoked only from
- * within ForkJoinTask computations. Attempts to invoke in other
- * contexts result in exceptions or errors possibly including
- * ClassCastException.
+ * be useful otherwise.
+ *
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
*
- * @return the next task, or null if none are available
+ * @return the next task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollNextLocalTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).pollLocalTask();
+ return ((ForkJoinWorkerThread) Thread.currentThread())
+ .workQueue.nextLocalTask();
}
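    // A sketch using the two methods above, assuming it lives in a
    // ForkJoinTask subclass (both are protected) and runs on a worker
    // thread: drain the local queue without executing anything.
    protected static int drainLocalTasks(java.util.List<ForkJoinTask<?>> sink) {
        int n = 0;
        for (ForkJoinTask<?> t; (t = pollNextLocalTask()) != null; ++n)
            sink.add(t);   // peekNextLocalTask() would inspect without removal
        return n;
    }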
/**
@@ -961,19 +1283,170 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
* queued by the current thread but not yet executed, if one is
* available, or if not available, a task that was forked by some
* other thread, if available. Availability may be transient, so a
- * <code>null</code> result does not necessarily imply quiecence
+ * {@code null} result does not necessarily imply quiescence
* of the pool this task is operating in. This method is designed
* primarily to support extensions, and is unlikely to be useful
- * otherwise. This method may be invoked only from within
- * ForkJoinTask computations. Attempts to invoke in other contexts
- * result in exceptions or errors possibly including
- * ClassCastException.
+ * otherwise.
*
- * @return a task, or null if none are available
+ * <p>This method may be invoked only from within {@code
+ * ForkJoinPool} computations (as may be determined using method
+ * {@link #inForkJoinPool}). Attempts to invoke in other contexts
+ * result in exceptions or errors, possibly including {@code
+ * ClassCastException}.
+ *
+ * @return a task, or {@code null} if none are available
*/
protected static ForkJoinTask<?> pollTask() {
- return ((ForkJoinWorkerThread)(Thread.currentThread())).
- pollTask();
+ ForkJoinWorkerThread wt =
+ (ForkJoinWorkerThread)Thread.currentThread();
+ return wt.pool.nextTaskFor(wt.workQueue);
+ }
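    // A sketch in the same spirit, again assuming a ForkJoinTask subclass
    // on a worker thread: run local and stolen tasks while any are
    // momentarily available. A null from pollTask() does not imply that
    // the pool is quiescent.
    protected static void runAvailableTasks() {
        for (ForkJoinTask<?> t; (t = pollTask()) != null; )
            t.quietlyInvoke();  // execute without rethrowing exceptions
    }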
+
+ // Mark-bit operations
+
+ /**
+ * Returns true if this task is marked.
+ *
+ * @return true if this task is marked
+ * @since 1.8
+ */
+ public final boolean isMarkedForkJoinTask() {
+ return (status & MARKED) != 0;
+ }
+
+ /**
+ * Atomically sets the mark on this task.
+ *
+ * @return true if this task was previously unmarked
+ * @since 1.8
+ */
+ public final boolean markForkJoinTask() {
+ for (int s;;) {
+ if (((s = status) & MARKED) != 0)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s, s | MARKED))
+ return true;
+ }
+ }
+
+ /**
+ * Atomically clears the mark on this task.
+ *
+ * @return true if this task was previously marked
+ * @since 1.8
+ */
+ public final boolean unmarkForkJoinTask() {
+ for (int s;;) {
+ if (((s = status) & MARKED) == 0)
+ return false;
+ if (U.compareAndSwapInt(this, STATUS, s, s & ~MARKED))
+ return true;
+ }
+ }
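    // A sketch of one way the mark bit above might be used: claim each
    // task at most once across cooperating threads. visitOnce and process
    // are hypothetical names.
    static void visitOnce(ForkJoinTask<?> t) {
        if (t.markForkJoinTask())  // true only for the first claimant
            process(t);            // hypothetical per-task action
    }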
+
+ /**
+ * Adaptor for Runnables. This implements RunnableFuture
+ * to be compliant with AbstractExecutorService constraints
+ * when used in ForkJoinPool.
+ */
+ static final class AdaptedRunnable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Runnable runnable;
+ T result;
+ AdaptedRunnable(Runnable runnable, T result) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ this.result = result; // OK to set this even before completion
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Runnables without results
+ */
+ static final class AdaptedRunnableAction extends ForkJoinTask<Void>
+ implements RunnableFuture<Void> {
+ final Runnable runnable;
+ AdaptedRunnableAction(Runnable runnable) {
+ if (runnable == null) throw new NullPointerException();
+ this.runnable = runnable;
+ }
+ public final Void getRawResult() { return null; }
+ public final void setRawResult(Void v) { }
+ public final boolean exec() { runnable.run(); return true; }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 5232453952276885070L;
+ }
+
+ /**
+ * Adaptor for Callables
+ */
+ static final class AdaptedCallable<T> extends ForkJoinTask<T>
+ implements RunnableFuture<T> {
+ final Callable<? extends T> callable;
+ T result;
+ AdaptedCallable(Callable<? extends T> callable) {
+ if (callable == null) throw new NullPointerException();
+ this.callable = callable;
+ }
+ public final T getRawResult() { return result; }
+ public final void setRawResult(T v) { result = v; }
+ public final boolean exec() {
+ try {
+ result = callable.call();
+ return true;
+ } catch (Error err) {
+ throw err;
+ } catch (RuntimeException rex) {
+ throw rex;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ public final void run() { invoke(); }
+ private static final long serialVersionUID = 2838392045355241008L;
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * a null result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @return the task
+ */
+ public static ForkJoinTask<?> adapt(Runnable runnable) {
+ return new AdaptedRunnableAction(runnable);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code run}
+ * method of the given {@code Runnable} as its action, and returns
+ * the given result upon {@link #join}.
+ *
+ * @param runnable the runnable action
+ * @param result the result upon completion
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
+ return new AdaptedRunnable<T>(runnable, result);
+ }
+
+ /**
+ * Returns a new {@code ForkJoinTask} that performs the {@code call}
+ * method of the given {@code Callable} as its action, and returns
+ * its result upon {@link #join}, translating any checked exceptions
+ * encountered into {@code RuntimeException}.
+ *
+ * @param callable the callable action
+ * @return the task
+ */
+ public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
+ return new AdaptedCallable<T>(callable);
}
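    // A usage sketch for the adapt methods above; the pool and values are
    // illustrative only.
    ForkJoinPool pool = new ForkJoinPool();
    ForkJoinTask<Integer> t = ForkJoinTask.adapt(
        new java.util.concurrent.Callable<Integer>() {
            public Integer call() { return 6 * 7; }
        });
    pool.execute(t);
    int v = t.join();  // 42; a checked exception from call() would surface
                       // here wrapped in a RuntimeException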
// Serialization support
@@ -981,11 +1454,10 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
private static final long serialVersionUID = -7721805057305804111L;
/**
- * Save the state to a stream.
+ * Saves this task to a stream (that is, serializes it).
*
* @serialData the current run status and the exception thrown
- * during execution, or null if none.
- * @param s the stream
+ * during execution, or {@code null} if none
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
@@ -994,70 +1466,57 @@ public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
}
/**
- * Reconstitute the instance from a stream.
- * @param s the stream
+ * Reconstitutes this task from a stream (that is, deserializes it).
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
- status &= ~INTERNAL_SIGNAL_MASK; // clear internal signal counts
- status |= EXTERNAL_SIGNAL; // conservatively set external signal
Object ex = s.readObject();
if (ex != null)
- setDoneExceptionally((Throwable)ex);
+ setExceptionalCompletion((Throwable)ex);
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe U;
+ private static final long STATUS;
+ static {
+ exceptionTableLock = new ReentrantLock();
+ exceptionTableRefQueue = new ReferenceQueue<Object>();
+ exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
+ try {
+ U = getUnsafe();
+ STATUS = U.objectFieldOffset
+ (ForkJoinTask.class.getDeclaredField("status"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
}
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
try {
- return Unsafe.getUnsafe();
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
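    // The same field-offset-plus-CAS pattern, sketched for a hypothetical
    // class outside this file; SpinFlag is illustrative, and getUnsafe()
    // stands in for a helper like the one above.
    final class SpinFlag {
        volatile int state;
        private static final sun.misc.Unsafe U;
        private static final long STATE;
        static {
            try {
                U = getUnsafe();
                STATE = U.objectFieldOffset(
                    SpinFlag.class.getDeclaredField("state"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
        boolean casState(int expect, int update) {
            return U.compareAndSwapInt(this, STATE, expect, update);
        }
        private static sun.misc.Unsafe getUnsafe() {
            return sun.misc.Unsafe.getUnsafe(); // or the privileged fallback above
        }
    }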
-
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName, Unsafe unsafe)
- throws NoSuchFieldException {
- // do not use _unsafe to avoid NPE
- return unsafe.objectFieldOffset
- (ForkJoinTask.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long statusOffset;
-
- static {
- Unsafe tmpUnsafe = null;
- long tmpStatusOffset = 0;
- try {
- tmpUnsafe = getUnsafe();
- tmpStatusOffset = fieldOffset("status", tmpUnsafe);
- } catch (Throwable e) {
- // Ignore the failure to load sun.misc.Unsafe on Android so
- // that platform can use the actor library without the
- // fork/join scheduler.
- String vmVendor = System.getProperty("java.vm.vendor");
- if (!vmVendor.contains("Android")) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
- _unsafe = tmpUnsafe;
- statusOffset = tmpStatusOffset;
- }
-
}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
index b4d889750c..90a0af5723 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
@@ -1,224 +1,55 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.*;
-import java.util.concurrent.locks.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
/**
- * A thread managed by a {@link ForkJoinPool}. This class is
- * subclassable solely for the sake of adding functionality -- there
- * are no overridable methods dealing with scheduling or
- * execution. However, you can override initialization and termination
- * methods surrounding the main task processing loop. If you do
- * create such a subclass, you will also need to supply a custom
- * ForkJoinWorkerThreadFactory to use it in a ForkJoinPool.
+ * A thread managed by a {@link ForkJoinPool}, which executes
+ * {@link ForkJoinTask}s.
+ * This class is subclassable solely for the sake of adding
+ * functionality -- there are no overridable methods dealing with
+ * scheduling or execution. However, you can override initialization
+ * and termination methods surrounding the main task processing loop.
+ * If you do create such a subclass, you will also need to supply a
+ * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it
+ * in a {@code ForkJoinPool}.
*
+ * @since 1.7
+ * @author Doug Lea
*/
public class ForkJoinWorkerThread extends Thread {
/*
- * Algorithm overview:
- *
- * 1. Work-Stealing: Work-stealing queues are special forms of
- * Deques that support only three of the four possible
- * end-operations -- push, pop, and deq (aka steal), and only do
- * so under the constraints that push and pop are called only from
- * the owning thread, while deq may be called from other threads.
- * (If you are unfamiliar with them, you probably want to read
- * Herlihy and Shavit's book "The Art of Multiprocessor
- * programming", chapter 16 describing these in more detail before
- * proceeding.) The main work-stealing queue design is roughly
- * similar to "Dynamic Circular Work-Stealing Deque" by David
- * Chase and Yossi Lev, SPAA 2005
- * (http://research.sun.com/scalable/pubs/index.html). The main
- * difference ultimately stems from gc requirements that we null
- * out taken slots as soon as we can, to maintain as small a
- * footprint as possible even in programs generating huge numbers
- * of tasks. To accomplish this, we shift the CAS arbitrating pop
- * vs deq (steal) from being on the indices ("base" and "sp") to
- * the slots themselves (mainly via method "casSlotNull()"). So,
- * both a successful pop and deq mainly entail CAS'ing a nonnull
- * slot to null. Because we rely on CASes of references, we do
- * not need tag bits on base or sp. They are simple ints as used
- * in any circular array-based queue (see for example ArrayDeque).
- * Updates to the indices must still be ordered in a way that
- * guarantees that (sp - base) > 0 means the queue is empty, but
- * otherwise may err on the side of possibly making the queue
- * appear nonempty when a push, pop, or deq have not fully
- * committed. Note that this means that the deq operation,
- * considered individually, is not wait-free. One thief cannot
- * successfully continue until another in-progress one (or, if
- * previously empty, a push) completes. However, in the
- * aggregate, we ensure at least probablistic non-blockingness. If
- * an attempted steal fails, a thief always chooses a different
- * random victim target to try next. So, in order for one thief to
- * progress, it suffices for any in-progress deq or new push on
- * any empty queue to complete. One reason this works well here is
- * that apparently-nonempty often means soon-to-be-stealable,
- * which gives threads a chance to activate if necessary before
- * stealing (see below).
- *
- * Efficient implementation of this approach currently relies on
- * an uncomfortable amount of "Unsafe" mechanics. To maintain
- * correct orderings, reads and writes of variable base require
- * volatile ordering. Variable sp does not require volatile write
- * but needs cheaper store-ordering on writes. Because they are
- * protected by volatile base reads, reads of the queue array and
- * its slots do not need volatile load semantics, but writes (in
- * push) require store order and CASes (in pop and deq) require
- * (volatile) CAS semantics. Since these combinations aren't
- * supported using ordinary volatiles, the only way to accomplish
- * these effciently is to use direct Unsafe calls. (Using external
- * AtomicIntegers and AtomicReferenceArrays for the indices and
- * array is significantly slower because of memory locality and
- * indirection effects.) Further, performance on most platforms is
- * very sensitive to placement and sizing of the (resizable) queue
- * array. Even though these queues don't usually become all that
- * big, the initial size must be large enough to counteract cache
- * contention effects across multiple queues (especially in the
- * presence of GC cardmarking). Also, to improve thread-locality,
- * queues are currently initialized immediately after the thread
- * gets the initial signal to start processing tasks. However,
- * all queue-related methods except pushTask are written in a way
- * that allows them to instead be lazily allocated and/or disposed
- * of when empty. All together, these low-level implementation
- * choices produce as much as a factor of 4 performance
- * improvement compared to naive implementations, and enable the
- * processing of billions of tasks per second, sometimes at the
- * expense of ugliness.
- *
- * 2. Run control: The primary run control is based on a global
- * counter (activeCount) held by the pool. It uses an algorithm
- * similar to that in Herlihy and Shavit section 17.6 to cause
- * threads to eventually block when all threads declare they are
- * inactive. (See variable "scans".) For this to work, threads
- * must be declared active when executing tasks, and before
- * stealing a task. They must be inactive before blocking on the
- * Pool Barrier (awaiting a new submission or other Pool
- * event). In between, there is some free play which we take
- * advantage of to avoid contention and rapid flickering of the
- * global activeCount: If inactive, we activate only if a victim
- * queue appears to be nonempty (see above). Similarly, a thread
- * tries to inactivate only after a full scan of other threads.
- * The net effect is that contention on activeCount is rarely a
- * measurable performance issue. (There are also a few other cases
- * where we scan for work rather than retry/block upon
- * contention.)
- *
- * 3. Selection control. We maintain policy of always choosing to
- * run local tasks rather than stealing, and always trying to
- * steal tasks before trying to run a new submission. All steals
- * are currently performed in randomly-chosen deq-order. It may be
- * worthwhile to bias these with locality / anti-locality
- * information, but doing this well probably requires more
- * lower-level information from JVMs than currently provided.
- */
-
- /**
- * Capacity of work-stealing queue array upon initialization.
- * Must be a power of two. Initial size must be at least 2, but is
- * padded to minimize cache effects.
- */
- private static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
-
- /**
- * Maximum work-stealing queue array size. Must be less than or
- * equal to 1 << 28 to ensure lack of index wraparound. (This
- * is less than usual bounds, because we need leftshift by 3
- * to be in int range).
- */
- private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 28;
-
- /**
- * The pool this thread works in. Accessed directly by ForkJoinTask
- */
- final ForkJoinPool pool;
-
- /**
- * The work-stealing queue array. Size must be a power of two.
- * Initialized when thread starts, to improve memory locality.
- */
- private ForkJoinTask<?>[] queue;
-
- /**
- * Index (mod queue.length) of next queue slot to push to or pop
- * from. It is written only by owner thread, via ordered store.
- * Both sp and base are allowed to wrap around on overflow, but
- * (sp - base) still estimates size.
- */
- private volatile int sp;
-
- /**
- * Index (mod queue.length) of least valid queue slot, which is
- * always the next position to steal from if nonempty.
- */
- private volatile int base;
-
- /**
- * Activity status. When true, this worker is considered active.
- * Must be false upon construction. It must be true when executing
- * tasks, and BEFORE stealing a task. It must be false before
- * calling pool.sync
- */
- private boolean active;
-
- /**
- * Run state of this worker. Supports simple versions of the usual
- * shutdown/shutdownNow control.
- */
- private volatile int runState;
-
- /**
- * Seed for random number generator for choosing steal victims.
- * Uses Marsaglia xorshift. Must be nonzero upon initialization.
- */
- private int seed;
-
- /**
- * Number of steals, transferred to pool when idle
+ * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
+ * ForkJoinTasks. For explanation, see the internal documentation
+ * of class ForkJoinPool.
*/
- private int stealCount;
- /**
- * Index of this worker in pool array. Set once by pool before
- * running, and accessed directly by pool during cleanup etc
- */
- int poolIndex;
-
- /**
- * The last barrier event waited for. Accessed in pool callback
- * methods, but only by current thread.
- */
- long lastEventCount;
-
- /**
- * True if use local fifo, not default lifo, for local polling
- */
- private boolean locallyFifo;
+ final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics
+ final ForkJoinPool pool; // the pool this thread works in
/**
* Creates a ForkJoinWorkerThread operating in the given pool.
+ *
* @param pool the pool this thread works in
* @throws NullPointerException if pool is null
*/
protected ForkJoinWorkerThread(ForkJoinPool pool) {
- if (pool == null) throw new NullPointerException();
+ super(pool.nextWorkerName());
+ setDaemon(true);
+ Thread.UncaughtExceptionHandler ueh = pool.ueh;
+ if (ueh != null)
+ setUncaughtExceptionHandler(ueh);
this.pool = pool;
- // Note: poolIndex is set by pool during construction
- // Remaining initialization is deferred to onStart
+ pool.registerWorker(this.workQueue = new ForkJoinPool.WorkQueue
+ (pool, this, pool.localMode));
}
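    // A sketch of the subclass-plus-factory pattern the class comment
    // calls for; TaggedWorker and taggedFactory are hypothetical, and the
    // factory would be passed to a factory-accepting ForkJoinPool
    // constructor.
    class TaggedWorker extends ForkJoinWorkerThread {
        TaggedWorker(ForkJoinPool pool) { super(pool); }
    }
    static final ForkJoinPool.ForkJoinWorkerThreadFactory taggedFactory =
        new ForkJoinPool.ForkJoinWorkerThreadFactory() {
            public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
                return new TaggedWorker(pool);
            }
        };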
- // Public access methods
-
/**
- * Returns the pool hosting this thread
+ * Returns the pool hosting this thread.
+ *
* @return the pool
*/
public ForkJoinPool getPool() {
@@ -231,543 +62,58 @@ public class ForkJoinWorkerThread extends Thread {
* threads (minus one) that have ever been created in the pool.
* This method may be useful for applications that track status or
* collect results per-worker rather than per-task.
- * @return the index number.
+ *
+ * @return the index number
*/
public int getPoolIndex() {
- return poolIndex;
- }
-
- /**
- * Establishes local first-in-first-out scheduling mode for forked
- * tasks that are never joined.
- * @param async if true, use locally FIFO scheduling
- */
- void setAsyncMode(boolean async) {
- locallyFifo = async;
- }
-
- // Runstate management
-
- // Runstate values. Order matters
- private static final int RUNNING = 0;
- private static final int SHUTDOWN = 1;
- private static final int TERMINATING = 2;
- private static final int TERMINATED = 3;
-
- final boolean isShutdown() { return runState >= SHUTDOWN; }
- final boolean isTerminating() { return runState >= TERMINATING; }
- final boolean isTerminated() { return runState == TERMINATED; }
- final boolean shutdown() { return transitionRunStateTo(SHUTDOWN); }
- final boolean shutdownNow() { return transitionRunStateTo(TERMINATING); }
-
- /**
- * Transition to at least the given state. Return true if not
- * already at least given state.
- */
- private boolean transitionRunStateTo(int state) {
- for (;;) {
- int s = runState;
- if (s >= state)
- return false;
- if (_unsafe.compareAndSwapInt(this, runStateOffset, s, state))
- return true;
- }
- }
-
- /**
- * Try to set status to active; fail on contention
- */
- private boolean tryActivate() {
- if (!active) {
- if (!pool.tryIncrementActiveCount())
- return false;
- active = true;
- }
- return true;
- }
-
- /**
- * Try to set status to active; fail on contention
- */
- private boolean tryInactivate() {
- if (active) {
- if (!pool.tryDecrementActiveCount())
- return false;
- active = false;
- }
- return true;
- }
-
- /**
- * Computes next value for random victim probe. Scans don't
- * require a very high quality generator, but also not a crummy
- * one. Marsaglia xor-shift is cheap and works well.
- */
- private static int xorShift(int r) {
- r ^= r << 1;
- r ^= r >>> 3;
- r ^= r << 10;
- return r;
- }
-
- // Lifecycle methods
-
- /**
- * This method is required to be public, but should never be
- * called explicitly. It performs the main run loop to execute
- * ForkJoinTasks.
- */
- public void run() {
- Throwable exception = null;
- try {
- onStart();
- pool.sync(this); // await first pool event
- mainLoop();
- } catch (Throwable ex) {
- exception = ex;
- } finally {
- onTermination(exception);
- }
- }
-
- /**
- * Execute tasks until shut down.
- */
- private void mainLoop() {
- while (!isShutdown()) {
- ForkJoinTask<?> t = pollTask();
- if (t != null || (t = pollSubmission()) != null)
- t.quietlyExec();
- else if (tryInactivate())
- pool.sync(this);
- }
+ return workQueue.poolIndex;
}
/**
* Initializes internal state after construction but before
* processing any tasks. If you override this method, you must
- * invoke super.onStart() at the beginning of the method.
+ * invoke {@code super.onStart()} at the beginning of the method.
* Initialization requires care: Most fields must have legal
* default values, to ensure that attempted accesses from other
* threads work correctly even before this thread starts
* processing tasks.
*/
protected void onStart() {
- // Allocate while starting to improve chances of thread-local
- // isolation
- queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
- // Initial value of seed need not be especially random but
- // should differ across workers and must be nonzero
- int p = poolIndex + 1;
- seed = p + (p << 8) + (p << 16) + (p << 24); // spread bits
}
/**
- * Perform cleanup associated with termination of this worker
+ * Performs cleanup associated with termination of this worker
* thread. If you override this method, you must invoke
- * super.onTermination at the end of the overridden method.
+ * {@code super.onTermination} at the end of the overridden method.
*
* @param exception the exception causing this thread to abort due
- * to an unrecoverable error, or null if completed normally.
+ * to an unrecoverable error, or {@code null} if completed normally
*/
protected void onTermination(Throwable exception) {
- // Execute remaining local tasks unless aborting or terminating
- while (exception == null && !pool.isTerminating() && base != sp) {
- try {
- ForkJoinTask<?> t = popTask();
- if (t != null)
- t.quietlyExec();
- } catch(Throwable ex) {
- exception = ex;
- }
- }
- // Cancel other tasks, transition status, notify pool, and
- // propagate exception to uncaught exception handler
- try {
- do;while (!tryInactivate()); // ensure inactive
- cancelTasks();
- runState = TERMINATED;
- pool.workerTerminated(this);
- } catch (Throwable ex) { // Shouldn't ever happen
- if (exception == null) // but if so, at least rethrown
- exception = ex;
- } finally {
- if (exception != null)
- ForkJoinTask.rethrowException(exception);
- }
}
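    // A sketch of the override contract documented above: super.onStart()
    // first, super.onTermination last. LoggingWorker is a hypothetical
    // subclass.
    class LoggingWorker extends ForkJoinWorkerThread {
        LoggingWorker(ForkJoinPool pool) { super(pool); }
        protected void onStart() {
            super.onStart();                   // required at the beginning
            System.out.println("worker " + getPoolIndex() + " started");
        }
        protected void onTermination(Throwable exception) {
            if (exception != null)
                System.out.println("worker aborted: " + exception);
            super.onTermination(exception);    // required at the end
        }
    }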
- // Intrinsics-based support for queue operations.
-
/**
- * Add in store-order the given task at given slot of q to
- * null. Caller must ensure q is nonnull and index is in range.
- */
- private static void setSlot(ForkJoinTask<?>[] q, int i,
- ForkJoinTask<?> t){
- _unsafe.putOrderedObject(q, (i << qShift) + qBase, t);
- }
-
- /**
- * CAS given slot of q to null. Caller must ensure q is nonnull
- * and index is in range.
- */
- private static boolean casSlotNull(ForkJoinTask<?>[] q, int i,
- ForkJoinTask<?> t) {
- return _unsafe.compareAndSwapObject(q, (i << qShift) + qBase, t, null);
- }
-
- /**
- * Sets sp in store-order.
- */
- private void storeSp(int s) {
- _unsafe.putOrderedInt(this, spOffset, s);
- }
-
- // Main queue methods
-
- /**
- * Pushes a task. Called only by current thread.
- * @param t the task. Caller must ensure nonnull
- */
- final void pushTask(ForkJoinTask<?> t) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int s = sp;
- setSlot(q, s & mask, t);
- storeSp(++s);
- if ((s -= base) == 1)
- pool.signalWork();
- else if (s >= mask)
- growQueue();
- }
-
- /**
- * Tries to take a task from the base of the queue, failing if
- * either empty or contended.
- * @return a task, or null if none or contended.
- */
- final ForkJoinTask<?> deqTask() {
- ForkJoinTask<?> t;
- ForkJoinTask<?>[] q;
- int i;
- int b;
- if (sp != (b = base) &&
- (q = queue) != null && // must read q after b
- (t = q[i = (q.length - 1) & b]) != null &&
- casSlotNull(q, i, t)) {
- base = b + 1;
- return t;
- }
- return null;
- }
-
- /**
- * Returns a popped task, or null if empty. Ensures active status
- * if nonnull. Called only by current thread.
- */
- final ForkJoinTask<?> popTask() {
- int s = sp;
- while (s != base) {
- if (tryActivate()) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int i = (s - 1) & mask;
- ForkJoinTask<?> t = q[i];
- if (t == null || !casSlotNull(q, i, t))
- break;
- storeSp(s - 1);
- return t;
- }
- }
- return null;
- }
-
- /**
- * Specialized version of popTask to pop only if
- * topmost element is the given task. Called only
- * by current thread while active.
- * @param t the task. Caller must ensure nonnull
- */
- final boolean unpushTask(ForkJoinTask<?> t) {
- ForkJoinTask<?>[] q = queue;
- int mask = q.length - 1;
- int s = sp - 1;
- if (casSlotNull(q, s & mask, t)) {
- storeSp(s);
- return true;
- }
- return false;
- }
-
- /**
- * Returns next task.
- */
- final ForkJoinTask<?> peekTask() {
- ForkJoinTask<?>[] q = queue;
- if (q == null)
- return null;
- int mask = q.length - 1;
- int i = locallyFifo? base : (sp - 1);
- return q[i & mask];
- }
-
- /**
- * Doubles queue array size. Transfers elements by emulating
- * steals (deqs) from old array and placing, oldest first, into
- * new array.
- */
- private void growQueue() {
- ForkJoinTask<?>[] oldQ = queue;
- int oldSize = oldQ.length;
- int newSize = oldSize << 1;
- if (newSize > MAXIMUM_QUEUE_CAPACITY)
- throw new RejectedExecutionException("Queue capacity exceeded");
- ForkJoinTask<?>[] newQ = queue = new ForkJoinTask<?>[newSize];
-
- int b = base;
- int bf = b + oldSize;
- int oldMask = oldSize - 1;
- int newMask = newSize - 1;
- do {
- int oldIndex = b & oldMask;
- ForkJoinTask<?> t = oldQ[oldIndex];
- if (t != null && !casSlotNull(oldQ, oldIndex, t))
- t = null;
- setSlot(newQ, b & newMask, t);
- } while (++b != bf);
- pool.signalWork();
- }
-
- /**
- * Tries to steal a task from another worker. Starts at a random
- * index of workers array, and probes workers until finding one
- * with non-empty queue or finding that all are empty. It
- * randomly selects the first n probes. If these are empty, it
- * resorts to a full circular traversal, which is necessary to
- * accurately set active status by caller. Also restarts if pool
- * events occurred since last scan, which forces refresh of
- * workers array, in case barrier was associated with resize.
- *
- * This method must be both fast and quiet -- usually avoiding
- * memory accesses that could disrupt cache sharing etc other than
- * those needed to check for and take tasks. This accounts for,
- * among other things, updating random seed in place without
- * storing it until exit.
- *
- * @return a task, or null if none found
- */
- private ForkJoinTask<?> scan() {
- ForkJoinTask<?> t = null;
- int r = seed; // extract once to keep scan quiet
- ForkJoinWorkerThread[] ws; // refreshed on outer loop
- int mask; // must be power 2 minus 1 and > 0
- outer:do {
- if ((ws = pool.workers) != null && (mask = ws.length - 1) > 0) {
- int idx = r;
- int probes = ~mask; // use random index while negative
- for (;;) {
- r = xorShift(r); // update random seed
- ForkJoinWorkerThread v = ws[mask & idx];
- if (v == null || v.sp == v.base) {
- if (probes <= mask)
- idx = (probes++ < 0)? r : (idx + 1);
- else
- break;
- }
- else if (!tryActivate() || (t = v.deqTask()) == null)
- continue outer; // restart on contention
- else
- break outer;
- }
- }
- } while (pool.hasNewSyncEvent(this)); // retry on pool events
- seed = r;
- return t;
- }
-
- /**
- * gets and removes a local or stolen a task
- * @return a task, if available
- */
- final ForkJoinTask<?> pollTask() {
- ForkJoinTask<?> t = locallyFifo? deqTask() : popTask();
- if (t == null && (t = scan()) != null)
- ++stealCount;
- return t;
- }
-
- /**
- * gets a local task
- * @return a task, if available
- */
- final ForkJoinTask<?> pollLocalTask() {
- return locallyFifo? deqTask() : popTask();
- }
-
- /**
- * Returns a pool submission, if one exists, activating first.
- * @return a submission, if available
- */
- private ForkJoinTask<?> pollSubmission() {
- ForkJoinPool p = pool;
- while (p.hasQueuedSubmissions()) {
- ForkJoinTask<?> t;
- if (tryActivate() && (t = p.pollSubmission()) != null)
- return t;
- }
- return null;
- }
-
- // Methods accessed only by Pool
-
- /**
- * Removes and cancels all tasks in queue. Can be called from any
- * thread.
- */
- final void cancelTasks() {
- ForkJoinTask<?> t;
- while (base != sp && (t = deqTask()) != null)
- t.cancelIgnoringExceptions();
- }
-
- /**
- * Drains tasks to given collection c
- * @return the number of tasks drained
- */
- final int drainTasksTo(Collection<ForkJoinTask<?>> c) {
- int n = 0;
- ForkJoinTask<?> t;
- while (base != sp && (t = deqTask()) != null) {
- c.add(t);
- ++n;
- }
- return n;
- }
-
- /**
- * Get and clear steal count for accumulation by pool. Called
- * only when known to be idle (in pool.sync and termination).
- */
- final int getAndClearStealCount() {
- int sc = stealCount;
- stealCount = 0;
- return sc;
- }
-
- /**
- * Returns true if at least one worker in the given array appears
- * to have at least one queued task.
- * @param ws array of workers
- */
- static boolean hasQueuedTasks(ForkJoinWorkerThread[] ws) {
- if (ws != null) {
- int len = ws.length;
- for (int j = 0; j < 2; ++j) { // need two passes for clean sweep
- for (int i = 0; i < len; ++i) {
- ForkJoinWorkerThread w = ws[i];
- if (w != null && w.sp != w.base)
- return true;
- }
- }
- }
- return false;
- }
-
- // Support methods for ForkJoinTask
-
- /**
- * Returns an estimate of the number of tasks in the queue.
- */
- final int getQueueSize() {
- int n = sp - base;
- return n < 0? 0 : n; // suppress momentarily negative values
- }
-
- /**
- * Returns an estimate of the number of tasks, offset by a
- * function of number of idle workers.
- */
- final int getEstimatedSurplusTaskCount() {
- // The halving approximates weighting idle vs non-idle workers
- return (sp - base) - (pool.getIdleThreadCount() >>> 1);
- }
-
- /**
- * Scan, returning early if joinMe done
- */
- final ForkJoinTask<?> scanWhileJoining(ForkJoinTask<?> joinMe) {
- ForkJoinTask<?> t = pollTask();
- if (t != null && joinMe.status < 0 && sp == base) {
- pushTask(t); // unsteal if done and this task would be stealable
- t = null;
- }
- return t;
- }
-
- /**
- * Runs tasks until pool isQuiescent
+ * This method is required to be public, but should never be
+ * called explicitly. It performs the main run loop to execute
+ * {@link ForkJoinTask}s.
*/
- final void helpQuiescePool() {
- for (;;) {
- ForkJoinTask<?> t = pollTask();
- if (t != null)
- t.quietlyExec();
- else if (tryInactivate() && pool.isQuiescent())
- break;
- }
- do;while (!tryActivate()); // re-activate on exit
- }
-
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ public void run() {
+ Throwable exception = null;
try {
- return Unsafe.getUnsafe();
- } catch (SecurityException se) {
+ onStart();
+ pool.runWorker(workQueue);
+ } catch (Throwable ex) {
+ exception = ex;
+ } finally {
try {
- return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
- }});
- } catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ onTermination(exception);
+ } catch (Throwable ex) {
+ if (exception == null)
+ exception = ex;
+ } finally {
+ pool.deregisterWorker(this, exception);
}
}
}
-
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (ForkJoinWorkerThread.class.getDeclaredField(fieldName));
- }
-
- static final Unsafe _unsafe;
- static final long baseOffset;
- static final long spOffset;
- static final long runStateOffset;
- static final long qBase;
- static final int qShift;
- static {
- try {
- _unsafe = getUnsafe();
- baseOffset = fieldOffset("base");
- spOffset = fieldOffset("sp");
- runStateOffset = fieldOffset("runState");
- qBase = _unsafe.arrayBaseOffset(ForkJoinTask[].class);
- int s = _unsafe.arrayIndexScale(ForkJoinTask[].class);
- if ((s & (s-1)) != 0)
- throw new Error("data type scale not a power of two");
- qShift = 31 - Integer.numberOfLeadingZeros(s);
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
}
+
diff --git a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
index 3b46c176ff..ceeb9212d5 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
@@ -1,30 +1,38 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.concurrent.*;
-import java.util.concurrent.locks.*;
-import java.util.concurrent.atomic.*;
-import java.util.*;
-import java.io.*;
-import sun.misc.Unsafe;
-import java.lang.reflect.*;
+
+import java.util.AbstractQueue;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
/**
- * An unbounded {@linkplain TransferQueue} based on linked nodes.
+ * An unbounded {@link TransferQueue} based on linked nodes.
* This queue orders elements FIFO (first-in-first-out) with respect
* to any given producer. The <em>head</em> of the queue is that
* element that has been on the queue the longest time for some
* producer. The <em>tail</em> of the queue is that element that has
* been on the queue the shortest time for some producer.
*
- * <p>Beware that, unlike in most collections, the {@code size}
- * method is <em>NOT</em> a constant-time operation. Because of the
+ * <p>Beware that, unlike in most collections, the {@code size} method
+ * is <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these queues, determining the current number
- * of elements requires a traversal of the elements.
+ * of elements requires a traversal of the elements, and so may report
+ * inaccurate results if this collection is modified during traversal.
+ * Additionally, the bulk operations {@code addAll},
+ * {@code removeAll}, {@code retainAll}, {@code containsAll},
+ * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
+ * to be performed atomically. For example, an iterator operating
+ * concurrently with an {@code addAll} operation might view only some
+ * of the added elements.
*
* <p>This class and its iterator implement all of the
* <em>optional</em> methods of the {@link Collection} and {@link
@@ -44,381 +52,938 @@ import java.lang.reflect.*;
* @since 1.7
* @author Doug Lea
* @param <E> the type of elements held in this collection
- *
*/
public class LinkedTransferQueue<E> extends AbstractQueue<E>
implements TransferQueue<E>, java.io.Serializable {
private static final long serialVersionUID = -3223113410248163686L;
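    // An illustrative handoff using the TransferQueue operations this
    // class implements: transfer returns only after some consumer has
    // received the element (both calls below can throw
    // InterruptedException).
    final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();
    new Thread(new Runnable() {
        public void run() {
            try {
                System.out.println(q.take());
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
    }).start();
    q.transfer("handoff");  // blocks until the consumer above takes it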
/*
- * This class extends the approach used in FIFO-mode
- * SynchronousQueues. See the internal documentation, as well as
- * the PPoPP 2006 paper "Scalable Synchronous Queues" by Scherer,
- * Lea & Scott
- * (http://www.cs.rice.edu/~wns1/papers/2006-PPoPP-SQ.pdf)
+ * *** Overview of Dual Queues with Slack ***
+ *
+ * Dual Queues, introduced by Scherer and Scott
+ * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
+ * (linked) queues in which nodes may represent either data or
+ * requests. When a thread tries to enqueue a data node, but
+ * encounters a request node, it instead "matches" and removes it;
+ * and vice versa for enqueuing requests. Blocking Dual Queues
+ * arrange that threads enqueuing unmatched requests block until
+ * other threads provide the match. Dual Synchronous Queues (see
+ * Scherer, Lea, & Scott
+ * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
+ * additionally arrange that threads enqueuing unmatched data also
+ * block. Dual Transfer Queues support all of these modes, as
+ * dictated by callers.
+ *
+ * A FIFO dual queue may be implemented using a variation of the
+ * Michael & Scott (M&S) lock-free queue algorithm
+ * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
+ * It maintains two pointer fields, "head", pointing to a
+ * (matched) node that in turn points to the first actual
+ * (unmatched) queue node (or null if empty); and "tail" that
+ * points to the last node on the queue (or again null if
+ * empty). For example, here is a possible queue with four data
+ * elements:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> U -> U -> U -> U
+ *
+ * The M&S queue algorithm is known to be prone to scalability and
+ * overhead limitations when maintaining (via CAS) these head and
+ * tail pointers. This has led to the development of
+ * contention-reducing variants such as elimination arrays (see
+ * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
+ * optimistic back pointers (see Ladan-Mozes & Shavit
+ * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
+ * However, the nature of dual queues enables a simpler tactic for
+ * improving M&S-style implementations when dual-ness is needed.
+ *
+ * In a dual queue, each node must atomically maintain its match
+ * status. While there are other possible variants, we implement
+ * this here as: for a data-mode node, matching entails CASing an
+ * "item" field from a non-null data value to null upon match, and
+ * vice-versa for request nodes, CASing from null to a data
+ * value. (Note that the linearization properties of this style of
+ * queue are easy to verify -- elements are made available by
+ * linking, and unavailable by matching.) Compared to plain M&S
+ * queues, this property of dual queues requires one additional
+ * successful atomic operation per enq/deq pair. But it also
+ * enables lower cost variants of queue maintenance mechanics. (A
+ * variation of this idea applies even for non-dual queues that
+ * support deletion of interior elements, such as
+ * j.u.c.ConcurrentLinkedQueue.)
+ *
+ * Once a node is matched, its match status can never again
+ * change. We may thus arrange that the linked list of them
+ * contain a prefix of zero or more matched nodes, followed by a
+ * suffix of zero or more unmatched nodes. (Note that we allow
+ * both the prefix and suffix to be zero length, which in turn
+ * means that we do not use a dummy header.) If we were not
+ * concerned with either time or space efficiency, we could
+ * correctly perform enqueue and dequeue operations by traversing
+ * from a pointer to the initial node; CASing the item of the
+ * first unmatched node on match and CASing the next field of the
+ * trailing node on appends. (Plus some special-casing when
+ * initially empty). While this would be a terrible idea in
+ * itself, it does have the benefit of not requiring ANY atomic
+ * updates on head/tail fields.
+ *
+ * We introduce here an approach that lies between the extremes of
+ * never versus always updating queue (head and tail) pointers.
+ * This offers a tradeoff between sometimes requiring extra
+ * traversal steps to locate the first and/or last unmatched
+ * nodes, versus the reduced overhead and contention of fewer
+ * updates to queue pointers. For example, a possible snapshot of
+ * a queue is:
+ *
+ * head tail
+ * | |
+ * v v
+ * M -> M -> U -> U -> U -> U
+ *
+ * The best value for this "slack" (the targeted maximum distance
+ * between the value of "head" and the first unmatched node, and
+ * similarly for "tail") is an empirical matter. We have found
+ * that using very small constants in the range of 1-3 work best
+ * over a range of platforms. Larger values introduce increasing
+ * costs of cache misses and risks of long traversal chains, while
+ * smaller values increase CAS contention and overhead.
+ *
+ * Dual queues with slack differ from plain M&S dual queues by
+ * virtue of only sometimes updating head or tail pointers when
+ * matching, appending, or even traversing nodes; in order to
+ * maintain a targeted slack. The idea of "sometimes" may be
+ * operationalized in several ways. The simplest is to use a
+ * per-operation counter incremented on each traversal step, and
+ * to try (via CAS) to update the associated queue pointer
+ * whenever the count exceeds a threshold. Another, that requires
+ * more overhead, is to use random number generators to update
+ * with a given probability per traversal step.
+ *
+ * In any strategy along these lines, because CASes updating
+ * fields may fail, the actual slack may exceed targeted
+ * slack. However, they may be retried at any time to maintain
+ * targets. Even when using very small slack values, this
+ * approach works well for dual queues because it allows all
+ * operations up to the point of matching or appending an item
+ * (hence potentially allowing progress by another thread) to be
+ * read-only, thus not introducing any further contention. As
+ * described below, we implement this by performing slack
+ * maintenance retries only after these points.
+ *
+ * As an accompaniment to such techniques, traversal overhead can
+ * be further reduced without increasing contention of head
+ * pointer updates: Threads may sometimes shortcut the "next" link
+ * path from the current "head" node to be closer to the currently
+ * known first unmatched node, and similarly for tail. Again, this
+ * may be triggered with using thresholds or randomization.
+ *
+ * These ideas must be further extended to avoid unbounded amounts
+ * of costly-to-reclaim garbage caused by the sequential "next"
+ * links of nodes starting at old forgotten head nodes: As first
+ * described in detail by Boehm
+ * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
+ * delays noticing that any arbitrarily old node has become
+ * garbage, all newer dead nodes will also be unreclaimed.
+ * (Similar issues arise in non-GC environments.) To cope with
+ * this in our implementation, upon CASing to advance the head
+ * pointer, we set the "next" link of the previous head to point
+ * only to itself; thus limiting the length of connected dead lists.
+ * (We also take similar care to wipe out possibly garbage
+ * retaining values held in other Node fields.) However, doing so
+ * adds some further complexity to traversal: If any "next"
+ * pointer links to itself, it indicates that the current thread
+ * has lagged behind a head-update, and so the traversal must
+ * continue from the "head". Traversals trying to find the
+ * current tail starting from "tail" may also encounter
+ * self-links, in which case they also continue at "head".
+ *
+ * It is tempting in slack-based scheme to not even use CAS for
+ * updates (similarly to Ladan-Mozes & Shavit). However, this
+ * cannot be done for head updates under the above link-forgetting
+ * mechanics because an update may leave head at a detached node.
+ * And while direct writes are possible for tail updates, they
+ * increase the risk of long retraversals, and hence long garbage
+ * chains, which can be much more costly than is worthwhile
+ * considering that the cost difference of performing a CAS vs
+ * write is smaller when they are not triggered on each operation
+ * (especially considering that writes and CASes equally require
+ * additional GC bookkeeping ("write barriers") that are sometimes
+ * more costly than the writes themselves because of contention).
+ *
+ * *** Overview of implementation ***
+ *
+ * We use a threshold-based approach to updates, with a slack
+ * threshold of two -- that is, we update head/tail when the
+ * current pointer appears to be two or more steps away from the
+ * first/last node. The slack value is hard-wired: a path greater
+ * than one is naturally implemented by checking equality of
+ * traversal pointers except when the list has only one element,
+ * in which case we keep slack threshold at one. Avoiding tracking
+ * explicit counts across method calls slightly simplifies an
+ * already-messy implementation. Using randomization would
+ * probably work better if there were a low-quality dirt-cheap
+ * per-thread one available, but even ThreadLocalRandom is too
+ * heavy for these purposes.
+ *
+ * With such a small slack threshold value, it is not worthwhile
+ * to augment this with path short-circuiting (i.e., unsplicing
+ * interior nodes) except in the case of cancellation/removal (see
+ * below).
+ *
+ * We allow both the head and tail fields to be null before any
+ * nodes are enqueued; initializing upon first append. This
+ * simplifies some other logic, as well as providing more
+ * efficient explicit control paths instead of letting JVMs insert
+ * implicit NullPointerExceptions when they are null. While not
+ * currently fully implemented, we also leave open the possibility
+ * of re-nulling these fields when empty (which is complicated to
+ * arrange, for little benefit.)
+ *
+ * All enqueue/dequeue operations are handled by the single method
+ * "xfer" with parameters indicating whether to act as some form
+ * of offer, put, poll, take, or transfer (each possibly with
+ * timeout). The relative complexity of using one monolithic
+ * method outweighs the code bulk and maintenance problems of
+ * using separate methods for each case.
*
- * The main extension is to provide different Wait modes for the
- * main "xfer" method that puts or takes items. These don't
- * impact the basic dual-queue logic, but instead control whether
- * or how threads block upon insertion of request or data nodes
- * into the dual queue. It also uses slightly different
- * conventions for tracking whether nodes are off-list or
- * cancelled.
+ * Operation consists of up to three phases. The first is
+ * implemented within method xfer, the second in tryAppend, and
+ * the third in method awaitMatch.
+ *
+ * 1. Try to match an existing node
+ *
+ * Starting at head, skip already-matched nodes until finding
+ * an unmatched node of opposite mode, if one exists, in which
+ * case matching it and returning, also if necessary updating
+ * head to one past the matched node (or the node itself if the
+ * list has no other unmatched nodes). If the CAS misses, then
+ * a loop retries advancing head by two steps until either
+ * success or the slack is at most two. By requiring that each
+ * attempt advances head by two (if applicable), we ensure that
+ * the slack does not grow without bound. Traversals also check
+ * if the initial head is now off-list, in which case they
+ * start at the new head.
+ *
+ * If no candidates are found and the call was untimed
+ * poll/offer, (argument "how" is NOW) return.
+ *
+ * 2. Try to append a new node (method tryAppend)
+ *
+ * Starting at current tail pointer, find the actual last node
+ * and try to append a new node (or if head was null, establish
+ * the first node). Nodes can be appended only if their
+ * predecessors are either already matched or are of the same
+ * mode. If we detect otherwise, then a new node with opposite
+ * mode must have been appended during traversal, so we must
+ * restart at phase 1. The traversal and update steps are
+ * otherwise similar to phase 1: Retrying upon CAS misses and
+ * checking for staleness. In particular, if a self-link is
+ * encountered, then we can safely jump to a node on the list
+ * by continuing the traversal at current head.
+ *
+ * On successful append, if the call was ASYNC, return.
+ *
+ * 3. Await match or cancellation (method awaitMatch)
+ *
+ * Wait for another thread to match node; instead cancelling if
+ * the current thread was interrupted or the wait timed out. On
+ * multiprocessors, we use front-of-queue spinning: If a node
+ * appears to be the first unmatched node in the queue, it
+ * spins a bit before blocking. In either case, before blocking
+ * it tries to unsplice any nodes between the current "head"
+ * and the first unmatched node.
+ *
+ * Front-of-queue spinning vastly improves performance of
+ * heavily contended queues. And so long as it is relatively
+ * brief and "quiet", spinning does not much impact performance
+ * of less-contended queues. During spins threads check their
+ * interrupt status and generate a thread-local random number
+ * to decide to occasionally perform a Thread.yield. While
+ * yield has underdefined specs, we assume that it might help,
+ * and will not hurt, in limiting impact of spinning on busy
+ * systems. We also use smaller (1/2) spins for nodes that are
+ * not known to be front but whose predecessors have not
+ * blocked -- these "chained" spins avoid artifacts of
+ * front-of-queue rules which otherwise lead to alternating
+ * nodes spinning vs blocking. Further, front threads that
+ * represent phase changes (from data to request node or vice
+ * versa) compared to their predecessors receive additional
+ * chained spins, reflecting longer paths typically required to
+ * unblock threads during phase changes.
+ *
+ *
+ * ** Unlinking removed interior nodes **
+ *
+ * In addition to minimizing garbage retention via self-linking
+ * described above, we also unlink removed interior nodes. These
+ * may arise due to timed out or interrupted waits, or calls to
+ * remove(x) or Iterator.remove. Normally, given a node that was
+ * at one time known to be the predecessor of some node s that is
+ * to be removed, we can unsplice s by CASing the next field of
+ * its predecessor if it still points to s (otherwise s must
+ * already have been removed or is now offlist). But there are two
+ * situations in which we cannot guarantee to make node s
+ * unreachable in this way: (1) If s is the trailing node of list
+ * (i.e., with null next), then it is pinned as the target node
+ * for appends, so can only be removed later after other nodes are
+ * appended. (2) We cannot necessarily unlink s given a
+ * predecessor node that is matched (including the case of being
+ * cancelled): the predecessor may already be unspliced, in which
+ * case some previous reachable node may still point to s.
+ * (For further explanation see Herlihy & Shavit "The Art of
+ * Multiprocessor Programming" chapter 9). Although, in both
+ * cases, we can rule out the need for further action if either s
+ * or its predecessor are (or can be made to be) at, or fall off
+ * from, the head of list.
+ *
+ * Without taking these into account, it would be possible for an
+ * unbounded number of supposedly removed nodes to remain
+ * reachable. Situations leading to such buildup are uncommon but
+ * can occur in practice; for example when a series of short timed
+ * calls to poll repeatedly time out but never otherwise fall off
+ * the list because of an untimed call to take at the front of the
+ * queue.
+ *
+ * When these cases arise, rather than always retraversing the
+ * entire list to find an actual predecessor to unlink (which
+ * won't help for case (1) anyway), we record a conservative
+ * estimate of possible unsplice failures (in "sweepVotes").
+ * We trigger a full sweep when the estimate exceeds a threshold
+ * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
+ * removal failures to tolerate before sweeping through, unlinking
+ * cancelled nodes that were not unlinked upon initial removal.
+ * We perform sweeps by the thread hitting threshold (rather than
+ * background threads or by spreading work to other threads)
+ * because in the main contexts in which removal occurs, the
+ * caller is already timed-out, cancelled, or performing a
+ * potentially O(n) operation (e.g. remove(x)), none of which are
+ * time-critical enough to warrant the overhead that alternatives
+ * would impose on other threads.
+ *
+ * Because the sweepVotes estimate is conservative, and because
+ * nodes become unlinked "naturally" as they fall off the head of
+ * the queue, and because we allow votes to accumulate even while
+ * sweeps are in progress, there are typically significantly fewer
+ * such nodes than estimated. Choice of a threshold value
+ * balances the likelihood of wasted effort and contention, versus
+ * providing a worst-case bound on retention of interior nodes in
+ * quiescent queues. The value defined below was chosen
+ * empirically to balance these under various timeout scenarios.
+ *
+ * Note that we cannot self-link unlinked interior nodes during
+ * sweeps. However, the associated garbage chains terminate when
+ * some successor ultimately falls off the head of the list and is
+ * self-linked.
*/
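    // A toy rendition of the matching step described above, using
    // AtomicReference in place of Unsafe for clarity: matching a data
    // node CASes its item from non-null to null, and matching a request
    // node CASes null to the offered datum. tryMatch is a hypothetical
    // helper.
    static boolean tryMatch(
            java.util.concurrent.atomic.AtomicReference<Object> item,
            boolean isData, Object offered) {
        Object x = item.get();
        return isData
            ? (x != null && item.compareAndSet(x, null))        // take the datum
            : (x == null && item.compareAndSet(null, offered)); // fill the request
    }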
- // Wait modes for xfer method
- static final int NOWAIT = 0;
- static final int TIMEOUT = 1;
- static final int WAIT = 2;
-
- /** The number of CPUs, for spin control */
- static final int NCPUS = Runtime.getRuntime().availableProcessors();
+ /** True if on multiprocessor */
+ private static final boolean MP =
+ Runtime.getRuntime().availableProcessors() > 1;
/**
- * The number of times to spin before blocking in timed waits.
- * The value is empirically derived -- it works well across a
- * variety of processors and OSes. Empirically, the best value
- * seems not to vary with number of CPUs (beyond 2) so is just
- * a constant.
+ * The number of times to spin (with randomly interspersed calls
+ * to Thread.yield) on multiprocessor before blocking when a node
+ * is apparently the first waiter in the queue. See above for
+ * explanation. Must be a power of two. The value is empirically
+ * derived -- it works pretty well across a variety of processors,
+ * numbers of CPUs, and OSes.
*/
- static final int maxTimedSpins = (NCPUS < 2)? 0 : 32;
+ private static final int FRONT_SPINS = 1 << 7;
/**
- * The number of times to spin before blocking in untimed waits.
- * This is greater than timed value because untimed waits spin
- * faster since they don't need to check times on each spin.
+ * The number of times to spin before blocking when a node is
+ * preceded by another node that is apparently spinning. Also
+ * serves as an increment to FRONT_SPINS on phase changes, and as
+ * base average frequency for yielding during spins. Must be a
+ * power of two.
*/
- static final int maxUntimedSpins = maxTimedSpins * 16;
+ private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
/**
- * The number of nanoseconds for which it is faster to spin
- * rather than to use timed park. A rough estimate suffices.
+ * The maximum number of estimated removal failures (sweepVotes)
+ * to tolerate before sweeping through the queue unlinking
+ * cancelled nodes that were not unlinked upon initial
+ * removal. See above for explanation. The value must be at least
+ * two to avoid useless sweeps when removing trailing nodes.
*/
- static final long spinForTimeoutThreshold = 1000L;
+ static final int SWEEP_THRESHOLD = 32;
/**
- * Node class for LinkedTransferQueue. Opportunistically
- * subclasses from AtomicReference to represent item. Uses Object,
- * not E, to allow setting item to "this" after use, to avoid
- * garbage retention. Similarly, setting the next field to this is
- * used as sentinel that node is off list.
+ * Queue nodes. Uses Object, not E, for items to allow forgetting
+ * them after use. Relies heavily on Unsafe mechanics to minimize
+ * unnecessary ordering constraints: Writes that are intrinsically
+ * ordered wrt other accesses or CASes use simple relaxed forms.
*/
- static final class QNode extends AtomicReference<Object> {
- volatile QNode next;
- volatile Thread waiter; // to control park/unpark
- final boolean isData;
- QNode(Object item, boolean isData) {
- super(item);
+ static final class Node {
+ final boolean isData; // false if this is a request node
+ volatile Object item; // initially non-null if isData; CASed to match
+ volatile Node next;
+ volatile Thread waiter; // null until waiting
+
+ // CAS methods for fields
+ final boolean casNext(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
+ }
+
+ final boolean casItem(Object cmp, Object val) {
+ // assert cmp == null || cmp.getClass() != Node.class;
+ return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
+ }
+
+ /**
+ * Constructs a new node. Uses relaxed write because item can
+ * only be seen after publication via casNext.
+ */
+ Node(Object item, boolean isData) {
+ UNSAFE.putObject(this, itemOffset, item); // relaxed write
this.isData = isData;
}
- static final AtomicReferenceFieldUpdater<QNode, QNode>
- nextUpdater = AtomicReferenceFieldUpdater.newUpdater
- (QNode.class, QNode.class, "next");
+ /**
+ * Links node to itself to avoid garbage retention. Called
+ * only after CASing head field, so uses relaxed write.
+ */
+ final void forgetNext() {
+ UNSAFE.putObject(this, nextOffset, this);
+ }
- final boolean casNext(QNode cmp, QNode val) {
- return nextUpdater.compareAndSet(this, cmp, val);
+ /**
+ * Sets item to self and waiter to null, to avoid garbage
+ * retention after matching or cancelling. Uses relaxed writes
+ * because order is already constrained in the only calling
+ * contexts: item is forgotten only after volatile/atomic
+ * mechanics that extract items. Similarly, clearing waiter
+ * follows either CAS or return from park (if ever parked;
+ * else we don't care).
+ */
+ final void forgetContents() {
+ UNSAFE.putObject(this, itemOffset, this);
+ UNSAFE.putObject(this, waiterOffset, null);
}
- final void clearNext() {
- nextUpdater.lazySet(this, this);
+ /**
+ * Returns true if this node has been matched, including the
+ * case of artificial matches due to cancellation.
+ */
+ final boolean isMatched() {
+ Object x = item;
+ return (x == this) || ((x == null) == isData);
}
- }
+ /**
+ * Returns true if this is an unmatched request node.
+ */
+ final boolean isUnmatchedRequest() {
+ return !isData && item == null;
+ }
- /**
- * Padded version of AtomicReference used for head, tail and
- * cleanMe, to alleviate contention across threads CASing one vs
- * the other.
- */
- static final class PaddedAtomicReference<T> extends AtomicReference<T> {
- // enough padding for 64bytes with 4byte refs
- Object p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe;
- PaddedAtomicReference(T r) { super(r); }
+ /**
+ * Returns true if a node with the given mode cannot be
+ * appended to this node because this node is unmatched and
+ * has opposite data mode.
+ */
+ final boolean cannotPrecede(boolean haveData) {
+ boolean d = isData;
+ Object x;
+ return d != haveData && (x = item) != this && (x != null) == d;
+ }
+
+ /**
+ * Tries to artificially match a data node -- used by remove.
+ */
+ final boolean tryMatchData() {
+ // assert isData;
+ Object x = item;
+ if (x != null && x != this && casItem(x, null)) {
+ LockSupport.unpark(waiter);
+ return true;
+ }
+ return false;
+ }
+
+ private static final long serialVersionUID = -3375979862319811754L;
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long itemOffset;
+ private static final long nextOffset;
+ private static final long waiterOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = Node.class;
+ itemOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("item"));
+ nextOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("next"));
+ waiterOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("waiter"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
}
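For comparison with the removed QNode code above, the same casNext can be expressed without sun.misc.Unsafe via AtomicReferenceFieldUpdater. A minimal sketch (class and field names hypothetical), trading a little reflection overhead for portability:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    // Sketch: portable CAS on a volatile field, as the old QNode did.
    class UpdaterNode {
        volatile UpdaterNode next;

        private static final AtomicReferenceFieldUpdater<UpdaterNode, UpdaterNode>
            NEXT_UPDATER = AtomicReferenceFieldUpdater.newUpdater(
                UpdaterNode.class, UpdaterNode.class, "next");

        boolean casNext(UpdaterNode cmp, UpdaterNode val) {
            return NEXT_UPDATER.compareAndSet(this, cmp, val);
        }
    }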
+ /** head of the queue; null until first enqueue */
+ transient volatile Node head;
- /** head of the queue */
- private transient final PaddedAtomicReference<QNode> head;
- /** tail of the queue */
- private transient final PaddedAtomicReference<QNode> tail;
+ /** tail of the queue; null until first append */
+ private transient volatile Node tail;
- /**
- * Reference to a cancelled node that might not yet have been
- * unlinked from queue because it was the last inserted node
- * when it cancelled.
- */
- private transient final PaddedAtomicReference<QNode> cleanMe;
+ /** The number of apparent failures to unsplice removed nodes */
+ private transient volatile int sweepVotes;
- /**
- * Tries to cas nh as new head; if successful, unlink
- * old head's next node to avoid garbage retention.
+ // CAS methods for fields
+ private boolean casTail(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
+ }
+
+ private boolean casHead(Node cmp, Node val) {
+ return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
+ }
+
+ private boolean casSweepVotes(int cmp, int val) {
+ return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
+ }
+
+ /*
+ * Possible values for "how" argument in xfer method.
*/
- private boolean advanceHead(QNode h, QNode nh) {
- if (h == head.get() && head.compareAndSet(h, nh)) {
- h.clearNext(); // forget old next
- return true;
- }
- return false;
+ private static final int NOW = 0; // for untimed poll, tryTransfer
+ private static final int ASYNC = 1; // for offer, put, add
+ private static final int SYNC = 2; // for transfer, take
+ private static final int TIMED = 3; // for timed poll, tryTransfer
+
+ @SuppressWarnings("unchecked")
+ static <E> E cast(Object item) {
+ // assert item == null || item.getClass() != Node.class;
+ return (E) item;
}
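To see how the four "how" values surface in the public API, here is a usage sketch (assuming the class as patched; the helper method is illustrative):

    import java.util.concurrent.TimeUnit;

    // Sketch: each public method fixes one mode for xfer.
    static void modes(LinkedTransferQueue<String> q) throws InterruptedException {
        q.put("a");                                        // ASYNC: never blocks
        q.put("b");
        String now   = q.poll();                           // NOW: "a", no waiting
        String sync  = q.take();                           // SYNC: "b", else waits
        String timed = q.poll(50, TimeUnit.MILLISECONDS);  // TIMED: null on timeout
    }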
/**
- * Puts or takes an item. Used for most queue operations (except
- * poll() and tryTransfer()). See the similar code in
- * SynchronousQueue for detailed explanation.
+ * Implements all queuing methods. See above for explanation.
*
- * @param e the item or if null, signifies that this is a take
- * @param mode the wait mode: NOWAIT, TIMEOUT, WAIT
- * @param nanos timeout in nanosecs, used only if mode is TIMEOUT
- * @return an item, or null on failure
+ * @param e the item or null for take
+ * @param haveData true if this is a put, else a take
+ * @param how NOW, ASYNC, SYNC, or TIMED
+ * @param nanos timeout in nanosecs, used only if mode is TIMED
+ * @return an item if matched, else e
+ * @throws NullPointerException if haveData mode but e is null
*/
- private Object xfer(Object e, int mode, long nanos) {
- boolean isData = (e != null);
- QNode s = null;
- final PaddedAtomicReference<QNode> head = this.head;
- final PaddedAtomicReference<QNode> tail = this.tail;
-
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
-
- if (t != null && (t == h || t.isData == isData)) {
- if (s == null)
- s = new QNode(e, isData);
- QNode last = t.next;
- if (last != null) {
- if (t == tail.get())
- tail.compareAndSet(t, last);
- }
- else if (t.casNext(null, s)) {
- tail.compareAndSet(t, s);
- return awaitFulfill(t, s, e, mode, nanos);
+ private E xfer(E e, boolean haveData, int how, long nanos) {
+ if (haveData && (e == null))
+ throw new NullPointerException();
+ Node s = null; // the node to append, if needed
+
+ retry:
+ for (;;) { // restart on append race
+
+ for (Node h = head, p = h; p != null;) { // find & match first node
+ boolean isData = p.isData;
+ Object item = p.item;
+ if (item != p && (item != null) == isData) { // unmatched
+ if (isData == haveData) // can't match
+ break;
+ if (p.casItem(item, e)) { // match
+ for (Node q = p; q != h;) {
+ Node n = q.next; // update by 2 unless singleton
+ if (head == h && casHead(h, n == null ? q : n)) {
+ h.forgetNext();
+ break;
+ } // advance and retry
+ if ((h = head) == null ||
+ (q = h.next) == null || !q.isMatched())
+ break; // unless slack < 2
+ }
+ LockSupport.unpark(p.waiter);
+ return LinkedTransferQueue.<E>cast(item);
+ }
}
+ Node n = p.next;
+ p = (p != n) ? n : (h = head); // Use head if p offlist
}
- else if (h != null) {
- QNode first = h.next;
- if (t == tail.get() && first != null &&
- advanceHead(h, first)) {
- Object x = first.get();
- if (x != first && first.compareAndSet(x, e)) {
- LockSupport.unpark(first.waiter);
- return isData? e : x;
- }
- }
+ if (how != NOW) { // No matches available
+ if (s == null)
+ s = new Node(e, haveData);
+ Node pred = tryAppend(s, haveData);
+ if (pred == null)
+ continue retry; // lost race vs opposite mode
+ if (how != ASYNC)
+ return awaitMatch(s, pred, e, (how == TIMED), nanos);
}
+ return e; // not waiting
}
}
-
/**
- * Version of xfer for poll() and tryTransfer, which
- * simplifies control paths both here and in xfer.
+ * Tries to append node s as tail.
+ *
+ * @param s the node to append
+ * @param haveData true if appending in data mode
+ * @return null on failure due to losing race with append in
+ * different mode, else s's predecessor, or s itself if no
+ * predecessor
*/
- private Object fulfill(Object e) {
- boolean isData = (e != null);
- final PaddedAtomicReference<QNode> head = this.head;
- final PaddedAtomicReference<QNode> tail = this.tail;
-
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
-
- if (t != null && (t == h || t.isData == isData)) {
- QNode last = t.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last);
- else
- return null;
- }
+ private Node tryAppend(Node s, boolean haveData) {
+ for (Node t = tail, p = t;;) { // move p to last node and append
+ Node n, u; // temps for reads of next & tail
+ if (p == null && (p = head) == null) {
+ if (casHead(null, s))
+ return s; // initialize
}
- else if (h != null) {
- QNode first = h.next;
- if (t == tail.get() &&
- first != null &&
- advanceHead(h, first)) {
- Object x = first.get();
- if (x != first && first.compareAndSet(x, e)) {
- LockSupport.unpark(first.waiter);
- return isData? e : x;
- }
+ else if (p.cannotPrecede(haveData))
+ return null; // lost race vs opposite mode
+ else if ((n = p.next) != null) // not last; keep traversing
+ p = p != t && t != (u = tail) ? (t = u) : // stale tail
+ (p != n) ? n : null; // restart if off list
+ else if (!p.casNext(null, s))
+ p = p.next; // re-read on CAS failure
+ else {
+ if (p != t) { // update if slack now >= 2
+ while ((tail != t || !casTail(t, s)) &&
+ (t = tail) != null &&
+ (s = t.next) != null && // advance and retry
+ (s = s.next) != null && s != t);
}
+ return p;
}
}
}
/**
- * Spins/blocks until node s is fulfilled or caller gives up,
- * depending on wait mode.
+ * Spins/yields/blocks until node s is matched or caller gives up.
*
- * @param pred the predecessor of waiting node
* @param s the waiting node
+ * @param pred the predecessor of s, or s itself if it has no
+ * predecessor, or null if unknown (the null case does not occur
+ * in any current calls but may in possible future extensions)
* @param e the comparison value for checking match
- * @param mode mode
- * @param nanos timeout value
- * @return matched item, or s if cancelled
+ * @param timed if true, wait only until timeout elapses
+ * @param nanos timeout in nanosecs, used only if timed is true
+ * @return matched item, or e if unmatched on interrupt or timeout
*/
- private Object awaitFulfill(QNode pred, QNode s, Object e,
- int mode, long nanos) {
- if (mode == NOWAIT)
- return null;
-
- long lastTime = (mode == TIMEOUT)? System.nanoTime() : 0;
+ private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
+ long lastTime = timed ? System.nanoTime() : 0L;
Thread w = Thread.currentThread();
- int spins = -1; // set to desired spin count below
+ int spins = -1; // initialized after first item and cancel checks
+ ThreadLocalRandom randomYields = null; // bound if needed
+
for (;;) {
- if (w.isInterrupted())
- s.compareAndSet(e, s);
- Object x = s.get();
- if (x != e) { // Node was matched or cancelled
- advanceHead(pred, s); // unlink if head
- if (x == s) { // was cancelled
- clean(pred, s);
- return null;
- }
- else if (x != null) {
- s.set(s); // avoid garbage retention
- return x;
- }
- else
- return e;
+ Object item = s.item;
+ if (item != e) { // matched
+ // assert item != s;
+ s.forgetContents(); // avoid garbage
+ return LinkedTransferQueue.<E>cast(item);
}
- if (mode == TIMEOUT) {
- long now = System.nanoTime();
- nanos -= now - lastTime;
- lastTime = now;
- if (nanos <= 0) {
- s.compareAndSet(e, s); // try to cancel
- continue;
- }
+ if ((w.isInterrupted() || (timed && nanos <= 0)) &&
+ s.casItem(e, s)) { // cancel
+ unsplice(pred, s);
+ return e;
}
- if (spins < 0) {
- QNode h = head.get(); // only spin if at head
- spins = ((h != null && h.next == s) ?
- (mode == TIMEOUT?
- maxTimedSpins : maxUntimedSpins) : 0);
+
+ if (spins < 0) { // establish spins at/near front
+ if ((spins = spinsFor(pred, s.isData)) > 0)
+ randomYields = ThreadLocalRandom.current();
}
- if (spins > 0)
+ else if (spins > 0) { // spin
--spins;
- else if (s.waiter == null)
- s.waiter = w;
- else if (mode != TIMEOUT) {
- LockSupport.park(this);
- s.waiter = null;
- spins = -1;
+ if (randomYields.nextInt(CHAINED_SPINS) == 0)
+ Thread.yield(); // occasionally yield
+ }
+ else if (s.waiter == null) {
+ s.waiter = w; // request unpark then recheck
}
- else if (nanos > spinForTimeoutThreshold) {
- LockSupport.parkNanos(this, nanos);
- s.waiter = null;
- spins = -1;
+ else if (timed) {
+ long now = System.nanoTime();
+ if ((nanos -= now - lastTime) > 0)
+ LockSupport.parkNanos(this, nanos);
+ lastTime = now;
+ }
+ else {
+ LockSupport.park(this);
}
}
}
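awaitMatch follows the classic spin-then-park idiom. A stripped-down sketch of the same shape (illustrative only; the real method also handles cancellation, timed parks, and randomized yields):

    import java.util.concurrent.locks.LockSupport;

    // Sketch: spin briefly hoping for a quick match, then publish the
    // waiter thread and park until the matching thread unparks it.
    class SpinThenPark {
        volatile Object item;      // written by the matching thread
        volatile Thread waiter;    // read by the matching thread for unpark

        Object await(int spins) {
            Object x;
            while ((x = item) == null && spins-- > 0)
                ;                  // busy-wait while likely near the front
            while ((x = item) == null) {
                waiter = Thread.currentThread();
                LockSupport.park(this);
            }
            return x;
        }

        void match(Object v) {
            item = v;
            LockSupport.unpark(waiter);
        }
    }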
/**
- * Returns validated tail for use in cleaning methods.
+ * Returns spin/yield value for a node with given predecessor and
+ * data mode. See above for explanation.
*/
- private QNode getValidatedTail() {
- for (;;) {
- QNode h = head.get();
- QNode first = h.next;
- if (first != null && first.next == first) { // help advance
- advanceHead(h, first);
- continue;
- }
- QNode t = tail.get();
- QNode last = t.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last); // help advance
- else
- return t;
+ private static int spinsFor(Node pred, boolean haveData) {
+ if (MP && pred != null) {
+ if (pred.isData != haveData) // phase change
+ return FRONT_SPINS + CHAINED_SPINS;
+ if (pred.isMatched()) // probably at front
+ return FRONT_SPINS;
+ if (pred.waiter == null) // pred apparently spinning
+ return CHAINED_SPINS;
+ }
+ return 0;
+ }
+
+ /* -------------- Traversal methods -------------- */
+
+ /**
+ * Returns the successor of p, or the head node if p.next has been
+ * linked to self, which will only be true if traversing with a
+ * stale pointer that is now off the list.
+ */
+ final Node succ(Node p) {
+ Node next = p.next;
+ return (p == next) ? head : next;
+ }
+
+ /**
+ * Returns the first unmatched node of the given mode, or null if
+ * none. Used by method hasWaitingConsumer.
+ */
+ private Node firstOfMode(boolean isData) {
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return (p.isData == isData) ? p : null;
+ }
+ return null;
+ }
+
+ /**
+ * Returns the item in the first unmatched node with isData; or
+ * null if none. Used by peek.
+ */
+ private E firstDataItem() {
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p)
+ return LinkedTransferQueue.<E>cast(item);
}
+ else if (item == null)
+ return null;
}
+ return null;
}
/**
- * Gets rid of cancelled node s with original predecessor pred.
- *
- * @param pred predecessor of cancelled node
- * @param s the cancelled node
+ * Traverses and counts unmatched nodes of the given mode.
+ * Used by methods size and getWaitingConsumerCount.
*/
- private void clean(QNode pred, QNode s) {
- Thread w = s.waiter;
- if (w != null) { // Wake up thread
- s.waiter = null;
- if (w != Thread.currentThread())
- LockSupport.unpark(w);
+ private int countOfMode(boolean data) {
+ int count = 0;
+ for (Node p = head; p != null; ) {
+ if (!p.isMatched()) {
+ if (p.isData != data)
+ return 0;
+ if (++count == Integer.MAX_VALUE) // saturated
+ break;
+ }
+ Node n = p.next;
+ if (n != p)
+ p = n;
+ else {
+ count = 0;
+ p = head;
+ }
}
+ return count;
+ }
- if (pred == null)
- return;
+ final class Itr implements Iterator<E> {
+ private Node nextNode; // next node to return item for
+ private E nextItem; // the corresponding item
+ private Node lastRet; // last returned node, to support remove
+ private Node lastPred; // predecessor to unlink lastRet
- /*
- * At any given time, exactly one node on list cannot be
- * deleted -- the last inserted node. To accommodate this, if
- * we cannot delete s, we save its predecessor as "cleanMe",
- * processing the previously saved version first. At least one
- * of node s or the node previously saved can always be
- * processed, so this always terminates.
+ /**
+ * Moves to next node after prev, or first node if prev null.
*/
- while (pred.next == s) {
- QNode oldpred = reclean(); // First, help get rid of cleanMe
- QNode t = getValidatedTail();
- if (s != t) { // If not tail, try to unsplice
- QNode sn = s.next; // s.next == s means s already off list
- if (sn == s || pred.casNext(s, sn))
+ private void advance(Node prev) {
+ /*
+ * To track and avoid buildup of deleted nodes in the face
+ * of calls to both Queue.remove and Itr.remove, we must
+ * include variants of unsplice and sweep upon each
+ * advance: Upon Itr.remove, we may need to catch up links
+ * from lastPred, and upon other removes, we might need to
+ * skip ahead from stale nodes and unsplice deleted ones
+ * found while advancing.
+ */
+
+ Node r, b; // reset lastPred upon possible deletion of lastRet
+ if ((r = lastRet) != null && !r.isMatched())
+ lastPred = r; // next lastPred is old lastRet
+ else if ((b = lastPred) == null || b.isMatched())
+ lastPred = null; // at start of list
+ else {
+ Node s, n; // help with removal of lastPred.next
+ while ((s = b.next) != null &&
+ s != b && s.isMatched() &&
+ (n = s.next) != null && n != s)
+ b.casNext(s, n);
+ }
+
+ this.lastRet = prev;
+
+ for (Node p = prev, s, n;;) {
+ s = (p == null) ? head : p.next;
+ if (s == null)
+ break;
+ else if (s == p) {
+ p = null;
+ continue;
+ }
+ Object item = s.item;
+ if (s.isData) {
+ if (item != null && item != s) {
+ nextItem = LinkedTransferQueue.<E>cast(item);
+ nextNode = s;
+ return;
+ }
+ }
+ else if (item == null)
+ break;
+ // assert s.isMatched();
+ if (p == null)
+ p = s;
+ else if ((n = s.next) == null)
break;
+ else if (s == n)
+ p = null;
+ else
+ p.casNext(s, n);
}
- else if (oldpred == pred || // Already saved
- (oldpred == null && cleanMe.compareAndSet(null, pred)))
- break; // Postpone cleaning
+ nextNode = null;
+ nextItem = null;
+ }
+
+ Itr() {
+ advance(null);
+ }
+
+ public final boolean hasNext() {
+ return nextNode != null;
+ }
+
+ public final E next() {
+ Node p = nextNode;
+ if (p == null) throw new NoSuchElementException();
+ E e = nextItem;
+ advance(p);
+ return e;
+ }
+
+ public final void remove() {
+ final Node lastRet = this.lastRet;
+ if (lastRet == null)
+ throw new IllegalStateException();
+ this.lastRet = null;
+ if (lastRet.tryMatchData())
+ unsplice(lastPred, lastRet);
}
}
+ /* -------------- Removal methods -------------- */
+
/**
- * Tries to unsplice the cancelled node held in cleanMe that was
- * previously uncleanable because it was at tail.
+ * Unsplices (now or later) the given deleted/cancelled node with
+ * the given predecessor.
*
- * @return current cleanMe node (or null)
+ * @param pred a node that was at one time known to be the
+ * predecessor of s, or null or s itself if s is/was at head
+ * @param s the node to be unspliced
*/
- private QNode reclean() {
+ final void unsplice(Node pred, Node s) {
+ s.forgetContents(); // forget unneeded fields
/*
- * cleanMe is, or at one time was, predecessor of cancelled
- * node s that was the tail so could not be unspliced. If s
- * is no longer the tail, try to unsplice if necessary and
- * make cleanMe slot available. This differs from similar
- * code in clean() because we must check that pred still
- * points to a cancelled node that must be unspliced -- if
- * not, we can (must) clear cleanMe without unsplicing.
- * This can loop only due to contention on casNext or
- * clearing cleanMe.
+ * See above for rationale. Briefly: if pred still points to
+ * s, try to unlink s. If s cannot be unlinked (because it is
+ * the trailing node, or because pred might be unlinked) and
+ * neither pred nor s is head or offlist, add to sweepVotes;
+ * if enough votes have accumulated, sweep.
*/
- QNode pred;
- while ((pred = cleanMe.get()) != null) {
- QNode t = getValidatedTail();
- QNode s = pred.next;
- if (s != t) {
- QNode sn;
- if (s == null || s == pred || s.get() != s ||
- (sn = s.next) == s || pred.casNext(s, sn))
- cleanMe.compareAndSet(pred, null);
+ if (pred != null && pred != s && pred.next == s) {
+ Node n = s.next;
+ if (n == null ||
+ (n != s && pred.casNext(s, n) && pred.isMatched())) {
+ for (;;) { // check if at, or could be, head
+ Node h = head;
+ if (h == pred || h == s || h == null)
+ return; // at head or list empty
+ if (!h.isMatched())
+ break;
+ Node hn = h.next;
+ if (hn == null)
+ return; // now empty
+ if (hn != h && casHead(h, hn))
+ h.forgetNext(); // advance head
+ }
+ if (pred.next != pred && s.next != s) { // recheck if offlist
+ for (;;) { // sweep now if enough votes
+ int v = sweepVotes;
+ if (v < SWEEP_THRESHOLD) {
+ if (casSweepVotes(v, v + 1))
+ break;
+ }
+ else if (casSweepVotes(v, 0)) {
+ sweep();
+ break;
+ }
+ }
+ }
}
- else // s is still tail; cannot clean
+ }
+ }
+
+ /**
+ * Unlinks matched (typically cancelled) nodes encountered in a
+ * traversal from head.
+ */
+ private void sweep() {
+ for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
+ if (!s.isMatched())
+ // Unmatched nodes are never self-linked
+ p = s;
+ else if ((n = s.next) == null) // trailing node is pinned
break;
+ else if (s == n) // stale
+ // No need to also check for p == s, since that implies s == n
+ p = head;
+ else
+ p.casNext(s, n);
}
- return pred;
}
/**
+ * Main implementation of remove(Object).
+ */
+ private boolean findAndRemove(Object e) {
+ if (e != null) {
+ for (Node pred = null, p = head; p != null; ) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && e.equals(item) &&
+ p.tryMatchData()) {
+ unsplice(pred, p);
+ return true;
+ }
+ }
+ else if (item == null)
+ break;
+ pred = p;
+ if ((p = p.next) == pred) { // stale
+ pred = null;
+ p = head;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
* Creates an initially empty {@code LinkedTransferQueue}.
*/
public LinkedTransferQueue() {
- QNode dummy = new QNode(null, false);
- head = new PaddedAtomicReference<QNode>(dummy);
- tail = new PaddedAtomicReference<QNode>(dummy);
- cleanMe = new PaddedAtomicReference<QNode>(null);
}
/**
@@ -435,252 +1000,200 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
addAll(c);
}
- public void put(E e) throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (Thread.interrupted()) throw new InterruptedException();
- xfer(e, NOWAIT, 0);
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public void put(E e) {
+ xfer(e, true, ASYNC, 0);
}
- public boolean offer(E e, long timeout, TimeUnit unit)
- throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (Thread.interrupted()) throw new InterruptedException();
- xfer(e, NOWAIT, 0);
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never block or
+ * return {@code false}.
+ *
+ * @return {@code true} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
+ * BlockingQueue.offer})
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean offer(E e, long timeout, TimeUnit unit) {
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Queue#offer})
+ * @throws NullPointerException if the specified element is null
+ */
public boolean offer(E e) {
- if (e == null) throw new NullPointerException();
- xfer(e, NOWAIT, 0);
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Inserts the specified element at the tail of this queue.
+ * As the queue is unbounded, this method will never throw
+ * {@link IllegalStateException} or return {@code false}.
+ *
+ * @return {@code true} (as specified by {@link Collection#add})
+ * @throws NullPointerException if the specified element is null
+ */
public boolean add(E e) {
- if (e == null) throw new NullPointerException();
- xfer(e, NOWAIT, 0);
+ xfer(e, true, ASYNC, 0);
return true;
}
+ /**
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
+ public boolean tryTransfer(E e) {
+ return xfer(e, true, NOW, 0) == null;
+ }
+
+ /**
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
public void transfer(E e) throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (xfer(e, WAIT, 0) == null) {
- Thread.interrupted();
+ if (xfer(e, true, SYNC, 0) != null) {
+ Thread.interrupted(); // failure possible only due to interrupt
throw new InterruptedException();
}
}
+ /**
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else inserts the specified element at the tail of this queue
+ * and waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
+ *
+ * @throws NullPointerException if the specified element is null
+ */
public boolean tryTransfer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
- if (e == null) throw new NullPointerException();
- if (xfer(e, TIMEOUT, unit.toNanos(timeout)) != null)
+ if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
return true;
if (!Thread.interrupted())
return false;
throw new InterruptedException();
}
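A usage sketch of the handoff methods above (method name illustrative): prefer an immediate handoff to a waiting consumer, fall back to a bounded wait, and give up otherwise:

    import java.util.concurrent.TimeUnit;

    // Sketch: immediate handoff if a consumer waits, else bounded wait.
    static boolean publish(LinkedTransferQueue<String> q, String msg)
            throws InterruptedException {
        if (q.tryTransfer(msg))            // consumer already parked in take/poll?
            return true;
        return q.tryTransfer(msg, 100, TimeUnit.MILLISECONDS);
    }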
- public boolean tryTransfer(E e) {
- if (e == null) throw new NullPointerException();
- return fulfill(e) != null;
- }
-
public E take() throws InterruptedException {
- Object e = xfer(null, WAIT, 0);
+ E e = xfer(null, false, SYNC, 0);
if (e != null)
- return (E)e;
+ return e;
Thread.interrupted();
throw new InterruptedException();
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
- Object e = xfer(null, TIMEOUT, unit.toNanos(timeout));
+ E e = xfer(null, false, TIMED, unit.toNanos(timeout));
if (e != null || !Thread.interrupted())
- return (E)e;
+ return e;
throw new InterruptedException();
}
public E poll() {
- return (E)fulfill(null);
+ return xfer(null, false, NOW, 0);
}
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
public int drainTo(Collection<? super E> c) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
- E e;
- while ( (e = poll()) != null) {
+ for (E e; (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
+ /**
+ * @throws NullPointerException {@inheritDoc}
+ * @throws IllegalArgumentException {@inheritDoc}
+ */
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
int n = 0;
- E e;
- while (n < maxElements && (e = poll()) != null) {
+ for (E e; n < maxElements && (e = poll()) != null;) {
c.add(e);
++n;
}
return n;
}
- // Traversal-based methods
-
/**
- * Returns head after performing any outstanding helping steps.
+ * Returns an iterator over the elements in this queue in proper sequence.
+ * The elements will be returned in order from first (head) to last (tail).
+ *
+ * <p>The returned iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException
+ * ConcurrentModificationException}, and guarantees to traverse
+ * elements as they existed upon construction of the iterator, and
+ * may (but is not guaranteed to) reflect any modifications
+ * subsequent to construction.
+ *
+ * @return an iterator over the elements in this queue in proper sequence
*/
- private QNode traversalHead() {
- for (;;) {
- QNode t = tail.get();
- QNode h = head.get();
- if (h != null && t != null) {
- QNode last = t.next;
- QNode first = h.next;
- if (t == tail.get()) {
- if (last != null)
- tail.compareAndSet(t, last);
- else if (first != null) {
- Object x = first.get();
- if (x == first)
- advanceHead(h, first);
- else
- return h;
- }
- else
- return h;
- }
- }
- reclean();
- }
- }
-
-
public Iterator<E> iterator() {
return new Itr();
}
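A small sketch of the weak-consistency guarantee above: iterating while another thread offers never throws ConcurrentModificationException, and late offers may or may not be observed:

    // Sketch: weakly consistent traversal under concurrent modification.
    static void dump(final LinkedTransferQueue<String> q) {
        new Thread(new Runnable() {
            public void run() { q.offer("late"); }   // may or may not be seen
        }).start();
        for (String s : q)                           // never throws CME
            System.out.println(s);
    }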
- /**
- * Iterators. Basic strategy is to traverse list, treating
- * non-data (i.e., request) nodes as terminating list.
- * Once a valid data node is found, the item is cached
- * so that the next call to next() will return it even
- * if subsequently removed.
- */
- class Itr implements Iterator<E> {
- QNode next; // node to return next
- QNode pnext; // predecessor of next
- QNode snext; // successor of next
- QNode curr; // last returned node, for remove()
- QNode pcurr; // predecessor of curr, for remove()
- E nextItem; // Cache of next item, once commited to in next
-
- Itr() {
- findNext();
- }
-
- /**
- * Ensures next points to next valid node, or null if none.
- */
- void findNext() {
- for (;;) {
- QNode pred = pnext;
- QNode q = next;
- if (pred == null || pred == q) {
- pred = traversalHead();
- q = pred.next;
- }
- if (q == null || !q.isData) {
- next = null;
- return;
- }
- Object x = q.get();
- QNode s = q.next;
- if (x != null && q != x && q != s) {
- nextItem = (E)x;
- snext = s;
- pnext = pred;
- next = q;
- return;
- }
- pnext = q;
- next = s;
- }
- }
-
- public boolean hasNext() {
- return next != null;
- }
-
- public E next() {
- if (next == null) throw new NoSuchElementException();
- pcurr = pnext;
- curr = next;
- pnext = next;
- next = snext;
- E x = nextItem;
- findNext();
- return x;
- }
-
- public void remove() {
- QNode p = curr;
- if (p == null)
- throw new IllegalStateException();
- Object x = p.get();
- if (x != null && x != p && p.compareAndSet(x, p))
- clean(pcurr, p);
- }
- }
-
public E peek() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return null;
- Object x = p.get();
- if (p != x) {
- if (!p.isData)
- return null;
- if (x != null)
- return (E)x;
- }
- }
+ return firstDataItem();
}
+ /**
+ * Returns {@code true} if this queue contains no elements.
+ *
+ * @return {@code true} if this queue contains no elements
+ */
public boolean isEmpty() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return true;
- Object x = p.get();
- if (p != x) {
- if (!p.isData)
- return true;
- if (x != null)
- return false;
- }
+ for (Node p = head; p != null; p = succ(p)) {
+ if (!p.isMatched())
+ return !p.isData;
}
+ return true;
}
public boolean hasWaitingConsumer() {
- for (;;) {
- QNode h = traversalHead();
- QNode p = h.next;
- if (p == null)
- return false;
- Object x = p.get();
- if (p != x)
- return !p.isData;
- }
+ return firstOfMode(false) != null;
}
/**
@@ -696,58 +1209,64 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
* @return the number of elements in this queue
*/
public int size() {
- int count = 0;
- QNode h = traversalHead();
- for (QNode p = h.next; p != null && p.isData; p = p.next) {
- Object x = p.get();
- if (x != null && x != p) {
- if (++count == Integer.MAX_VALUE) // saturated
- break;
- }
- }
- return count;
+ return countOfMode(true);
}
public int getWaitingConsumerCount() {
- int count = 0;
- QNode h = traversalHead();
- for (QNode p = h.next; p != null && !p.isData; p = p.next) {
- if (p.get() == null) {
- if (++count == Integer.MAX_VALUE)
- break;
- }
- }
- return count;
+ return countOfMode(false);
}
- public int remainingCapacity() {
- return Integer.MAX_VALUE;
+ /**
+ * Removes a single instance of the specified element from this queue,
+ * if it is present. More formally, removes an element {@code e} such
+ * that {@code o.equals(e)}, if this queue contains one or more such
+ * elements.
+ * Returns {@code true} if this queue contained the specified element
+ * (or equivalently, if this queue changed as a result of the call).
+ *
+ * @param o element to be removed from this queue, if present
+ * @return {@code true} if this queue changed as a result of the call
+ */
+ public boolean remove(Object o) {
+ return findAndRemove(o);
}
- public boolean remove(Object o) {
- if (o == null)
- return false;
- for (;;) {
- QNode pred = traversalHead();
- for (;;) {
- QNode q = pred.next;
- if (q == null || !q.isData)
- return false;
- if (q == pred) // restart
- break;
- Object x = q.get();
- if (x != null && x != q && o.equals(x) &&
- q.compareAndSet(x, q)) {
- clean(pred, q);
+ /**
+ * Returns {@code true} if this queue contains the specified element.
+ * More formally, returns {@code true} if and only if this queue contains
+ * at least one element {@code e} such that {@code o.equals(e)}.
+ *
+ * @param o object to be checked for containment in this queue
+ * @return {@code true} if this queue contains the specified element
+ */
+ public boolean contains(Object o) {
+ if (o == null) return false;
+ for (Node p = head; p != null; p = succ(p)) {
+ Object item = p.item;
+ if (p.isData) {
+ if (item != null && item != p && o.equals(item))
return true;
- }
- pred = q;
}
+ else if (item == null)
+ break;
}
+ return false;
+ }
+
+ /**
+ * Always returns {@code Integer.MAX_VALUE} because a
+ * {@code LinkedTransferQueue} is not capacity constrained.
+ *
+ * @return {@code Integer.MAX_VALUE} (as specified by
+ * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
+ * BlockingQueue.remainingCapacity})
+ */
+ public int remainingCapacity() {
+ return Integer.MAX_VALUE;
}
/**
- * Save the state to a stream (that is, serialize it).
+ * Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
@@ -763,16 +1282,17 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
}
/**
- * Reconstitute the Queue instance from a stream (that is,
- * deserialize it).
+ * Reconstitutes the Queue instance from a stream (that is,
+ * deserializes it).
+ *
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
- resetHeadAndTail();
for (;;) {
- E item = (E)s.readObject();
+ @SuppressWarnings("unchecked")
+ E item = (E) s.readObject();
if (item == null)
break;
else
@@ -780,61 +1300,53 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
}
}
+ // Unsafe mechanics
- // Support for resetting head/tail while deserializing
- private void resetHeadAndTail() {
- QNode dummy = new QNode(null, false);
- _unsafe.putObjectVolatile(this, headOffset,
- new PaddedAtomicReference<QNode>(dummy));
- _unsafe.putObjectVolatile(this, tailOffset,
- new PaddedAtomicReference<QNode>(dummy));
- _unsafe.putObjectVolatile(this, cleanMeOffset,
- new PaddedAtomicReference<QNode>(null));
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long headOffset;
+ private static final long tailOffset;
+ private static final long sweepVotesOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> k = LinkedTransferQueue.class;
+ headOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("head"));
+ tailOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("tail"));
+ sweepVotesOffset = UNSAFE.objectFieldOffset
+ (k.getDeclaredField("sweepVotes"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
}
- // Temporary Unsafe mechanics for preliminary release
- private static Unsafe getUnsafe() throws Throwable {
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ static sun.misc.Unsafe getUnsafe() {
try {
- return Unsafe.getUnsafe();
+ return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException se) {
try {
return java.security.AccessController.doPrivileged
- (new java.security.PrivilegedExceptionAction<Unsafe>() {
- public Unsafe run() throws Exception {
- return getUnsafePrivileged();
+ (new java.security
+ .PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ java.lang.reflect.Field f = sun.misc
+ .Unsafe.class.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (sun.misc.Unsafe) f.get(null);
}});
} catch (java.security.PrivilegedActionException e) {
- throw e.getCause();
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
}
}
}
- private static Unsafe getUnsafePrivileged()
- throws NoSuchFieldException, IllegalAccessException {
- Field f = Unsafe.class.getDeclaredField("theUnsafe");
- f.setAccessible(true);
- return (Unsafe) f.get(null);
- }
-
- private static long fieldOffset(String fieldName)
- throws NoSuchFieldException {
- return _unsafe.objectFieldOffset
- (LinkedTransferQueue.class.getDeclaredField(fieldName));
- }
-
- private static final Unsafe _unsafe;
- private static final long headOffset;
- private static final long tailOffset;
- private static final long cleanMeOffset;
- static {
- try {
- _unsafe = getUnsafe();
- headOffset = fieldOffset("head");
- tailOffset = fieldOffset("tail");
- cleanMeOffset = fieldOffset("cleanMe");
- } catch (Throwable e) {
- throw new RuntimeException("Could not initialize intrinsics", e);
- }
- }
-
}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
index 2d36f7eb33..1e7cdd952d 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
@@ -1,64 +1,73 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
/**
- * Recursive resultless ForkJoinTasks. This class establishes
- * conventions to parameterize resultless actions as <tt>Void</tt>
- * ForkJoinTasks. Because <tt>null</tt> is the only valid value of
- * <tt>Void</tt>, methods such as join always return <tt>null</tt>
- * upon completion.
+ * A recursive resultless {@link ForkJoinTask}. This class
+ * establishes conventions to parameterize resultless actions as
+ * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
+ * only valid value of type {@code Void}, methods such as {@code join}
+ * always return {@code null} upon completion.
*
- * <p><b>Sample Usages.</b> Here is a sketch of a ForkJoin sort that
- * sorts a given <tt>long[]</tt> array:
+ * <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
+ * sort that sorts a given {@code long[]} array:
*
- * <pre>
- * class SortTask extends RecursiveAction {
- * final long[] array; final int lo; final int hi;
+ * <pre> {@code
+ * static class SortTask extends RecursiveAction {
+ * final long[] array; final int lo, hi;
* SortTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
+ * SortTask(long[] array) { this(array, 0, array.length); }
* protected void compute() {
- * if (hi - lo &lt; THRESHOLD)
- * sequentiallySort(array, lo, hi);
+ * if (hi - lo < THRESHOLD)
+ * sortSequentially(lo, hi);
* else {
- * int mid = (lo + hi) &gt;&gt;&gt; 1;
+ * int mid = (lo + hi) >>> 1;
* invokeAll(new SortTask(array, lo, mid),
* new SortTask(array, mid, hi));
- * merge(array, lo, hi);
+ * merge(lo, mid, hi);
* }
* }
- * }
- * </pre>
+ * // implementation details follow:
+ * final static int THRESHOLD = 1000;
+ * void sortSequentially(int lo, int hi) {
+ * Arrays.sort(array, lo, hi);
+ * }
+ * void merge(int lo, int mid, int hi) {
+ * long[] buf = Arrays.copyOfRange(array, lo, mid);
+ * for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ * array[j] = (k == hi || buf[i] < array[k]) ?
+ * buf[i++] : array[k++];
+ * }
+ * }}</pre>
*
- * You could then sort anArray by creating <tt>new SortTask(anArray, 0,
- * anArray.length-1) </tt> and invoking it in a ForkJoinPool.
- * As a more concrete simple example, the following task increments
- * each element of an array:
- * <pre>
+ * You could then sort {@code anArray} by creating {@code new
+ * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
+ * concrete simple example, the following task increments each element
+ * of an array:
+ * <pre> {@code
* class IncrementTask extends RecursiveAction {
- * final long[] array; final int lo; final int hi;
+ * final long[] array; final int lo, hi;
* IncrementTask(long[] array, int lo, int hi) {
* this.array = array; this.lo = lo; this.hi = hi;
* }
* protected void compute() {
- * if (hi - lo &lt; THRESHOLD) {
- * for (int i = lo; i &lt; hi; ++i)
+ * if (hi - lo < THRESHOLD) {
+ * for (int i = lo; i < hi; ++i)
* array[i]++;
* }
* else {
- * int mid = (lo + hi) &gt;&gt;&gt; 1;
+ * int mid = (lo + hi) >>> 1;
* invokeAll(new IncrementTask(array, lo, mid),
* new IncrementTask(array, mid, hi));
* }
* }
- * }
- * </pre>
- *
+ * }}</pre>
*
* <p>The following example illustrates some refinements and idioms
* that may lead to better performance: RecursiveActions need not be
@@ -66,33 +75,33 @@ package scala.concurrent.forkjoin;
* divide-and-conquer approach. Here is a class that sums the squares
* of each element of a double array, by subdividing out only the
* right-hand-sides of repeated divisions by two, and keeping track of
- * them with a chain of <tt>next</tt> references. It uses a dynamic
- * threshold based on method <tt>surplus</tt>, but counterbalances
- * potential excess partitioning by directly performing leaf actions
- * on unstolen tasks rather than further subdividing.
+ * them with a chain of {@code next} references. It uses a dynamic
+ * threshold based on method {@code getSurplusQueuedTaskCount}, but
+ * counterbalances potential excess partitioning by directly
+ * performing leaf actions on unstolen tasks rather than further
+ * subdividing.
*
- * <pre>
+ * <pre> {@code
* double sumOfSquares(ForkJoinPool pool, double[] array) {
* int n = array.length;
- * int seqSize = 1 + n / (8 * pool.getParallelism());
- * Applyer a = new Applyer(array, 0, n, seqSize, null);
+ * Applyer a = new Applyer(array, 0, n, null);
* pool.invoke(a);
* return a.result;
* }
*
* class Applyer extends RecursiveAction {
* final double[] array;
- * final int lo, hi, seqSize;
+ * final int lo, hi;
* double result;
* Applyer next; // keeps track of right-hand-side tasks
- * Applyer(double[] array, int lo, int hi, int seqSize, Applyer next) {
+ * Applyer(double[] array, int lo, int hi, Applyer next) {
* this.array = array; this.lo = lo; this.hi = hi;
- * this.seqSize = seqSize; this.next = next;
+ * this.next = next;
* }
*
- * double atLeaf(int l, int r) {
+ * double atLeaf(int l, int h) {
* double sum = 0;
- * for (int i = l; i &lt; h; ++i) // perform leftmost base step
+ * for (int i = l; i < h; ++i) // perform leftmost base step
* sum += array[i] * array[i];
* return sum;
* }
@@ -101,10 +110,9 @@ package scala.concurrent.forkjoin;
* int l = lo;
* int h = hi;
* Applyer right = null;
- * while (h - l &gt; 1 &amp;&amp;
- * ForkJoinWorkerThread.getEstimatedSurplusTaskCount() &lt;= 3) {
- * int mid = (l + h) &gt;&gt;&gt; 1;
- * right = new Applyer(array, mid, h, seqSize, right);
+ * while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ * int mid = (l + h) >>> 1;
+ * right = new Applyer(array, mid, h, right);
* right.fork();
* h = mid;
* }
@@ -113,17 +121,20 @@ package scala.concurrent.forkjoin;
* if (right.tryUnfork()) // directly calculate if not stolen
* sum += right.atLeaf(right.lo, right.hi);
* else {
- * right.helpJoin();
+ * right.join();
* sum += right.result;
* }
* right = right.next;
* }
* result = sum;
* }
- * }
- * </pre>
+ * }}</pre>
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public abstract class RecursiveAction extends ForkJoinTask<Void> {
+ private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
@@ -131,7 +142,9 @@ public abstract class RecursiveAction extends ForkJoinTask<Void> {
protected abstract void compute();
/**
- * Always returns null
+ * Always returns {@code null}.
+ *
+ * @return {@code null} always
*/
public final Void getRawResult() { return null; }
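Tying the Javadoc samples above together, a usage sketch (assuming the SortTask sample class compiles as shown):

    // Sketch: invoke the Javadoc's SortTask in a pool; sorts in place.
    static void sortExample() {
        ForkJoinPool pool = new ForkJoinPool();
        long[] anArray = { 5L, 3L, 9L, 1L, 4L };
        pool.invoke(new SortTask(anArray));   // blocks until sorting completes
    }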
@@ -141,7 +154,7 @@ public abstract class RecursiveAction extends ForkJoinTask<Void> {
protected final void setRawResult(Void mustBeNull) { }
/**
- * Implements execution conventions for RecursiveActions
+ * Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
index a526f75597..d1e1547143 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
@@ -1,29 +1,29 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
/**
- * Recursive result-bearing ForkJoinTasks.
- * <p> For a classic example, here is a task computing Fibonacci numbers:
+ * A recursive result-bearing {@link ForkJoinTask}.
*
- * <pre>
- * class Fibonacci extends RecursiveTask&lt;Integer&gt; {
+ * <p>For a classic example, here is a task computing Fibonacci numbers:
+ *
+ * <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
* final int n;
- * Fibonnaci(int n) { this.n = n; }
+ * Fibonacci(int n) { this.n = n; }
 * protected Integer compute() {
- * if (n &lt;= 1)
+ * if (n <= 1)
* return n;
* Fibonacci f1 = new Fibonacci(n - 1);
* f1.fork();
* Fibonacci f2 = new Fibonacci(n - 2);
* return f2.compute() + f1.join();
* }
- * }
- * </pre>
+ * }}</pre>
*
* However, besides being a dumb way to compute Fibonacci functions
* (there is a simple fast linear algorithm that you'd use in
@@ -33,17 +33,14 @@ package scala.concurrent.forkjoin;
* minimum granularity size (for example 10 here) for which you always
* sequentially solve rather than subdividing.
*
+ * @since 1.7
+ * @author Doug Lea
*/
public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
+ private static final long serialVersionUID = 5232453952276485270L;
/**
- * Empty constructor for use by subclasses.
- */
- protected RecursiveTask() {
- }
-
- /**
- * The result returned by compute method.
+ * The result of the computation.
*/
V result;
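Correspondingly, a usage sketch for the Fibonacci sample above (helper method illustrative):

    // Sketch: invoke() returns the task's result once compute() completes.
    static void fibExample() {
        ForkJoinPool pool = new ForkJoinPool();
        int fib10 = pool.invoke(new Fibonacci(10));   // 55
    }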
@@ -61,7 +58,7 @@ public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
}
/**
- * Implements execution conventions for RecursiveTask
+ * Implements execution conventions for RecursiveTask.
*/
protected final boolean exec() {
result = compute();
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java b/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
index 34e2e37f37..19237c9092 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
@@ -1,49 +1,53 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
-import java.util.*;
+
+import java.util.Random;
/**
- * A random number generator with the same properties as class {@link
- * Random} but isolated to the current Thread. Like the global
- * generator used by the {@link java.lang.Math} class, a
- * ThreadLocalRandom is initialized with an internally generated seed
- * that may not otherwise be modified. When applicable, use of
- * ThreadLocalRandom rather than shared Random objects in concurrent
- * programs will typically encounter much less overhead and
- * contention. ThreadLocalRandoms are particularly appropriate when
- * multiple tasks (for example, each a {@link ForkJoinTask}), use
- * random numbers in parallel in thread pools.
+ * A random number generator isolated to the current thread. Like the
+ * global {@link java.util.Random} generator used by the {@link
+ * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
+ * with an internally generated seed that may not otherwise be
+ * modified. When applicable, use of {@code ThreadLocalRandom} rather
+ * than shared {@code Random} objects in concurrent programs will
+ * typically encounter much less overhead and contention. Use of
+ * {@code ThreadLocalRandom} is particularly appropriate when multiple
+ * tasks (for example, each a {@link ForkJoinTask}) use random numbers
+ * in parallel in thread pools.
*
* <p>Usages of this class should typically be of the form:
- * <code>ThreadLocalRandom.current().nextX(...)</code> (where
- * <code>X</code> is <code>Int</code>, <code>Long</code>, etc).
+ * {@code ThreadLocalRandom.current().nextX(...)} (where
+ * {@code X} is {@code Int}, {@code Long}, etc).
* When all usages are of this form, it is never possible to
- * accidently share ThreadLocalRandoms across multiple threads.
+ * accidentally share a {@code ThreadLocalRandom} across multiple threads.
*
* <p>This class also provides additional commonly used bounded random
* generation methods.
+ *
+ * @since 1.7
+ * @author Doug Lea
*/
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
- private final static long multiplier = 0x5DEECE66DL;
- private final static long addend = 0xBL;
- private final static long mask = (1L << 48) - 1;
+ private static final long multiplier = 0x5DEECE66DL;
+ private static final long addend = 0xBL;
+ private static final long mask = (1L << 48) - 1;
/**
- * The random seed. We can't use super.seed
+ * The random seed. We can't use super.seed.
*/
private long rnd;
/**
- * Initialization flag to permit the first and only allowed call
- * to setSeed (inside Random constructor) to succeed. We can't
- * allow others since it would cause setting seed in one part of a
- * program to unintentionally impact other usages by the thread.
+ * Initialization flag to permit calls to setSeed to succeed only
+ * while executing the Random constructor. We can't allow others
+ * since it would cause setting seed in one part of a program to
+ * unintentionally impact other usages by the thread.
*/
boolean initialized;
@@ -65,40 +69,42 @@ public class ThreadLocalRandom extends Random {
/**
* Constructor called only by localRandom.initialValue.
- * We rely on the fact that the superclass no-arg constructor
- * invokes setSeed exactly once to initialize.
*/
ThreadLocalRandom() {
super();
+ initialized = true;
}
/**
- * Returns the current Thread's ThreadLocalRandom
- * @return the current Thread's ThreadLocalRandom
+ * Returns the current thread's {@code ThreadLocalRandom}.
+ *
+ * @return the current thread's {@code ThreadLocalRandom}
*/
public static ThreadLocalRandom current() {
return localRandom.get();
}
/**
- * Throws UnsupportedOperationException. Setting seeds in this
- * generator is unsupported.
+ * Throws {@code UnsupportedOperationException}. Setting seeds in
+ * this generator is not supported.
+ *
* @throws UnsupportedOperationException always
*/
public void setSeed(long seed) {
if (initialized)
throw new UnsupportedOperationException();
- initialized = true;
rnd = (seed ^ multiplier) & mask;
}
protected int next(int bits) {
- return (int)((rnd = (rnd * multiplier + addend) & mask) >>> (48-bits));
+ rnd = (rnd * multiplier + addend) & mask;
+ return (int) (rnd >>> (48-bits));
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @throws IllegalArgumentException if least greater than or equal
@@ -113,7 +119,8 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed value
- * between 0 (inclusive) and the specified value (exclusive)
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
@@ -131,17 +138,18 @@ public class ThreadLocalRandom extends Random {
while (n >= Integer.MAX_VALUE) {
int bits = next(2);
long half = n >>> 1;
- long nextn = ((bits & 2) == 0)? half : n - half;
+ long nextn = ((bits & 2) == 0) ? half : n - half;
if ((bits & 1) == 0)
offset += n - nextn;
n = nextn;
}
- return offset + nextInt((int)n);
+ return offset + nextInt((int) n);
}
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
@@ -156,7 +164,8 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed {@code double} value
- * between 0 (inclusive) and the specified value (exclusive)
+ * between 0 (inclusive) and the specified value (exclusive).
+ *
* @param n the bound on the random number to be returned. Must be
* positive.
* @return the next value
@@ -171,6 +180,7 @@ public class ThreadLocalRandom extends Random {
/**
* Returns a pseudorandom, uniformly distributed value between the
* given least value (inclusive) and bound (exclusive).
+ *
* @param least the least value returned
* @param bound the upper bound (exclusive)
* @return the next value
@@ -183,4 +193,5 @@ public class ThreadLocalRandom extends Random {
return nextDouble() * (bound - least) + least;
}
+ private static final long serialVersionUID = -5851777807851030925L;
}
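
As a reading aid for the hunk above: next(bits) now advances the 48-bit seed and extracts the high bits in two statements. A minimal Scala sketch of that linear congruential step, using the same constants the class redeclares (names here are illustrative, not part of the library):

  object LcgStep {
    final val Multiplier = 0x5DEECE66DL
    final val Addend     = 0xBL
    final val Mask       = (1L << 48) - 1

    // Advance the seed, then take the top `bits` bits of the new state.
    def next(rnd: Long, bits: Int): (Long, Int) = {
      val updated = (rnd * Multiplier + Addend) & Mask
      (updated, (updated >>> (48 - bits)).toInt)
    }
  }
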
diff --git a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
index 9c7b2289c4..7d149c7ae5 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
@@ -1,7 +1,7 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
package scala.concurrent.forkjoin;
@@ -11,21 +11,23 @@ import java.util.concurrent.*;
* A {@link BlockingQueue} in which producers may wait for consumers
* to receive elements. A {@code TransferQueue} may be useful for
* example in message passing applications in which producers
- * sometimes (using method {@code transfer}) await receipt of
- * elements by consumers invoking {@code take} or {@code poll},
- * while at other times enqueue elements (via method {@code put})
- * without waiting for receipt. Non-blocking and time-out versions of
- * {@code tryTransfer} are also available. A TransferQueue may also
- * be queried via {@code hasWaitingConsumer} whether there are any
- * threads waiting for items, which is a converse analogy to a
- * {@code peek} operation.
+ * sometimes (using method {@link #transfer}) await receipt of
+ * elements by consumers invoking {@code take} or {@code poll}, while
+ * at other times enqueue elements (via method {@code put}) without
+ * waiting for receipt.
+ * {@linkplain #tryTransfer(Object) Non-blocking} and
+ * {@linkplain #tryTransfer(Object,long,TimeUnit) time-out} versions of
+ * {@code tryTransfer} are also available.
+ * A {@code TransferQueue} may also be queried, via {@link
+ * #hasWaitingConsumer}, whether there are any threads waiting for
+ * items, which is a converse analogy to a {@code peek} operation.
*
- * <p>Like any {@code BlockingQueue}, a {@code TransferQueue} may be
- * capacity bounded. If so, an attempted {@code transfer} operation
- * may initially block waiting for available space, and/or
- * subsequently block waiting for reception by a consumer. Note that
- * in a queue with zero capacity, such as {@link SynchronousQueue},
- * {@code put} and {@code transfer} are effectively synonymous.
+ * <p>Like other blocking queues, a {@code TransferQueue} may be
+ * capacity bounded. If so, an attempted transfer operation may
+ * initially block waiting for available space, and/or subsequently
+ * block waiting for reception by a consumer. Note that in a queue
+ * with zero capacity, such as {@link SynchronousQueue}, {@code put}
+ * and {@code transfer} are effectively synonymous.
*
* <p>This interface is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
@@ -37,9 +39,12 @@ import java.util.concurrent.*;
*/
public interface TransferQueue<E> extends BlockingQueue<E> {
/**
- * Transfers the specified element if there exists a consumer
- * already waiting to receive it, otherwise returning {@code false}
- * without enqueuing the element.
+ * Transfers the element to a waiting consumer immediately, if possible.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * otherwise returning {@code false} without enqueuing the element.
*
* @param e the element to transfer
* @return {@code true} if the element was transferred, else
@@ -53,13 +58,16 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
boolean tryTransfer(E e);
/**
- * Inserts the specified element into this queue, waiting if
- * necessary for space to become available and the element to be
- * dequeued by a consumer invoking {@code take} or {@code poll}.
+ * Transfers the element to a consumer, waiting if necessary to do so.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer.
*
* @param e the element to transfer
* @throws InterruptedException if interrupted while waiting,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
@@ -69,10 +77,15 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
void transfer(E e) throws InterruptedException;
/**
- * Inserts the specified element into this queue, waiting up to
- * the specified wait time if necessary for space to become
- * available and the element to be dequeued by a consumer invoking
- * {@code take} or {@code poll}.
+ * Transfers the element to a consumer if it is possible to do so
+ * before the timeout elapses.
+ *
+ * <p>More precisely, transfers the specified element immediately
+ * if there exists a consumer already waiting to receive it (in
+ * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
+ * else waits until the element is received by a consumer,
+ * returning {@code false} if the specified wait time elapses
+ * before the element can be transferred.
*
* @param e the element to transfer
* @param timeout how long to wait before giving up, in units of
@@ -81,9 +94,9 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
* {@code timeout} parameter
* @return {@code true} if successful, or {@code false} if
* the specified waiting time elapses before completion,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws InterruptedException if interrupted while waiting,
- * in which case the element is not enqueued.
+ * in which case the element is not left enqueued
* @throws ClassCastException if the class of the specified element
* prevents it from being added to this queue
* @throws NullPointerException if the specified element is null
@@ -95,7 +108,8 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Returns {@code true} if there is at least one consumer waiting
- * to dequeue an element via {@code take} or {@code poll}.
+ * to receive an element via {@link #take} or
+ * timed {@link #poll(long,TimeUnit) poll}.
* The return value represents a momentary state of affairs.
*
* @return {@code true} if there is at least one waiting consumer
@@ -104,15 +118,16 @@ public interface TransferQueue<E> extends BlockingQueue<E> {
/**
* Returns an estimate of the number of consumers waiting to
- * dequeue elements via {@code take} or {@code poll}. The return
- * value is an approximation of a momentary state of affairs, that
- * may be inaccurate if consumers have completed or given up
- * waiting. The value may be useful for monitoring and heuristics,
- * but not for synchronization control. Implementations of this
+ * receive elements via {@link #take} or timed
+ * {@link #poll(long,TimeUnit) poll}. The return value is an
+ * approximation of a momentary state of affairs, that may be
+ * inaccurate if consumers have completed or given up waiting.
+ * The value may be useful for monitoring and heuristics, but
+ * not for synchronization control. Implementations of this
* method are likely to be noticeably slower than those for
* {@link #hasWaitingConsumer}.
*
- * @return the number of consumers waiting to dequeue elements
+ * @return the number of consumers waiting to receive elements
*/
int getWaitingConsumerCount();
}
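
The reworded contracts above distinguish an immediate hand-off (tryTransfer) from a blocking one (transfer). A hedged Scala sketch of a producer that prefers direct hand-off and falls back to plain enqueuing; it assumes LinkedTransferQueue, from the same forkjoin package, as the implementing class:

  import scala.concurrent.forkjoin.{ LinkedTransferQueue, TransferQueue }

  def handOff[A](q: TransferQueue[A], msg: A): Unit =
    if (!q.tryTransfer(msg))  // consumer already waiting in take/timed poll?
      q.put(msg)              // no: enqueue without waiting for receipt

  val q = new LinkedTransferQueue[String]
  handOff(q, "hello")
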
diff --git a/src/forkjoin/scala/concurrent/forkjoin/package-info.java b/src/forkjoin/scala/concurrent/forkjoin/package-info.java
index b8fa0fad02..3561b9b44a 100644
--- a/src/forkjoin/scala/concurrent/forkjoin/package-info.java
+++ b/src/forkjoin/scala/concurrent/forkjoin/package-info.java
@@ -1,7 +1,7 @@
/*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/licenses/publicdomain
+ * http://creativecommons.org/publicdomain/zero/1.0/
*/
@@ -15,7 +15,7 @@
* Threads. However, when applicable, they typically provide
* significantly greater performance on multiprocessor platforms.
*
- * <p> Candidates for fork/join processing mainly include those that
+ * <p>Candidates for fork/join processing mainly include those that
* can be expressed using parallel divide-and-conquer techniques: To
* solve a problem, break it in two (or more) parts, and then solve
* those parts in parallel, continuing on in this way until the
@@ -24,6 +24,5 @@
* available to other threads (normally one per CPU), that help
* complete the tasks. In general, the most efficient ForkJoinTasks
* are those that directly implement this algorithmic design pattern.
- *
*/
package scala.concurrent.forkjoin;
diff --git a/src/library/scala/Tuple2.scala b/src/library/scala/Tuple2.scala
index ad3f7df697..b1befca4fa 100644
--- a/src/library/scala/Tuple2.scala
+++ b/src/library/scala/Tuple2.scala
@@ -19,7 +19,7 @@ import scala.collection.generic.{ CanBuildFrom => CBF }
* @param _1 Element 1 of this Tuple2
* @param _2 Element 2 of this Tuple2
*/
-case class Tuple2[@specialized(Int, Long, Double) +T1, @specialized(Int, Long, Double) +T2](_1: T1, _2: T2)
+case class Tuple2[@specialized(Int, Long, Double, Char, Boolean, AnyRef) +T1, @specialized(Int, Long, Double, Char, Boolean, AnyRef) +T2](_1: T1, _2: T2)
extends Product2[T1, T2]
{
override def toString() = "(" + _1 + "," + _2 + ")"
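
Widening the @specialized list makes the compiler emit additional primitive variants of Tuple2, so pairs with Char, Boolean, and AnyRef components no longer box through the generic representation. An illustrative sketch of the mechanism on a smaller class (the generated specializations and their names are compiler internals):

  class Cell[@specialized(Char, Boolean) A](val value: A)

  val c = new Cell('x')   // backed by the Char specialization: no
                          // java.lang.Character is allocated for `value`
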
diff --git a/src/library/scala/annotation/elidable.scala b/src/library/scala/annotation/elidable.scala
index 8dc180d7ab..053cdba220 100644
--- a/src/library/scala/annotation/elidable.scala
+++ b/src/library/scala/annotation/elidable.scala
@@ -10,22 +10,53 @@ package scala.annotation
import java.util.logging.Level
-/** An annotation for methods for which invocations might
- * be removed in the generated code.
+/** An annotation for methods whose bodies may be excluded
+ * from compiler-generated bytecode.
*
* Behavior is influenced by passing `-Xelide-below <arg>` to `scalac`.
- * Methods marked elidable will be omitted from generated code if the
- * priority given the annotation is lower than to the command line argument.
- * Examples:
- * {{{
- * import annotation.elidable._
+ * Calls to methods marked elidable (as well as the method body) will
+ * be omitted from generated code if the priority given in the annotation
+ * is lower than that given on the command line.
*
- * @elidable(WARNING) def foo = log("foo")
- * @elidable(FINE) def bar = log("bar")
+ * @elidable(123) // annotation priority
+ * scalac -Xelide-below 456 // command line priority
*
- * scalac -Xelide-below=1000
- * }}}
- * @since 2.8
+ * The method call will be replaced with an expression which depends on
+ * the type of the elided expression. In decreasing order of precedence:
+ *
+ * Unit ()
+ * Boolean false
+ * T <: AnyVal 0
+ * T >: Null null
+ * T >: Nothing Predef.???
+ *
+ * Complete example:
+ {{{
+ import annotation._, elidable._
+ object Test extends App {
+ def expensiveComputation(): Int = { Thread.sleep(1000) ; 172 }
+
+ @elidable(WARNING) def warning(msg: String) = println(msg)
+ @elidable(FINE) def debug(msg: String) = println(msg)
+ @elidable(FINE) def computedValue = expensiveComputation()
+
+ warning("Warning! Danger! Warning!")
+ debug("Debug! Danger! Debug!")
+ println("I computed a value: " + computedValue)
+ }
+ % scalac example.scala && scala Test
+ Warning! Danger! Warning!
+ Debug! Danger! Debug!
+ I computed a value: 172
+
+ // INFO lies between WARNING and FINE
+ % scalac -Xelide-below INFO example.scala && scala Test
+ Warning! Danger! Warning!
+ I computed a value: 0
+ }}}
+ *
+ * @author Paul Phillips
+ * @since 2.8
*/
final class elidable(final val level: Int) extends annotation.StaticAnnotation {}
diff --git a/src/library/scala/collection/GenTraversableLike.scala b/src/library/scala/collection/GenTraversableLike.scala
index c837775cf9..1dcc0bdac7 100644
--- a/src/library/scala/collection/GenTraversableLike.scala
+++ b/src/library/scala/collection/GenTraversableLike.scala
@@ -318,7 +318,7 @@ trait GenTraversableLike[+A, +Repr] extends GenTraversableOnce[A] with Paralleli
* $orderDependent
*
* @param from the lowest index to include from this $coll.
- * @param until the highest index to EXCLUDE from this $coll.
+ * @param until the lowest index to EXCLUDE from this $coll.
* @return a $coll containing the elements greater than or equal to
* index `from` extending up to (but not including) index `until`
* of this $coll.
diff --git a/src/library/scala/collection/immutable/List.scala b/src/library/scala/collection/immutable/List.scala
index 5f3f9b717f..e2a4a09938 100644
--- a/src/library/scala/collection/immutable/List.scala
+++ b/src/library/scala/collection/immutable/List.scala
@@ -205,6 +205,16 @@ sealed abstract class List[+A] extends AbstractSeq[A]
these
}
+ /**
+ * @example {{{
+ * // Given a list
+ * val letters = List('a','b','c','d','e')
+ *
+ * // `slice` returns all elements beginning at index `from` and afterwards,
+ * // up until index `until` (excluding index `until`.)
+ * letters.slice(1,3) // Returns List('b','c')
+ * }}}
+ */
override def slice(from: Int, until: Int): List[A] = {
val lo = math.max(from, 0)
if (until <= lo || isEmpty) Nil
diff --git a/src/library/scala/collection/immutable/RedBlack.scala b/src/library/scala/collection/immutable/RedBlack.scala
index 9906c9896e..83eeaa45ee 100644
--- a/src/library/scala/collection/immutable/RedBlack.scala
+++ b/src/library/scala/collection/immutable/RedBlack.scala
@@ -11,10 +11,13 @@
package scala.collection
package immutable
-/** A base class containing the implementations for `TreeMaps` and `TreeSets`.
+/** Old base class that was used by previous implementations of `TreeMaps` and `TreeSets`.
+ *
+ * Deprecated due to various performance bugs (see [[https://issues.scala-lang.org/browse/SI-5331 SI-5331]] for more information).
*
* @since 2.3
*/
+@deprecated("use `TreeMap` or `TreeSet` instead", "2.10")
@SerialVersionUID(8691885935445612921L)
abstract class RedBlack[A] extends Serializable {
@@ -287,5 +290,3 @@ abstract class RedBlack[A] extends Serializable {
def isBlack = true
}
}
-
-
diff --git a/src/library/scala/collection/immutable/RedBlackTree.scala b/src/library/scala/collection/immutable/RedBlackTree.scala
new file mode 100644
index 0000000000..0f28c4997b
--- /dev/null
+++ b/src/library/scala/collection/immutable/RedBlackTree.scala
@@ -0,0 +1,485 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2005-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+
+
+package scala.collection
+package immutable
+
+import annotation.tailrec
+import annotation.meta.getter
+
+/** An object containing the RedBlack tree implementation used by `TreeMaps` and `TreeSets`.
+ *
+ * Implementation note: since efficiency is important for data structures this implementation
+ * uses <code>null</code> to represent empty trees. This also means pattern matching cannot
+ * easily be used. The API represented by the RedBlackTree object tries to hide these
+ * optimizations behind a reasonably clean API.
+ *
+ * @since 2.10
+ */
+private[immutable]
+object RedBlackTree {
+
+ def isEmpty(tree: Tree[_, _]): Boolean = tree eq null
+
+ def contains[A](tree: Tree[A, _], x: A)(implicit ordering: Ordering[A]): Boolean = lookup(tree, x) ne null
+ def get[A, B](tree: Tree[A, B], x: A)(implicit ordering: Ordering[A]): Option[B] = lookup(tree, x) match {
+ case null => None
+ case tree => Some(tree.value)
+ }
+
+ @tailrec
+ def lookup[A, B](tree: Tree[A, B], x: A)(implicit ordering: Ordering[A]): Tree[A, B] = if (tree eq null) null else {
+ val cmp = ordering.compare(x, tree.key)
+ if (cmp < 0) lookup(tree.left, x)
+ else if (cmp > 0) lookup(tree.right, x)
+ else tree
+ }
+
+ def count(tree: Tree[_, _]) = if (tree eq null) 0 else tree.count
+ def update[A, B, B1 >: B](tree: Tree[A, B], k: A, v: B1)(implicit ordering: Ordering[A]): Tree[A, B1] = blacken(upd(tree, k, v))
+ def delete[A, B](tree: Tree[A, B], k: A)(implicit ordering: Ordering[A]): Tree[A, B] = blacken(del(tree, k))
+ def rangeImpl[A: Ordering, B](tree: Tree[A, B], from: Option[A], until: Option[A]): Tree[A, B] = (from, until) match {
+ case (Some(from), Some(until)) => this.range(tree, from, until)
+ case (Some(from), None) => this.from(tree, from)
+ case (None, Some(until)) => this.until(tree, until)
+ case (None, None) => tree
+ }
+ def range[A: Ordering, B](tree: Tree[A, B], from: A, until: A): Tree[A, B] = blacken(doRange(tree, from, until))
+ def from[A: Ordering, B](tree: Tree[A, B], from: A): Tree[A, B] = blacken(doFrom(tree, from))
+ def to[A: Ordering, B](tree: Tree[A, B], to: A): Tree[A, B] = blacken(doTo(tree, to))
+ def until[A: Ordering, B](tree: Tree[A, B], key: A): Tree[A, B] = blacken(doUntil(tree, key))
+
+ def drop[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = blacken(doDrop(tree, n))
+ def take[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = blacken(doTake(tree, n))
+ def slice[A: Ordering, B](tree: Tree[A, B], from: Int, until: Int): Tree[A, B] = blacken(doSlice(tree, from, until))
+
+ def smallest[A, B](tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) throw new NoSuchElementException("empty map")
+ var result = tree
+ while (result.left ne null) result = result.left
+ result
+ }
+ def greatest[A, B](tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) throw new NoSuchElementException("empty map")
+ var result = tree
+ while (result.right ne null) result = result.right
+ result
+ }
+
+ def foreach[A, B, U](tree: Tree[A, B], f: ((A, B)) => U): Unit = if (tree ne null) {
+ if (tree.left ne null) foreach(tree.left, f)
+ f((tree.key, tree.value))
+ if (tree.right ne null) foreach(tree.right, f)
+ }
+ def foreachKey[A, U](tree: Tree[A, _], f: A => U): Unit = if (tree ne null) {
+ if (tree.left ne null) foreachKey(tree.left, f)
+ f(tree.key)
+ if (tree.right ne null) foreachKey(tree.right, f)
+ }
+
+ def iterator[A, B](tree: Tree[A, B]): Iterator[(A, B)] = new EntriesIterator(tree)
+ def keysIterator[A, _](tree: Tree[A, _]): Iterator[A] = new KeysIterator(tree)
+ def valuesIterator[_, B](tree: Tree[_, B]): Iterator[B] = new ValuesIterator(tree)
+
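+ // Returns the node holding the n-th smallest key (0-based), steering left or
+ // right by comparing n against the cached size of the left subtree.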
+ @tailrec
+ def nth[A, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ val count = this.count(tree.left)
+ if (n < count) nth(tree.left, n)
+ else if (n > count) nth(tree.right, n - count - 1)
+ else tree
+ }
+
+ def isBlack(tree: Tree[_, _]) = (tree eq null) || isBlackTree(tree)
+
+ private[this] def isRedTree(tree: Tree[_, _]) = tree.isInstanceOf[RedTree[_, _]]
+ private[this] def isBlackTree(tree: Tree[_, _]) = tree.isInstanceOf[BlackTree[_, _]]
+
+ private[this] def blacken[A, B](t: Tree[A, B]): Tree[A, B] = if (t eq null) null else t.black
+
+ private[this] def mkTree[A, B](isBlack: Boolean, k: A, v: B, l: Tree[A, B], r: Tree[A, B]) =
+ if (isBlack) BlackTree(k, v, l, r) else RedTree(k, v, l, r)
+
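+ // Okasaki-style rebalancing: a red child carrying a red grandchild is
+ // rewritten into a red node over two black subtrees.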
+ private[this] def balanceLeft[A, B, B1 >: B](isBlack: Boolean, z: A, zv: B, l: Tree[A, B1], d: Tree[A, B1]): Tree[A, B1] = {
+ if (isRedTree(l) && isRedTree(l.left))
+ RedTree(l.key, l.value, BlackTree(l.left.key, l.left.value, l.left.left, l.left.right), BlackTree(z, zv, l.right, d))
+ else if (isRedTree(l) && isRedTree(l.right))
+ RedTree(l.right.key, l.right.value, BlackTree(l.key, l.value, l.left, l.right.left), BlackTree(z, zv, l.right.right, d))
+ else
+ mkTree(isBlack, z, zv, l, d)
+ }
+ private[this] def balanceRight[A, B, B1 >: B](isBlack: Boolean, x: A, xv: B, a: Tree[A, B1], r: Tree[A, B1]): Tree[A, B1] = {
+ if (isRedTree(r) && isRedTree(r.left))
+ RedTree(r.left.key, r.left.value, BlackTree(x, xv, a, r.left.left), BlackTree(r.key, r.value, r.left.right, r.right))
+ else if (isRedTree(r) && isRedTree(r.right))
+ RedTree(r.key, r.value, BlackTree(x, xv, a, r.left), BlackTree(r.right.key, r.right.value, r.right.left, r.right.right))
+ else
+ mkTree(isBlack, x, xv, a, r)
+ }
+ private[this] def upd[A, B, B1 >: B](tree: Tree[A, B], k: A, v: B1)(implicit ordering: Ordering[A]): Tree[A, B1] = if (tree eq null) {
+ RedTree(k, v, null, null)
+ } else {
+ val cmp = ordering.compare(k, tree.key)
+ if (cmp < 0) balanceLeft(isBlackTree(tree), tree.key, tree.value, upd(tree.left, k, v), tree.right)
+ else if (cmp > 0) balanceRight(isBlackTree(tree), tree.key, tree.value, tree.left, upd(tree.right, k, v))
+ else mkTree(isBlackTree(tree), k, v, tree.left, tree.right)
+ }
+
+ // Based on Stefan Kahrs' Haskell version of Okasaki's Red&Black Trees
+ // http://www.cse.unsw.edu.au/~dons/data/RedBlackTree.html
+ private[this] def del[A, B](tree: Tree[A, B], k: A)(implicit ordering: Ordering[A]): Tree[A, B] = if (tree eq null) null else {
+ def balance(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tl)) {
+ if (isRedTree(tr)) {
+ RedTree(x, xv, tl.black, tr.black)
+ } else if (isRedTree(tl.left)) {
+ RedTree(tl.key, tl.value, tl.left.black, BlackTree(x, xv, tl.right, tr))
+ } else if (isRedTree(tl.right)) {
+ RedTree(tl.right.key, tl.right.value, BlackTree(tl.key, tl.value, tl.left, tl.right.left), BlackTree(x, xv, tl.right.right, tr))
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ } else if (isRedTree(tr)) {
+ if (isRedTree(tr.right)) {
+ RedTree(tr.key, tr.value, BlackTree(x, xv, tl, tr.left), tr.right.black)
+ } else if (isRedTree(tr.left)) {
+ RedTree(tr.left.key, tr.left.value, BlackTree(x, xv, tl, tr.left.left), BlackTree(tr.key, tr.value, tr.left.right, tr.right))
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ } else {
+ BlackTree(x, xv, tl, tr)
+ }
+ def subl(t: Tree[A, B]) =
+ if (t.isInstanceOf[BlackTree[_, _]]) t.red
+ else sys.error("Defect: invariance violation; expected black, got "+t)
+
+ def balLeft(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tl)) {
+ RedTree(x, xv, tl.black, tr)
+ } else if (isBlackTree(tr)) {
+ balance(x, xv, tl, tr.red)
+ } else if (isRedTree(tr) && isBlackTree(tr.left)) {
+ RedTree(tr.left.key, tr.left.value, BlackTree(x, xv, tl, tr.left.left), balance(tr.key, tr.value, tr.left.right, subl(tr.right)))
+ } else {
+ sys.error("Defect: invariance violation")
+ }
+ def balRight(x: A, xv: B, tl: Tree[A, B], tr: Tree[A, B]) = if (isRedTree(tr)) {
+ RedTree(x, xv, tl, tr.black)
+ } else if (isBlackTree(tl)) {
+ balance(x, xv, tl.red, tr)
+ } else if (isRedTree(tl) && isBlackTree(tl.right)) {
+ RedTree(tl.right.key, tl.right.value, balance(tl.key, tl.value, subl(tl.left), tl.right.left), BlackTree(x, xv, tl.right.right, tr))
+ } else {
+ sys.error("Defect: invariance violation")
+ }
+ def delLeft = if (isBlackTree(tree.left)) balLeft(tree.key, tree.value, del(tree.left, k), tree.right) else RedTree(tree.key, tree.value, del(tree.left, k), tree.right)
+ def delRight = if (isBlackTree(tree.right)) balRight(tree.key, tree.value, tree.left, del(tree.right, k)) else RedTree(tree.key, tree.value, tree.left, del(tree.right, k))
+ def append(tl: Tree[A, B], tr: Tree[A, B]): Tree[A, B] = if (tl eq null) {
+ tr
+ } else if (tr eq null) {
+ tl
+ } else if (isRedTree(tl) && isRedTree(tr)) {
+ val bc = append(tl.right, tr.left)
+ if (isRedTree(bc)) {
+ RedTree(bc.key, bc.value, RedTree(tl.key, tl.value, tl.left, bc.left), RedTree(tr.key, tr.value, bc.right, tr.right))
+ } else {
+ RedTree(tl.key, tl.value, tl.left, RedTree(tr.key, tr.value, bc, tr.right))
+ }
+ } else if (isBlackTree(tl) && isBlackTree(tr)) {
+ val bc = append(tl.right, tr.left)
+ if (isRedTree(bc)) {
+ RedTree(bc.key, bc.value, BlackTree(tl.key, tl.value, tl.left, bc.left), BlackTree(tr.key, tr.value, bc.right, tr.right))
+ } else {
+ balLeft(tl.key, tl.value, tl.left, BlackTree(tr.key, tr.value, bc, tr.right))
+ }
+ } else if (isRedTree(tr)) {
+ RedTree(tr.key, tr.value, append(tl, tr.left), tr.right)
+ } else if (isRedTree(tl)) {
+ RedTree(tl.key, tl.value, tl.left, append(tl.right, tr))
+ } else {
+ sys.error("unmatched tree on append: " + tl + ", " + tr)
+ }
+
+ val cmp = ordering.compare(k, tree.key)
+ if (cmp < 0) delLeft
+ else if (cmp > 0) delRight
+ else append(tree.left, tree.right)
+ }
+
+ private[this] def doFrom[A, B](tree: Tree[A, B], from: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(tree.key, from)) return doFrom(tree.right, from)
+ val newLeft = doFrom(tree.left, from)
+ if (newLeft eq tree.left) tree
+ else if (newLeft eq null) upd(tree.right, tree.key, tree.value)
+ else rebalance(tree, newLeft, tree.right)
+ }
+ private[this] def doTo[A, B](tree: Tree[A, B], to: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(to, tree.key)) return doTo(tree.left, to)
+ val newRight = doTo(tree.right, to)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doUntil[A, B](tree: Tree[A, B], until: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lteq(until, tree.key)) return doUntil(tree.left, until)
+ val newRight = doUntil(tree.right, until)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doRange[A, B](tree: Tree[A, B], from: A, until: A)(implicit ordering: Ordering[A]): Tree[A, B] = {
+ if (tree eq null) return null
+ if (ordering.lt(tree.key, from)) return doRange(tree.right, from, until);
+ if (ordering.lteq(until, tree.key)) return doRange(tree.left, from, until);
+ val newLeft = doFrom(tree.left, from)
+ val newRight = doUntil(tree.right, until)
+ if ((newLeft eq tree.left) && (newRight eq tree.right)) tree
+ else if (newLeft eq null) upd(newRight, tree.key, tree.value);
+ else if (newRight eq null) upd(newLeft, tree.key, tree.value);
+ else rebalance(tree, newLeft, newRight)
+ }
+
+ private[this] def doDrop[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ if (n <= 0) return tree
+ if (n >= this.count(tree)) return null
+ val count = this.count(tree.left)
+ if (n > count) return doDrop(tree.right, n - count - 1)
+ val newLeft = doDrop(tree.left, n)
+ if (newLeft eq tree.left) tree
+ else if (newLeft eq null) upd(tree.right, tree.key, tree.value)
+ else rebalance(tree, newLeft, tree.right)
+ }
+ private[this] def doTake[A: Ordering, B](tree: Tree[A, B], n: Int): Tree[A, B] = {
+ if (n <= 0) return null
+ if (n >= this.count(tree)) return tree
+ val count = this.count(tree.left)
+ if (n <= count) return doTake(tree.left, n)
+ val newRight = doTake(tree.right, n - count - 1)
+ if (newRight eq tree.right) tree
+ else if (newRight eq null) upd(tree.left, tree.key, tree.value)
+ else rebalance(tree, tree.left, newRight)
+ }
+ private[this] def doSlice[A: Ordering, B](tree: Tree[A, B], from: Int, until: Int): Tree[A, B] = {
+ if (tree eq null) return null
+ val count = this.count(tree.left)
+ if (from > count) return doSlice(tree.right, from - count - 1, until - count - 1)
+ if (until <= count) return doSlice(tree.left, from, until)
+ val newLeft = doDrop(tree.left, from)
+ val newRight = doTake(tree.right, until - count - 1)
+ if ((newLeft eq tree.left) && (newRight eq tree.right)) tree
+ else if (newLeft eq null) upd(newRight, tree.key, tree.value)
+ else if (newRight eq null) upd(newLeft, tree.key, tree.value)
+ else rebalance(tree, newLeft, newRight)
+ }
+
+ // The zipper returned might have been traversed leftmost (always the left
+ // child) or rightmost (always the right child). Left trees are traversed
+ // rightmost, and right trees are traversed leftmost.
+
+ // Returns the zipper for the side with deepest black nodes depth, a flag
+ // indicating whether the trees were unbalanced at all, and a flag indicating
+ // whether the zipper was traversed left-most or right-most.
+
+ // If the trees were balanced, returns an empty zipper
+ private[this] def compareDepth[A, B](left: Tree[A, B], right: Tree[A, B]): (List[Tree[A, B]], Boolean, Boolean, Int) = {
+ // Once a side is found to be deeper, unzip it to the bottom
+ def unzip(zipper: List[Tree[A, B]], leftMost: Boolean): List[Tree[A, B]] = {
+ val next = if (leftMost) zipper.head.left else zipper.head.right
+ next match {
+ case null => zipper
+ case node => unzip(node :: zipper, leftMost)
+ }
+ }
+
+ // Unzip left tree on the rightmost side and right tree on the leftmost side until one is
+ // found to be deeper, or the bottom is reached
+ def unzipBoth(left: Tree[A, B],
+ right: Tree[A, B],
+ leftZipper: List[Tree[A, B]],
+ rightZipper: List[Tree[A, B]],
+ smallerDepth: Int): (List[Tree[A, B]], Boolean, Boolean, Int) = {
+ if (isBlackTree(left) && isBlackTree(right)) {
+ unzipBoth(left.right, right.left, left :: leftZipper, right :: rightZipper, smallerDepth + 1)
+ } else if (isRedTree(left) && isRedTree(right)) {
+ unzipBoth(left.right, right.left, left :: leftZipper, right :: rightZipper, smallerDepth)
+ } else if (isRedTree(right)) {
+ unzipBoth(left, right.left, leftZipper, right :: rightZipper, smallerDepth)
+ } else if (isRedTree(left)) {
+ unzipBoth(left.right, right, left :: leftZipper, rightZipper, smallerDepth)
+ } else if ((left eq null) && (right eq null)) {
+ (Nil, true, false, smallerDepth)
+ } else if ((left eq null) && isBlackTree(right)) {
+ val leftMost = true
+ (unzip(right :: rightZipper, leftMost), false, leftMost, smallerDepth)
+ } else if (isBlackTree(left) && (right eq null)) {
+ val leftMost = false
+ (unzip(left :: leftZipper, leftMost), false, leftMost, smallerDepth)
+ } else {
+ sys.error("unmatched trees in unzip: " + left + ", " + right)
+ }
+ }
+ unzipBoth(left, right, Nil, Nil, 0)
+ }
+
+ private[this] def rebalance[A, B](tree: Tree[A, B], newLeft: Tree[A, B], newRight: Tree[A, B]) = {
+ // This is like drop(n-1), but only counting black nodes
+ def findDepth(zipper: List[Tree[A, B]], depth: Int): List[Tree[A, B]] = zipper match {
+ case head :: tail if isBlackTree(head) =>
+ if (depth == 1) zipper else findDepth(tail, depth - 1)
+ case _ :: tail => findDepth(tail, depth)
+ case Nil => sys.error("Defect: unexpected empty zipper while computing range")
+ }
+
+ // Blackening the smaller tree avoids balancing problems on union;
+ // this can't be done later, though, or it would change the result of compareDepth
+ val blkNewLeft = blacken(newLeft)
+ val blkNewRight = blacken(newRight)
+ val (zipper, levelled, leftMost, smallerDepth) = compareDepth(blkNewLeft, blkNewRight)
+
+ if (levelled) {
+ BlackTree(tree.key, tree.value, blkNewLeft, blkNewRight)
+ } else {
+ val zipFrom = findDepth(zipper, smallerDepth)
+ val union = if (leftMost) {
+ RedTree(tree.key, tree.value, blkNewLeft, zipFrom.head)
+ } else {
+ RedTree(tree.key, tree.value, zipFrom.head, blkNewRight)
+ }
+ val zippedTree = zipFrom.tail.foldLeft(union: Tree[A, B]) { (tree, node) =>
+ if (leftMost)
+ balanceLeft(isBlackTree(node), node.key, node.value, tree, node.right)
+ else
+ balanceRight(isBlackTree(node), node.key, node.value, node.left, tree)
+ }
+ zippedTree
+ }
+ }
+
+ /*
+ * Forcing direct field access using the @inline annotation helps speed up
+ * various operations (especially smallest/greatest and update/delete).
+ *
+ * Unfortunately the direct field access is not guaranteed to work (but
+ * works on the current implementation of the Scala compiler).
+ *
+ * An alternative is to implement these classes using plain old Java code...
+ */
+ sealed abstract class Tree[A, +B](
+ @(inline @getter) final val key: A,
+ @(inline @getter) final val value: B,
+ @(inline @getter) final val left: Tree[A, B],
+ @(inline @getter) final val right: Tree[A, B])
+ extends Serializable {
+ final val count: Int = 1 + RedBlackTree.count(left) + RedBlackTree.count(right)
+ def black: Tree[A, B]
+ def red: Tree[A, B]
+ }
+ final class RedTree[A, +B](key: A,
+ value: B,
+ left: Tree[A, B],
+ right: Tree[A, B]) extends Tree[A, B](key, value, left, right) {
+ override def black: Tree[A, B] = BlackTree(key, value, left, right)
+ override def red: Tree[A, B] = this
+ override def toString: String = "RedTree(" + key + ", " + value + ", " + left + ", " + right + ")"
+ }
+ final class BlackTree[A, +B](key: A,
+ value: B,
+ left: Tree[A, B],
+ right: Tree[A, B]) extends Tree[A, B](key, value, left, right) {
+ override def black: Tree[A, B] = this
+ override def red: Tree[A, B] = RedTree(key, value, left, right)
+ override def toString: String = "BlackTree(" + key + ", " + value + ", " + left + ", " + right + ")"
+ }
+
+ object RedTree {
+ @inline def apply[A, B](key: A, value: B, left: Tree[A, B], right: Tree[A, B]) = new RedTree(key, value, left, right)
+ def unapply[A, B](t: RedTree[A, B]) = Some((t.key, t.value, t.left, t.right))
+ }
+ object BlackTree {
+ @inline def apply[A, B](key: A, value: B, left: Tree[A, B], right: Tree[A, B]) = new BlackTree(key, value, left, right)
+ def unapply[A, B](t: BlackTree[A, B]) = Some((t.key, t.value, t.left, t.right))
+ }
+
+ private[this] abstract class TreeIterator[A, B, R](tree: Tree[A, B]) extends Iterator[R] {
+ protected[this] def nextResult(tree: Tree[A, B]): R
+
+ override def hasNext: Boolean = next ne null
+
+ override def next: R = next match {
+ case null =>
+ throw new NoSuchElementException("next on empty iterator")
+ case tree =>
+ next = findNext(tree.right)
+ nextResult(tree)
+ }
+
+ @tailrec
+ private[this] def findNext(tree: Tree[A, B]): Tree[A, B] = {
+ if (tree eq null) popPath()
+ else if (tree.left eq null) tree
+ else {
+ pushPath(tree)
+ findNext(tree.left)
+ }
+ }
+
+ private[this] def pushPath(tree: Tree[A, B]) {
+ try {
+ path(index) = tree
+ index += 1
+ } catch {
+ case _: ArrayIndexOutOfBoundsException =>
+ /*
+ * Either the tree became unbalanced or we calculated the maximum height incorrectly.
+ * To avoid crashing the iterator we expand the path array. Obviously this should never
+ * happen...
+ *
+ * An exception handler is used instead of an if-condition to optimize the normal path.
+ * This makes a large difference in iteration speed!
+ */
+ assert(index >= path.length)
+ path :+= null
+ pushPath(tree)
+ }
+ }
+ private[this] def popPath(): Tree[A, B] = if (index == 0) null else {
+ index -= 1
+ path(index)
+ }
+
+ private[this] var path = if (tree eq null) null else {
+ /*
+ * According to "Ralf Hinze. Constructing red-black trees" [http://www.cs.ox.ac.uk/ralf.hinze/publications/#P5]
+ * the maximum height of a red-black tree is 2*log_2(n + 2) - 2.
+ *
+ * According to {@see Integer#numberOfLeadingZeros} ceil(log_2(n)) = (32 - Integer.numberOfLeadingZeros(n - 1))
+ *
+ * We also don't store the deepest nodes in the path so the maximum path length is further reduced by one.
+ */
+ val maximumHeight = 2 * (32 - Integer.numberOfLeadingZeros(tree.count + 2 - 1)) - 2 - 1
+ new Array[Tree[A, B]](maximumHeight)
+ }
+ private[this] var index = 0
+ private[this] var next: Tree[A, B] = findNext(tree)
+ }
+
+ private[this] class EntriesIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, (A, B)](tree) {
+ override def nextResult(tree: Tree[A, B]) = (tree.key, tree.value)
+ }
+
+ private[this] class KeysIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, A](tree) {
+ override def nextResult(tree: Tree[A, B]) = tree.key
+ }
+
+ private[this] class ValuesIterator[A, B](tree: Tree[A, B]) extends TreeIterator[A, B, B](tree) {
+ override def nextResult(tree: Tree[A, B]) = tree.value
+ }
+}
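
A small sketch of the API this new object exposes. Since it is private[immutable], this is illustrative shape only, not code a library client could compile:

  import scala.collection.immutable.RedBlackTree._

  var t: Tree[Int, String] = null                 // null encodes the empty tree
  for (k <- 1 to 5) t = update(t, k, "v" + k)
  assert(count(t) == 5)
  assert(get(t, 3) == Some("v3"))
  assert(iterator(t).map(_._1).toList == List(1, 2, 3, 4, 5))
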
diff --git a/src/library/scala/collection/immutable/TreeMap.scala b/src/library/scala/collection/immutable/TreeMap.scala
index ef0eac3701..dc4f79be35 100644
--- a/src/library/scala/collection/immutable/TreeMap.scala
+++ b/src/library/scala/collection/immutable/TreeMap.scala
@@ -12,6 +12,7 @@ package scala.collection
package immutable
import generic._
+import immutable.{RedBlackTree => RB}
import mutable.Builder
import annotation.bridge
@@ -23,7 +24,6 @@ object TreeMap extends ImmutableSortedMapFactory[TreeMap] {
def empty[A, B](implicit ord: Ordering[A]) = new TreeMap[A, B]()(ord)
/** $sortedMapCanBuildFromInfo */
implicit def canBuildFrom[A, B](implicit ord: Ordering[A]): CanBuildFrom[Coll, (A, B), TreeMap[A, B]] = new SortedMapCanBuildFrom[A, B]
- private def make[A, B](s: Int, t: RedBlack[A]#Tree[B])(implicit ord: Ordering[A]) = new TreeMap[A, B](s, t)(ord)
}
/** This class implements immutable maps using a tree.
@@ -46,31 +46,79 @@ object TreeMap extends ImmutableSortedMapFactory[TreeMap] {
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
-class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit val ordering: Ordering[A])
- extends RedBlack[A]
- with SortedMap[A, B]
+class TreeMap[A, +B] private (tree: RB.Tree[A, B])(implicit val ordering: Ordering[A])
+ extends SortedMap[A, B]
with SortedMapLike[A, B, TreeMap[A, B]]
with MapLike[A, B, TreeMap[A, B]]
with Serializable {
+ @deprecated("use `ordering.lt` instead", "2.10")
def isSmaller(x: A, y: A) = ordering.lt(x, y)
override protected[this] def newBuilder : Builder[(A, B), TreeMap[A, B]] =
TreeMap.newBuilder[A, B]
- def this()(implicit ordering: Ordering[A]) = this(0, null)(ordering)
+ override def size = RB.count(tree)
- protected val tree: RedBlack[A]#Tree[B] = if (size == 0) Empty else t
+ def this()(implicit ordering: Ordering[A]) = this(null)(ordering)
- override def rangeImpl(from : Option[A], until : Option[A]): TreeMap[A,B] = {
- val ntree = tree.range(from,until)
- new TreeMap[A,B](ntree.count, ntree)
- }
+ override def rangeImpl(from: Option[A], until: Option[A]): TreeMap[A, B] = new TreeMap[A, B](RB.rangeImpl(tree, from, until))
+ override def range(from: A, until: A): TreeMap[A, B] = new TreeMap[A, B](RB.range(tree, from, until))
+ override def from(from: A): TreeMap[A, B] = new TreeMap[A, B](RB.from(tree, from))
+ override def to(to: A): TreeMap[A, B] = new TreeMap[A, B](RB.to(tree, to))
+ override def until(until: A): TreeMap[A, B] = new TreeMap[A, B](RB.until(tree, until))
- override def firstKey = t.first
- override def lastKey = t.last
+ override def firstKey = RB.smallest(tree).key
+ override def lastKey = RB.greatest(tree).key
override def compare(k0: A, k1: A): Int = ordering.compare(k0, k1)
+ override def head = {
+ val smallest = RB.smallest(tree)
+ (smallest.key, smallest.value)
+ }
+ override def headOption = if (RB.isEmpty(tree)) None else Some(head)
+ override def last = {
+ val greatest = RB.greatest(tree)
+ (greatest.key, greatest.value)
+ }
+ override def lastOption = if (RB.isEmpty(tree)) None else Some(last)
+
+ override def tail = new TreeMap(RB.delete(tree, firstKey))
+ override def init = new TreeMap(RB.delete(tree, lastKey))
+
+ override def drop(n: Int) = {
+ if (n <= 0) this
+ else if (n >= size) empty
+ else new TreeMap(RB.drop(tree, n))
+ }
+
+ override def take(n: Int) = {
+ if (n <= 0) empty
+ else if (n >= size) this
+ else new TreeMap(RB.take(tree, n))
+ }
+
+ override def slice(from: Int, until: Int) = {
+ if (until <= from) empty
+ else if (from <= 0) take(until)
+ else if (until >= size) drop(from)
+ else new TreeMap(RB.slice(tree, from, until))
+ }
+
+ override def dropRight(n: Int) = take(size - n)
+ override def takeRight(n: Int) = drop(size - n)
+ override def splitAt(n: Int) = (take(n), drop(n))
+
+ private[this] def countWhile(p: ((A, B)) => Boolean): Int = {
+ var result = 0
+ val it = iterator
+ while (it.hasNext && p(it.next)) result += 1
+ result
+ }
+ override def dropWhile(p: ((A, B)) => Boolean) = drop(countWhile(p))
+ override def takeWhile(p: ((A, B)) => Boolean) = take(countWhile(p))
+ override def span(p: ((A, B)) => Boolean) = splitAt(countWhile(p))
+
/** A factory to create empty maps of the same type of keys.
*/
override def empty: TreeMap[A, B] = TreeMap.empty[A, B](ordering)
@@ -84,10 +132,7 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @param value the value to be associated with `key`
* @return a new $coll with the updated binding
*/
- override def updated [B1 >: B](key: A, value: B1): TreeMap[A, B1] = {
- val newsize = if (tree.lookup(key).isEmpty) size + 1 else size
- TreeMap.make(newsize, tree.update(key, value))
- }
+ override def updated [B1 >: B](key: A, value: B1): TreeMap[A, B1] = new TreeMap(RB.update(tree, key, value))
/** Add a key/value pair to this map.
* @tparam B1 type of the value of the new binding, a supertype of `B`
@@ -128,14 +173,13 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @return a new $coll with the inserted binding, if it wasn't present in the map
*/
def insert [B1 >: B](key: A, value: B1): TreeMap[A, B1] = {
- assert(tree.lookup(key).isEmpty)
- TreeMap.make(size + 1, tree.update(key, value))
+ assert(!RB.contains(tree, key))
+ new TreeMap(RB.update(tree, key, value))
}
def - (key:A): TreeMap[A, B] =
- if (tree.lookup(key).isEmpty) this
- else if (size == 1) empty
- else TreeMap.make(size - 1, tree.delete(key))
+ if (!RB.contains(tree, key)) this
+ else new TreeMap(RB.delete(tree, key))
/** Check if this map maps `key` to a value and return the
* value if it exists.
@@ -143,21 +187,22 @@ class TreeMap[A, +B](override val size: Int, t: RedBlack[A]#Tree[B])(implicit va
* @param key the key of the mapping of interest
* @return the value of the mapping, if it exists
*/
- override def get(key: A): Option[B] = tree.lookup(key) match {
- case n: NonEmpty[b] => Some(n.value)
- case _ => None
- }
+ override def get(key: A): Option[B] = RB.get(tree, key)
/** Creates a new iterator over all elements contained in this
* object.
*
* @return the new iterator
*/
- def iterator: Iterator[(A, B)] = tree.toStream.iterator
+ override def iterator: Iterator[(A, B)] = RB.iterator(tree)
+
+ override def keysIterator: Iterator[A] = RB.keysIterator(tree)
+ override def valuesIterator: Iterator[B] = RB.valuesIterator(tree)
- override def toStream: Stream[(A, B)] = tree.toStream
+ override def contains(key: A): Boolean = RB.contains(tree, key)
+ override def isDefinedAt(key: A): Boolean = RB.contains(tree, key)
- override def foreach[U](f : ((A,B)) => U) = tree foreach { case (x, y) => f(x, y) }
+ override def foreach[U](f : ((A,B)) => U) = RB.foreach(tree, f)
}
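
The overrides above route take/drop/slice and friends through the structural operations on RB.Tree instead of rebuilding the map element by element. The resulting behavior, as a short example:

  import scala.collection.immutable.TreeMap

  val m = TreeMap(1 -> "a", 2 -> "b", 3 -> "c", 4 -> "d")
  m.drop(1)         // 2 -> b, 3 -> c, 4 -> d
  m.take(2)         // 1 -> a, 2 -> b
  m.slice(1, 3)     // 2 -> b, 3 -> c
  m.span(_._1 < 3)  // (1 -> a, 2 -> b) and (3 -> c, 4 -> d)
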
diff --git a/src/library/scala/collection/immutable/TreeSet.scala b/src/library/scala/collection/immutable/TreeSet.scala
index 8b90ece143..1b3d72ceb7 100644
--- a/src/library/scala/collection/immutable/TreeSet.scala
+++ b/src/library/scala/collection/immutable/TreeSet.scala
@@ -12,6 +12,7 @@ package scala.collection
package immutable
import generic._
+import immutable.{RedBlackTree => RB}
import mutable.{ Builder, SetBuilder }
/** $factoryInfo
@@ -46,20 +47,61 @@ object TreeSet extends ImmutableSortedSetFactory[TreeSet] {
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
-@SerialVersionUID(-234066569443569402L)
-class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
- (implicit val ordering: Ordering[A])
- extends RedBlack[A] with SortedSet[A] with SortedSetLike[A, TreeSet[A]] with Serializable {
+@SerialVersionUID(-5685982407650748405L)
+class TreeSet[A] private (tree: RB.Tree[A, Unit])(implicit val ordering: Ordering[A])
+ extends SortedSet[A] with SortedSetLike[A, TreeSet[A]] with Serializable {
override def stringPrefix = "TreeSet"
- def isSmaller(x: A, y: A) = compare(x,y) < 0
+ override def size = RB.count(tree)
+
+ override def head = RB.smallest(tree).key
+ override def headOption = if (RB.isEmpty(tree)) None else Some(head)
+ override def last = RB.greatest(tree).key
+ override def lastOption = if (RB.isEmpty(tree)) None else Some(last)
+
+ override def tail = new TreeSet(RB.delete(tree, firstKey))
+ override def init = new TreeSet(RB.delete(tree, lastKey))
+
+ override def drop(n: Int) = {
+ if (n <= 0) this
+ else if (n >= size) empty
+ else newSet(RB.drop(tree, n))
+ }
- def this()(implicit ordering: Ordering[A]) = this(0, null)(ordering)
+ override def take(n: Int) = {
+ if (n <= 0) empty
+ else if (n >= size) this
+ else newSet(RB.take(tree, n))
+ }
- protected val tree: RedBlack[A]#Tree[Unit] = if (size == 0) Empty else t
+ override def slice(from: Int, until: Int) = {
+ if (until <= from) empty
+ else if (from <= 0) take(until)
+ else if (until >= size) drop(from)
+ else newSet(RB.slice(tree, from, until))
+ }
- private def newSet(s: Int, t: RedBlack[A]#Tree[Unit]) = new TreeSet[A](s, t)
+ override def dropRight(n: Int) = take(size - n)
+ override def takeRight(n: Int) = drop(size - n)
+ override def splitAt(n: Int) = (take(n), drop(n))
+
+ private[this] def countWhile(p: A => Boolean): Int = {
+ var result = 0
+ val it = iterator
+ while (it.hasNext && p(it.next)) result += 1
+ result
+ }
+ override def dropWhile(p: A => Boolean) = drop(countWhile(p))
+ override def takeWhile(p: A => Boolean) = take(countWhile(p))
+ override def span(p: A => Boolean) = splitAt(countWhile(p))
+
+ @deprecated("use `ordering.lt` instead", "2.10")
+ def isSmaller(x: A, y: A) = compare(x,y) < 0
+
+ def this()(implicit ordering: Ordering[A]) = this(null)(ordering)
+
+ private def newSet(t: RB.Tree[A, Unit]) = new TreeSet[A](t)
/** A factory to create empty sets of the same type of keys.
*/
@@ -70,10 +112,7 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @param elem a new element to add.
* @return a new $coll containing `elem` and all the elements of this $coll.
*/
- def + (elem: A): TreeSet[A] = {
- val newsize = if (tree.lookup(elem).isEmpty) size + 1 else size
- newSet(newsize, tree.update(elem, ()))
- }
+ def + (elem: A): TreeSet[A] = newSet(RB.update(tree, elem, ()))
/** A new `TreeSet` with the entry added is returned,
* assuming that elem is <em>not</em> in the TreeSet.
@@ -82,8 +121,8 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @return a new $coll containing `elem` and all the elements of this $coll.
*/
def insert(elem: A): TreeSet[A] = {
- assert(tree.lookup(elem).isEmpty)
- newSet(size + 1, tree.update(elem, ()))
+ assert(!RB.contains(tree, elem))
+ newSet(RB.update(tree, elem, ()))
}
/** Creates a new `TreeSet` with the entry removed.
@@ -92,31 +131,31 @@ class TreeSet[A](override val size: Int, t: RedBlack[A]#Tree[Unit])
* @return a new $coll containing all the elements of this $coll except `elem`.
*/
def - (elem:A): TreeSet[A] =
- if (tree.lookup(elem).isEmpty) this
- else newSet(size - 1, tree delete elem)
+ if (!RB.contains(tree, elem)) this
+ else newSet(RB.delete(tree, elem))
/** Checks if this set contains element `elem`.
*
* @param elem the element to check for membership.
* @return true, iff `elem` is contained in this set.
*/
- def contains(elem: A): Boolean = !tree.lookup(elem).isEmpty
+ def contains(elem: A): Boolean = RB.contains(tree, elem)
/** Creates a new iterator over all elements contained in this
* object.
*
* @return the new iterator
*/
- def iterator: Iterator[A] = tree.toStream.iterator map (_._1)
+ def iterator: Iterator[A] = RB.keysIterator(tree)
- override def toStream: Stream[A] = tree.toStream map (_._1)
+ override def foreach[U](f: A => U) = RB.foreachKey(tree, f)
- override def foreach[U](f: A => U) = tree foreach { (x, y) => f(x) }
+ override def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] = newSet(RB.rangeImpl(tree, from, until))
+ override def range(from: A, until: A): TreeSet[A] = newSet(RB.range(tree, from, until))
+ override def from(from: A): TreeSet[A] = newSet(RB.from(tree, from))
+ override def to(to: A): TreeSet[A] = newSet(RB.to(tree, to))
+ override def until(until: A): TreeSet[A] = newSet(RB.until(tree, until))
- override def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] = {
- val tree = this.tree.range(from, until)
- newSet(tree.count, tree)
- }
- override def firstKey = tree.first
- override def lastKey = tree.last
+ override def firstKey = head
+ override def lastKey = last
}
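
The range operations on TreeSet now delegate to the tree directly as well; from and to are inclusive, until is exclusive:

  import scala.collection.immutable.TreeSet

  val s = TreeSet(1, 3, 5, 7, 9)
  s.range(3, 8)   // TreeSet(3, 5, 7)
  s.from(5)       // TreeSet(5, 7, 9)
  s.to(5)         // TreeSet(1, 3, 5)
  s.until(5)      // TreeSet(1, 3)
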
diff --git a/src/library/scala/collection/mutable/HashTable.scala b/src/library/scala/collection/mutable/HashTable.scala
index cdf1b78f29..5b3e07b826 100644
--- a/src/library/scala/collection/mutable/HashTable.scala
+++ b/src/library/scala/collection/mutable/HashTable.scala
@@ -52,6 +52,10 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
*/
@transient protected var sizemap: Array[Int] = null
+ @transient var seedvalue: Int = tableSizeSeed
+
+ protected def tableSizeSeed = Integer.bitCount(table.length - 1)
+
protected def initialSize: Int = HashTable.initialSize
private def lastPopulatedIndex = {
@@ -70,14 +74,16 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
private[collection] def init[B](in: java.io.ObjectInputStream, f: (A, B) => Entry) {
in.defaultReadObject
- _loadFactor = in.readInt
+ _loadFactor = in.readInt()
assert(_loadFactor > 0)
- val size = in.readInt
+ val size = in.readInt()
tableSize = 0
assert(size >= 0)
-
- val smDefined = in.readBoolean
+
+ seedvalue = in.readInt()
+
+ val smDefined = in.readBoolean()
table = new Array(capacity(sizeForThreshold(_loadFactor, size)))
threshold = newThreshold(_loadFactor, table.size)
@@ -86,7 +92,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
var index = 0
while (index < size) {
- addEntry(f(in.readObject.asInstanceOf[A], in.readObject.asInstanceOf[B]))
+ addEntry(f(in.readObject().asInstanceOf[A], in.readObject().asInstanceOf[B]))
index += 1
}
}
@@ -103,6 +109,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
out.defaultWriteObject
out.writeInt(_loadFactor)
out.writeInt(tableSize)
+ out.writeInt(seedvalue)
out.writeBoolean(isSizeMapDefined)
foreachEntry { entry =>
out.writeObject(entry.key)
@@ -314,7 +321,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
// this is of crucial importance when populating the table in parallel
protected final def index(hcode: Int) = {
val ones = table.length - 1
- val improved = improve(hcode)
+ val improved = improve(hcode, seedvalue)
val shifted = (improved >> (32 - java.lang.Integer.bitCount(ones))) & ones
shifted
}
@@ -325,6 +332,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
table = c.table
tableSize = c.tableSize
threshold = c.threshold
+ seedvalue = c.seedvalue
sizemap = c.sizemap
}
if (alwaysInitSizeMap && sizemap == null) sizeMapInitAndRebuild
@@ -335,6 +343,7 @@ trait HashTable[A, Entry >: Null <: HashEntry[A, Entry]] extends HashTable.HashU
table,
tableSize,
threshold,
+ seedvalue,
sizemap
)
}
@@ -368,7 +377,7 @@ private[collection] object HashTable {
protected def elemHashCode(key: KeyType) = key.##
- protected final def improve(hcode: Int) = {
+ protected final def improve(hcode: Int, seed: Int) = {
/* Murmur hash
* m = 0x5bd1e995
* r = 24
@@ -396,7 +405,7 @@ private[collection] object HashTable {
* */
var i = hcode * 0x9e3775cd
i = java.lang.Integer.reverseBytes(i)
- i * 0x9e3775cd
+ i = i * 0x9e3775cd
// a slower alternative for byte reversal:
// i = (i << 16) | (i >> 16)
// i = ((i >> 8) & 0x00ff00ff) | ((i << 8) & 0xff00ff00)
@@ -420,6 +429,11 @@ private[collection] object HashTable {
// h = h ^ (h >>> 14)
// h = h + (h << 4)
// h ^ (h >>> 10)
+
+ // the rest of the computation is due to SI-5293
+ val rotation = seed % 32
+ val rotated = (i >>> rotation) | (i << (32 - rotation))
+ rotated
}
}
@@ -442,6 +456,7 @@ private[collection] object HashTable {
val table: Array[HashEntry[A, Entry]],
val tableSize: Int,
val threshold: Int,
+ val seedvalue: Int,
val sizemap: Array[Int]
) {
import collection.DebugUtils._
@@ -452,6 +467,7 @@ private[collection] object HashTable {
append("Table: [" + arrayString(table, 0, table.length) + "]")
append("Table size: " + tableSize)
append("Load factor: " + loadFactor)
+ append("Seedvalue: " + seedvalue)
append("Threshold: " + threshold)
append("Sizemap: [" + arrayString(sizemap, 0, sizemap.length) + "]")
}
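
The seedvalue plumbing above feeds the final rotation added for SI-5293: the same hash code lands in different buckets for tables built with different seeds, which breaks adversarial or accidental collision patterns. The rotation in isolation:

  // Same computation as the tail of improve(hcode, seed) in the hunk above.
  def rotate(i: Int, seed: Int): Int = {
    val rotation = seed % 32
    (i >>> rotation) | (i << (32 - rotation))
  }
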
diff --git a/src/library/scala/collection/parallel/mutable/ParHashMap.scala b/src/library/scala/collection/parallel/mutable/ParHashMap.scala
index 15ffd3fdd2..21a5b05749 100644
--- a/src/library/scala/collection/parallel/mutable/ParHashMap.scala
+++ b/src/library/scala/collection/parallel/mutable/ParHashMap.scala
@@ -160,10 +160,11 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
import collection.parallel.tasksupport._
private var mask = ParHashMapCombiner.discriminantmask
private var nonmasklen = ParHashMapCombiner.nonmasklength
+ private var seedvalue = 27
def +=(elem: (K, V)) = {
sz += 1
- val hc = improve(elemHashCode(elem._1))
+ val hc = improve(elemHashCode(elem._1), seedvalue)
val pos = (hc >>> nonmasklen)
if (buckets(pos) eq null) {
// initialize bucket
@@ -176,7 +177,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
def result: ParHashMap[K, V] = if (size >= (ParHashMapCombiner.numblocks * sizeMapBucketSize)) { // 1024
// construct table
- val table = new AddingHashTable(size, tableLoadFactor)
+ val table = new AddingHashTable(size, tableLoadFactor, seedvalue)
val bucks = buckets.map(b => if (b ne null) b.headPtr else null)
val insertcount = executeAndWaitResult(new FillBlocks(bucks, table, 0, bucks.length))
table.setSize(insertcount)
@@ -210,11 +211,12 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
* and true if the key was successfully inserted. It does not update the number of elements
* in the table.
*/
- private[ParHashMapCombiner] class AddingHashTable(numelems: Int, lf: Int) extends HashTable[K, DefaultEntry[K, V]] {
+ private[ParHashMapCombiner] class AddingHashTable(numelems: Int, lf: Int, _seedvalue: Int) extends HashTable[K, DefaultEntry[K, V]] {
import HashTable._
_loadFactor = lf
table = new Array[HashEntry[K, DefaultEntry[K, V]]](capacity(sizeForThreshold(_loadFactor, numelems)))
tableSize = 0
+ seedvalue = _seedvalue
threshold = newThreshold(_loadFactor, table.length)
sizeMapInit(table.length)
def setSize(sz: Int) = tableSize = sz
@@ -285,7 +287,7 @@ extends collection.parallel.BucketCombiner[(K, V), ParHashMap[K, V], DefaultEntr
insertcount
}
private def assertCorrectBlock(block: Int, k: K) {
- val hc = improve(elemHashCode(k))
+ val hc = improve(elemHashCode(k), seedvalue)
if ((hc >>> nonmasklen) != block) {
println(hc + " goes to " + (hc >>> nonmasklen) + ", while expected block is " + block)
assert((hc >>> nonmasklen) == block)
diff --git a/src/library/scala/concurrent/Awaitable.scala b/src/library/scala/concurrent/Awaitable.scala
new file mode 100644
index 0000000000..c38e668f30
--- /dev/null
+++ b/src/library/scala/concurrent/Awaitable.scala
@@ -0,0 +1,24 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import scala.annotation.implicitNotFound
+import scala.util.Duration
+
+
+
+trait Awaitable[+T] {
+ @implicitNotFound(msg = "Waiting must be done by calling `blocking(timeout) b`, where `b` is the `Awaitable` object or a potentially blocking piece of code.")
+ def await(atMost: Duration)(implicit canawait: CanAwait): T
+}
+
+
+
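
A hedged sketch of implementing the new trait: a value that is already available simply returns it, ignoring the timeout. It assumes CanAwait is the capability token defined alongside this commit in scala.concurrent:

  import scala.concurrent.{ Awaitable, CanAwait }
  import scala.util.Duration

  class Done[T](result: T) extends Awaitable[T] {
    def await(atMost: Duration)(implicit canawait: CanAwait): T = result
  }
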
diff --git a/src/library/scala/concurrent/Channel.scala b/src/library/scala/concurrent/Channel.scala
index 43d684641e..e79f76430f 100644
--- a/src/library/scala/concurrent/Channel.scala
+++ b/src/library/scala/concurrent/Channel.scala
@@ -23,7 +23,7 @@ class Channel[A] {
private var written = new LinkedList[A] // FIFO buffer, realized through
private var lastWritten = written // aliasing of a linked list
private var nreaders = 0
-
+
/**
* @param x ...
*/
@@ -33,7 +33,7 @@ class Channel[A] {
lastWritten = lastWritten.next
if (nreaders > 0) notify()
}
-
+
def read: A = synchronized {
while (written.next == null) {
try {
@@ -46,4 +46,5 @@ class Channel[A] {
written = written.next
x
}
+
}
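The changes to `Channel` are whitespace only; the class itself is a classic monitor-style unbounded FIFO where `write` appends and wakes a reader and `read` blocks until an element is available. A short usage sketch:

  object ChannelDemo extends App {
    val ch = new scala.concurrent.Channel[Int]

    val consumer = new Thread(new Runnable {
      def run() { println("got " + ch.read + " then " + ch.read) }
    })
    consumer.start()     // blocks inside ch.read until something is written

    ch write 1           // wakes the blocked reader
    ch write 2
    consumer.join()
  }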
diff --git a/src/library/scala/concurrent/ConcurrentPackageObject.scala b/src/library/scala/concurrent/ConcurrentPackageObject.scala
new file mode 100644
index 0000000000..6aacd53de2
--- /dev/null
+++ b/src/library/scala/concurrent/ConcurrentPackageObject.scala
@@ -0,0 +1,103 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+import scala.util.{ Duration, Try, Success, Failure }
+import ConcurrentPackageObject._
+
+/** This package object contains primitives for concurrent and parallel programming.
+ */
+abstract class ConcurrentPackageObject {
+ /** A global execution environment for executing lightweight tasks.
+ */
+ lazy val executionContext =
+ new impl.ExecutionContextImpl(java.util.concurrent.Executors.newCachedThreadPool())
+
+ /** A global service for scheduling tasks for execution.
+ */
+ // lazy val scheduler =
+ // new default.SchedulerImpl
+
+ val handledFutureException: PartialFunction[Throwable, Throwable] = {
+ case t: Throwable if isFutureThrowable(t) => t
+ }
+
+ // TODO rename appropriately and make public
+ private[concurrent] def isFutureThrowable(t: Throwable) = t match {
+ case e: Error => false
+ case t: scala.util.control.ControlThrowable => false
+ case i: InterruptedException => false
+ case _ => true
+ }
+
+ private[concurrent] def resolve[T](source: Try[T]): Try[T] = source match {
+ case Failure(t: scala.runtime.NonLocalReturnControl[_]) => Success(t.value.asInstanceOf[T])
+ case Failure(t: scala.util.control.ControlThrowable) => Failure(new ExecutionException("Boxed ControlThrowable", t))
+ case Failure(t: InterruptedException) => Failure(new ExecutionException("Boxed InterruptedException", t))
+ case Failure(e: Error) => Failure(new ExecutionException("Boxed Error", e))
+ case _ => source
+ }
+
+ private[concurrent] def resolver[T] =
+ resolverFunction.asInstanceOf[PartialFunction[Throwable, Try[T]]]
+
+ /* concurrency constructs */
+
+ def future[T](body: =>T)(implicit execCtx: ExecutionContext = executionContext): Future[T] =
+ execCtx future body
+
+ def promise[T]()(implicit execCtx: ExecutionContext = executionContext): Promise[T] =
+ execCtx promise
+
+ /** Wraps a block of code into an awaitable object. */
+ def body2awaitable[T](body: =>T) = new Awaitable[T] {
+ def await(atMost: Duration)(implicit cb: CanAwait) = body
+ }
+
+ /** Used to block on a piece of code which potentially blocks.
+ *
+ * @param body A piece of code which contains potentially blocking or long running calls.
+ *
+ * Calling this method may throw the following exceptions:
+ * - CancellationException - if the computation was cancelled
+ * - InterruptedException - in the case that a wait within the blockable object was interrupted
+ * - TimeoutException - in the case that the blockable object timed out
+ */
+ def blocking[T](atMost: Duration)(body: =>T)(implicit execCtx: ExecutionContext): T =
+ execCtx.blocking(atMost)(body)
+
+ /** Blocks on an awaitable object.
+ *
+ * @param awaitable An object with an `await` method which runs potentially blocking or long running calls.
+ *
+ * Calling this method may throw the following exceptions:
+ * - CancellationException - if the computation was cancelled
+ * - InterruptedException - in the case that a wait within the blockable object was interrupted
+ * - TimeoutException - in the case that the blockable object timed out
+ */
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration)(implicit execCtx: ExecutionContext = executionContext): T =
+ execCtx.blocking(awaitable, atMost)
+
+ @inline implicit final def int2durationops(x: Int): DurationOps = new DurationOps(x)
+}
+
+private[concurrent] object ConcurrentPackageObject {
+ // TODO, docs, return type
+ // Note that having this in the package object led to failures when
+ // compiling a subset of sources; it seems that the wildcard is not
+ // properly handled, and you get messages like "type _$1 defined twice".
+ // This is consistent with other package object breakdowns.
+ private val resolverFunction: PartialFunction[Throwable, Try[_]] = {
+ case t: scala.runtime.NonLocalReturnControl[_] => Success(t.value)
+ case t: scala.util.control.ControlThrowable => Failure(new ExecutionException("Boxed ControlThrowable", t))
+ case t: InterruptedException => Failure(new ExecutionException("Boxed InterruptedException", t))
+ case e: Error => Failure(new ExecutionException("Boxed Error", e))
+ case t => Failure(t)
+ }
+}
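Taken together these are the 2.10-style entry points: `future { ... }` starts an asynchronous computation on the (default) execution context, and the `blocking` overloads mark regions that may park a thread. A usage sketch against this in-development API, assuming the default `executionContext` above is picked up by the default implicit arguments:

  import scala.concurrent._
  import scala.util.Duration

  object PrimitivesDemo extends App {
    val f: Future[Int] = future { 21 * 2 }   // runs on executionContext

    f onSuccess { case v => println("computed " + v) }

    // Block the calling thread on the future, for at most ~1 second:
    val v = blocking(f, Duration.fromNanos(1000000000L))
    println(v)
  }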
diff --git a/src/library/scala/concurrent/DelayedLazyVal.scala b/src/library/scala/concurrent/DelayedLazyVal.scala
index e308c3b5a6..0b7f54a27a 100644
--- a/src/library/scala/concurrent/DelayedLazyVal.scala
+++ b/src/library/scala/concurrent/DelayedLazyVal.scala
@@ -8,7 +8,6 @@
package scala.concurrent
-import ops.future
/** A `DelayedLazyVal` is a wrapper for lengthy computations which have a
* valid partially computed result.
@@ -27,21 +26,23 @@ import ops.future
class DelayedLazyVal[T](f: () => T, body: => Unit) {
@volatile private[this] var _isDone = false
private[this] lazy val complete = f()
-
+
/** Whether the computation is complete.
*
* @return true if the computation is complete.
*/
def isDone = _isDone
-
+
/** The current result of f(), or the final result if complete.
*
* @return the current value
*/
def apply(): T = if (isDone) complete else f()
-
- future {
+
+ // TODO replace with scala.concurrent.future { ... }
+ ops.future {
body
_isDone = true
}
+
}
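The change only swaps the bare `future` call for the (now deprecated) `ops.future`, freeing the `future` name for the new package object; usage of the class is unchanged. A sketch:

  object DelayedDemo extends App {
    @volatile var partial = List.empty[Int]

    // f() returns the current partial result; the body computes the rest.
    val accumulated = new scala.concurrent.DelayedLazyVal(
      () => partial,
      { (1 to 5) foreach { i => partial ::= i; Thread.sleep(10) } }
    )

    while (!accumulated.isDone) Thread.sleep(5)  // poll until complete
    println(accumulated())                        // the final value of partial
  }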
diff --git a/src/library/scala/concurrent/ExecutionContext.scala b/src/library/scala/concurrent/ExecutionContext.scala
new file mode 100644
index 0000000000..99cd264ac5
--- /dev/null
+++ b/src/library/scala/concurrent/ExecutionContext.scala
@@ -0,0 +1,132 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import java.util.concurrent.atomic.{ AtomicInteger }
+import java.util.concurrent.{ Executors, Future => JFuture, Callable }
+import scala.util.Duration
+import scala.util.{ Try, Success, Failure }
+import scala.concurrent.forkjoin.{ ForkJoinPool, RecursiveTask => FJTask, RecursiveAction, ForkJoinWorkerThread }
+import scala.collection.generic.CanBuildFrom
+import collection._
+
+
+
+trait ExecutionContext {
+
+ protected implicit object CanAwaitEvidence extends CanAwait
+
+ def execute(runnable: Runnable): Unit
+
+ def execute[U](body: () => U): Unit
+
+ def promise[T]: Promise[T]
+
+ def future[T](body: Callable[T]): Future[T] = future(body.call())
+
+ def future[T](body: => T): Future[T]
+
+ def blocking[T](atMost: Duration)(body: =>T): T
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T
+
+ def reportFailure(t: Throwable): Unit
+
+ /* implementations follow */
+
+ private implicit val executionContext = this
+
+ def keptPromise[T](result: T): Promise[T] = {
+ val p = promise[T]
+ p success result
+ }
+
+ def brokenPromise[T](t: Throwable): Promise[T] = {
+ val p = promise[T]
+ p failure t
+ }
+
+ /** TODO some docs
+ *
+ */
+ def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]]): Future[Coll[T]] = {
+ import nondeterministic._
+ val buffer = new mutable.ArrayBuffer[T]
+ val counter = new AtomicInteger(1) // how else could we do this?
+ val p: Promise[Coll[T]] = promise[Coll[T]] // we need an implicit execctx in the signature
+ var idx = 0
+
+ def tryFinish() = if (counter.decrementAndGet() == 0) {
+ val builder = cbf(futures)
+ builder ++= buffer
+ p success builder.result
+ }
+
+ for (f <- futures) {
+ val currentIndex = idx
+ buffer += null.asInstanceOf[T]
+ counter.incrementAndGet()
+ f onComplete {
+ case Failure(t) =>
+ p tryFailure t
+ case Success(v) =>
+ buffer(currentIndex) = v
+ tryFinish()
+ }
+ idx += 1
+ }
+
+ tryFinish()
+
+ p.future
+ }
+
+ /** TODO some docs
+ *
+ */
+ def any[T](futures: Traversable[Future[T]]): Future[T] = {
+ val p = promise[T]
+ val completeFirst: Try[T] => Unit = elem => p tryComplete elem
+
+ futures foreach (_ onComplete completeFirst)
+
+ p.future
+ }
+
+ /** TODO some docs
+ *
+ */
+ def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean): Future[Option[T]] = {
+ if (futures.isEmpty) Promise.kept[Option[T]](None).future
+ else {
+ val result = promise[Option[T]]
+ val count = new AtomicInteger(futures.size)
+ val search: Try[T] => Unit = {
+ v => v match {
+ case Success(r) => if (predicate(r)) result trySuccess Some(r)
+ case _ =>
+ }
+ if (count.decrementAndGet() == 0) result trySuccess None
+ }
+
+ futures.foreach(_ onComplete search)
+
+ result.future
+ }
+ }
+
+}
+
+
+sealed trait CanAwait
+
+
+
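`all`, `any` and `find` are implemented once on the context so every implementation inherits them; note that `all`'s counter starts at 1 and is decremented one extra time after the registration loop, so the promise cannot fire before every future has been wired up. A usage sketch via the `Future` companion's forwarders introduced below, assuming the `CanBuildFrom` implicits resolve as intended:

  import scala.concurrent._

  object CombinatorDemo extends App {
    implicit val ec = executionContext   // the global context from the package object

    val fs = List(future { 1 }, future { 2 }, future { 3 })

    Future.all(fs)  onSuccess { case xs => println(xs.sum) }          // 6
    Future.any(fs)  onSuccess { case x  => println("first: " + x) }   // 1, 2 or 3
    Future.find(fs)(_ % 2 == 0) onSuccess { case r => println(r) }    // Some(2)
  }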
diff --git a/src/library/scala/concurrent/Future.scala b/src/library/scala/concurrent/Future.scala
new file mode 100644
index 0000000000..73f76bbbfb
--- /dev/null
+++ b/src/library/scala/concurrent/Future.scala
@@ -0,0 +1,492 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+
+
+import java.util.concurrent.{ ConcurrentLinkedQueue, TimeUnit, Callable }
+import java.util.concurrent.TimeUnit.{ NANOSECONDS => NANOS, MILLISECONDS => MILLIS }
+import java.lang.{ Iterable => JIterable }
+import java.util.{ LinkedList => JLinkedList }
+import java.{ lang => jl }
+import java.util.concurrent.atomic.{ AtomicReferenceFieldUpdater, AtomicInteger, AtomicBoolean }
+
+import scala.util.{ Timeout, Duration, Try, Success, Failure }
+import scala.Option
+
+import scala.annotation.tailrec
+import scala.collection.mutable.Stack
+import scala.collection.mutable.Builder
+import scala.collection.generic.CanBuildFrom
+
+
+
+/** The trait that represents futures.
+ *
+ * Asynchronous computations that yield futures are created with the `future` call:
+ *
+ * {{{
+ * val s = "Hello"
+ * val f: Future[String] = future {
+ * s + " future!"
+ * }
+ * f onSuccess {
+ * case msg => println(msg)
+ * }
+ * }}}
+ *
+ * @author Philipp Haller, Heather Miller, Aleksandar Prokopec, Viktor Klang
+ *
+ * @define multipleCallbacks
+ * Multiple callbacks may be registered; there is no guarantee that they will be
+ * executed in a particular order.
+ *
+ * @define caughtThrowables
+ * The future may contain a throwable object and this means that the future failed.
+ * Futures obtained through combinators have the same exception as the future they were obtained from.
+ * The following throwable objects are not contained in the future:
+ * - `Error` - errors are not contained within futures
+ * - `InterruptedException` - not contained within futures
+ * - all `scala.util.control.ControlThrowable` except `NonLocalReturnControl` - not contained within futures
+ *
+ * Instead, the future is completed with an ExecutionException with one of the exceptions above
+ * as the cause.
+ * If a future is failed with a `scala.runtime.NonLocalReturnControl`,
+ * it is completed with the value stored in that throwable instead.
+ *
+ * @define nonDeterministic
+ * Note: using this method yields nondeterministic dataflow programs.
+ *
+ * @define forComprehensionExamples
+ * Example:
+ *
+ * {{{
+ * val f = future { 5 }
+ * val g = future { 3 }
+ * val h = for {
+ * x: Int <- f // returns Future(5)
+ * y: Int <- g // returns Future(3)
+ * } yield x + y
+ * }}}
+ *
+ * is translated to:
+ *
+ * {{{
+ * f flatMap { (x: Int) => g map { (y: Int) => x + y } }
+ * }}}
+ */
+trait Future[+T] extends Awaitable[T] {
+self =>
+
+ /* Callbacks */
+
+ /** When this future is completed successfully (i.e. with a value),
+ * apply the provided partial function to the value if the partial function
+ * is defined at that value.
+ *
+ * If the future has already been completed with a value,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * $multipleCallbacks
+ */
+ def onSuccess[U](pf: PartialFunction[T, U]): this.type = onComplete {
+ case Failure(t) => // do nothing
+ case Success(v) => if (pf isDefinedAt v) pf(v) else { /*do nothing*/ }
+ }
+
+ /** When this future is completed with a failure (i.e. with a throwable),
+ * apply the provided callback to the throwable.
+ *
+ * $caughtThrowables
+ *
+ * If the future has already been completed with a failure,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * Will not be called if the future is completed with a value.
+ *
+ * $multipleCallbacks
+ */
+ def onFailure[U](callback: PartialFunction[Throwable, U]): this.type = onComplete {
+ case Failure(t) => if (isFutureThrowable(t) && callback.isDefinedAt(t)) callback(t) else { /*do nothing*/ }
+ case Success(v) => // do nothing
+ }
+
+ /** When this future is completed, either through an exception, a timeout, or a value,
+ * apply the provided function.
+ *
+ * If the future has already been completed,
+ * this will either be applied immediately or be scheduled asynchronously.
+ *
+ * $multipleCallbacks
+ */
+ def onComplete[U](func: Try[T] => U): this.type
+
+
+ /* Miscellaneous */
+
+ /** Creates a new promise.
+ */
+ def newPromise[S]: Promise[S]
+
+
+ /* Projections */
+
+ /** Returns a failed projection of this future.
+ *
+ * The failed projection is a future holding a value of type `Throwable`.
+ *
+ * It is completed with a value which is the throwable of the original future
+ * in case the original future is failed.
+ *
+ * It is failed with a `NoSuchElementException` if the original future is completed successfully.
+ *
+ * Blocking on this future returns a value if the original future failed with an exception,
+ * and throws a corresponding `NoSuchElementException` if the original future completed successfully.
+ */
+ def failed: Future[Throwable] = {
+ def noSuchElem(v: T) =
+ new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + v)
+
+ val p = newPromise[Throwable]
+
+ onComplete {
+ case Failure(t) => p success t
+ case Success(v) => p failure noSuchElem(v)
+ }
+
+ p.future
+ }
+
+
+ /* Monadic operations */
+
+ /** Asynchronously processes the value in the future once the value becomes available.
+ *
+ * Will not be called if the future fails.
+ */
+ def foreach[U](f: T => U): Unit = onComplete {
+ case Success(r) => f(r)
+ case Failure(_) => // do nothing
+ }
+
+ /** Creates a new future by applying a function to the successful result of
+ * this future. If this future is completed with an exception then the new
+ * future will also contain this exception.
+ *
+ * $forComprehensionExamples
+ */
+ def map[S](f: T => S): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try p success f(v)
+ catch {
+ case t => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by applying a function to the successful result of
+ * this future, and returns the result of the function as the new future.
+ * If this future is completed with an exception then the new future will
+ * also contain this exception.
+ *
+ * $forComprehensionExamples
+ */
+ def flatMap[S](f: T => Future[S]): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ f(v) onComplete {
+ case Failure(t) => p failure t
+ case Success(v) => p success v
+ }
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by filtering the value of the current future with a predicate.
+ *
+ * If the current future contains a value which satisfies the predicate, the new future will also hold that value.
+ * Otherwise, the resulting future will fail with a `NoSuchElementException`.
+ *
+ * If the current future fails or times out, the resulting future also fails or times out, respectively.
+ *
+ * Example:
+ * {{{
+ * val f = future { 5 }
+ * val g = f filter { _ % 2 == 1 }
+ * val h = f filter { _ % 2 == 0 }
+ * await(0) g // evaluates to 5
+ * await(0) h // throws a NoSuchElementException
+ * }}}
+ */
+ def filter(pred: T => Boolean): Future[T] = {
+ val p = newPromise[T]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ if (pred(v)) p success v
+ else p failure new NoSuchElementException("Future.filter predicate is not satisfied by: " + v)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future by mapping the value of the current future if the given partial function is defined at that value.
+ *
+ * If the current future contains a value for which the partial function is defined, the new future will also hold that value.
+ * Otherwise, the resulting future will fail with a `NoSuchElementException`.
+ *
+ * If the current future fails or times out, the resulting future also fails or times out, respectively.
+ *
+ * Example:
+ * {{{
+ * val f = future { -5 }
+ * val g = f collect {
+ * case x if x < 0 => -x
+ * }
+ * val h = f collect {
+ * case x if x > 0 => x * 2
+ * }
+ * await(0) g // evaluates to 5
+ * await(0) h // throws a NoSuchElementException
+ * }}}
+ */
+ def collect[S](pf: PartialFunction[T, S]): Future[S] = {
+ val p = newPromise[S]
+
+ onComplete {
+ case Failure(t) => p failure t
+ case Success(v) =>
+ try {
+ if (pf.isDefinedAt(v)) p success pf(v)
+ else p failure new NoSuchElementException("Future.collect partial function is not defined at: " + v)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ }
+
+ p.future
+ }
+
+ /** Creates a new future that will handle any matching throwable that this
+ * future might contain. If there is no match, or if this future contains
+ * a valid result then the new future will contain the same.
+ *
+ * Example:
+ *
+ * {{{
+ * future (6 / 0) recover { case e: ArithmeticException ⇒ 0 } // result: 0
+ * future (6 / 0) recover { case e: NotFoundException ⇒ 0 } // result: exception
+ * future (6 / 2) recover { case e: ArithmeticException ⇒ 0 } // result: 3
+ * }}}
+ */
+ def recover[U >: T](pf: PartialFunction[Throwable, U]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) if pf isDefinedAt t =>
+ try { p success pf(t) }
+ catch { case t: Throwable => p complete resolver(t) }
+ case otherwise => p complete otherwise
+ }
+
+ p.future
+ }
+
+ /** Creates a new future that will handle any matching throwable that this
+ * future might contain by assigning it a value of another future.
+ *
+ * If there is no match, or if this future contains
+ * a valid result then the new future will contain the same result.
+ *
+ * Example:
+ *
+ * {{{
+ * val f = future { Int.MaxValue }
+ * future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue
+ * }}}
+ */
+ def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) if pf isDefinedAt t =>
+ try {
+ p completeWith pf(t)
+ } catch {
+ case t: Throwable => p complete resolver(t)
+ }
+ case otherwise => p complete otherwise
+ }
+
+ p.future
+ }
+
+ /** Zips the values of `this` and `that` future, and creates
+ * a new future holding the tuple of their results.
+ *
+ * If `this` future fails, the resulting future is failed
+ * with the throwable stored in `this`.
+ * Otherwise, if `that` future fails, the resulting future is failed
+ * with the throwable stored in `that`.
+ */
+ def zip[U](that: Future[U]): Future[(T, U)] = {
+ val p = newPromise[(T, U)]
+
+ this onComplete {
+ case Failure(t) => p failure t
+ case Success(r) => that onSuccess {
+ case r2 => p success ((r, r2))
+ }
+ }
+
+ that onFailure {
+ case f => p failure f
+ }
+
+ p.future
+ }
+
+ /** Creates a new future which holds the result of this future if it was completed successfully, or, if not,
+ * the result of `that` future if `that` is completed successfully.
+ * If both futures are failed, the resulting future holds the throwable object of the first future.
+ *
+ * Using this method will not cause concurrent programs to become nondeterministic.
+ *
+ * Example:
+ * {{{
+ * val f = future { sys.error("failed") }
+ * val g = future { 5 }
+ * val h = f fallbackTo g
+ * await(0) h // evaluates to 5
+ * }}}
+ */
+ def fallbackTo[U >: T](that: Future[U]): Future[U] = {
+ val p = newPromise[U]
+
+ onComplete {
+ case Failure(t) => that onComplete {
+ case Failure(_) => p failure t
+ case Success(v) => p success v
+ }
+ case Success(v) => p success v
+ }
+
+ p.future
+ }
+
+ /** Applies the side-effecting function to the result of this future, and returns
+ * a new future with the result of this future.
+ *
+ * This method allows one to enforce that the callbacks are executed in a
+ * specified order.
+ *
+ * Note that if one of the chained `andThen` callbacks throws
+ * an exception, that exception is not propagated to the subsequent `andThen`
+ * callbacks. Instead, the subsequent `andThen` callbacks are given the original
+ * value of this future.
+ *
+ * The following example prints out `5`:
+ *
+ * {{{
+ * val f = future { 5 }
+ * f andThen {
+ * case r => sys.error("runtime exception")
+ * } andThen {
+ * case Failure(t) => println(t)
+ * case Success(v) => println(v)
+ * }
+ * }}}
+ */
+ def andThen[U](pf: PartialFunction[Try[T], U]): Future[T] = {
+ val p = newPromise[T]
+
+ onComplete {
+ case r =>
+ try if (pf isDefinedAt r) pf(r)
+ finally p complete r
+ }
+
+ p.future
+ }
+
+ /** Creates a new future which holds the result of either this future or `that` future, depending on
+ * which future was completed first.
+ *
+ * $nonDeterministic
+ *
+ * Example:
+ * {{{
+ * val f = future { sys.error("failed") }
+ * val g = future { 5 }
+ * val h = f either g
+ * await(0) h // evaluates to either 5 or throws a runtime exception
+ * }}}
+ */
+ def either[U >: T](that: Future[U]): Future[U] = {
+ val p = self.newPromise[U]
+
+ val completePromise: PartialFunction[Try[U], _] = {
+ case Failure(t) => p tryFailure t
+ case Success(v) => p trySuccess v
+ }
+
+ self onComplete completePromise
+ that onComplete completePromise
+
+ p.future
+ }
+
+}
+
+
+
+/** TODO some docs
+ *
+ * @define nonDeterministic
+ * Note: using this method yields nondeterministic dataflow programs.
+ */
+object Future {
+
+ // TODO make more modular by encoding all other helper methods within the execution context
+ /** TODO some docs
+ */
+ def all[T, Coll[X] <: Traversable[X]](futures: Coll[Future[T]])(implicit cbf: CanBuildFrom[Coll[_], T, Coll[T]], ec: ExecutionContext): Future[Coll[T]] =
+ ec.all[T, Coll](futures)
+
+ // move this to future companion object
+ @inline def apply[T](body: =>T)(implicit executor: ExecutionContext): Future[T] = executor.future(body)
+
+ def any[T](futures: Traversable[Future[T]])(implicit ec: ExecutionContext): Future[T] = ec.any(futures)
+
+ def find[T](futures: Traversable[Future[T]])(predicate: T => Boolean)(implicit ec: ExecutionContext): Future[Option[T]] = ec.find(futures)(predicate)
+
+}
+
+
+
+
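Every combinator above follows one template: allocate a promise with `newPromise`, register an `onComplete` on the receiver that completes it, and return `p.future`. A small composed example in that style, written against the API as introduced here:

  import scala.concurrent._
  import scala.util.{ Success, Failure }

  object FutureDemo extends App {
    implicit val ec = executionContext

    val f: Future[Int] =
      future { "5" }.map(_.toInt).recover { case e: NumberFormatException => 0 }

    val h: Future[(Int, Int)] = f.filter(_ % 2 == 1).zip(future { 10 })

    h onComplete {
      case Success((odd, ten)) => println(odd + ten)    // prints 15
      case Failure(t)          => println("failed: " + t)
    }
  }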
diff --git a/src/library/scala/concurrent/FutureTaskRunner.scala b/src/library/scala/concurrent/FutureTaskRunner.scala
index c5fcde2d19..75e6299ad9 100644
--- a/src/library/scala/concurrent/FutureTaskRunner.scala
+++ b/src/library/scala/concurrent/FutureTaskRunner.scala
@@ -13,6 +13,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait FutureTaskRunner extends TaskRunner {
/** The type of the futures that the underlying task runner supports.
diff --git a/src/library/scala/concurrent/JavaConversions.scala b/src/library/scala/concurrent/JavaConversions.scala
index db3c490882..bac9d4f558 100644
--- a/src/library/scala/concurrent/JavaConversions.scala
+++ b/src/library/scala/concurrent/JavaConversions.scala
@@ -17,6 +17,7 @@ import java.util.concurrent.{ExecutorService, Executor}
*/
object JavaConversions {
+ @deprecated("Use `asExecutionContext` instead.", "2.10.0")
implicit def asTaskRunner(exec: ExecutorService): FutureTaskRunner =
new ThreadPoolRunner {
override protected def executor =
@@ -26,6 +27,7 @@ object JavaConversions {
exec.shutdown()
}
+ @deprecated("Use `asExecutionContext` instead.", "2.10.0")
implicit def asTaskRunner(exec: Executor): TaskRunner =
new TaskRunner {
type Task[T] = Runnable
@@ -46,4 +48,9 @@ object JavaConversions {
// do nothing
}
}
+
+ implicit def asExecutionContext(exec: ExecutorService): ExecutionContext = null // TODO
+
+ implicit def asExecutionContext(exec: Executor): ExecutionContext = null // TODO
+
}
diff --git a/src/library/scala/concurrent/ManagedBlocker.scala b/src/library/scala/concurrent/ManagedBlocker.scala
index 9c6f4d51d6..0b6d82e76f 100644
--- a/src/library/scala/concurrent/ManagedBlocker.scala
+++ b/src/library/scala/concurrent/ManagedBlocker.scala
@@ -12,6 +12,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Not used.", "2.10.0")
trait ManagedBlocker {
/**
diff --git a/src/library/scala/concurrent/Promise.scala b/src/library/scala/concurrent/Promise.scala
new file mode 100644
index 0000000000..f26deb77ab
--- /dev/null
+++ b/src/library/scala/concurrent/Promise.scala
@@ -0,0 +1,132 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+import scala.util.{ Try, Success, Failure }
+
+
+
+
+/** Promise is an object which can be completed with a value or failed
+ * with an exception.
+ *
+ * @define promiseCompletion
+ * If the promise has already been fulfilled, failed or has timed out,
+ * calling this method will throw an IllegalStateException.
+ *
+ * @define allowedThrowables
+ * If the throwable used to fail this promise is an error, a control exception
+ * or an interrupted exception, it will be wrapped as a cause within an
+ * `ExecutionException` which will fail the promise.
+ *
+ * @define nonDeterministic
+ * Note: Using this method may result in non-deterministic concurrent programs.
+ */
+trait Promise[T] {
+
+ import nondeterministic._
+
+ /** Future containing the value of this promise.
+ */
+ def future: Future[T]
+
+ /** Completes the promise with either an exception or a value.
+ *
+ * @param result Either the value or the exception to complete the promise with.
+ *
+ * $promiseCompletion
+ */
+ def complete(result: Try[T]): this.type = if (tryComplete(result)) this else throwCompleted
+
+ /** Tries to complete the promise with either a value or the exception.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def tryComplete(result: Try[T]): Boolean
+
+ /** Completes this promise with the specified future, once that future is completed.
+ *
+ * @return This promise
+ */
+ final def completeWith(other: Future[T]): this.type = {
+ other onComplete {
+ this complete _
+ }
+ this
+ }
+
+ /** Completes the promise with a value.
+ *
+ * @param value The value to complete the promise with.
+ *
+ * $promiseCompletion
+ */
+ def success(v: T): this.type = if (trySuccess(v)) this else throwCompleted
+
+ /** Tries to complete the promise with a value.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def trySuccess(value: T): Boolean = tryComplete(Success(value))
+
+ /** Completes the promise with an exception.
+ *
+ * @param t The throwable to complete the promise with.
+ *
+ * $allowedThrowables
+ *
+ * $promiseCompletion
+ */
+ def failure(t: Throwable): this.type = if (tryFailure(t)) this else throwCompleted
+
+ /** Tries to complete the promise with an exception.
+ *
+ * $nonDeterministic
+ *
+ * @return `false` if the promise has already been completed, `true` otherwise.
+ */
+ def tryFailure(t: Throwable): Boolean = tryComplete(Failure(t))
+
+ /** Wraps a `Throwable` in an `ExecutionException` if necessary. TODO replace with `resolver` from scala.concurrent
+ *
+ * $allowedThrowables
+ */
+ protected def wrap(t: Throwable): Throwable = t match {
+ case t: Throwable if isFutureThrowable(t) => t
+ case _ => new ExecutionException(t)
+ }
+
+ private def throwCompleted = throw new IllegalStateException("Promise already completed.")
+
+}
+
+
+
+object Promise {
+
+ def kept[T](result: T)(implicit execctx: ExecutionContext): Promise[T] =
+ execctx keptPromise result
+
+ def broken[T](t: Throwable)(implicit execctx: ExecutionContext): Promise[T] =
+ execctx brokenPromise t
+
+}
+
+
+
+
+
+
+
+
+
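A promise is the write end and its `future` the read end. The `complete`/`success`/`failure` variants throw on double completion, while the `try*` variants merely report it, which is what the nondeterministic combinators (`any`, `find`, `either`) rely on. Sketch:

  import scala.concurrent._

  object PromiseDemo extends App {
    implicit val ec = executionContext

    val p = ec.promise[Int]
    p.future onSuccess { case v => println("read end saw " + v) }

    p success 42                 // completes the future exactly once
    println(p trySuccess 43)     // false: already completed
    // p success 43              // would throw IllegalStateException
  }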
diff --git a/src/library/scala/concurrent/Scheduler.scala b/src/library/scala/concurrent/Scheduler.scala
new file mode 100644
index 0000000000..39d798e6b4
--- /dev/null
+++ b/src/library/scala/concurrent/Scheduler.scala
@@ -0,0 +1,54 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+
+import scala.util.Duration
+
+/** A service for scheduling tasks and thunks for one-time, or periodic execution.
+ */
+trait Scheduler {
+
+ /** Schedules a thunk for repeated execution with an initial delay and a frequency.
+ *
+ * @param delay the initial delay after which the thunk should be executed
+ * the first time
+ * @param frequency the frequency with which the thunk should be executed,
+ * as a time period between subsequent executions
+ */
+ def schedule(delay: Duration, frequency: Duration)(thunk: => Unit): Cancellable
+
+ /** Schedules a task for execution after a given delay.
+ *
+ * @param delay the duration after which the task should be executed
+ * @param task the task that is scheduled for execution
+ * @return a `Cancellable` that may be used to cancel the execution
+ * of the task
+ */
+ def scheduleOnce(delay: Duration, task: Runnable): Cancellable
+
+ /** Schedules a thunk for execution after a given delay.
+ *
+ * @param delay the duration after which the thunk should be executed
+ * @param thunk the thunk that is scheduled for execution
+ * @return a `Cancellable` that may be used to cancel the execution
+ * of the thunk
+ */
+ def scheduleOnce(delay: Duration)(thunk: => Unit): Cancellable
+
+}
+
+
+
+trait Cancellable {
+
+ /** Cancels the underlying task.
+ */
+ def cancel(): Unit
+
+}
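The trait is pure interface; the default implementation is still disabled in this commit, so a client sketch has to take the scheduler as a parameter. Assuming only the declarations above:

  import scala.concurrent.{ Scheduler, Cancellable }
  import scala.util.Duration

  object SchedulerDemo {
    def heartbeat(scheduler: Scheduler): Cancellable = {
      val second = Duration.fromNanos(1000000000L)
      scheduler.schedule(second, second) { println("tick") }  // repeats every second
    }
    // a caller keeps the returned Cancellable and calls .cancel() to stop the beat
  }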
diff --git a/src/library/scala/concurrent/Task.scala b/src/library/scala/concurrent/Task.scala
new file mode 100644
index 0000000000..d6f86bac31
--- /dev/null
+++ b/src/library/scala/concurrent/Task.scala
@@ -0,0 +1,13 @@
+package scala.concurrent
+
+
+
+trait Task[+T] {
+
+ def start(): Unit
+
+ def future: Future[T]
+
+}
+
+
diff --git a/src/library/scala/concurrent/TaskRunner.scala b/src/library/scala/concurrent/TaskRunner.scala
index 64e62adfd3..500d79e07f 100644
--- a/src/library/scala/concurrent/TaskRunner.scala
+++ b/src/library/scala/concurrent/TaskRunner.scala
@@ -12,6 +12,7 @@ package scala.concurrent
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait TaskRunner {
type Task[T]
diff --git a/src/library/scala/concurrent/TaskRunners.scala b/src/library/scala/concurrent/TaskRunners.scala
index 588073dc5e..7994255b25 100644
--- a/src/library/scala/concurrent/TaskRunners.scala
+++ b/src/library/scala/concurrent/TaskRunners.scala
@@ -14,6 +14,7 @@ import java.util.concurrent.{ThreadPoolExecutor, LinkedBlockingQueue, TimeUnit}
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
object TaskRunners {
implicit val threadRunner: FutureTaskRunner =
diff --git a/src/library/scala/concurrent/ThreadPoolRunner.scala b/src/library/scala/concurrent/ThreadPoolRunner.scala
index 27d8f2cc32..a3e0253634 100644
--- a/src/library/scala/concurrent/ThreadPoolRunner.scala
+++ b/src/library/scala/concurrent/ThreadPoolRunner.scala
@@ -15,6 +15,7 @@ import java.util.concurrent.{ExecutorService, Callable, TimeUnit}
*
* @author Philipp Haller
*/
+@deprecated("Use `ExecutionContext`s instead.", "2.10.0")
trait ThreadPoolRunner extends FutureTaskRunner {
type Task[T] = Callable[T] with Runnable
diff --git a/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled b/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled
new file mode 100644
index 0000000000..745d2d1a15
--- /dev/null
+++ b/src/library/scala/concurrent/default/SchedulerImpl.scala.disabled
@@ -0,0 +1,44 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+package default
+
+import scala.util.Duration
+
+private[concurrent] final class SchedulerImpl extends Scheduler {
+ private val timer =
+ new java.util.Timer(true) // the associated thread runs as a daemon
+
+ def schedule(delay: Duration, frequency: Duration)(thunk: => Unit): Cancellable = ???
+
+ def scheduleOnce(delay: Duration, task: Runnable): Cancellable = {
+ val timerTask = new java.util.TimerTask {
+ def run(): Unit =
+ task.run()
+ }
+ timer.schedule(timerTask, delay.toMillis)
+ new Cancellable {
+ def cancel(): Unit =
+ timerTask.cancel()
+ }
+ }
+
+ def scheduleOnce(delay: Duration)(task: => Unit): Cancellable = {
+ val timerTask = new java.util.TimerTask {
+ def run(): Unit =
+ task
+ }
+ timer.schedule(timerTask, delay.toMillis)
+ new Cancellable {
+ def cancel(): Unit =
+ timerTask.cancel()
+ }
+ }
+
+}
diff --git a/src/library/scala/concurrent/default/TaskImpl.scala.disabled b/src/library/scala/concurrent/default/TaskImpl.scala.disabled
new file mode 100644
index 0000000000..94e54cb372
--- /dev/null
+++ b/src/library/scala/concurrent/default/TaskImpl.scala.disabled
@@ -0,0 +1,313 @@
+package scala.concurrent
+package default
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+import scala.concurrent.forkjoin.{ ForkJoinPool, RecursiveAction, ForkJoinWorkerThread }
+import scala.util.Try
+import scala.util
+import scala.util.Duration
+import scala.annotation.tailrec
+
+
+
+private[concurrent] trait Completable[T] {
+self: Future[T] =>
+
+ val executor: ExecutionContextImpl
+
+ def newPromise[S]: Promise[S] = executor promise
+
+ type Callback = Try[T] => Any
+
+ def getState: State[T]
+
+ def casState(oldv: State[T], newv: State[T]): Boolean
+
+ protected def dispatch[U](r: Runnable) = executor execute r
+
+ protected def processCallbacks(cbs: List[Callback], r: Try[T]) =
+ for (cb <- cbs) dispatch(new Runnable {
+ override def run() = cb(r)
+ })
+
+ def future: Future[T] = self
+
+ def onComplete[U](callback: Try[T] => U): this.type = {
+ @tailrec def tryAddCallback(): Try[T] = {
+ getState match {
+ case p @ Pending(lst) =>
+ val pt = p.asInstanceOf[Pending[T]]
+ if (casState(pt, Pending(callback :: pt.callbacks))) null
+ else tryAddCallback()
+ case Success(res) => util.Success(res)
+ case Failure(t) => util.Failure(t)
+ }
+ }
+
+ val res = tryAddCallback()
+ if (res != null) dispatch(new Runnable {
+ override def run() =
+ try callback(res)
+ catch handledFutureException andThen {
+ t => Console.err.println(t)
+ }
+ })
+
+ this
+ }
+
+ def isTimedout: Boolean = getState match {
+ case Failure(ft: FutureTimeoutException) => true
+ case _ => false
+ }
+
+}
+
+private[concurrent] class PromiseImpl[T](context: ExecutionContextImpl)
+extends Promise[T] with Future[T] with Completable[T] {
+
+ val executor: scala.concurrent.default.ExecutionContextImpl = context
+
+ @volatile private var state: State[T] = _
+
+ val updater = AtomicReferenceFieldUpdater.newUpdater(classOf[PromiseImpl[T]], classOf[State[T]], "state")
+
+ updater.set(this, Pending(List()))
+
+ def casState(oldv: State[T], newv: State[T]): Boolean = {
+ updater.compareAndSet(this, oldv, newv)
+ }
+
+ def getState: State[T] = {
+ updater.get(this)
+ }
+
+ @tailrec private def tryCompleteState(completed: State[T]): List[Callback] = (getState: @unchecked) match {
+ case p @ Pending(cbs) => if (!casState(p, completed)) tryCompleteState(completed) else cbs
+ case _ => null
+ }
+
+ def tryComplete(r: Try[T]) = r match {
+ case util.Failure(t) => tryFailure(t)
+ case util.Success(v) => trySuccess(v)
+ }
+
+ override def trySuccess(value: T): Boolean = {
+ val cbs = tryCompleteState(Success(value))
+ if (cbs == null)
+ false
+ else {
+ processCallbacks(cbs, util.Success(value))
+ this.synchronized {
+ this.notifyAll()
+ }
+ true
+ }
+ }
+
+ override def tryFailure(t: Throwable): Boolean = {
+ val wrapped = wrap(t)
+ val cbs = tryCompleteState(Failure(wrapped))
+ if (cbs == null)
+ false
+ else {
+ processCallbacks(cbs, util.Failure(wrapped))
+ this.synchronized {
+ this.notifyAll()
+ }
+ true
+ }
+ }
+
+ def await(atMost: Duration)(implicit canawait: scala.concurrent.CanAwait): T = getState match {
+ case Success(res) => res
+ case Failure(t) => throw t
+ case _ =>
+ this.synchronized {
+ while (true)
+ getState match {
+ case Pending(_) => this.wait()
+ case Success(res) => return res
+ case Failure(t) => throw t
+ }
+ }
+ sys.error("unreachable")
+ }
+
+}
+
+private[concurrent] class TaskImpl[T](context: ExecutionContextImpl, body: => T)
+extends RecursiveAction with Task[T] with Future[T] with Completable[T] {
+
+ val executor: ExecutionContextImpl = context
+
+ @volatile private var state: State[T] = _
+
+ val updater = AtomicReferenceFieldUpdater.newUpdater(classOf[TaskImpl[T]], classOf[State[T]], "state")
+
+ updater.set(this, Pending(List()))
+
+ def casState(oldv: State[T], newv: State[T]): Boolean = {
+ updater.compareAndSet(this, oldv, newv)
+ }
+
+ def getState: State[T] = {
+ updater.get(this)
+ }
+
+ @tailrec private def tryCompleteState(completed: State[T]): List[Callback] = (getState: @unchecked) match {
+ case p @ Pending(cbs) => if (!casState(p, completed)) tryCompleteState(completed) else cbs
+ }
+
+ def compute(): Unit = {
+ var cbs: List[Callback] = null
+ try {
+ val res = body
+ processCallbacks(tryCompleteState(Success(res)), util.Success(res))
+ } catch {
+ case t if isFutureThrowable(t) =>
+ processCallbacks(tryCompleteState(Failure(t)), util.Failure(t))
+ case t =>
+ val ee = new ExecutionException(t)
+ processCallbacks(tryCompleteState(Failure(ee)), util.Failure(ee))
+ throw t
+ }
+ }
+
+ def start(): Unit = {
+ Thread.currentThread match {
+ case fj: ForkJoinWorkerThread if fj.getPool eq executor.pool => fork()
+ case _ => executor.pool.execute(this)
+ }
+ }
+
+ // TODO FIXME: handle timeouts
+ def await(atMost: Duration): this.type =
+ await
+
+ def await: this.type = {
+ this.join()
+ this
+ }
+
+ def tryCancel(): Unit =
+ tryUnfork()
+
+ def await(atMost: Duration)(implicit canawait: CanAwait): T = {
+ join() // TODO handle timeout also
+ (updater.get(this): @unchecked) match {
+ case Success(r) => r
+ case Failure(t) => throw t
+ }
+ }
+
+}
+
+
+private[concurrent] sealed abstract class State[T]
+
+
+case class Pending[T](callbacks: List[Try[T] => Any]) extends State[T]
+
+
+case class Success[T](result: T) extends State[T]
+
+
+case class Failure[T](throwable: Throwable) extends State[T]
+
+
+private[concurrent] final class ExecutionContextImpl extends ExecutionContext {
+ import ExecutionContextImpl._
+
+ val pool = {
+ val p = new ForkJoinPool
+ p.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler {
+ def uncaughtException(t: Thread, throwable: Throwable) {
+ Console.err.println(throwable.getMessage)
+ throwable.printStackTrace(Console.err)
+ }
+ })
+ p
+ }
+
+ @inline
+ private def executeTask(task: RecursiveAction) {
+ if (Thread.currentThread.isInstanceOf[ForkJoinWorkerThread])
+ task.fork()
+ else
+ pool execute task
+ }
+
+ def execute(task: Runnable) {
+ val action = new RecursiveAction { def compute() { task.run() } }
+ executeTask(action)
+ }
+
+ def execute[U](body: () => U) {
+ val action = new RecursiveAction { def compute() { body() } }
+ executeTask(action)
+ }
+
+ def task[T](body: => T): Task[T] = {
+ new TaskImpl(this, body)
+ }
+
+ def future[T](body: => T): Future[T] = {
+ val t = task(body)
+ t.start()
+ t.future
+ }
+
+ def promise[T]: Promise[T] =
+ new PromiseImpl[T](this)
+
+ def blocking[T](atMost: Duration)(body: =>T): T = blocking(body2awaitable(body), atMost)
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T = {
+ currentExecutionContext.get match {
+ case null => awaitable.await(atMost)(null) // outside - TODO - fix timeout case
+ case x if x eq this => this.blockingCall(awaitable) // inside an execution context thread on this executor
+ case x => x.blocking(awaitable, atMost)
+ }
+ }
+
+ private def blockingCall[T](b: Awaitable[T]): T = b match {
+ case fj: TaskImpl[_] if fj.executor.pool eq pool =>
+ fj.await(Duration.fromNanos(0))
+ case _ =>
+ var res: T = null.asInstanceOf[T]
+ @volatile var blockingDone = false
+ // TODO add exception handling here!
+ val mb = new ForkJoinPool.ManagedBlocker {
+ def block() = {
+ res = b.await(Duration.fromNanos(0))(CanAwaitEvidence)
+ blockingDone = true
+ true
+ }
+ def isReleasable = blockingDone
+ }
+ ForkJoinPool.managedBlock(mb, true)
+ res
+ }
+
+ def reportFailure(t: Throwable): Unit = {}
+
+}
+
+
+object ExecutionContextImpl {
+
+ private[concurrent] def currentExecutionContext: ThreadLocal[ExecutionContext] = new ThreadLocal[ExecutionContext] {
+ override protected def initialValue = null
+ }
+
+}
+
+
+
+
+
+
+
diff --git a/src/library/scala/concurrent/impl/AbstractPromise.java b/src/library/scala/concurrent/impl/AbstractPromise.java
new file mode 100644
index 0000000000..5280d67854
--- /dev/null
+++ b/src/library/scala/concurrent/impl/AbstractPromise.java
@@ -0,0 +1,21 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl;
+
+
+
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+
+
+
+abstract class AbstractPromise {
+ private volatile Object _ref = null;
+ protected final static AtomicReferenceFieldUpdater<AbstractPromise, Object> updater =
+ AtomicReferenceFieldUpdater.newUpdater(AbstractPromise.class, Object.class, "_ref");
+}
diff --git a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
new file mode 100644
index 0000000000..af0eb66292
--- /dev/null
+++ b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
@@ -0,0 +1,134 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+
+
+import java.util.concurrent.{Callable, ExecutorService}
+import scala.concurrent.{ExecutionContext, resolver, Awaitable, body2awaitable}
+import scala.util.{ Duration, Try, Success, Failure }
+import scala.collection.mutable.Stack
+
+
+
+class ExecutionContextImpl(executorService: ExecutorService) extends ExecutionContext {
+ import ExecutionContextImpl._
+
+ def execute(runnable: Runnable): Unit = executorService match {
+ // case fj: ForkJoinPool =>
+ // TODO fork if more applicable
+ // executorService execute runnable
+ case _ =>
+ executorService execute runnable
+ }
+
+ def execute[U](body: () => U): Unit = execute(new Runnable {
+ def run() = body()
+ })
+
+ def promise[T]: Promise[T] = new Promise.DefaultPromise[T]()(this)
+
+ def future[T](body: =>T): Future[T] = {
+ val p = promise[T]
+
+ dispatchFuture {
+ () =>
+ p complete {
+ try {
+ Success(body)
+ } catch {
+ case e => resolver(e)
+ }
+ }
+ }
+
+ p.future
+ }
+
+ def blocking[T](atMost: Duration)(body: =>T): T = blocking(body2awaitable(body), atMost)
+
+ def blocking[T](awaitable: Awaitable[T], atMost: Duration): T = {
+ currentExecutionContext.get match {
+ case null => awaitable.await(atMost)(null) // outside - TODO - fix timeout case
+ case x => x.blockingCall(awaitable) // inside an execution context thread
+ }
+ }
+
+ def reportFailure(t: Throwable) = t match {
+ case e: Error => throw e // rethrow serious errors
+ case t => t.printStackTrace()
+ }
+
+ /** Only callable from the tasks running on the same execution context. */
+ private def blockingCall[T](body: Awaitable[T]): T = {
+ releaseStack()
+
+ // TODO see what to do with timeout
+ body.await(Duration.fromNanos(0))(CanAwaitEvidence)
+ }
+
+ // an optimization for batching futures
+ // TODO we should replace this with a public queue,
+ // so that it can be stolen from
+ // OR: a push to the local task queue should be so cheap that this is
+ // not even needed, but stealing is still possible
+ private val _taskStack = new ThreadLocal[Stack[() => Unit]]()
+
+ private def releaseStack(): Unit =
+ _taskStack.get match {
+ case stack if (stack ne null) && stack.nonEmpty =>
+ val tasks = stack.elems
+ stack.clear()
+ _taskStack.remove()
+ dispatchFuture(() => _taskStack.get.elems = tasks, true)
+ case null =>
+ // do nothing - there is no local batching stack anymore
+ case _ =>
+ _taskStack.remove()
+ }
+
+ private[impl] def dispatchFuture(task: () => Unit, force: Boolean = false): Unit =
+ _taskStack.get match {
+ case stack if (stack ne null) && !force => stack push task
+ case _ => this.execute(
+ new Runnable {
+ def run() {
+ try {
+ val taskStack = Stack[() => Unit](task)
+ _taskStack set taskStack
+ while (taskStack.nonEmpty) {
+ val next = taskStack.pop()
+ try {
+ next.apply()
+ } catch {
+ case e =>
+ // TODO catching all and continue isn't good for OOME
+ reportFailure(e)
+ }
+ }
+ } finally {
+ _taskStack.remove()
+ }
+ }
+ }
+ )
+ }
+
+}
+
+
+object ExecutionContextImpl {
+
+ private[concurrent] def currentExecutionContext: ThreadLocal[ExecutionContextImpl] = new ThreadLocal[ExecutionContextImpl] {
+ override protected def initialValue = null
+ }
+
+}
+
+
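`dispatchFuture` is a trampoline: the first task submitted from outside is handed to the executor, and callbacks scheduled while it runs are pushed onto a thread-local stack and drained within the same `run()`, saving one executor submission per callback. A stripped-down sketch of that idea (the real code hands the drain loop to the underlying executor instead of running it inline):

  import scala.collection.mutable.Stack

  object TrampolineDemo extends App {
    private val local = new ThreadLocal[Stack[() => Unit]]

    def dispatch(task: () => Unit) {
      val stack = local.get
      if (stack ne null) stack push task     // already draining on this thread: batch it
      else {
        val fresh = Stack(task)
        local set fresh
        try {
          while (fresh.nonEmpty) fresh.pop().apply()
        } finally local.remove()
      }
    }

    dispatch { () =>
      println("outer")
      dispatch(() => println("inner, batched on the same thread"))
    }
  }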
diff --git a/src/library/scala/concurrent/impl/Future.scala b/src/library/scala/concurrent/impl/Future.scala
new file mode 100644
index 0000000000..24d0258cc8
--- /dev/null
+++ b/src/library/scala/concurrent/impl/Future.scala
@@ -0,0 +1,89 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+import scala.concurrent.{Awaitable, ExecutionContext}
+import scala.util.{ Try, Success, Failure }
+//import scala.util.continuations._
+
+trait Future[+T] extends scala.concurrent.Future[T] with Awaitable[T] {
+
+ implicit def executor: ExecutionContextImpl
+
+ /** For use only within a Future.flow block or another compatible Delimited Continuations reset block.
+ *
+ * Returns the result of this Future without blocking, by suspending execution and storing it as a
+ * continuation until the result is available.
+ */
+ //def apply(): T @cps[Future[Any]] = shift(this flatMap (_: T => Future[Any]))
+
+ /** Tests whether this Future has been completed.
+ */
+ final def isCompleted: Boolean = value.isDefined
+
+ /** The contained value of this Future. Before this Future is completed
+ * the value will be None. After completion the value will be Some(Right(t))
+ * if it contains a valid result, or Some(Left(error)) if it contains
+ * an exception.
+ */
+ def value: Option[Try[T]]
+
+ def onComplete[U](func: Try[T] => U): this.type
+
+ /** Creates a new Future[A] which is completed with this Future's result if
+ * that conforms to A's erased type, or with a ClassCastException otherwise.
+ */
+ final def mapTo[A](implicit m: Manifest[A]) = {
+ val p = executor.promise[A]
+
+ onComplete {
+ case f @ Failure(t) => p complete f.asInstanceOf[Try[A]]
+ case Success(v) =>
+ p complete (try {
+ Success(Future.boxedType(m.erasure).cast(v).asInstanceOf[A])
+ } catch {
+ case e: ClassCastException => Failure(e)
+ })
+ }
+
+ p.future
+ }
+
+ /** Used by for-comprehensions.
+ */
+ final def withFilter(p: T => Boolean) = new FutureWithFilter[T](this, p)
+
+ final class FutureWithFilter[+A](self: Future[A], p: A => Boolean) {
+ def foreach(f: A => Unit): Unit = self filter p foreach f
+ def map[B](f: A => B) = self filter p map f
+ def flatMap[B](f: A => Future[B]) = self filter p flatMap f
+ def withFilter(q: A => Boolean): FutureWithFilter[A] = new FutureWithFilter[A](self, x ⇒ p(x) && q(x))
+ }
+
+}
+
+object Future {
+ import java.{ lang => jl }
+
+ private val toBoxed = Map[Class[_], Class[_]](
+ classOf[Boolean] -> classOf[jl.Boolean],
+ classOf[Byte] -> classOf[jl.Byte],
+ classOf[Char] -> classOf[jl.Character],
+ classOf[Short] -> classOf[jl.Short],
+ classOf[Int] -> classOf[jl.Integer],
+ classOf[Long] -> classOf[jl.Long],
+ classOf[Float] -> classOf[jl.Float],
+ classOf[Double] -> classOf[jl.Double],
+ classOf[Unit] -> classOf[scala.runtime.BoxedUnit]
+ )
+
+ def boxedType(c: Class[_]): Class[_] = {
+ if (c.isPrimitive) toBoxed(c) else c
+ }
+}
diff --git a/src/library/scala/concurrent/impl/Promise.scala b/src/library/scala/concurrent/impl/Promise.scala
new file mode 100644
index 0000000000..7ef76e1501
--- /dev/null
+++ b/src/library/scala/concurrent/impl/Promise.scala
@@ -0,0 +1,252 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.impl
+
+
+
+import java.util.concurrent.TimeUnit.{ NANOSECONDS, MILLISECONDS }
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+import scala.concurrent.{Awaitable, ExecutionContext, resolve, resolver, blocking, CanAwait, TimeoutException}
+//import scala.util.continuations._
+import scala.util.Duration
+import scala.util.Try
+import scala.util
+import scala.annotation.tailrec
+//import scala.concurrent.NonDeterministic
+
+
+
+trait Promise[T] extends scala.concurrent.Promise[T] with Future[T] {
+
+ def future = this
+
+ def newPromise[S]: Promise[S] = executor promise
+
+ // TODO refine answer and return types here from Any to type parameters
+ // then move this up in the hierarchy
+ /*
+ final def <<(value: T): Future[T] @cps[Future[Any]] = shift {
+ cont: (Future[T] => Future[Any]) =>
+ cont(complete(Right(value)))
+ }
+
+ final def <<(other: Future[T]): Future[T] @cps[Future[Any]] = shift {
+ cont: (Future[T] => Future[Any]) =>
+ val p = executor.promise[Any]
+ val thisPromise = this
+
+ thisPromise completeWith other
+ thisPromise onComplete { v =>
+ try {
+ p completeWith cont(thisPromise)
+ } catch {
+ case e => p complete resolver(e)
+ }
+ }
+
+ p.future
+ }
+ */
+ // TODO finish this once we introduce something like dataflow streams
+
+ /*
+ final def <<(stream: PromiseStreamOut[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] => Future[Any]) =>
+ val fr = executor.promise[Any]
+ val f = stream.dequeue(this)
+ f.onComplete { _ =>
+ try {
+ fr completeWith cont(f)
+ } catch {
+ case e =>
+ fr failure e
+ }
+ }
+ fr
+ }
+ */
+
+}
+
+
+object Promise {
+ def dur2long(dur: Duration): Long = if (dur.isFinite) dur.toNanos else Long.MaxValue
+
+ def EmptyPending[T](): FState[T] = emptyPendingValue.asInstanceOf[FState[T]]
+
+ /** Represents the internal state.
+ */
+ sealed trait FState[+T] { def value: Option[Try[T]] }
+
+ case class Pending[T](listeners: List[Try[T] => Any] = Nil) extends FState[T] {
+ def value: Option[Try[T]] = None
+ }
+
+ case class Success[T](value: Option[util.Success[T]] = None) extends FState[T] {
+ def result: T = value.get.get
+ }
+
+ case class Failure[T](value: Option[util.Failure[T]] = None) extends FState[T] {
+ def exception: Throwable = value.get.exception
+ }
+
+ private val emptyPendingValue = Pending[Nothing](Nil)
+
+ /** Default promise implementation.
+ */
+ class DefaultPromise[T](implicit val executor: ExecutionContextImpl) extends AbstractPromise with Promise[T] {
+ self =>
+
+ updater.set(this, Promise.EmptyPending())
+
+ protected final def tryAwait(atMost: Duration): Boolean = {
+ @tailrec
+ def awaitUnsafe(waitTimeNanos: Long): Boolean = {
+ if (value.isEmpty && waitTimeNanos > 0) {
+ val ms = NANOSECONDS.toMillis(waitTimeNanos)
+ val ns = (waitTimeNanos % 1000000l).toInt // as per object.wait spec
+ val start = System.nanoTime()
+ try {
+ synchronized {
+ while (value.isEmpty) wait(ms, ns)
+ }
+ } catch {
+ case e: InterruptedException =>
+ }
+
+ awaitUnsafe(waitTimeNanos - (System.nanoTime() - start))
+ } else
+ value.isDefined
+ }
+
+ executor.blocking(concurrent.body2awaitable(awaitUnsafe(dur2long(atMost))), Duration.fromNanos(0))
+ }
+
+ private def ready(atMost: Duration)(implicit permit: CanAwait): this.type =
+ if (value.isDefined || tryAwait(atMost)) this
+ else throw new TimeoutException("Futures timed out after [" + atMost.toMillis + "] milliseconds")
+
+ def await(atMost: Duration)(implicit permit: CanAwait): T =
+ ready(atMost).value.get match {
+ case util.Failure(e) => throw e
+ case util.Success(r) => r
+ }
+
+ def value: Option[Try[T]] = getState.value
+
+ @inline
+ private[this] final def updater = AbstractPromise.updater.asInstanceOf[AtomicReferenceFieldUpdater[AbstractPromise, FState[T]]]
+
+ @inline
+ protected final def updateState(oldState: FState[T], newState: FState[T]): Boolean = updater.compareAndSet(this, oldState, newState)
+
+ @inline
+ protected final def getState: FState[T] = updater.get(this)
+
+ def tryComplete(value: Try[T]): Boolean = {
+ val callbacks: List[Try[T] => Any] = {
+ try {
+ @tailrec
+ def tryComplete(v: Try[T]): List[Try[T] => Any] = {
+ getState match {
+ case cur @ Pending(listeners) =>
+ if (updateState(cur, if (v.isFailure) Failure(Some(v.asInstanceOf[util.Failure[T]])) else Success(Some(v.asInstanceOf[util.Success[T]])))) listeners
+ else tryComplete(v)
+ case _ => null
+ }
+ }
+ tryComplete(resolve(value))
+ } finally {
+ synchronized { notifyAll() } // notify any blockers from `tryAwait`
+ }
+ }
+
+ callbacks match {
+ case null => false
+ case cs if cs.isEmpty => true
+ case cs =>
+ executor dispatchFuture {
+ () => cs.foreach(f => notifyCompleted(f, value))
+ }
+ true
+ }
+ }
+
+ def onComplete[U](func: Try[T] => U): this.type = {
+ @tailrec // Returns whether the future has already been completed or not
+ def tryAddCallback(): Boolean = {
+ val cur = getState
+ cur match {
+ case _: Success[_] | _: Failure[_] => true
+ case p: Pending[_] =>
+ val pt = p.asInstanceOf[Pending[T]]
+ if (updateState(pt, pt.copy(listeners = func :: pt.listeners))) false else tryAddCallback()
+ }
+ }
+
+ if (tryAddCallback()) {
+ val result = value.get
+ executor dispatchFuture {
+ () => notifyCompleted(func, result)
+ }
+ }
+
+ this
+ }
+
+ private final def notifyCompleted(func: Try[T] => Any, result: Try[T]) {
+ try {
+ func(result)
+ } catch {
+ case e => executor.reportFailure(e)
+ }
+ }
+ }
+
+ /** An already completed Future is given its result at creation.
+ *
+ * Useful in Future-composition when a value to contribute is already available.
+ */
+ final class KeptPromise[T](suppliedValue: Try[T])(implicit val executor: ExecutionContextImpl) extends Promise[T] {
+ val value = Some(resolve(suppliedValue))
+
+ def tryComplete(value: Try[T]): Boolean = false
+
+ def onComplete[U](func: Try[T] => U): this.type = {
+ val completedAs = value.get
+ executor dispatchFuture {
+ () => func(completedAs)
+ }
+ this
+ }
+
+ private def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this
+
+ def await(atMost: Duration)(implicit permit: CanAwait): T = value.get match {
+ case util.Failure(e) => throw e
+ case util.Success(r) => r
+ }
+ }
+
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/library/scala/concurrent/ops.scala b/src/library/scala/concurrent/ops.scala
index 92220a8313..2cea29aefe 100644
--- a/src/library/scala/concurrent/ops.scala
+++ b/src/library/scala/concurrent/ops.scala
@@ -15,6 +15,7 @@ import scala.util.control.Exception.allCatch
*
* @author Martin Odersky, Stepan Koltsov, Philipp Haller
*/
+@deprecated("Use `future` instead.", "2.10.0")
object ops
{
val defaultRunner: FutureTaskRunner = TaskRunners.threadRunner
diff --git a/src/library/scala/concurrent/package.scala b/src/library/scala/concurrent/package.scala
new file mode 100644
index 0000000000..6a98fd50c2
--- /dev/null
+++ b/src/library/scala/concurrent/package.scala
@@ -0,0 +1,57 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala
+
+import scala.util.{ Duration, Try, Success, Failure }
+
+/** This package object contains primitives for concurrent and parallel programming.
+ */
+package object concurrent extends scala.concurrent.ConcurrentPackageObject {
+ type ExecutionException = java.util.concurrent.ExecutionException
+ type CancellationException = java.util.concurrent.CancellationException
+ type TimeoutException = java.util.concurrent.TimeoutException
+}
+
+package concurrent {
+ object await {
+ def ready[T](atMost: Duration)(awaitable: Awaitable[T])(implicit execCtx: ExecutionContext = executionContext): Awaitable[T] = {
+ try blocking(awaitable, atMost)
+ catch { case _ => }
+ awaitable
+ }
+
+ def result[T](atMost: Duration)(awaitable: Awaitable[T])(implicit execCtx: ExecutionContext = executionContext): T = {
+ blocking(awaitable, atMost)
+ }
+ }
+
+ /** Importing this object allows using some concurrency primitives
+ * on futures and promises that can yield nondeterministic programs.
+ *
+ * While program determinism is broken when using these primitives,
+ * some programs cannot be written without them (e.g. multiple client threads
+ * cannot send requests to a server thread through regular promises and futures).
+ */
+ object nondeterministic { }
+
+ /** A timeout exception.
+ *
+ * Futures are failed with a timeout exception when their timeout expires.
+ *
+ * Each timeout exception contains an origin future which originally timed out.
+ */
+ class FutureTimeoutException(origin: Future[_], message: String) extends TimeoutException(message) {
+ def this(origin: Future[_]) = this(origin, "Future timed out.")
+ }
+
+ final class DurationOps private[concurrent] (x: Int) {
+ // TODO ADD OTHERS
+    def ns = util.Duration.fromNanos(x)
+ }
+}
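A hedged usage sketch of the `await` object defined above. It assumes other pieces of the in-flux 2.10 API: `future { ... }` from the package object, the implicit default `executionContext`, and a `Duration(length, unit)` factory on `scala.util.Duration`:

{{{
import scala.concurrent._
import scala.util.Duration

val f: Future[Int] = future { (1 to 1000000).sum }
val r: Int = await.result(Duration(5, "seconds"))(f) // blocks up to 5 seconds
await.ready(Duration(5, "seconds"))(f)               // blocks, then returns the Awaitable
}}}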
diff --git a/src/library/scala/concurrent/package.scala.disabled b/src/library/scala/concurrent/package.scala.disabled
deleted file mode 100644
index 42b4bf954c..0000000000
--- a/src/library/scala/concurrent/package.scala.disabled
+++ /dev/null
@@ -1,108 +0,0 @@
-/* __ *\
-** ________ ___ / / ___ Scala API **
-** / __/ __// _ | / / / _ | (c) 2003-2011, LAMP/EPFL **
-** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
-** /____/\___/_/ |_/____/_/ | | **
-** |/ **
-\* */
-
-
-
-package scala
-
-
-
-
-/** This package object contains primitives for parallel programming.
- */
-package object concurrent {
-
- /** Performs a call which can potentially block execution.
- *
- * Example:
- * {{{
- * val lock = new ReentrantLock
- *
- * // ... do something ...
- *
- * blocking {
- * if (!lock.hasLock) lock.lock()
- * }
- * }}}
- *
- * '''Note:''' calling methods that wait arbitrary amounts of time
- * (e.g. for I/O operations or locks) may severely decrease performance
- * or even result in deadlocks. This does not include waiting for
- * results of futures.
- *
- * @tparam T the result type of the blocking operation
- * @param body the blocking operation
- * @param runner the runner used for parallel computations
- * @return the result of the potentially blocking operation
- */
- def blocking[T](body: =>T)(implicit runner: TaskRunner): T = {
- null.asInstanceOf[T]
- }
-
- /** Invokes a computation asynchronously. Does not wait for the computation
- * to finish.
- *
- * @tparam U the result type of the operation
- * @param p the computation to be invoked asynchronously
- * @param runner the runner used for parallel computations
- */
- def spawn[U](p: =>U)(implicit runner: TaskRunner): Unit = {
- }
-
- /** Starts 2 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2](b1: =>T1)(b2: =>T2)(implicit runner: TaskRunner): (T1, T2) = {
- null
- }
-
- /** Starts 3 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @tparam T3 the type of the result of 3rd the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param b3 the 3rd computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2, T3](b1: =>T1)(b2: =>T2)(b3: =>T3)(implicit runner: TaskRunner): (T1, T2, T3) = {
- null
- }
-
- /** Starts 4 parallel computations and returns once they are completed.
- *
- * $invokingPar
- *
- * @tparam T1 the type of the result of 1st the parallel computation
- * @tparam T2 the type of the result of 2nd the parallel computation
- * @tparam T3 the type of the result of 3rd the parallel computation
- * @tparam T4 the type of the result of 4th the parallel computation
- * @param b1 the 1st computation to be invoked in parallel
- * @param b2 the 2nd computation to be invoked in parallel
- * @param b3 the 3rd computation to be invoked in parallel
- * @param b4 the 4th computation to be invoked in parallel
- * @param runner the runner used for parallel computations
- * @return a tuple of results corresponding to parallel computations
- */
- def par[T1, T2, T3, T4](b1: =>T1)(b2: =>T2)(b3: =>T3)(b4: =>T4)(implicit runner: TaskRunner): (T1, T2, T3, T4) = {
- null
- }
-
-}
diff --git a/src/library/scala/package.scala b/src/library/scala/package.scala
index 9425eba232..366af34ee9 100644
--- a/src/library/scala/package.scala
+++ b/src/library/scala/package.scala
@@ -27,12 +27,10 @@ package object scala {
type NoSuchElementException = java.util.NoSuchElementException
type NumberFormatException = java.lang.NumberFormatException
type AbstractMethodError = java.lang.AbstractMethodError
+ type InterruptedException = java.lang.InterruptedException
// A dummy used by the specialization annotation.
- // Normally it's bad juju to place objects inside package objects,
- // but there's no choice here as we'd have to be AnyRef's companion
- // and defined in the same file - except there is no such file.
- object AnyRef extends Specializable {
+ val AnyRef = new Specializable {
override def toString = "object AnyRef"
}
diff --git a/src/library/scala/reflect/api/Trees.scala b/src/library/scala/reflect/api/Trees.scala
index 2c960392ec..181ce85dac 100644
--- a/src/library/scala/reflect/api/Trees.scala
+++ b/src/library/scala/reflect/api/Trees.scala
@@ -13,7 +13,7 @@ trait Trees { self: Universe =>
private[scala] var nodeCount = 0
- type Modifiers <: AbsModifiers
+ type Modifiers >: Null <: AbsModifiers
abstract class AbsModifiers {
def modifiers: Set[Modifier]
@@ -486,12 +486,15 @@ trait Trees { self: Universe =>
* A `New(t, as)` is expanded to: `(new t).<init>(as)`
*/
def New(tpt: Tree, argss: List[List[Tree]]): Tree = {
- assert(!argss.isEmpty)
// todo. we need to expose names in scala.reflect.api
-// val superRef: Tree = Select(New(tpt), nme.CONSTRUCTOR)
val superRef: Tree = Select(New(tpt), nme.CONSTRUCTOR)
- (superRef /: argss) (Apply)
+ if (argss.isEmpty) Apply(superRef, Nil)
+ else (superRef /: argss) (Apply)
}
+ /** 0-1 argument list new, based on a type.
+ */
+ def New(tpe: Type, args: Tree*): Tree =
+ New(TypeTree(tpe), List(args.toList))
/** Type annotation, eliminated by explicit outer */
case class Typed(expr: Tree, tpt: Tree)
@@ -661,6 +664,96 @@ trait Trees { self: Universe =>
val treeCopy = newLazyTreeCopier
+ def copyDefDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tparams: List[TypeDef] = null,
+ vparamss: List[List[ValDef]] = null,
+ tpt: Tree = null,
+ rhs: Tree = null
+ ): DefDef = tree match {
+ case DefDef(mods0, name0, tparams0, vparamss0, tpt0, rhs0) =>
+ treeCopy.DefDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tparams eq null) tparams0 else tparams,
+ if (vparamss eq null) vparamss0 else vparamss,
+ if (tpt eq null) tpt0 else tpt,
+ if (rhs eq null) rhs0 else rhs
+ )
+ case t =>
+ sys.error("Not a DefDef: " + t + "/" + t.getClass)
+ }
+ def copyValDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tpt: Tree = null,
+ rhs: Tree = null
+ ): ValDef = tree match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tpt eq null) tpt0 else tpt,
+ if (rhs eq null) rhs0 else rhs
+ )
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def copyClassDef(tree: Tree)(
+ mods: Modifiers = null,
+ name: Name = null,
+ tparams: List[TypeDef] = null,
+ impl: Template = null
+ ): ClassDef = tree match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(tree,
+ if (mods eq null) mods0 else mods,
+ if (name eq null) name0 else name,
+ if (tparams eq null) tparams0 else tparams,
+ if (impl eq null) impl0 else impl
+ )
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+
+ def deriveDefDef(ddef: Tree)(applyToRhs: Tree => Tree): DefDef = ddef match {
+ case DefDef(mods0, name0, tparams0, vparamss0, tpt0, rhs0) =>
+ treeCopy.DefDef(ddef, mods0, name0, tparams0, vparamss0, tpt0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a DefDef: " + t + "/" + t.getClass)
+ }
+ def deriveValDef(vdef: Tree)(applyToRhs: Tree => Tree): ValDef = vdef match {
+ case ValDef(mods0, name0, tpt0, rhs0) =>
+ treeCopy.ValDef(vdef, mods0, name0, tpt0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a ValDef: " + t + "/" + t.getClass)
+ }
+ def deriveTemplate(templ: Tree)(applyToBody: List[Tree] => List[Tree]): Template = templ match {
+ case Template(parents0, self0, body0) =>
+ treeCopy.Template(templ, parents0, self0, applyToBody(body0))
+ case t =>
+ sys.error("Not a Template: " + t + "/" + t.getClass)
+ }
+ def deriveClassDef(cdef: Tree)(applyToImpl: Template => Template): ClassDef = cdef match {
+ case ClassDef(mods0, name0, tparams0, impl0) =>
+ treeCopy.ClassDef(cdef, mods0, name0, tparams0, applyToImpl(impl0))
+ case t =>
+ sys.error("Not a ClassDef: " + t + "/" + t.getClass)
+ }
+ def deriveCaseDef(cdef: Tree)(applyToBody: Tree => Tree): CaseDef = cdef match {
+ case CaseDef(pat0, guard0, body0) =>
+ treeCopy.CaseDef(cdef, pat0, guard0, applyToBody(body0))
+ case t =>
+ sys.error("Not a CaseDef: " + t + "/" + t.getClass)
+ }
+ def deriveLabelDef(ldef: Tree)(applyToRhs: Tree => Tree): LabelDef = ldef match {
+ case LabelDef(name0, params0, rhs0) =>
+ treeCopy.LabelDef(ldef, name0, params0, applyToRhs(rhs0))
+ case t =>
+ sys.error("Not a LabelDef: " + t + "/" + t.getClass)
+ }
+
class Traverser {
protected var currentOwner: Symbol = definitions.RootClass
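The `copy*` helpers above all follow the same convention: a `null` argument means "keep the corresponding field of the original tree". Outside the compiler, the pattern looks like this self-contained sketch (illustrative types, not the reflection API):

{{{
final case class Param(name: String, tpe: String)

// `null` defaults mean "keep the original field", as in copyDefDef above.
def copyParam(p: Param)(name: String = null, tpe: String = null): Param =
  Param(
    if (name eq null) p.name else name,
    if (tpe eq null) p.tpe else tpe
  )

val p = Param("x", "Int")
val q = copyParam(p)(tpe = "Long") // Param("x", "Long"): the name is kept
}}}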
diff --git a/src/library/scala/runtime/NonLocalReturnControl.scala b/src/library/scala/runtime/NonLocalReturnControl.scala
index 8be2745086..216e3e664b 100644
--- a/src/library/scala/runtime/NonLocalReturnControl.scala
+++ b/src/library/scala/runtime/NonLocalReturnControl.scala
@@ -6,12 +6,10 @@
** |/ **
\* */
-
-
package scala.runtime
import scala.util.control.ControlThrowable
-class NonLocalReturnControl[T](val key: AnyRef, val value: T) extends ControlThrowable {
+class NonLocalReturnControl[@specialized T](val key: AnyRef, val value: T) extends ControlThrowable {
final override def fillInStackTrace(): Throwable = this
}
diff --git a/src/library/scala/sys/process/BasicIO.scala b/src/library/scala/sys/process/BasicIO.scala
index 44e573896f..edc60a1bb5 100644
--- a/src/library/scala/sys/process/BasicIO.scala
+++ b/src/library/scala/sys/process/BasicIO.scala
@@ -13,15 +13,25 @@ import processInternal._
import java.io.{ BufferedReader, InputStreamReader, FilterInputStream, FilterOutputStream }
import java.util.concurrent.LinkedBlockingQueue
import scala.collection.immutable.Stream
+import scala.annotation.tailrec
/**
* This object contains factories for [[scala.sys.process.ProcessIO]],
* which can be used to control the I/O of a [[scala.sys.process.Process]]
* when a [[scala.sys.process.ProcessBuilder]] is started with the `run`
* command.
+ *
+ * It also contains some helper methods that can be used in the creation of
+ * `ProcessIO`.
+ *
+ * It is used by other classes in the package in the implementation of various
+ * features, but can also be used by client code.
*/
object BasicIO {
+ /** Size of the buffer used in all the functions that copy data */
final val BufferSize = 8192
+
+ /** Used to separate lines in the `processFully` function that takes `Appendable`. */
final val Newline = props("line.separator")
private[process] final class Streamed[T](
@@ -52,15 +62,70 @@ object BasicIO {
def protect(out: OutputStream): OutputStream = if ((out eq stdout) || (out eq stderr)) Uncloseable(out) else out
}
+ /** Creates a `ProcessIO` from a function `String => Unit`. It can attach the
+ * process input to stdin, and it will either send the error stream to
+ * stderr, or to a `ProcessLogger`.
+ *
+ * For example, the `ProcessIO` created below will print all normal output
+ * while ignoring all error output. No input will be provided.
+ * {{{
+ * import scala.sys.process.BasicIO
+ * val errToDevNull = BasicIO(false, println(_), None)
+ * }}}
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param output A function that will be called with the process output.
+ * @param log An optional `ProcessLogger` to which the output should be
+ * sent. If `None`, output will be sent to stderr.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, output: String => Unit, log: Option[ProcessLogger]) =
new ProcessIO(input(withIn), processFully(output), getErr(log))
+ /** Creates a `ProcessIO` that appends its output to a `StringBuffer`. It can
+ * attach the process input to stdin, and it will either send the error
+ * stream to stderr, or to a `ProcessLogger`.
+ *
+ * For example, the `ProcessIO` created by the function below will store the
+ * normal output on the buffer provided, and print all error on stderr. The
+ * input will be read from stdin.
+ * {{{
+ * import scala.sys.process.{BasicIO, ProcessLogger}
+ * val printer = ProcessLogger(println(_))
+ * def appendToBuffer(b: StringBuffer) = BasicIO(true, b, Some(printer))
+ * }}}
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param buffer A `StringBuffer` which will receive the process normal
+ * output.
+ * @param log An optional `ProcessLogger` to which the output should be
+ * sent. If `None`, output will be sent to stderr.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, buffer: StringBuffer, log: Option[ProcessLogger]) =
new ProcessIO(input(withIn), processFully(buffer), getErr(log))
+ /** Creates a `ProcessIO` from a `ProcessLogger` . It can attach the
+ * process input to stdin.
+ *
+ * @param withIn True if the process input should be attached to stdin.
+ * @param log A `ProcessLogger` to receive all output, normal and error.
+ * @return A `ProcessIO` with the characteristics above.
+ */
def apply(withIn: Boolean, log: ProcessLogger) =
new ProcessIO(input(withIn), processOutFully(log), processErrFully(log))
+ /** Returns a function `InputStream => Unit` given an optional
+ * `ProcessLogger`. If no logger is passed, the function will send the output
+ * to stderr. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]].
+ *
+ * @param log An optional `ProcessLogger` to which the contents of
+ * the `InputStream` will be sent.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will send the data to
+ * either the provided `ProcessLogger` or, if `None`, to stderr.
+ */
def getErr(log: Option[ProcessLogger]) = log match {
case Some(lg) => processErrFully(lg)
case None => toStdErr
@@ -69,13 +134,40 @@ object BasicIO {
private def processErrFully(log: ProcessLogger) = processFully(log err _)
private def processOutFully(log: ProcessLogger) = processFully(log out _)
+ /** Closes a `Closeable` without throwing an exception */
def close(c: Closeable) = try c.close() catch { case _: IOException => () }
+
+ /** Returns a function `InputStream => Unit` that appends all data read to the
+ * provided `Appendable`. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]]. The buffer will be appended line by line.
+ *
+ * @param buffer An `Appendable` such as `StringBuilder` or `StringBuffer`.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will append all data read
+ * from the stream to the buffer.
+ */
def processFully(buffer: Appendable): InputStream => Unit = processFully(appendLine(buffer))
+
+ /** Returns a function `InputStream => Unit` that will call the passed
+ * function with all data read. This function can be used to create a
+ * [[scala.sys.process.ProcessIO]]. The `processLine` function will be called
+ * with each line read, and `Newline` will be appended after each line.
+ *
+ * @param processLine A function that will be called with all data read from
+ * the stream.
+ * @return A function `InputStream => Unit` (used by
+ * [[scala.sys.process.ProcessIO]]) which will call `processLine`
+ * with all data read from the stream.
+ */
def processFully(processLine: String => Unit): InputStream => Unit = in => {
val reader = new BufferedReader(new InputStreamReader(in))
processLinesFully(processLine)(reader.readLine)
+ reader.close()
}
+ /** Calls `processLine` with the result of `readLine` until the latter returns
+ * `null`.
+ */
def processLinesFully(processLine: String => Unit)(readLine: () => String) {
def readFully() {
val line = readLine()
@@ -86,14 +178,38 @@ object BasicIO {
}
readFully()
}
- def connectToIn(o: OutputStream): Unit = transferFully(stdin, o)
- def input(connect: Boolean): OutputStream => Unit = if (connect) connectToIn else _ => ()
+
+ /** Copies the contents of stdin to the `OutputStream`. */
+ def connectToIn(o: OutputStream): Unit = transferFully(Uncloseable protect stdin, o)
+
+ /** Returns a function `OutputStream => Unit` that either reads the content
+ * from stdin or does nothing. This function can be used by
+ * [[scala.sys.process.ProcessIO]].
+ */
+ def input(connect: Boolean): OutputStream => Unit = { outputToProcess =>
+ if (connect) connectToIn(outputToProcess)
+ outputToProcess.close()
+ }
+
+ /** Returns a `ProcessIO` connected to stdout and stderr, and, optionally, stdin. */
def standard(connectInput: Boolean): ProcessIO = standard(input(connectInput))
+
+ /** Returns a `ProcessIO` connected to stdout, stderr and the provided `in`. */
def standard(in: OutputStream => Unit): ProcessIO = new ProcessIO(in, toStdOut, toStdErr)
+ /** Sends all the input from the stream to stderr, and closes the input stream
+ * afterwards.
+ */
def toStdErr = (in: InputStream) => transferFully(in, stderr)
+
+ /** Sends all the input from the stream to stdout, and closes the input stream
+ * afterwards.
+ */
def toStdOut = (in: InputStream) => transferFully(in, stdout)
+ /** Copy all input from the input stream to the output stream. Closes the
+ * input stream once it's all read.
+ */
def transferFully(in: InputStream, out: OutputStream): Unit =
try transferFullyImpl(in, out)
catch onInterrupt(())
@@ -105,14 +221,16 @@ object BasicIO {
private[this] def transferFullyImpl(in: InputStream, out: OutputStream) {
val buffer = new Array[Byte](BufferSize)
- def loop() {
+ @tailrec def loop() {
val byteCount = in.read(buffer)
if (byteCount > 0) {
out.write(buffer, 0, byteCount)
- out.flush()
- loop()
+ // flush() will throw an exception once the process has terminated
+ val available = try { out.flush(); true } catch { case _: IOException => false }
+ if (available) loop()
}
}
loop()
+ in.close()
}
}
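Putting the documented factories together: a sketch that captures a process's normal output in a `StringBuffer` via `BasicIO(withIn, buffer, log)` while errors go to stderr (the command is illustrative):

{{{
import scala.sys.process._

val out  = new StringBuffer
val io   = BasicIO(false, out, None)  // withIn = false: nothing is sent to stdin
val exit = Process("ls").run(io).exitValue()
println("exit " + exit + ", captured " + out.length + " chars")
}}}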
diff --git a/src/library/scala/sys/process/Process.scala b/src/library/scala/sys/process/Process.scala
index b8765aa615..c2a61af936 100644
--- a/src/library/scala/sys/process/Process.scala
+++ b/src/library/scala/sys/process/Process.scala
@@ -13,7 +13,7 @@ import processInternal._
import ProcessBuilder._
/** Represents a process that is running or has finished running.
- * It may be a compound process with several underlying native processes (such as 'a #&& b`).
+ * It may be a compound process with several underlying native processes (such as `a #&& b`).
*
* This trait is often not used directly, though its companion object contains
* factories for [[scala.sys.process.ProcessBuilder]], the main component of this
@@ -42,28 +42,28 @@ object Process extends ProcessImpl with ProcessCreation { }
* found on and used through [[scala.sys.process.Process]]'s companion object.
*/
trait ProcessCreation {
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `String`, including the
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `String`, including the
* parameters.
*
* @example {{{ apply("cat file.txt") }}}
*/
def apply(command: String): ProcessBuilder = apply(command, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a sequence of `String`,
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a sequence of `String`,
* where the head is the command and each element of the tail is a parameter.
*
* @example {{{ apply("cat" :: files) }}}
*/
def apply(command: Seq[String]): ProcessBuilder = apply(command, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a command represented by a `String`,
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a command represented by a `String`,
* and a sequence of `String` representing the arguments.
*
* @example {{{ apply("cat", files) }}}
*/
def apply(command: String, arguments: Seq[String]): ProcessBuilder = apply(command +: arguments, None)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
* environment variables.
*
* @example {{{ apply("java", new java.ioFile("/opt/app"), "CLASSPATH" -> "library.jar") }}}
@@ -71,7 +71,7 @@ trait ProcessCreation {
def apply(command: String, cwd: File, extraEnv: (String, String)*): ProcessBuilder =
apply(command, Some(cwd), extraEnv: _*)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir set to `File` and extra
* environment variables.
*
* @example {{{ apply("java" :: javaArgs, new java.ioFile("/opt/app"), "CLASSPATH" -> "library.jar") }}}
@@ -79,7 +79,7 @@ trait ProcessCreation {
def apply(command: Seq[String], cwd: File, extraEnv: (String, String)*): ProcessBuilder =
apply(command, Some(cwd), extraEnv: _*)
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
* `File` and extra environment variables.
*
* @example {{{ apply("java", params.get("cwd"), "CLASSPATH" -> "library.jar") }}}
@@ -93,7 +93,7 @@ trait ProcessCreation {
}*/
}
- /** Create a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
+ /** Creates a [[scala.sys.process.ProcessBuilder]] with working dir optionally set to
* `File` and extra environment variables.
*
* @example {{{ apply("java" :: javaArgs, params.get("cwd"), "CLASSPATH" -> "library.jar") }}}
@@ -105,7 +105,7 @@ trait ProcessCreation {
apply(jpb)
}
- /** create a [[scala.sys.process.ProcessBuilder]] from a `java.lang.ProcessBuilder`.
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.lang.ProcessBuilder`.
*
* @example {{{
* apply((new java.lang.ProcessBuilder("ls", "-l")) directory new java.io.File(System.getProperty("user.home")))
@@ -113,19 +113,19 @@ trait ProcessCreation {
*/
def apply(builder: JProcessBuilder): ProcessBuilder = new Simple(builder)
- /** create a [[scala.sys.process.ProcessBuilder]] from a `java.io.File`. This
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.io.File`. This
* `ProcessBuilder` can then be used as a `Source` or a `Sink`, so one can
* pipe things from and to it.
*/
def apply(file: File): FileBuilder = new FileImpl(file)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `java.net.URL`. This
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `java.net.URL`. This
* `ProcessBuilder` can then be used as a `Source`, so that one can pipe things
* from it.
*/
def apply(url: URL): URLBuilder = new URLImpl(url)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a Scala XML Element.
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a Scala XML Element.
* This can be used as a way to template strings.
*
* @example {{{
@@ -134,23 +134,23 @@ trait ProcessCreation {
*/
def apply(command: scala.xml.Elem): ProcessBuilder = apply(command.text.trim)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `Boolean`. This can be
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `Boolean`. This can be used
* to force an exit value.
*/
def apply(value: Boolean): ProcessBuilder = apply(value.toString, if (value) 0 else 1)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a `String` name and a
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a `String` name and a
* `Boolean`. This can be used to force an exit value, with the name being
* used for `toString`.
*/
def apply(name: String, exitValue: => Int): ProcessBuilder = new Dummy(name, exitValue)
- /** Create a sequence of [[scala.sys.process.ProcessBuilder.Source]] from a sequence of
+ /** Creates a sequence of [[scala.sys.process.ProcessBuilder.Source]] from a sequence of
* something else for which there's an implicit conversion to `Source`.
*/
def applySeq[T](builders: Seq[T])(implicit convert: T => Source): Seq[Source] = builders.map(convert)
- /** Create a [[scala.sys.process.ProcessBuilder]] from one or more
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from one or more
* [[scala.sys.process.ProcessBuilder.Source]], which can then be
* piped to something else.
*
@@ -170,7 +170,7 @@ trait ProcessCreation {
*/
def cat(file: Source, files: Source*): ProcessBuilder = cat(file +: files)
- /** Create a [[scala.sys.process.ProcessBuilder]] from a non-empty sequence
+ /** Creates a [[scala.sys.process.ProcessBuilder]] from a non-empty sequence
* of [[scala.sys.process.ProcessBuilder.Source]], which can then be
* piped to something else.
*
@@ -198,18 +198,41 @@ trait ProcessImplicits {
/** Implicitly convert a `java.lang.ProcessBuilder` into a Scala one. */
implicit def builderToProcess(builder: JProcessBuilder): ProcessBuilder = apply(builder)
- /** Implicitly convert a `java.io.File` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `java.io.File` into a
+ * [[scala.sys.process.ProcessBuilder.FileBuilder]], which can be used as
+ * either input or output of a process. For example:
+ * {{{
+ * import scala.sys.process._
+ * "ls" #> new java.io.File("dirContents.txt") !
+ * }}}
+ */
implicit def fileToProcess(file: File): FileBuilder = apply(file)
- /** Implicitly convert a `java.net.URL` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `java.net.URL` into a
+ * [[scala.sys.process.ProcessBuilder.URLBuilder]], which can be used as
+ * input to a process. For example:
+ * {{{
+ * import scala.sys.process._
+ * Seq("xmllint", "--html", "-") #< new java.net.URL("http://www.scala-lang.org") #> new java.io.File("fixed.html") !
+ * }}}
+ */
implicit def urlToProcess(url: URL): URLBuilder = apply(url)
- /** Implicitly convert a [[scala.xml.Elem]] into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a [[scala.xml.Elem]] into a
+ * [[scala.sys.process.ProcessBuilder]]. This is done by obtaining the text
+ * elements of the element, trimming spaces, and then converting the result
+ * from string to a process. Importantly, tags are completely ignored, so
+ * they cannot be used to separate parameters.
+ */
implicit def xmlToProcess(command: scala.xml.Elem): ProcessBuilder = apply(command)
- /** Implicitly convert a `String` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a `String` into a [[scala.sys.process.ProcessBuilder]]. */
implicit def stringToProcess(command: String): ProcessBuilder = apply(command)
- /** Implicitly convert a sequence of `String` into a [[scala.sys.process.ProcessBuilder]] */
+ /** Implicitly convert a sequence of `String` into a
+ * [[scala.sys.process.ProcessBuilder]]. The first argument will be taken to
+ * be the command to be executed, and the remaining will be its arguments.
+ * When using this, arguments may contain spaces.
+ */
implicit def stringSeqToProcess(command: Seq[String]): ProcessBuilder = apply(command)
}
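The implicit conversions above in action, a short sketch showing the `String` and `Seq[String]` paths (the arguments are illustrative):

{{{
import scala.sys.process._

val a: Int = "ls -l".!                          // String: arguments split on spaces
val b: Int = Seq("ls", "-l", "My Documents").!  // Seq: "My Documents" stays one argument
}}}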
diff --git a/src/library/scala/sys/process/ProcessBuilder.scala b/src/library/scala/sys/process/ProcessBuilder.scala
index 214d908012..20270d423f 100644
--- a/src/library/scala/sys/process/ProcessBuilder.scala
+++ b/src/library/scala/sys/process/ProcessBuilder.scala
@@ -12,133 +12,265 @@ package process
import processInternal._
import ProcessBuilder._
-/** Represents a runnable process.
+/** Represents a sequence of one or more external processes that can be
+ * executed. A `ProcessBuilder` can be a single external process, or a
+ * combination of other `ProcessBuilder`. One can control where
+ * the output of an external process will go to, and where its input will come
+ * from, or leave that decision to whoever starts it.
*
- * This is the main component of this package. A `ProcessBuilder` may be composed with
- * others, either concatenating their outputs or piping them from one to the next, and
- * possibly with conditional execution depending on the last process exit value.
+ * One creates a `ProcessBuilder` through factories provided in
+ * [[scala.sys.process.Process]]'s companion object, or implicit conversions
+ * based on these factories made available in the package object
+ * [[scala.sys.process]]. Here are some examples:
+ * {{{
+ * import scala.sys.process._
*
- * Once executed, one can retrieve the output or redirect it to a
- * [[scala.sys.process.ProcessLogger]], or one can get the exit value, discarding or
- * redirecting the output.
+ * // Executes "ls" and sends output to stdout
+ * "ls".!
*
- * One creates a `ProcessBuilder` through factories provided in [[scala.sys.process.Process]]'s
- * companion object, or implicit conversions based on these factories made available in the
- * package object [[scala.sys.process]].
+ * // Execute "ls" and assign a `Stream[String]` of its output to "contents".
+ * // Because [[scala.Predef]] already defines a `lines` method for `String`,
+ * // we use [[scala.sys.process.Process]]'s object companion to create it.
+ * val contents = Process("ls").lines
*
- * Let's examine in detail one example of usage:
+ * // Here we use a `Seq` to make the parameter whitespace-safe
+ * def contentsOf(dir: String): String = Seq("ls", dir).!!
+ * }}}
+ *
+ * The methods of `ProcessBuilder` are divided into three categories: the ones that
+ * combine two `ProcessBuilder` to create a third, the ones that redirect input
+ * or output of a `ProcessBuilder`, and the ones that execute
+ * the external processes associated with it.
+ *
+ * ==Combining `ProcessBuilder`==
+ *
+ * Two existing `ProcessBuilder` can be combined in the following ways:
+ *
+ * * They can be executed in parallel, with the output of the first being fed
+ * as input to the second, like Unix pipes. This is achieved with the `#|`
+ * method.
+ * * They can be executed in sequence, with the second starting as soon as
+ * the first ends. This is done by the `###` method.
+ * * The execution of the second one can be conditioned by the return code
+ * (exit status) of the first, either only when it's zero, or only when it's
+ * not zero. The methods `#&&` and `#||` accomplish these tasks.
+ *
+ * ==Redirecting Input/Output==
+ *
+ * Though control of input and output can be done when executing the process,
+ * there are a few methods that create a new `ProcessBuilder` with a
+ * pre-configured input or output. They are `#<`, `#>` and `#>>`, and may take
+ * as input either another `ProcessBuilder` (like the pipe described above), or
+ * something else such as a `java.io.File` or a `java.lang.InputStream`.
+ * For example:
+ * {{{
+ * new URL("http://databinder.net/dispatch/About") #> "grep JSON" #>> new File("About_JSON") !
+ * }}}
+ *
+ * ==Starting Processes==
+ *
+ * To execute all external commands associated with a `ProcessBuilder`, one
+ * may use one of four groups of methods. Each of these methods has various
+ * overloads and variations to enable further control over the I/O. These
+ * methods are:
+ *
+ * * `run`: the most general method, it returns a
+ * [[scala.sys.process.Process]] immediately, and the external command
+ * executes concurrently.
+ * * `!`: blocks until all external commands exit, and returns the exit code
+ * of the last one in the chain of execution.
+ * * `!!`: blocks until all external commands exit, and returns a `String`
+ * with the output generated.
+ * * `lines`: returns immediately like `run`, and the output generated
+ * is provided through a `Stream[String]`. Getting the next element of that
+ * `Stream` may block until it becomes available. This method will throw an
+ * exception if the return code is different from zero -- if this is not
+ * desired, use the `lines_!` method.
+ *
+ * ==Handling Input and Output==
+ *
+ * If not specified, the input of the external commands executed with `run` or
+ * `!` will not be tied to anything, and the output will be redirected to the
+ * stdout and stderr of the Scala process. For the methods `!!` and `lines`, no
+ * input will be provided, and the output will be directed according to the
+ * semantics of these methods.
*
+ * Some methods will cause stdin to be used as input. Output can be controlled
+ * with a [[scala.sys.process.ProcessLogger]] -- `!!` and `lines` will only
+ * redirect error output when passed a `ProcessLogger`. If one desires full
+ * control over input and output, then a [[scala.sys.process.ProcessIO]] can be
+ * used with `run`.
+ *
+ * For example, we could silence the error output from `lines_!` like this:
+ * {{{
+ * val etcFiles = "find /etc" lines_! ProcessLogger(line => ())
+ * }}}
+ *
+ * ==Extended Example==
+ *
+ * Let's examine in detail one example of usage:
* {{{
* import scala.sys.process._
* "find src -name *.scala -exec grep null {} ;" #| "xargs test -z" #&& "echo null-free" #|| "echo null detected" !
* }}}
- *
* Note that every `String` is implicitly converted into a `ProcessBuilder`
* through the implicits imported from [[scala.sys.process]]. These `ProcessBuilder` are then
* combined in three different ways.
*
* 1. `#|` pipes the output of the first command into the input of the second command. It
- * mirrors a shell pipe (`|`).
- * 2. `#&&` conditionally executes the second command if the previous one finished with
- * exit value 0. It mirrors shell's `&&`.
- * 3. `#||` conditionally executes the third command if the exit value of the previous
- * command is different than zero. It mirrors shell's `&&`.
- *
- * Not shown here, the equivalent of a shell's `;` would be `###`. The reason for this name is
- * that `;` is a reserved token in Scala.
- *
- * Finally, `!` at the end executes the commands, and returns the exit value. If the output
- * was desired instead, one could run that with `!!` instead.
- *
- * If one wishes to execute the commands in background, one can either call `run`, which
- * returns a [[scala.sys.process.Process]] from which the exit value can be obtained, or
- * `lines`, which returns a [scala.collection.immutable.Stream] of output lines. This throws
- * an exception at the end of the `Stream` is the exit value is non-zero. To avoid exceptions,
- * one can use `lines_!` instead.
- *
- * One can also start the commands in specific ways to further control their I/O. Using `!<` to
- * start the commands will use the stdin from the current process for them. All methods can
- * be used passing a [[scala.sys.process.ProcessLogger]] to capture the output, both stderr and
- * stdout. And, when using `run`, one can pass a [[scala.sys.process.ProcessIO]] to control
- * stdin, stdout and stderr.
- *
- * The stdin of a command can be redirected from a `java.io.InputStream`, a `java.io.File`, a
- * `java.net.URL` or another `ProcessBuilder` through the method `#<`. Likewise, the stdout
- * can be sent to a `java.io.OutputStream`, a `java.io.File` or another `ProcessBuilder` with
- * the method `#>`. The method `#>>` can be used to append the output to a `java.io.File`.
- * For example:
+ * mirrors a shell pipe (`|`).
+ * 1. `#&&` conditionally executes the second command if the previous one finished with
+ * exit value 0. It mirrors shell's `&&`.
+ * 1. `#||` conditionally executes the third command if the exit value of the previous
+ * command is different from zero. It mirrors shell's `||`.
*
- * {{{
- * new URL("http://databinder.net/dispatch/About") #> "grep JSON" #>> new File("About_JSON") !
- * }}}
+ * Finally, `!` at the end executes the commands, and returns the exit value.
+ * Whatever is printed will be sent to the Scala process standard output. If
+ * we wanted to capture it, we could run that with `!!` instead.
+ *
+ * Note: though it is not shown above, the equivalent of a shell's `;` would be
+ * `###`. The reason for this name is that `;` is a reserved token in Scala.
*/
trait ProcessBuilder extends Source with Sink {
- /** Starts the process represented by this builder, blocks until it exits, and returns the output as a String. Standard error is
- * sent to the console. If the exit code is non-zero, an exception is thrown.*/
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the console. If
+ * the exit code is non-zero, an exception is thrown.
+ */
def !! : String
- /** Starts the process represented by this builder, blocks until it exits, and returns the output as a String. Standard error is
- * sent to the provided ProcessLogger. If the exit code is non-zero, an exception is thrown.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the provided
+ * ProcessLogger. If the exit code is non-zero, an exception is thrown.
+ */
def !!(log: ProcessLogger): String
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the console. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination and then throw an exception. */
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the console. If
+ * the exit code is non-zero, an exception is thrown. The newly started
+ * process reads from standard input of the current process.
+ */
+ def !!< : String
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the output as a String. Standard error is sent to the provided
+ * ProcessLogger. If the exit code is non-zero, an exception is thrown. The
+ * newly started process reads from standard input of the current process.
+ */
+ def !!<(log: ProcessLogger): String
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the console. If the process exits
+ * with a non-zero value, the Stream will provide all lines up to termination
+ * and then throw an exception.
+ */
def lines: Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the provided ProcessLogger. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the provided ProcessLogger. If the
+ * process exits with a non-zero value, the Stream will provide all lines up
+ * to termination but will not throw an exception.
+ */
def lines(log: ProcessLogger): Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the console. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the console. If the process exits
+ * with a non-zero value, the Stream will provide all lines up to termination
+ * but will not throw an exception.
+ */
def lines_! : Stream[String]
- /** Starts the process represented by this builder. The output is returned as a Stream that blocks when lines are not available
- * but the process has not completed. Standard error is sent to the provided ProcessLogger. If the process exits with a non-zero value,
- * the Stream will provide all lines up to termination but will not throw an exception. */
+
+ /** Starts the process represented by this builder. The output is returned as
+ * a Stream that blocks when lines are not available but the process has not
+ * completed. Standard error is sent to the provided ProcessLogger. If the
+ * process exits with a non-zero value, the Stream will provide all lines up
+ * to termination but will not throw an exception.
+ */
def lines_!(log: ProcessLogger): Stream[String]
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the console.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the console.
+ */
def ! : Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger.
+ */
def !(log: ProcessLogger): Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the console. The newly started process reads from standard input of the current process.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the console.
+ * The newly started process reads from standard input of the current process.
+ */
def !< : Int
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger. The newly started process reads from standard input of the current process.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger. The newly started process reads from standard input of the
+ * current process.
+ */
def !<(log: ProcessLogger): Int
- /** Starts the process represented by this builder. Standard output and error are sent to the console.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the console.*/
def run(): Process
- /** Starts the process represented by this builder. Standard output and error are sent to the given ProcessLogger.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the given ProcessLogger.
+ */
def run(log: ProcessLogger): Process
- /** Starts the process represented by this builder. I/O is handled by the given ProcessIO instance.*/
+
+ /** Starts the process represented by this builder. I/O is handled by the
+ * given ProcessIO instance.
+ */
def run(io: ProcessIO): Process
- /** Starts the process represented by this builder. Standard output and error are sent to the console.
- * The newly started process reads from standard input of the current process if `connectInput` is true.*/
+
+ /** Starts the process represented by this builder. Standard output and error
+ * are sent to the console. The newly started process reads from standard
+ * input of the current process if `connectInput` is true.
+ */
def run(connectInput: Boolean): Process
- /** Starts the process represented by this builder, blocks until it exits, and returns the exit code. Standard output and error are
- * sent to the given ProcessLogger.
- * The newly started process reads from standard input of the current process if `connectInput` is true.*/
+
+ /** Starts the process represented by this builder, blocks until it exits, and
+ * returns the exit code. Standard output and error are sent to the given
+ * ProcessLogger. The newly started process reads from standard input of the
+ * current process if `connectInput` is true.
+ */
def run(log: ProcessLogger, connectInput: Boolean): Process
- /** Constructs a command that runs this command first and then `other` if this command succeeds.*/
+ /** Constructs a command that runs this command first and then `other` if this
+ * command succeeds.
+ */
def #&& (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that runs this command first and then `other` if this command does not succeed.*/
+
+ /** Constructs a command that runs this command first and then `other` if this
+ * command does not succeed.
+ */
def #|| (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that will run this command and pipes the output to `other`. `other` must be a simple command.*/
+
+ /** Constructs a command that will run this command and pipes the output to
+ * `other`. `other` must be a simple command.
+ */
def #| (other: ProcessBuilder): ProcessBuilder
- /** Constructs a command that will run this command and then `other`. The exit code will be the exit code of `other`.*/
+
+ /** Constructs a command that will run this command and then `other`. The
+ * exit code will be the exit code of `other`.
+ */
def ### (other: ProcessBuilder): ProcessBuilder
- /** True if this command can be the target of a pipe.
- */
+
+ /** True if this command can be the target of a pipe. */
def canPipeTo: Boolean
- /** True if this command has an exit code which should be propagated to the user.
- * Given a pipe between A and B, if B.hasExitValue is true then the exit code will
- * be the one from B; if it is false, the one from A. This exists to prevent output
- * redirections (implemented as pipes) from masking useful process error codes.
- */
+ /** True if this command has an exit code which should be propagated to the
+ * user. Given a pipe between A and B, if B.hasExitValue is true then the
+ * exit code will be the one from B; if it is false, the one from A. This
+ * exists to prevent output redirections (implemented as pipes) from masking
+ * useful process error codes.
+ */
def hasExitValue: Boolean
}
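A compact sketch tying the combinators documented above together; `!` returns the exit code of the last command that actually ran:

{{{
import scala.sys.process._

// Pipe two commands, then branch on the pipe's exit status.
val exit = ("ls" #| "grep scala" #&& "echo found" #|| "echo none").!
}}}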
diff --git a/src/library/scala/sys/process/ProcessIO.scala b/src/library/scala/sys/process/ProcessIO.scala
index 261e837a4d..fa0674670f 100644
--- a/src/library/scala/sys/process/ProcessIO.scala
+++ b/src/library/scala/sys/process/ProcessIO.scala
@@ -11,14 +11,40 @@ package process
import processInternal._
-/** This class is used to control the I/O of every [[scala.sys.process.ProcessBuilder]].
- * Most of the time, there is no need to interact with `ProcessIO` directly. However, if
- * fine control over the I/O of a `ProcessBuilder` is desired, one can use the factories
- * on [[scala.sys.process.BasicIO]] stand-alone object to create one.
- *
- * Each method will be called in a separate thread.
- * If daemonizeThreads is true, they will all be marked daemon threads.
- */
+/** This class is used to control the I/O of every
+ * [[scala.sys.process.Process]]. The functions used to create it will be
+ * called with the process streams once it has been started. It might not be
+ * necessary to use `ProcessIO` directly --
+ * [[scala.sys.process.ProcessBuilder]] can return the process output to the
+ * caller, or use a [[scala.sys.process.ProcessLogger]] which avoids direct
+ * interaction with a stream. One can even use the factories at `BasicIO` to
+ * create a `ProcessIO`, or use its helper methods when creating one's own
+ * `ProcessIO`.
+ *
+ * When creating a `ProcessIO`, it is important to ''close all streams'' when
+ * finished, since the JVM might use system resources to capture the process
+ * input and output, and will not release them unless the streams are
+ * explicitly closed.
+ *
+ * `ProcessBuilder` will call `writeInput`, `processOutput` and `processError`
+ * in separate threads, and if daemonizeThreads is true, they will all be
+ * marked as daemon threads.
+ *
+ * @param writeInput Function that will be called with the `OutputStream` to
+ * which all input to the process must be written. This will
+ * be called in a newly spawned thread.
+ * @param processOutput Function that will be called with the `InputStream`
+ * from which all normal output of the process must be
+ * read. This will be called in a newly spawned
+ * thread.
+ * @param processError Function that will be called with the `InputStream` from
+ * which all error output of the process must be read.
+ * This will be called in a newly spawned thread.
+ * @param daemonizeThreads Indicates whether the newly spawned threads that
+ * will run `processOutput`, `processError` and
+ * `writeInput` should be marked as daemon threads.
+ * @note Failure to close the passed streams may result in resource leakage.
+ */
final class ProcessIO(
val writeInput: OutputStream => Unit,
val processOutput: InputStream => Unit,
@@ -27,8 +53,15 @@ final class ProcessIO(
) {
def this(in: OutputStream => Unit, out: InputStream => Unit, err: InputStream => Unit) = this(in, out, err, false)
+ /** Creates a new `ProcessIO` with a different handler for the process input. */
def withInput(write: OutputStream => Unit): ProcessIO = new ProcessIO(write, processOutput, processError, daemonizeThreads)
+
+ /** Creates a new `ProcessIO` with a different handler for the normal output. */
def withOutput(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, process, processError, daemonizeThreads)
+
+ /** Creates a new `ProcessIO` with a different handler for the error output. */
def withError(process: InputStream => Unit): ProcessIO = new ProcessIO(writeInput, processOutput, process, daemonizeThreads)
+
+ /** Creates a new `ProcessIO`, with `daemonizeThreads` true. */
def daemonized(): ProcessIO = new ProcessIO(writeInput, processOutput, processError, true)
}
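A minimal hand-rolled `ProcessIO` honoring the documentation above, in particular the requirement to close every stream (the command is illustrative):

{{{
import scala.sys.process._
import scala.io.Source

val io = new ProcessIO(
  in  => in.close(),                    // supply no input
  out => { Source.fromInputStream(out).getLines().foreach(println)
           out.close() },               // print normal output, then close
  err => err.close()                    // discard error output
)
println("exit: " + Process("ls").run(io).exitValue())
}}}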
diff --git a/src/library/scala/sys/process/ProcessLogger.scala b/src/library/scala/sys/process/ProcessLogger.scala
index 67146dd70e..a8241db53c 100644
--- a/src/library/scala/sys/process/ProcessLogger.scala
+++ b/src/library/scala/sys/process/ProcessLogger.scala
@@ -11,12 +11,26 @@ package process
import java.io._
-/** Encapsulates the output and error streams of a running process.
- * Many of the methods of `ProcessBuilder` accept a `ProcessLogger` as
- * an argument.
- *
- * @see [[scala.sys.process.ProcessBuilder]]
- */
+/** Encapsulates the output and error streams of a running process. This is used
+ * by [[scala.sys.process.ProcessBuilder]] when starting a process, as an
+ * alternative to [[scala.sys.process.ProcessIO]], which can be more difficult
+ * to use. Note that a `ProcessLogger` will be used to create a `ProcessIO`
+ * anyway. The object `BasicIO` has some functions to do that.
+ *
+ * Here is an example that counts the number of lines in the normal and error
+ * output of a process:
+ * {{{
+ * import scala.sys.process._
+ *
+ * var normalLines = 0
+ * var errorLines = 0
+ * val countLogger = ProcessLogger(line => normalLines += 1,
+ * line => errorLines += 1)
+ * "find /etc" ! countLogger
+ * }}}
+ *
+ * @see [[scala.sys.process.ProcessBuilder]]
+ */
trait ProcessLogger {
/** Will be called with each line read from the process output stream.
*/
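A variation on the line-counting example above: the two-argument `ProcessLogger` factory can just as easily collect normal and error output into separate buffers:

{{{
import scala.sys.process._

val outBuf = new StringBuilder
val errBuf = new StringBuilder
val logger = ProcessLogger(line => outBuf append line append '\n',
                           line => errBuf append line append '\n')
val exit   = "ls /etc" ! logger  // exit code; the lines land in the buffers
}}}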
diff --git a/src/library/scala/sys/process/package.scala b/src/library/scala/sys/process/package.scala
index 3eb0e5bb89..c1bf470831 100644
--- a/src/library/scala/sys/process/package.scala
+++ b/src/library/scala/sys/process/package.scala
@@ -11,40 +11,175 @@
// for process debugging output.
//
package scala.sys {
- /**
- * This package is used to create process pipelines, similar to Unix command pipelines.
+ /** This package handles the execution of external processes. The contents of
+ * this package can be divided into three groups, according to their
+ * responsibilities:
*
- * The key concept is that one builds a [[scala.sys.process.Process]] that will run and return an exit
- * value. This `Process` is usually composed of one or more [[scala.sys.process.ProcessBuilder]], fed by a
- * [[scala.sys.process.ProcessBuilder.Source]] and feeding a [[scala.sys.process.ProcessBuilder.Sink]]. A
- * `ProcessBuilder` itself is both a `Source` and a `Sink`.
+ * - Indicating what to run and how to run it.
+ * - Handling a process's input and output.
+ * - Running the process.
*
- * As `ProcessBuilder`, `Sink` and `Source` are abstract, one usually creates them with `apply` methods on
- * the companion object of [[scala.sys.process.Process]], or through implicit conversions available in this
- * package object from `String` and other types. The pipe is composed through unix-like pipeline and I/O
- * redirection operators available on [[scala.sys.process.ProcessBuilder]].
+ * For simple uses, the only group that matters is the first one. Running an
+ * external command can be as simple as `"ls".!`, or as complex as building a
+ * pipeline of commands such as this:
*
- * The example below shows how to build and combine such commands. It searches for `null` uses in the `src`
- * directory, printing a message indicating whether they were found or not. The first command pipes its
- * output to the second command, whose exit value is then used to choose between the third or fourth
- * commands. This same example is explained in greater detail on [[scala.sys.process.ProcessBuilder]].
+ * {{{
+ * import scala.sys.process._
+ * "ls" #| "grep .scala" #&& "scalac *.scala" #|| "echo nothing found" lines
+ * }}}
+ *
+ * We describe below the general concepts and architecture of the package,
+ * and then take a closer look at each of the categories mentioned above.
+ *
+ * ==Concepts and Architecture==
+ *
+ * The underlying basis for the whole package is Java's `Process` and
+ * `ProcessBuilder` classes. While there's no need to use these Java classes,
+ * they impose boundaries on what is possible. One cannot, for instance,
+ * retrieve a ''process id'' for whatever is executing.
+ *
+ * When executing an external process, one can provide a command's name,
+ * arguments to it, the directory in which it will be executed and what
+ * environment variables will be set. For each executing process, one can
+ * feed its standard input through a `java.io.OutputStream`, and read from
+ * its standard output and standard error through a pair of
+ * `java.io.InputStream`. One can wait until a process finishes execution and
+ * then retrieve its return value, or one can kill an executing process.
+ * Everything else must be built on those features.
+ *
+ * This package provides a DSL for running and chaining such processes,
+ * mimicking the Unix shell's ability to pipe output from one process to the input
+ * of another, or control the execution of further processes based on the
+ * return status of the previous one.
+ *
+ * In addition to this DSL, this package also provides a few ways of
+ * controlling input and output of these processes, going from simple and
+ * easy to use to complex and flexible.
*
+ * When processes are composed, a new `ProcessBuilder` is created which, when
+ * run, will execute the `ProcessBuilder` instances it is composed of
+ * according to the manner of the composition. If piping one process to
+ * another, they'll be executed simultaneously, and each will be passed a
+ * `ProcessIO` that will copy the output of one to the input of the other.
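+   *
+   * As a brief sketch (the `ls` and `sort` commands are only illustrative),
+   * note that composing builders runs nothing by itself; a process only
+   * starts when one of the execution methods is invoked:
+   *
+   * {{{
+   * import scala.sys.process._
+   *
+   * // Builds a description of the pipeline; no process starts here
+   * val sortedFiles = "ls" #| "sort"
+   *
+   * // Runs ls and sort simultaneously, piping the output of ls into
+   * // sort, and returns the pipeline's exit code
+   * val exitCode = sortedFiles.!
+   * }}}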
+ *
+ * ==What to Run and How==
+ *
+ * The central component of the process execution DSL is the
+ * [[scala.sys.process.ProcessBuilder]] trait. It is `ProcessBuilder` that
+   * implements the process execution DSL, creates the
+   * [[scala.sys.process.Process]] that will handle the execution, and returns
+   * the results of that execution to the caller. We can see that DSL in the
+   * introductory example: `#|`, `#&&` and `#||` are methods on
+ * `ProcessBuilder` used to create a new `ProcessBuilder` through
+ * composition.
+ *
+ * One creates a `ProcessBuilder` either through factories on the
+ * [[scala.sys.process.Process]]'s companion object, or through implicit
+ * conversions available in this package object itself. Implicitly, each
+ * process is created either out of a `String`, with arguments separated by
+ * spaces -- no escaping of spaces is possible -- or out of a
+ * [[scala.collection.Seq]], where the first element represents the command
+ * name, and the remaining elements are arguments to it. In this latter case,
+ * arguments may contain spaces. One can also implicitly convert
+ * [[scala.xml.Elem]] and `java.lang.ProcessBuilder` into a `ProcessBuilder`.
+ * In the introductory example, the strings were converted into
+ * `ProcessBuilder` implicitly.
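+   *
+   * For instance, both of the following create a `ProcessBuilder` through
+   * these implicit conversions (the commands are only illustrative):
+   *
+   * {{{
+   * import scala.sys.process._
+   *
+   * // A String is split on spaces: command "grep" with arguments "-i", "scala"
+   * val fromString: ProcessBuilder = "grep -i scala"
+   *
+   * // A Seq keeps each element intact, so arguments may contain spaces
+   * val fromSeq: ProcessBuilder = Seq("grep", "-i", "scala library")
+   * }}}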
+ *
+   * To further control how the process will be run, such as specifying the
+   * directory in which it will be run, see the factories on
+   * [[scala.sys.process.Process]]'s companion object.
+ *
+ * Once the desired `ProcessBuilder` is available, it can be executed in
+ * different ways, depending on how one desires to control its I/O, and what
+ * kind of result one wishes for:
+ *
+ * - Return status of the process (`!` methods)
+ * - Output of the process as a `String` (`!!` methods)
+ * - Continuous output of the process as a `Stream[String]` (`lines` methods)
+ * - The `Process` representing it (`run` methods)
+ *
+ * Some simple examples of these methods:
* {{{
* import scala.sys.process._
- * (
- * "find src -name *.scala -exec grep null {} ;"
- * #| "xargs test -z"
- * #&& "echo null-free" #|| "echo null detected"
- * ) !
+ *
+ * // This uses ! to get the exit code
+ * def fileExists(name: String) = Seq("test", "-f", name).! == 0
+ *
+ * // This uses !! to get the whole result as a string
+ * val dirContents = "ls".!!
+ *
+ * // This "fire-and-forgets" the method, which can be lazily read through
+ * // a Stream[String]
+ * def sourceFilesAt(baseDir: String): Stream[String] = {
+ * val cmd = Seq("find", baseDir, "-name", "*.scala", "-type", "f")
+ * cmd.lines
+ * }
* }}}
*
- * Other implicits available here are for [[scala.sys.process.ProcessBuilder.FileBuilder]], which extends
- * both `Sink` and `Source`, and for [[scala.sys.process.ProcessBuilder.URLBuilder]], which extends
- * `Source` alone.
+ * We'll see more details about controlling I/O of the process in the next
+ * section.
+ *
+ * ==Handling Input and Output==
+ *
+ * In the underlying Java model, once a `Process` has been started, one can
+   * get `java.io.InputStream` and `java.io.OutputStream` representing its
+ * output and input respectively. That is, what one writes to an
+ * `OutputStream` is turned into input to the process, and the output of a
+ * process can be read from an `InputStream` -- of which there are two, one
+ * representing normal output, and the other representing error output.
+ *
+ * This model creates a difficulty, which is that the code responsible for
+ * actually running the external processes is the one that has to take
+ * decisions about how to handle its I/O.
+ *
+ * This package presents an alternative model: the I/O of a running process
+ * is controlled by a [[scala.sys.process.ProcessIO]] object, which can be
+   * passed ''to'' the code that runs the external process. A `ProcessIO` will
+ * have direct access to the java streams associated with the process I/O. It
+ * must, however, close these streams afterwards.
+ *
+ * Simpler abstractions are available, however. The components of this
+ * package that handle I/O are:
+ *
+ * - [[scala.sys.process.ProcessIO]]: provides the low level abstraction.
+ * - [[scala.sys.process.ProcessLogger]]: provides a higher level abstraction
+   * for output, and can be created through its companion object.
+ * - [[scala.sys.process.BasicIO]]: a library of helper methods for the
+ * creation of `ProcessIO`.
+ * - This package object itself, with a few implicit conversions.
*
- * One can even create a `Process` solely out of these, without running any command. For example, this will
- * download from a URL to a file:
+ * Some examples of I/O handling:
+ * {{{
+ * import scala.sys.process._
+ *
+   * // An overly complex way of computing the uncompressed size
+   * // of a compressed file
+   * def gzFileSize(name: String) = {
+   *   val cat = Seq("zcat", name)
+   *   var count = 0
+   *   def byteCounter(input: java.io.InputStream) = {
+   *     while (input.read() != -1) count += 1
+   *     input.close()
+   *   }
+   *   val process = cat run new ProcessIO(_.close(), byteCounter, _.close())
+   *   process.exitValue()
+   *   count
+   * }
+ *
+ * // This "fire-and-forgets" the method, which can be lazily read through
+ * // a Stream[String], and accumulates all errors on a StringBuffer
+ * def sourceFilesAt(baseDir: String): (Stream[String], StringBuffer) = {
+ * val buffer = new StringBuffer()
+ * val cmd = Seq("find", baseDir, "-name", "*.scala", "-type", "f")
+ * val lines = cmd lines_! ProcessLogger(buffer append _)
+ * (lines, buffer)
+ * }
+ * }}}
*
+ * Instances of the java classes `java.io.File` and `java.net.URL` can both
+ * be used directly as input to other processes, and `java.io.File` can be
+ * used as output as well. One can even pipe one to the other directly
+ * without any intervening process, though that's not a design goal or
+ * recommended usage. For example, the following code will copy a web page to
+ * a file:
* {{{
* import java.io.File
* import java.net.URL
@@ -52,26 +187,33 @@ package scala.sys {
* new URL("http://www.scala-lang.org/") #> new File("scala-lang.html") !
* }}}
*
- * One may use a `Process` directly through `ProcessBuilder`'s `run` method, which starts the process in
- * the background, and returns a `Process`. If background execution is not desired, one can get a
- * `ProcessBuilder` to execute through a method such as `!`, `lines`, `run` or variations thereof. That
- * will create the `Process` to execute the commands, and return either the exit value or the output, maybe
- * throwing an exception.
- *
- * Finally, when executing a `ProcessBuilder`, one may pass a [[scala.sys.process.ProcessLogger]] to
- * capture stdout and stderr of the executing processes. A `ProcessLogger` may be created through its
- * companion object from functions of type `(String) => Unit`, or one might redirect it to a file, using
- * [[scala.sys.process.FileProcessLogger]], which can also be created through `ProcessLogger`'s object
- * companion.
+   * More information about the other ways of controlling I/O can be found
+   * in the Scaladoc for the associated objects, traits and classes.
+ *
+ * ==Running the Process==
+ *
+ * Paradoxically, this is the simplest component of all, and the one least
+ * likely to be interacted with. It consists solely of
+ * [[scala.sys.process.Process]], and it provides only two methods:
+ *
+   * - `exitValue()`: blocks until the process exits, and then returns the exit
+ * value. This is what happens when one uses the `!` method of
+ * `ProcessBuilder`.
+ * - `destroy()`: this will kill the external process and close the streams
+ * associated with it.
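+   *
+   * A minimal sketch of dealing with a `Process` directly (the `sleep`
+   * command is only illustrative):
+   *
+   * {{{
+   * import scala.sys.process._
+   *
+   * val p = "sleep 100".run()   // starts the process in the background
+   * // ... later, once we lose interest in the result ...
+   * p.destroy()                 // kills the process and closes its streams
+   * }}}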
*/
package object process extends ProcessImplicits {
+ /** The arguments passed to `java` when creating this process */
def javaVmArguments: List[String] = {
import collection.JavaConversions._
java.lang.management.ManagementFactory.getRuntimeMXBean().getInputArguments().toList
}
+ /** The input stream of this process */
def stdin = java.lang.System.in
+ /** The output stream of this process */
def stdout = java.lang.System.out
+ /** The error stream of this process */
def stderr = java.lang.System.err
}
// private val shell: String => Array[String] =
diff --git a/src/library/scala/util/Duration.scala b/src/library/scala/util/Duration.scala
new file mode 100644
index 0000000000..4c118f8b3b
--- /dev/null
+++ b/src/library/scala/util/Duration.scala
@@ -0,0 +1,485 @@
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.util
+
+import java.util.concurrent.TimeUnit
+import TimeUnit._
+import java.lang.{ Long ⇒ JLong, Double ⇒ JDouble }
+//import akka.actor.ActorSystem (commented methods)
+
+class TimerException(message: String) extends RuntimeException(message)
+
+/**
+ * Simple timer class.
+ * Usage:
+ * <pre>
+ * import scala.util.{ Duration, Timer }
+ *
+ * val timer = Timer(Duration(30, "seconds"))
+ * while (timer.isTicking) { ... }
+ * </pre>
+ */
+case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) {
+ val startTimeInMillis = System.currentTimeMillis
+ val timeoutInMillis = duration.toMillis
+
+ /**
+   * Returns true while the timer is ticking. Once the duration has elapsed it
+   * either throws an exception or returns false, depending on whether the
+   * `throwExceptionOnTimeout` argument is true.
+ */
+ def isTicking: Boolean = {
+    if (timeoutInMillis <= (System.currentTimeMillis - startTimeInMillis)) {
+ if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration)
+ else false
+ } else true
+ }
+}
+
+object Duration {
+ def apply(length: Long, unit: TimeUnit): Duration = new FiniteDuration(length, unit)
+ def apply(length: Double, unit: TimeUnit): Duration = fromNanos(unit.toNanos(1) * length)
+ def apply(length: Long, unit: String): Duration = new FiniteDuration(length, timeUnit(unit))
+
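+  /**
+   * Constructs a Duration from a nanosecond count, choosing the coarsest
+   * TimeUnit that represents it exactly: e.g. 60000000000 nanoseconds
+   * becomes Duration(1, MINUTES), while 1500 nanoseconds stays in
+   * NANOSECONDS.
+   */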
+ def fromNanos(nanos: Long): Duration = {
+ if (nanos % 86400000000000L == 0) {
+ Duration(nanos / 86400000000000L, DAYS)
+ } else if (nanos % 3600000000000L == 0) {
+ Duration(nanos / 3600000000000L, HOURS)
+ } else if (nanos % 60000000000L == 0) {
+ Duration(nanos / 60000000000L, MINUTES)
+ } else if (nanos % 1000000000L == 0) {
+ Duration(nanos / 1000000000L, SECONDS)
+ } else if (nanos % 1000000L == 0) {
+ Duration(nanos / 1000000L, MILLISECONDS)
+ } else if (nanos % 1000L == 0) {
+ Duration(nanos / 1000L, MICROSECONDS)
+ } else {
+ Duration(nanos, NANOSECONDS)
+ }
+ }
+
+ def fromNanos(nanos: Double): Duration = fromNanos((nanos + 0.5).asInstanceOf[Long])
+
+ /**
+ * Construct a Duration by parsing a String. In case of a format error, a
+ * RuntimeException is thrown. See `unapply(String)` for more information.
+ */
+ def apply(s: String): Duration = unapply(s) getOrElse sys.error("format error")
+
+ /**
+ * Deconstruct a Duration into length and unit if it is finite.
+ */
+ def unapply(d: Duration): Option[(Long, TimeUnit)] = {
+ if (d.finite_?) {
+ Some((d.length, d.unit))
+ } else {
+ None
+ }
+ }
+
+ private val RE = ("""^\s*(\d+(?:\.\d+)?)\s*""" + // length part
+ "(?:" + // units are distinguished in separate match groups
+ "(d|day|days)|" +
+ "(h|hour|hours)|" +
+ "(min|minute|minutes)|" +
+ "(s|sec|second|seconds)|" +
+ "(ms|milli|millis|millisecond|milliseconds)|" +
+ "(µs|micro|micros|microsecond|microseconds)|" +
+ "(ns|nano|nanos|nanosecond|nanoseconds)" +
+ """)\s*$""").r // close the non-capturing group
+ private val REinf = """^\s*Inf\s*$""".r
+  private val REminf = """^\s*(?:-\s*|Minus)Inf\s*$""".r
+
+ /**
+ * Parse String, return None if no match. Format is `"<length><unit>"`, where
+ * whitespace is allowed before, between and after the parts. Infinities are
+ * designated by `"Inf"` and `"-Inf"` or `"MinusInf"`.
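+   *
+   * A few sketched examples, with expected results as comments:
+   * <pre>
+   * Duration.unapply("1.2 µs")    // Some(Duration(1200, NANOSECONDS))
+   * Duration.unapply("Inf")       // Some(Duration.Inf)
+   * Duration.unapply("5 parsecs") // None
+   * </pre>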
+ */
+ def unapply(s: String): Option[Duration] = s match {
+ case RE(length, d, h, m, s, ms, mus, ns) ⇒
+      if (d ne null) Some(Duration(JDouble.parseDouble(length), DAYS))
+      else if (h ne null) Some(Duration(JDouble.parseDouble(length), HOURS))
+      else if (m ne null) Some(Duration(JDouble.parseDouble(length), MINUTES))
+      else if (s ne null) Some(Duration(JDouble.parseDouble(length), SECONDS))
+      else if (ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS))
+      else if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS))
+      else if (ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS))
+      else sys.error("made some error in regex (should not be possible)")
+ case REinf() ⇒ Some(Inf)
+ case REminf() ⇒ Some(MinusInf)
+ case _ ⇒ None
+ }
+
+ /**
+ * Parse TimeUnit from string representation.
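+   * An unrecognized unit string fails with a MatchError.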
+ */
+ def timeUnit(unit: String) = unit.toLowerCase match {
+ case "d" | "day" | "days" ⇒ DAYS
+ case "h" | "hour" | "hours" ⇒ HOURS
+ case "min" | "minute" | "minutes" ⇒ MINUTES
+ case "s" | "sec" | "second" | "seconds" ⇒ SECONDS
+ case "ms" | "milli" | "millis" | "millisecond" | "milliseconds" ⇒ MILLISECONDS
+ case "µs" | "micro" | "micros" | "microsecond" | "microseconds" ⇒ MICROSECONDS
+ case "ns" | "nano" | "nanos" | "nanosecond" | "nanoseconds" ⇒ NANOSECONDS
+ }
+
+ val Zero: Duration = new FiniteDuration(0, NANOSECONDS)
+ val Undefined: Duration = new Duration with Infinite {
+ override def toString = "Duration.Undefined"
+ override def equals(other: Any) = other.asInstanceOf[AnyRef] eq this
+ override def +(other: Duration): Duration = throw new IllegalArgumentException("cannot add Undefined duration")
+ override def -(other: Duration): Duration = throw new IllegalArgumentException("cannot subtract Undefined duration")
+ override def *(factor: Double): Duration = throw new IllegalArgumentException("cannot multiply Undefined duration")
+ override def /(factor: Double): Duration = throw new IllegalArgumentException("cannot divide Undefined duration")
+ override def /(other: Duration): Double = throw new IllegalArgumentException("cannot divide Undefined duration")
+ def >(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def >=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def <(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def <=(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration")
+ def unary_- : Duration = throw new IllegalArgumentException("cannot negate Undefined duration")
+ }
+
+ trait Infinite {
+ this: Duration ⇒
+
+ override def equals(other: Any) = false
+
+ def +(other: Duration): Duration =
+ other match {
+ case _: this.type ⇒ this
+ case _: Infinite ⇒ throw new IllegalArgumentException("illegal addition of infinities")
+ case _ ⇒ this
+ }
+ def -(other: Duration): Duration =
+ other match {
+ case _: this.type ⇒ throw new IllegalArgumentException("illegal subtraction of infinities")
+ case _ ⇒ this
+ }
+ def *(factor: Double): Duration = this
+ def /(factor: Double): Duration = this
+ def /(other: Duration): Double =
+ other match {
+ case _: Infinite ⇒ throw new IllegalArgumentException("illegal division of infinities")
+ // maybe questionable but pragmatic: Inf / 0 => Inf
+ case x ⇒ Double.PositiveInfinity * (if ((this > Zero) ^ (other >= Zero)) -1 else 1)
+ }
+
+ def finite_? = false
+
+ def length: Long = throw new IllegalArgumentException("length not allowed on infinite Durations")
+ def unit: TimeUnit = throw new IllegalArgumentException("unit not allowed on infinite Durations")
+ def toNanos: Long = throw new IllegalArgumentException("toNanos not allowed on infinite Durations")
+ def toMicros: Long = throw new IllegalArgumentException("toMicros not allowed on infinite Durations")
+ def toMillis: Long = throw new IllegalArgumentException("toMillis not allowed on infinite Durations")
+ def toSeconds: Long = throw new IllegalArgumentException("toSeconds not allowed on infinite Durations")
+ def toMinutes: Long = throw new IllegalArgumentException("toMinutes not allowed on infinite Durations")
+ def toHours: Long = throw new IllegalArgumentException("toHours not allowed on infinite Durations")
+ def toDays: Long = throw new IllegalArgumentException("toDays not allowed on infinite Durations")
+ def toUnit(unit: TimeUnit): Double = throw new IllegalArgumentException("toUnit not allowed on infinite Durations")
+
+ def printHMS = toString
+ }
+
+ /**
+ * Infinite duration: greater than any other and not equal to any other,
+ * including itself.
+ */
+ val Inf: Duration = new Duration with Infinite {
+ override def toString = "Duration.Inf"
+ def >(other: Duration) = true
+ def >=(other: Duration) = true
+ def <(other: Duration) = false
+ def <=(other: Duration) = false
+ def unary_- : Duration = MinusInf
+ }
+
+ /**
+ * Infinite negative duration: lesser than any other and not equal to any other,
+ * including itself.
+ */
+ val MinusInf: Duration = new Duration with Infinite {
+ override def toString = "Duration.MinusInf"
+ def >(other: Duration) = false
+ def >=(other: Duration) = false
+ def <(other: Duration) = true
+ def <=(other: Duration) = true
+ def unary_- : Duration = Inf
+ }
+
+ // Java Factories
+ def create(length: Long, unit: TimeUnit): Duration = apply(length, unit)
+ def create(length: Double, unit: TimeUnit): Duration = apply(length, unit)
+ def create(length: Long, unit: String): Duration = apply(length, unit)
+ def parse(s: String): Duration = unapply(s).get
+}
+
+/**
+ * Utility for working with java.util.concurrent.TimeUnit durations.
+ *
+ * <p/>
+ * Examples of usage from Java:
+ * <pre>
+ * import scala.util.FiniteDuration;
+ * import java.util.concurrent.TimeUnit;
+ *
+ * Duration duration = new FiniteDuration(100, TimeUnit.MILLISECONDS);
+ * Duration another = new FiniteDuration(5, "seconds");
+ *
+ * duration.toNanos();
+ * </pre>
+ *
+ * <p/>
+ * Examples of usage from Scala:
+ * <pre>
+ * import scala.util.Duration
+ * import java.util.concurrent.TimeUnit._
+ *
+ * val duration = Duration(100, MILLISECONDS)
+ * val duration2 = Duration(100, "millis")
+ *
+ * duration.toNanos
+ * duration < duration2
+ * duration <= Duration.Inf
+ * </pre>
+ *
+ * <p/>
+ * The wrapper classes DurationInt, DurationLong and DurationDouble provide
+ * unit-named factory methods for Int, Long and Double, and are intended as
+ * targets for implicit conversions. Example usage:
+ * <pre>
+ * val duration = new DurationInt(100).millis
+ * </pre>
+ *
+ * Extractors, parsing and arithmetic are also included:
+ * <pre>
+ * val d = Duration("1.2 µs")
+ * val Duration(length, unit) = d
+ * val d2 = d * 2.5
+ * val d3 = d2 + Duration(1, MILLISECONDS)
+ * </pre>
+ */
+abstract class Duration extends Serializable {
+ def length: Long
+ def unit: TimeUnit
+ def toNanos: Long
+ def toMicros: Long
+ def toMillis: Long
+ def toSeconds: Long
+ def toMinutes: Long
+ def toHours: Long
+ def toDays: Long
+ def toUnit(unit: TimeUnit): Double
+ def printHMS: String
+ def <(other: Duration): Boolean
+ def <=(other: Duration): Boolean
+ def >(other: Duration): Boolean
+ def >=(other: Duration): Boolean
+ def +(other: Duration): Duration
+ def -(other: Duration): Duration
+ def *(factor: Double): Duration
+ def /(factor: Double): Duration
+ def /(other: Duration): Double
+ def unary_- : Duration
+ def finite_? : Boolean
+// def dilated(implicit system: ActorSystem): Duration = this * system.settings.TestTimeFactor
+ def min(other: Duration): Duration = if (this < other) this else other
+ def max(other: Duration): Duration = if (this > other) this else other
+ def sleep(): Unit = Thread.sleep(toMillis)
+
+ // Java API
+ def lt(other: Duration) = this < other
+ def lteq(other: Duration) = this <= other
+ def gt(other: Duration) = this > other
+ def gteq(other: Duration) = this >= other
+ def plus(other: Duration) = this + other
+ def minus(other: Duration) = this - other
+ def mul(factor: Double) = this * factor
+ def div(factor: Double) = this / factor
+ def div(other: Duration) = this / other
+ def neg() = -this
+ def isFinite() = finite_?
+}
+
+class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration {
+ import Duration._
+
+ def this(length: Long, unit: String) = this(length, Duration.timeUnit(unit))
+
+ def toNanos = unit.toNanos(length)
+ def toMicros = unit.toMicros(length)
+ def toMillis = unit.toMillis(length)
+ def toSeconds = unit.toSeconds(length)
+ def toMinutes = unit.toMinutes(length)
+ def toHours = unit.toHours(length)
+ def toDays = unit.toDays(length)
+ def toUnit(u: TimeUnit) = long2double(toNanos) / NANOSECONDS.convert(1, u)
+
+ override def toString = this match {
+ case Duration(1, DAYS) ⇒ "1 day"
+ case Duration(x, DAYS) ⇒ x + " days"
+ case Duration(1, HOURS) ⇒ "1 hour"
+ case Duration(x, HOURS) ⇒ x + " hours"
+ case Duration(1, MINUTES) ⇒ "1 minute"
+ case Duration(x, MINUTES) ⇒ x + " minutes"
+ case Duration(1, SECONDS) ⇒ "1 second"
+ case Duration(x, SECONDS) ⇒ x + " seconds"
+ case Duration(1, MILLISECONDS) ⇒ "1 millisecond"
+ case Duration(x, MILLISECONDS) ⇒ x + " milliseconds"
+ case Duration(1, MICROSECONDS) ⇒ "1 microsecond"
+ case Duration(x, MICROSECONDS) ⇒ x + " microseconds"
+ case Duration(1, NANOSECONDS) ⇒ "1 nanosecond"
+ case Duration(x, NANOSECONDS) ⇒ x + " nanoseconds"
+ }
+
+ def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000.0 % 60)
+
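+  // When the other duration is finite, comparisons are done in nanoseconds.
+  // Against an infinite duration the comparison is delegated to the infinite
+  // side, which knows how it relates to every finite value.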
+ def <(other: Duration) = {
+ if (other.finite_?) {
+ toNanos < other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other > this
+ }
+ }
+
+ def <=(other: Duration) = {
+ if (other.finite_?) {
+ toNanos <= other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other >= this
+ }
+ }
+
+ def >(other: Duration) = {
+ if (other.finite_?) {
+ toNanos > other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other < this
+ }
+ }
+
+ def >=(other: Duration) = {
+ if (other.finite_?) {
+ toNanos >= other.asInstanceOf[FiniteDuration].toNanos
+ } else {
+ other <= this
+ }
+ }
+
+ def +(other: Duration) = {
+ if (!other.finite_?) {
+ other
+ } else {
+ val nanos = toNanos + other.asInstanceOf[FiniteDuration].toNanos
+ fromNanos(nanos)
+ }
+ }
+
+ def -(other: Duration) = {
+ if (!other.finite_?) {
+      -other // a finite duration minus an infinite one is infinite, with the opposite sign
+ } else {
+ val nanos = toNanos - other.asInstanceOf[FiniteDuration].toNanos
+ fromNanos(nanos)
+ }
+ }
+
+ def *(factor: Double) = fromNanos(long2double(toNanos) * factor)
+
+ def /(factor: Double) = fromNanos(long2double(toNanos) / factor)
+
+ def /(other: Duration) = if (other.finite_?) long2double(toNanos) / other.toNanos else 0
+
+ def unary_- = Duration(-length, unit)
+
+ def finite_? = true
+
+ override def equals(other: Any) =
+ other.isInstanceOf[FiniteDuration] &&
+ toNanos == other.asInstanceOf[FiniteDuration].toNanos
+
+ override def hashCode = toNanos.asInstanceOf[Int]
+}
+
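+/**
+ * Wraps an Int to provide unit-named factory methods, e.g.
+ * `new DurationInt(5).seconds`. DurationLong and DurationDouble below do the
+ * same for Long and Double. These classes are intended as targets for
+ * implicit conversions, so that `5.seconds`-style syntax works wherever such
+ * conversions are brought into scope.
+ */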
+class DurationInt(n: Int) {
+ def nanoseconds = Duration(n, NANOSECONDS)
+ def nanos = Duration(n, NANOSECONDS)
+ def nanosecond = Duration(n, NANOSECONDS)
+ def nano = Duration(n, NANOSECONDS)
+
+ def microseconds = Duration(n, MICROSECONDS)
+ def micros = Duration(n, MICROSECONDS)
+ def microsecond = Duration(n, MICROSECONDS)
+ def micro = Duration(n, MICROSECONDS)
+
+ def milliseconds = Duration(n, MILLISECONDS)
+ def millis = Duration(n, MILLISECONDS)
+ def millisecond = Duration(n, MILLISECONDS)
+ def milli = Duration(n, MILLISECONDS)
+
+ def seconds = Duration(n, SECONDS)
+ def second = Duration(n, SECONDS)
+
+ def minutes = Duration(n, MINUTES)
+ def minute = Duration(n, MINUTES)
+
+ def hours = Duration(n, HOURS)
+ def hour = Duration(n, HOURS)
+
+ def days = Duration(n, DAYS)
+ def day = Duration(n, DAYS)
+}
+
+class DurationLong(n: Long) {
+ def nanoseconds = Duration(n, NANOSECONDS)
+ def nanos = Duration(n, NANOSECONDS)
+ def nanosecond = Duration(n, NANOSECONDS)
+ def nano = Duration(n, NANOSECONDS)
+
+ def microseconds = Duration(n, MICROSECONDS)
+ def micros = Duration(n, MICROSECONDS)
+ def microsecond = Duration(n, MICROSECONDS)
+ def micro = Duration(n, MICROSECONDS)
+
+ def milliseconds = Duration(n, MILLISECONDS)
+ def millis = Duration(n, MILLISECONDS)
+ def millisecond = Duration(n, MILLISECONDS)
+ def milli = Duration(n, MILLISECONDS)
+
+ def seconds = Duration(n, SECONDS)
+ def second = Duration(n, SECONDS)
+
+ def minutes = Duration(n, MINUTES)
+ def minute = Duration(n, MINUTES)
+
+ def hours = Duration(n, HOURS)
+ def hour = Duration(n, HOURS)
+
+ def days = Duration(n, DAYS)
+ def day = Duration(n, DAYS)
+}
+
+class DurationDouble(d: Double) {
+ def nanoseconds = Duration(d, NANOSECONDS)
+ def nanos = Duration(d, NANOSECONDS)
+ def nanosecond = Duration(d, NANOSECONDS)
+ def nano = Duration(d, NANOSECONDS)
+
+ def microseconds = Duration(d, MICROSECONDS)
+ def micros = Duration(d, MICROSECONDS)
+ def microsecond = Duration(d, MICROSECONDS)
+ def micro = Duration(d, MICROSECONDS)
+
+ def milliseconds = Duration(d, MILLISECONDS)
+ def millis = Duration(d, MILLISECONDS)
+ def millisecond = Duration(d, MILLISECONDS)
+ def milli = Duration(d, MILLISECONDS)
+
+ def seconds = Duration(d, SECONDS)
+ def second = Duration(d, SECONDS)
+
+ def minutes = Duration(d, MINUTES)
+ def minute = Duration(d, MINUTES)
+
+ def hours = Duration(d, HOURS)
+ def hour = Duration(d, HOURS)
+
+ def days = Duration(d, DAYS)
+ def day = Duration(d, DAYS)
+}
diff --git a/src/library/scala/util/Timeout.scala b/src/library/scala/util/Timeout.scala
new file mode 100644
index 0000000000..0190675344
--- /dev/null
+++ b/src/library/scala/util/Timeout.scala
@@ -0,0 +1,33 @@
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
+ */
+package scala.util
+
+import java.util.concurrent.TimeUnit
+
+case class Timeout(duration: Duration) {
+ def this(timeout: Long) = this(Duration(timeout, TimeUnit.MILLISECONDS))
+ def this(length: Long, unit: TimeUnit) = this(Duration(length, unit))
+}
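+
+// A sketched illustration of the ways a Timeout can be constructed:
+//
+//   Timeout(5000L)                    // 5000 milliseconds
+//   Timeout(5L, TimeUnit.SECONDS)
+//   Timeout(Duration(5, "seconds"))
+//   Timeout.never                     // infinite duration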
+
+object Timeout {
+ /**
+   * A timeout of zero duration, which will cause most requests to time out immediately.
+ */
+ val zero = new Timeout(Duration.Zero)
+
+ /**
+   * A Timeout with infinite duration. It will never time out. Use extreme
+   * caution with this, as it may cause memory leaks or blocked threads, or may
+   * not even be supported by the receiver, which would result in an exception.
+ */
+ val never = new Timeout(Duration.Inf)
+
+ def apply(timeout: Long) = new Timeout(timeout)
+ def apply(length: Long, unit: TimeUnit) = new Timeout(length, unit)
+
+ implicit def durationToTimeout(duration: Duration) = new Timeout(duration)
+ implicit def intToTimeout(timeout: Int) = new Timeout(timeout)
+ implicit def longToTimeout(timeout: Long) = new Timeout(timeout)
+ //implicit def defaultTimeout(implicit system: ActorSystem) = system.settings.ActorTimeout (have to introduce this in ActorSystem)
+}
diff --git a/src/library/scala/util/Try.scala b/src/library/scala/util/Try.scala
new file mode 100644
index 0000000000..a05a75e0b7
--- /dev/null
+++ b/src/library/scala/util/Try.scala
@@ -0,0 +1,165 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2008-2011, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.util
+
+
+
+import collection.Seq
+
+
+
+/**
+ * The `Try` type represents a computation that may either result in an exception,
+ * or return a successfully computed value. It's analogous to the `Either` type.
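+ *
+ * A sketched example (the port parsing is illustrative only):
+ *
+ * {{{
+ * def parsePort(s: String): Try[Int] =
+ *   Try(s.toInt) filter (p => p > 0 && p < 65536)
+ *
+ * parsePort("8080") match {
+ *   case Success(port) => println("using port " + port)
+ *   case Failure(e)    => println("invalid port: " + e)
+ * }
+ * }}}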
+ */
+sealed abstract class Try[+T] {
+ /**
+ * Returns true if the `Try` is a `Failure`, false otherwise.
+ */
+ def isFailure: Boolean
+
+ /**
+ * Returns true if the `Try` is a `Success`, false otherwise.
+ */
+ def isSuccess: Boolean
+
+ /**
+ * Returns the value from this `Success` or the given argument if this is a `Failure`.
+ */
+ def getOrElse[U >: T](default: => U) = if (isSuccess) get else default
+
+ /**
+ * Returns the value from this `Success` or throws the exception if this is a `Failure`.
+ */
+ def get: T
+
+ /**
+   * Applies the given function `f` if this is a `Success`, otherwise does nothing.
+ */
+ def foreach[U](f: T => U): Unit
+
+ /**
+ * Returns the given function applied to the value from this `Success` or returns this if this is a `Failure`.
+ */
+ def flatMap[U](f: T => Try[U]): Try[U]
+
+ /**
+ * Maps the given function to the value from this `Success` or returns this if this is a `Failure`.
+ */
+ def map[U](f: T => U): Try[U]
+
+ def collect[U](pf: PartialFunction[T, U]): Try[U]
+
+ def exists(p: T => Boolean): Boolean
+
+ /**
+ * Converts this to a `Failure` if the predicate is not satisfied.
+ */
+ def filter(p: T => Boolean): Try[T]
+
+ /**
+ * Converts this to a `Failure` if the predicate is not satisfied.
+ */
+ def filterNot(p: T => Boolean): Try[T] = filter(x => !p(x))
+
+ /**
+   * Applies the given partial function to the exception if this is a `Failure`,
+   * otherwise returns this. This is like `flatMap` for the exception.
+ */
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U]
+
+ /**
+   * Applies the given partial function to the exception if this is a `Failure`,
+   * otherwise returns this. This is like `map` for the exception.
+ */
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U]
+
+ /**
+ * Returns `None` if this is a `Failure` or a `Some` containing the value if this is a `Success`.
+ */
+ def toOption = if (isSuccess) Some(get) else None
+
+ def toSeq = if (isSuccess) Seq(get) else Seq()
+
+ /**
+   * Returns the given function applied to the value from this `Success` or returns this if this is a `Failure`.
+ * Alias for `flatMap`.
+ */
+ def andThen[U](f: T => Try[U]): Try[U] = flatMap(f)
+
+ /**
+ * Transforms a nested `Try`, i.e., a `Try` of type `Try[Try[T]]`,
+ * into an un-nested `Try`, i.e., a `Try` of type `Try[T]`.
+ */
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U]
+
+ def failed: Try[Throwable]
+}
+
+
+final case class Failure[+T](exception: Throwable) extends Try[T] {
+ def isFailure = true
+ def isSuccess = false
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U] = {
+ try {
+ if (rescueException.isDefinedAt(exception)) rescueException(exception) else this
+ } catch {
+ case e2 => Failure(e2)
+ }
+ }
+ def get: T = throw exception
+ def flatMap[U](f: T => Try[U]): Try[U] = Failure[U](exception)
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U] = Failure[U](exception)
+ def foreach[U](f: T => U): Unit = {}
+ def map[U](f: T => U): Try[U] = Failure[U](exception)
+ def collect[U](pf: PartialFunction[T, U]): Try[U] = Failure[U](exception)
+ def filter(p: T => Boolean): Try[T] = this
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] =
+ if (rescueException.isDefinedAt(exception)) {
+ Try(rescueException(exception))
+ } else {
+ this
+ }
+ def exists(p: T => Boolean): Boolean = false
+ def failed: Try[Throwable] = Success(exception)
+}
+
+
+final case class Success[+T](r: T) extends Try[T] {
+ def isFailure = false
+ def isSuccess = true
+ def rescue[U >: T](rescueException: PartialFunction[Throwable, Try[U]]): Try[U] = Success(r)
+ def get = r
+ def flatMap[U](f: T => Try[U]): Try[U] =
+ try f(r)
+ catch {
+ case e => Failure(e)
+ }
+ def flatten[U](implicit ev: T <:< Try[U]): Try[U] = r
+ def foreach[U](f: T => U): Unit = f(r)
+ def map[U](f: T => U): Try[U] = Try[U](f(r))
+ def collect[U](pf: PartialFunction[T, U]): Try[U] =
+ if (pf isDefinedAt r) Success(pf(r))
+ else Failure[U](new NoSuchElementException("Partial function not defined at " + r))
+ def filter(p: T => Boolean): Try[T] =
+ if (p(r)) this
+ else Failure(new NoSuchElementException("Predicate does not hold for " + r))
+ def recover[U >: T](rescueException: PartialFunction[Throwable, U]): Try[U] = this
+ def exists(p: T => Boolean): Boolean = p(r)
+ def failed: Try[Throwable] = Failure(new UnsupportedOperationException("Success.failed"))
+}
+
+
+object Try {
+
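+  // Note: the catch-all below also traps fatal JVM errors such as
+  // OutOfMemoryError, since `case e` matches every Throwable.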
+ def apply[T](r: => T): Try[T] = {
+ try { Success(r) } catch {
+ case e => Failure(e)
+ }
+ }
+
+}
diff --git a/src/partest/scala/tools/partest/nest/PathSettings.scala b/src/partest/scala/tools/partest/nest/PathSettings.scala
index e0a2f65b80..4fe337b19f 100644
--- a/src/partest/scala/tools/partest/nest/PathSettings.scala
+++ b/src/partest/scala/tools/partest/nest/PathSettings.scala
@@ -43,9 +43,11 @@ object PathSettings {
// Directory <root>/test/files/codelib
lazy val srcCodeLibDir = Directory(srcDir / "codelib")
- lazy val srcCodeLib: File = findJar(srcCodeLibDir, "code") getOrElse {
- sys.error("No code.jar found in %s".format(srcCodeLibDir))
- }
+ lazy val srcCodeLib: File = (
+ findJar(srcCodeLibDir, "code")
+ orElse findJar(Directory(testRoot / "files" / "codelib"), "code") // work with --srcpath pending
+ getOrElse sys.error("No code.jar found in %s".format(srcCodeLibDir))
+ )
// Directory <root>/build
lazy val buildDir: Directory = {