Diffstat (limited to 'src')
-rw-r--r--  src/build/InnerObjectTestGen.scala | 2
-rw-r--r--  src/build/bnd/scala-compiler-doc.bnd | 3
-rw-r--r--  src/build/bnd/scala-compiler-interactive.bnd | 3
-rw-r--r--  src/build/bnd/scala-compiler.bnd | 3
-rw-r--r--  src/build/bnd/scala-library.bnd | 3
-rw-r--r--  src/build/bnd/scala-parser-combinators.bnd | 3
-rw-r--r--  src/build/bnd/scala-reflect.bnd | 3
-rw-r--r--  src/build/bnd/scala-swing.bnd | 3
-rw-r--r--  src/build/bnd/scala-xml.bnd | 3
-rw-r--r--  src/build/dbuild-meta-json-gen.scala | 2
-rw-r--r--  src/compiler/scala/reflect/quasiquotes/Reifiers.scala | 2
-rw-r--r--  src/compiler/scala/tools/ant/Scalac.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/Driver.scala | 26
-rw-r--r--  src/compiler/scala/tools/nsc/GenericRunnerSettings.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/Global.scala | 19
-rw-r--r--  src/compiler/scala/tools/nsc/Main.scala | 3
-rw-r--r--  src/compiler/scala/tools/nsc/Properties.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/ScriptRunner.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/ast/TreeGen.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/ast/parser/Scanners.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/icode/Opcodes.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/AsmUtils.scala | 18
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BCodeAsmCommon.scala | 34
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BCodeBodyBuilder.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BCodeHelpers.scala | 11
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BCodeIdiomatic.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BCodeSkelBuilder.scala | 10
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BTypes.scala | 10
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BTypesFromSymbols.scala | 31
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/BackendReporting.scala | 29
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/GenASM.scala | 12
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/GenBCode.scala | 22
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/analysis/InstructionStackEffect.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/analysis/ProdConsAnalyzer.scala | 478
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/ByteCodeRepository.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/BytecodeUtils.scala | 36
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/CallGraph.scala | 118
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/ClosureOptimizer.scala | 373
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/Inliner.scala | 254
-rw-r--r--  src/compiler/scala/tools/nsc/backend/jvm/opt/LocalOpt.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/ConstantOptimization.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/backend/opt/InlineExceptionHandlers.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/javac/JavaParsers.scala | 16
-rw-r--r--  src/compiler/scala/tools/nsc/plugins/Plugin.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala | 7
-rw-r--r--  src/compiler/scala/tools/nsc/reporters/Reporter.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/settings/ScalaSettings.scala | 39
-rw-r--r--  src/compiler/scala/tools/nsc/settings/ScalaVersion.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala | 7
-rw-r--r--  src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Constructors.scala | 14
-rw-r--r--  src/compiler/scala/tools/nsc/transform/LazyVals.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/Mixin.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/transform/UnCurry.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/patmat/MatchAnalysis.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/patmat/MatchCodeGen.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/transform/patmat/MatchOptimization.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/transform/patmat/MatchWarnings.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Checkable.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala | 37
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Contexts.scala | 61
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Implicits.scala | 22
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Infer.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Macros.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Namers.scala | 8
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/PatternTypers.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/RefChecks.scala | 11
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/StdAttachments.scala | 2
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Tags.scala | 4
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala | 5
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/Typers.scala | 6
-rw-r--r--  src/compiler/scala/tools/nsc/typechecker/TypersTracking.scala | 2
-rwxr-xr-x  src/compiler/scala/tools/nsc/util/DocStrings.scala | 4
-rw-r--r--  src/compiler/scala/tools/util/PathResolver.scala | 12
-rw-r--r--  src/eclipse/partest/.classpath | 2
-rw-r--r--  src/eclipse/repl/.classpath | 3
-rw-r--r--  src/eclipse/scaladoc/.classpath | 6
-rw-r--r--  src/eclipse/test-junit/.classpath | 2
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java | 3762
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java | 1493
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java | 122
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java | 1338
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java | 165
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java | 69
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java | 199
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java | 134
-rw-r--r--  src/forkjoin/scala/concurrent/forkjoin/package-info.java | 28
-rw-r--r--  src/forkjoin/scala/concurrent/util/Unsafe.java | 33
-rw-r--r--  src/intellij/forkjoin.iml.SAMPLE | 11
-rw-r--r--  src/intellij/interactive.iml.SAMPLE | 1
-rw-r--r--  src/intellij/library.iml.SAMPLE | 1
-rw-r--r--  src/intellij/repl.iml.SAMPLE | 1
-rw-r--r--  src/intellij/scala.ipr.SAMPLE | 1
-rw-r--r--  src/intellij/scaladoc.iml.SAMPLE | 1
-rw-r--r--  src/intellij/test-junit.iml.SAMPLE | 1
-rw-r--r--  src/intellij/test.iml.SAMPLE | 1
-rw-r--r--  src/library-aux/scala/Any.scala | 2
-rw-r--r--  src/library-aux/scala/AnyRef.scala | 4
-rw-r--r--  src/library/scala/Predef.scala | 14
-rw-r--r--  src/library/scala/StringContext.scala | 2
-rw-r--r--  src/library/scala/collection/GenSeqLike.scala | 2
-rwxr-xr-x  src/library/scala/collection/LinearSeqOptimized.scala | 12
-rw-r--r--  src/library/scala/collection/concurrent/TrieMap.scala | 2
-rw-r--r--  src/library/scala/collection/convert/Wrappers.scala | 50
-rw-r--r--  src/library/scala/collection/generic/MutableSortedSetFactory.scala | 2
-rw-r--r--  src/library/scala/collection/immutable/HashSet.scala | 7
-rw-r--r--  src/library/scala/collection/immutable/ListSet.scala | 7
-rw-r--r--  src/library/scala/collection/immutable/Range.scala | 2
-rw-r--r--  src/library/scala/collection/immutable/Set.scala | 16
-rw-r--r--  src/library/scala/collection/immutable/Stream.scala | 17
-rw-r--r--  src/library/scala/collection/immutable/StringOps.scala | 7
-rw-r--r--  src/library/scala/collection/mutable/ArrayBuffer.scala | 13
-rw-r--r--  src/library/scala/collection/mutable/BufferLike.scala | 7
-rw-r--r--  src/library/scala/collection/mutable/ListBuffer.scala | 11
-rw-r--r--  src/library/scala/collection/mutable/RedBlackTree.scala | 35
-rw-r--r--  src/library/scala/collection/mutable/SortedSet.scala | 3
-rw-r--r--  src/library/scala/collection/mutable/TreeMap.scala | 23
-rw-r--r--  src/library/scala/collection/mutable/TreeSet.scala | 181
-rw-r--r--  src/library/scala/collection/parallel/ParIterableLike.scala | 2
-rw-r--r--  src/library/scala/collection/parallel/TaskSupport.scala | 4
-rw-r--r--  src/library/scala/collection/parallel/Tasks.scala | 2
-rw-r--r--  src/library/scala/compat/Platform.scala | 2
-rw-r--r--  src/library/scala/concurrent/forkjoin/package.scala | 60
-rw-r--r--  src/library/scala/concurrent/impl/ExecutionContextImpl.scala | 3
-rw-r--r--  src/library/scala/concurrent/impl/Promise.scala | 14
-rw-r--r--  src/library/scala/concurrent/util/Unsafe.java | 38
-rw-r--r--  src/library/scala/io/Source.scala | 10
-rw-r--r--  src/library/scala/math/BigDecimal.scala | 2
-rw-r--r--  src/library/scala/ref/WeakReference.scala | 5
-rw-r--r--  src/library/scala/reflect/Manifest.scala | 4
-rw-r--r--  src/library/scala/runtime/LambdaDeserializer.scala | 132
-rw-r--r--  src/library/scala/runtime/java8/JFunction.java | 146
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcB$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcC$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcS$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcV$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0$mcZ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction0.java | 39
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcDF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcFD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcFF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcFI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcFJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcIF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcJF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcVD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcVF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcVI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcVJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcZD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcZF$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcZI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1$mcZJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction1.java | 240
-rw-r--r--  src/library/scala/runtime/java8/JFunction10.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction11.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction12.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction13.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction14.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction15.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction16.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction17.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction18.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction19.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcDJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcFJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcIJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcJJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcVJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZDD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZDI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZDJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZID$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZII$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZIJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZJD$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZJI$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2$mcZJJ$sp.java | 13
-rw-r--r--  src/library/scala/runtime/java8/JFunction2.java | 509
-rw-r--r--  src/library/scala/runtime/java8/JFunction20.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction21.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction22.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction3.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction4.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction5.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction6.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction7.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction8.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JFunction9.java | 22
-rw-r--r--  src/library/scala/runtime/java8/JProcedure0.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure1.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure10.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure11.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure12.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure13.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure14.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure15.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure16.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure17.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure18.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure19.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure2.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure20.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure21.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure22.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure3.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure4.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure5.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure6.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure7.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure8.java | 21
-rw-r--r--  src/library/scala/runtime/java8/JProcedure9.java | 21
-rw-r--r--  src/library/scala/sys/BooleanProp.scala | 1
-rw-r--r--  src/library/scala/sys/Prop.scala | 4
-rw-r--r--  src/library/scala/sys/SystemProperties.scala | 4
-rw-r--r--  src/library/scala/sys/process/package.scala | 3
-rw-r--r--  src/library/scala/util/control/Exception.scala | 2
-rw-r--r--  src/library/scala/util/control/NoStackTrace.scala | 4
-rw-r--r--  src/manual/scala/tools/docutil/ManMaker.scala | 2
-rw-r--r--  src/partest-extras/scala/tools/partest/ASMConverters.scala | 80
-rw-r--r--  src/partest-extras/scala/tools/partest/ReplTest.scala | 34
-rw-r--r--  src/reflect/scala/reflect/api/FlagSets.scala | 1
-rw-r--r--  src/reflect/scala/reflect/api/Internals.scala | 2
-rw-r--r--  src/reflect/scala/reflect/api/Symbols.scala | 12
-rw-r--r--  src/reflect/scala/reflect/api/Trees.scala | 2
-rw-r--r--  src/reflect/scala/reflect/api/TypeTags.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/ClassfileConstants.scala | 4
-rw-r--r--  src/reflect/scala/reflect/internal/Definitions.scala | 6
-rw-r--r--  src/reflect/scala/reflect/internal/ExistentialsAndSkolems.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/FlagSets.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/Flags.scala | 155
-rw-r--r--  src/reflect/scala/reflect/internal/HasFlags.scala | 83
-rw-r--r--  src/reflect/scala/reflect/internal/Reporting.scala | 17
-rw-r--r--  src/reflect/scala/reflect/internal/StdNames.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/SymbolPairs.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/Symbols.scala | 20
-rw-r--r--  src/reflect/scala/reflect/internal/TreeGen.scala | 7
-rw-r--r--  src/reflect/scala/reflect/internal/Types.scala | 14
-rw-r--r--  src/reflect/scala/reflect/internal/pickling/UnPickler.scala | 4
-rw-r--r--  src/reflect/scala/reflect/internal/tpe/TypeConstraints.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/tpe/TypeMaps.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/transform/Erasure.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/util/AbstractFileClassLoader.scala | 2
-rw-r--r--  src/reflect/scala/reflect/internal/util/ScalaClassLoader.scala | 30
-rw-r--r--  src/reflect/scala/reflect/macros/FrontEnds.scala | 2
-rw-r--r--  src/reflect/scala/reflect/runtime/JavaMirrors.scala | 1
-rw-r--r--  src/reflect/scala/reflect/runtime/SymbolTable.scala | 2
-rw-r--r--  src/reflect/scala/reflect/runtime/SynchronizedTypes.scala | 4
-rw-r--r--  src/reflect/scala/reflect/runtime/TwoWayCache.scala | 3
-rw-r--r--  src/reflect/scala/reflect/runtime/TwoWayCaches.scala | 3
-rw-r--r--  src/repl-jline/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala | 12
-rw-r--r--  src/repl-jline/scala/tools/nsc/interpreter/jline/JLineHistory.scala | 2
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/Formatting.scala | 3
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/ILoop.scala | 27
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/IMain.scala | 7
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/Imports.scala | 31
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/Pasted.scala | 8
-rw-r--r--  src/repl/scala/tools/nsc/interpreter/ReplProps.scala | 46
-rwxr-xr-x  src/scaladoc/scala/tools/nsc/doc/base/MemberLookupBase.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/base/comment/Comment.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/html/page/Template.scala | 3
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/html/page/diagram/DiagramStats.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/model/CommentFactory.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/model/Entity.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala | 4
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala | 2
-rw-r--r--  src/scaladoc/scala/tools/nsc/doc/model/diagram/Diagram.scala | 2
-rw-r--r--  src/scalap/decoder.properties | 2
-rw-r--r--  src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala | 6
323 files changed, 5461 insertions, 8174 deletions
diff --git a/src/build/InnerObjectTestGen.scala b/src/build/InnerObjectTestGen.scala
index b66112609c..e0b889c969 100644
--- a/src/build/InnerObjectTestGen.scala
+++ b/src/build/InnerObjectTestGen.scala
@@ -22,7 +22,7 @@ object Contexts extends Enumeration {
* object, or equivalent).
*
* Usage: TestGen <nr of levels>
- * - by default it's 2 leves. Currently, 3-level deep uncovers bugs in the type checker.
+ * - by default it's 2 levels. Currently, 3-level deep uncovers bugs in the type checker.
*
* @author Iulian Dragos
*/
diff --git a/src/build/bnd/scala-compiler-doc.bnd b/src/build/bnd/scala-compiler-doc.bnd
index 9d6d0304d1..5b662e8cef 100644
--- a/src/build/bnd/scala-compiler-doc.bnd
+++ b/src/build/bnd/scala-compiler-doc.bnd
@@ -4,4 +4,5 @@ ver: @SCALA_COMPILER_DOC_VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);@VERSION@}",*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-compiler-interactive.bnd b/src/build/bnd/scala-compiler-interactive.bnd
index 07e3de35b0..fbfff60801 100644
--- a/src/build/bnd/scala-compiler-interactive.bnd
+++ b/src/build/bnd/scala-compiler-interactive.bnd
@@ -4,4 +4,5 @@ ver: @SCALA_COMPILER_INTERACTIVE_VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);@VERSION@}",*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-compiler.bnd b/src/build/bnd/scala-compiler.bnd
index 2bd24d780d..3e60c4973c 100644
--- a/src/build/bnd/scala-compiler.bnd
+++ b/src/build/bnd/scala-compiler.bnd
@@ -9,4 +9,5 @@ Import-Package: jline.*;resolution:=optional, \
scala.xml.*;version="${range;[====,====];@XML_VERSION@}";resolution:=optional, \
scala.*;version="${range;[==,=+);${ver}}", \
*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-library.bnd b/src/build/bnd/scala-library.bnd
index 7eb4fa4b2a..e211c5d1ad 100644
--- a/src/build/bnd/scala-library.bnd
+++ b/src/build/bnd/scala-library.bnd
@@ -4,4 +4,5 @@ ver: @VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: sun.misc;resolution:=optional, *
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-parser-combinators.bnd b/src/build/bnd/scala-parser-combinators.bnd
index ef8646cbd0..515084f4a8 100644
--- a/src/build/bnd/scala-parser-combinators.bnd
+++ b/src/build/bnd/scala-parser-combinators.bnd
@@ -4,4 +4,5 @@ ver: @PARSER_COMBINATORS_VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);@VERSION@}",*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-reflect.bnd b/src/build/bnd/scala-reflect.bnd
index e4bc54e52e..59db311f8d 100644
--- a/src/build/bnd/scala-reflect.bnd
+++ b/src/build/bnd/scala-reflect.bnd
@@ -6,4 +6,5 @@ Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);${ver}}", \
scala.tools.nsc;resolution:=optional;version="${range;[==,=+);${ver}}", \
*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-swing.bnd b/src/build/bnd/scala-swing.bnd
index f8b50baa91..24cd9f6f90 100644
--- a/src/build/bnd/scala-swing.bnd
+++ b/src/build/bnd/scala-swing.bnd
@@ -4,4 +4,5 @@ ver: @SCALA_SWING_VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);@VERSION@}",*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6,JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/bnd/scala-xml.bnd b/src/build/bnd/scala-xml.bnd
index 01bf0144eb..b7b19824e8 100644
--- a/src/build/bnd/scala-xml.bnd
+++ b/src/build/bnd/scala-xml.bnd
@@ -4,4 +4,5 @@ ver: @XML_VERSION@
Bundle-Version: ${ver}
Export-Package: *;version=${ver}
Import-Package: scala.*;version="${range;[==,=+);@VERSION@}",*
-Bundle-RequiredExecutionEnvironment: JavaSE-1.6, JavaSE-1.7
+Bundle-RequiredExecutionEnvironment: JavaSE-1.8
+Include-Resource: @@SOURCE_JARNAME@
diff --git a/src/build/dbuild-meta-json-gen.scala b/src/build/dbuild-meta-json-gen.scala
index f967fffdd0..043ad19b2e 100644
--- a/src/build/dbuild-meta-json-gen.scala
+++ b/src/build/dbuild-meta-json-gen.scala
@@ -16,7 +16,7 @@ val meta =
ProjMeta(version = "2.12.0", projects = Seq(
Project("scala-library", "org.scala-lang",
Seq(ProjectRef("scala-library", "org.scala-lang")),
- Seq.empty), // TODO: forkjoin
+ Seq.empty),
Project("scala-reflect", "org.scala-lang",
Seq(ProjectRef("scala-reflect", "org.scala-lang")),
Seq(ProjectRef("scala-library", "org.scala-lang"))),
diff --git a/src/compiler/scala/reflect/quasiquotes/Reifiers.scala b/src/compiler/scala/reflect/quasiquotes/Reifiers.scala
index 8462debe21..b2002a07ea 100644
--- a/src/compiler/scala/reflect/quasiquotes/Reifiers.scala
+++ b/src/compiler/scala/reflect/quasiquotes/Reifiers.scala
@@ -317,7 +317,7 @@ trait Reifiers { self: Quasiquotes =>
* Reification of non-trivial list is done in two steps:
*
* 1. split the list into groups where every placeholder is always
- * put in a group of its own and all subsquent non-holeMap are
+ * put in a group of its own and all subsequent non-holeMap are
* grouped together; element is considered to be a placeholder if it's
* in the domain of the fill function;
*
diff --git a/src/compiler/scala/tools/ant/Scalac.scala b/src/compiler/scala/tools/ant/Scalac.scala
index 13bf0ef4c6..f46f014096 100644
--- a/src/compiler/scala/tools/ant/Scalac.scala
+++ b/src/compiler/scala/tools/ant/Scalac.scala
@@ -131,7 +131,7 @@ class Scalac extends ScalaMatchingTask with ScalacShared {
/** The character encoding of the files to compile. */
protected var encoding: Option[String] = None
- // the targetted backend
+ // the targeted backend
protected var backend: Option[String] = None
/** Whether to force compilation of all files or not. */
diff --git a/src/compiler/scala/tools/nsc/Driver.scala b/src/compiler/scala/tools/nsc/Driver.scala
index 6befa76b3f..b30744c4df 100644
--- a/src/compiler/scala/tools/nsc/Driver.scala
+++ b/src/compiler/scala/tools/nsc/Driver.scala
@@ -1,7 +1,7 @@
package scala
package tools.nsc
-import scala.tools.nsc.reporters.ConsoleReporter
+import scala.tools.nsc.reporters.{ ConsoleReporter, Reporter }
import Properties.{ versionMsg, residentPromptString }
import scala.reflect.internal.util.FakePos
@@ -9,39 +9,43 @@ abstract class Driver {
val prompt = residentPromptString
- var reporter: ConsoleReporter = _
+ var reporter: Reporter = _
protected var command: CompilerCommand = _
protected var settings: Settings = _
+ /** Forward errors to the (current) reporter. */
protected def scalacError(msg: String): Unit = {
reporter.error(FakePos("scalac"), msg + "\n scalac -help gives more information")
}
+ /** True to continue compilation. */
protected def processSettingsHook(): Boolean = {
- if (settings.version) { reporter echo versionMsg ; false } else true
+ if (settings.version) { reporter echo versionMsg ; false }
+ else !reporter.hasErrors
}
protected def newCompiler(): Global
- protected def doCompile(compiler: Global) {
+ protected def doCompile(compiler: Global): Unit = {
if (command.files.isEmpty) {
reporter.echo(command.usageMsg)
reporter.echo(compiler.pluginOptionsHelp)
} else {
val run = new compiler.Run()
run compile command.files
- reporter.printSummary()
+ reporter.finish()
}
}
- def process(args: Array[String]) {
+ def process(args: Array[String]): Boolean = {
val ss = new Settings(scalacError)
- reporter = new ConsoleReporter(ss)
+ reporter = new ConsoleReporter(ss) // for reporting early config errors, before compiler is constructed
command = new CompilerCommand(args.toList, ss)
settings = command.settings
if (processSettingsHook()) {
val compiler = newCompiler()
+ reporter = compiler.reporter // adopt the configured reporter
try {
if (reporter.hasErrors)
reporter.flush()
@@ -57,11 +61,9 @@ abstract class Driver {
case _ => throw ex // unexpected error, tell the outside world.
}
}
- }
+ } else if (reporter.hasErrors) reporter.flush()
+ !reporter.hasErrors
}
- def main(args: Array[String]) {
- process(args)
- sys.exit(if (reporter.hasErrors) 1 else 0)
- }
+ def main(args: Array[String]): Unit = sys.exit(if (process(args)) 0 else 1)
}
diff --git a/src/compiler/scala/tools/nsc/GenericRunnerSettings.scala b/src/compiler/scala/tools/nsc/GenericRunnerSettings.scala
index 1289d55c37..e99cce9186 100644
--- a/src/compiler/scala/tools/nsc/GenericRunnerSettings.scala
+++ b/src/compiler/scala/tools/nsc/GenericRunnerSettings.scala
@@ -9,7 +9,7 @@ import java.net.URL
import scala.tools.util.PathResolverFactory
class GenericRunnerSettings(error: String => Unit) extends Settings(error) {
- def classpathURLs: Seq[URL] = PathResolverFactory.create(this).resultAsURLs
+ lazy val classpathURLs: Seq[URL] = PathResolverFactory.create(this).resultAsURLs
val howtorun =
ChoiceSetting(
diff --git a/src/compiler/scala/tools/nsc/Global.scala b/src/compiler/scala/tools/nsc/Global.scala
index 778f2fed59..5cb31c1b64 100644
--- a/src/compiler/scala/tools/nsc/Global.scala
+++ b/src/compiler/scala/tools/nsc/Global.scala
@@ -15,7 +15,7 @@ import io.{ SourceReader, AbstractFile, Path }
import reporters.{ Reporter, ConsoleReporter }
import util.{ ClassFileLookup, ClassPath, MergedClassPath, StatisticsInfo, returning }
import scala.reflect.ClassTag
-import scala.reflect.internal.util.{ SourceFile, NoSourceFile, BatchSourceFile, ScriptSourceFile }
+import scala.reflect.internal.util.{ ScalaClassLoader, SourceFile, NoSourceFile, BatchSourceFile, ScriptSourceFile }
import scala.reflect.internal.pickling.PickleBuffer
import symtab.{ Flags, SymbolTable, SymbolTrackers }
import symtab.classfile.Pickler
@@ -90,7 +90,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
this(new Settings(err => reporter.error(null, err)), reporter)
def this(settings: Settings) =
- this(settings, new ConsoleReporter(settings))
+ this(settings, Global.reporter(settings))
def picklerPhase: Phase = if (currentRun.isDefined) currentRun.picklerPhase else NoPhase
@@ -1373,7 +1373,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
unitbuf += unit
compiledFiles += unit.source.file.path
}
- private def checkDeprecatedSettings(unit: CompilationUnit) {
+ private def warnDeprecatedAndConflictingSettings(unit: CompilationUnit) {
// issue warnings for any usage of deprecated settings
settings.userSetSettings filter (_.isDeprecated) foreach { s =>
currentRun.reporting.deprecationWarning(NoPosition, s.name + " is deprecated: " + s.deprecationMessage.get)
@@ -1383,6 +1383,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
currentRun.reporting.deprecationWarning(NoPosition, settings.target.name + ":" + settings.target.value + " is deprecated and has no effect, setting to " + supportedTarget)
settings.target.value = supportedTarget
}
+ settings.conflictWarning.foreach(reporter.warning(NoPosition, _))
}
/* An iterator returning all the units being compiled in this run */
@@ -1473,7 +1474,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
def compileSources(sources: List[SourceFile]) = if (!reporter.hasErrors) {
def checkDeprecations() = {
- checkDeprecatedSettings(newCompilationUnit(""))
+ warnDeprecatedAndConflictingSettings(newCompilationUnit(""))
reporting.summarizeErrors()
}
@@ -1495,7 +1496,7 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
val startTime = currentTime
reporter.reset()
- checkDeprecatedSettings(unitbuf.head)
+ warnDeprecatedAndConflictingSettings(unitbuf.head)
globalPhase = fromPhase
while (globalPhase.hasNext && !reporter.hasErrors) {
@@ -1703,4 +1704,12 @@ class Global(var currentSettings: Settings, var reporter: Reporter)
object Global {
def apply(settings: Settings, reporter: Reporter): Global = new Global(settings, reporter)
+
+ def apply(settings: Settings): Global = new Global(settings, reporter(settings))
+
+ private def reporter(settings: Settings): Reporter = {
+ //val loader = ScalaClassLoader(getClass.getClassLoader) // apply does not make delegate
+ val loader = new ClassLoader(getClass.getClassLoader) with ScalaClassLoader
+ loader.create[Reporter](settings.reporter.value, settings.errorFn)(settings)
+ }
}
diff --git a/src/compiler/scala/tools/nsc/Main.scala b/src/compiler/scala/tools/nsc/Main.scala
index a66ee572a9..e2cf49907b 100644
--- a/src/compiler/scala/tools/nsc/Main.scala
+++ b/src/compiler/scala/tools/nsc/Main.scala
@@ -17,7 +17,8 @@ class MainClass extends Driver with EvalLoop {
new compiler.Run() compile command.files
}
- override def newCompiler(): Global = Global(settings, reporter)
+ override def newCompiler(): Global = Global(settings)
+
override def doCompile(compiler: Global) {
if (settings.resident) resident(compiler)
else super.doCompile(compiler)
diff --git a/src/compiler/scala/tools/nsc/Properties.scala b/src/compiler/scala/tools/nsc/Properties.scala
index ca7d8776d4..cb523edfe5 100644
--- a/src/compiler/scala/tools/nsc/Properties.scala
+++ b/src/compiler/scala/tools/nsc/Properties.scala
@@ -12,8 +12,16 @@ object Properties extends scala.util.PropertiesTrait {
protected def pickJarBasedOn = classOf[Global]
// settings based on jar properties, falling back to System prefixed by "scala."
+
+ // messages to display at startup or prompt, format string with string parameters
+ // Scala version, Java version, JVM name
def residentPromptString = scalaPropOrElse("resident.prompt", "\nnsc> ")
def shellPromptString = scalaPropOrElse("shell.prompt", "%nscala> ")
+ def shellWelcomeString = scalaPropOrElse("shell.welcome",
+ """Welcome to Scala %1$#s (%3$s, Java %2$s).
+ |Type in expressions for evaluation. Or try :help.""".stripMargin
+ )
+
// message to display at EOF (which by default ends with
// a newline so as not to break the user's terminal)
def shellInterruptedString = scalaPropOrElse("shell.interrupted", f":quit$lineSeparator")
diff --git a/src/compiler/scala/tools/nsc/ScriptRunner.scala b/src/compiler/scala/tools/nsc/ScriptRunner.scala
index 6d24b31531..bf93ad30bc 100644
--- a/src/compiler/scala/tools/nsc/ScriptRunner.scala
+++ b/src/compiler/scala/tools/nsc/ScriptRunner.scala
@@ -16,16 +16,16 @@ import util.Exceptional.unwrap
/** An object that runs Scala code in script files.
*
- * <p>For example, here is a complete Scala script on Unix:</pre>
- * <pre>
+ * For example, here is a complete Scala script on Unix:
+ * {{{
* #!/bin/sh
* exec scala "$0" "$@"
* !#
* Console.println("Hello, world!")
* args.toList foreach Console.println
- * </pre>
- * <p>And here is a batch file example on Windows XP:</p>
- * <pre>
+ * }}}
+ * And here is a batch file example on Windows XP:
+ * {{{
* ::#!
* @echo off
* call scala %0 %*
@@ -33,7 +33,7 @@ import util.Exceptional.unwrap
* ::!#
* Console.println("Hello, world!")
* args.toList foreach Console.println
- * </pre>
+ * }}}
*
* @author Lex Spoon
* @version 1.0, 15/05/2006
diff --git a/src/compiler/scala/tools/nsc/ast/TreeGen.scala b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
index 0575b9703e..bf53c47e9a 100644
--- a/src/compiler/scala/tools/nsc/ast/TreeGen.scala
+++ b/src/compiler/scala/tools/nsc/ast/TreeGen.scala
@@ -233,7 +233,7 @@ abstract class TreeGen extends scala.reflect.internal.TreeGen with TreeDSL {
}
/** Return the synchronized part of the double-checked locking idiom around the syncBody tree. It guards with `cond` and
- * synchronizez on `clazz.this`. Additional statements can be included after initialization,
+ * synchronizes on `clazz.this`. Additional statements can be included after initialization,
* (outside the synchronized block).
*
* The idiom works only if the condition is using a volatile field.
diff --git a/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala b/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
index d5cb0d6a3b..51bb0d3c5b 100644
--- a/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
+++ b/src/compiler/scala/tools/nsc/ast/parser/Scanners.scala
@@ -226,7 +226,7 @@ trait Scanners extends ScannersCommon {
* RPAREN if region starts with '('
* RBRACKET if region starts with '['
* RBRACE if region starts with '{'
- * ARROW if region starts with `case'
+ * ARROW if region starts with 'case'
* STRINGLIT if region is a string interpolation expression starting with '${'
* (the STRINGLIT appears twice in succession on the stack iff the
* expression is a multiline string literal).
diff --git a/src/compiler/scala/tools/nsc/backend/icode/Opcodes.scala b/src/compiler/scala/tools/nsc/backend/icode/Opcodes.scala
index 076f84ce7a..351a8e33d3 100644
--- a/src/compiler/scala/tools/nsc/backend/icode/Opcodes.scala
+++ b/src/compiler/scala/tools/nsc/backend/icode/Opcodes.scala
@@ -86,7 +86,7 @@ trait Opcodes { self: ICodes =>
* Each case subclass will represent a specific operation.
*/
abstract class Instruction extends Cloneable {
- // Vlad: I used these for checking the quality of the implementation, and we should regularely run a build with them
+ // Vlad: I used these for checking the quality of the implementation, and we should regularly run a build with them
// enabled. But for production these should definitely be disabled, unless we enjoy getting angry emails from Greg :)
//if (!this.isInstanceOf[opcodes.LOAD_EXCEPTION])
// assert(consumed == consumedTypes.length)
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/AsmUtils.scala b/src/compiler/scala/tools/nsc/backend/jvm/AsmUtils.scala
index 0df1b2029d..cd7e0b83e8 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/AsmUtils.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/AsmUtils.scala
@@ -10,6 +10,7 @@ import java.io.{StringWriter, PrintWriter}
import scala.tools.asm.util.{CheckClassAdapter, TraceClassVisitor, TraceMethodVisitor, Textifier}
import scala.tools.asm.{ClassWriter, Attribute, ClassReader}
import scala.collection.convert.decorateAsScala._
+import scala.tools.nsc.backend.jvm.analysis.InitialProducer
import scala.tools.nsc.backend.jvm.opt.InlineInfoAttributePrototype
object AsmUtils {
@@ -81,13 +82,16 @@ object AsmUtils {
/**
* Returns a human-readable representation of the given instruction.
*/
- def textify(insn: AbstractInsnNode): String = {
- val trace = new TraceMethodVisitor(new Textifier)
- insn.accept(trace)
- val sw = new StringWriter
- val pw = new PrintWriter(sw)
- trace.p.print(pw)
- sw.toString.trim
+ def textify(insn: AbstractInsnNode): String = insn match {
+ case _: InitialProducer =>
+ insn.toString
+ case _ =>
+ val trace = new TraceMethodVisitor(new Textifier)
+ insn.accept(trace)
+ val sw = new StringWriter
+ val pw = new PrintWriter(sw)
+ trace.p.print(pw)
+ sw.toString.trim
}
/**
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BCodeAsmCommon.scala b/src/compiler/scala/tools/nsc/backend/jvm/BCodeAsmCommon.scala
index dec5adc9aa..93f5159f89 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BCodeAsmCommon.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BCodeAsmCommon.scala
@@ -256,14 +256,17 @@ final class BCodeAsmCommon[G <: Global](val global: G) {
if (hasAbstractMethod) ACC_ABSTRACT else 0
}
GenBCode.mkFlags(
- if (classSym.isPublic) ACC_PUBLIC else 0,
- if (classSym.isFinal) ACC_FINAL else 0,
+ // SI-9393: the classfile / java source parser make java annotation symbols look like classes.
+ // here we recover the actual classfile flags.
+ if (classSym.hasJavaAnnotationFlag) ACC_ANNOTATION | ACC_INTERFACE | ACC_ABSTRACT else 0,
+ if (classSym.isPublic) ACC_PUBLIC else 0,
+ if (classSym.isFinal) ACC_FINAL else 0,
// see the link above. javac does the same: ACC_SUPER for all classes, but not interfaces.
- if (classSym.isInterface) ACC_INTERFACE else ACC_SUPER,
+ if (classSym.isInterface) ACC_INTERFACE else ACC_SUPER,
// for Java enums, we cannot trust `hasAbstractFlag` (see comment in enumFlags)
- if (!classSym.hasEnumFlag && classSym.hasAbstractFlag) ACC_ABSTRACT else 0,
- if (classSym.isArtifact) ACC_SYNTHETIC else 0,
- if (classSym.hasEnumFlag) enumFlags else 0
+ if (!classSym.hasJavaEnumFlag && classSym.hasAbstractFlag) ACC_ABSTRACT else 0,
+ if (classSym.isArtifact) ACC_SYNTHETIC else 0,
+ if (classSym.hasJavaEnumFlag) enumFlags else 0
)
}
@@ -289,7 +292,7 @@ final class BCodeAsmCommon[G <: Global](val global: G) {
lazy val AnnotationRetentionPolicyRuntimeValue = AnnotationRetentionPolicyModule.tpe.member(TermName("RUNTIME"))
/** Whether an annotation should be emitted as a Java annotation
- * .initialize: if 'annot' is read from pickle, atp might be un-initialized
+ * .initialize: if 'annot' is read from pickle, atp might be uninitialized
*/
def shouldEmitAnnotation(annot: AnnotationInfo) = {
annot.symbol.initialize.isJavaDefined &&
@@ -310,10 +313,10 @@ final class BCodeAsmCommon[G <: Global](val global: G) {
}
private def retentionPolicyOf(annot: AnnotationInfo): Symbol =
- annot.atp.typeSymbol.getAnnotation(AnnotationRetentionAttr).map(_.assocs).map(assoc =>
+ annot.atp.typeSymbol.getAnnotation(AnnotationRetentionAttr).map(_.assocs).flatMap(assoc =>
assoc.collectFirst {
case (`nme`.value, LiteralAnnotArg(Constant(value: Symbol))) => value
- }).flatten.getOrElse(AnnotationRetentionPolicyClassValue)
+ }).getOrElse(AnnotationRetentionPolicyClassValue)
def implementedInterfaces(classSym: Symbol): List[Symbol] = {
// Additional interface parents based on annotations and other cues
@@ -322,9 +325,18 @@ final class BCodeAsmCommon[G <: Global](val global: G) {
case _ => None
}
- def isInterfaceOrTrait(sym: Symbol) = sym.isInterface || sym.isTrait
+ // SI-9393: java annotations are interfaces, but the classfile / java source parsers make them look like classes.
+ def isInterfaceOrTrait(sym: Symbol) = sym.isInterface || sym.isTrait || sym.hasJavaAnnotationFlag
- val allParents = classSym.info.parents ++ classSym.annotations.flatMap(newParentForAnnotation)
+ val classParents = {
+ val parents = classSym.info.parents
+ // SI-9393: the classfile / java source parsers add Annotation and ClassfileAnnotation to the
+ // parents of a java annotations. undo this for the backend (where we need classfile-level information).
+ if (classSym.hasJavaAnnotationFlag) parents.filterNot(c => c.typeSymbol == ClassfileAnnotationClass || c.typeSymbol == AnnotationClass)
+ else parents
+ }
+
+ val allParents = classParents ++ classSym.annotations.flatMap(newParentForAnnotation)
// We keep the superClass when computing minimizeParents to eliminate more interfaces.
// Example: T can be eliminated from D
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BCodeBodyBuilder.scala b/src/compiler/scala/tools/nsc/backend/jvm/BCodeBodyBuilder.scala
index c3f71969f6..22ac8f84d4 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BCodeBodyBuilder.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BCodeBodyBuilder.scala
@@ -632,10 +632,11 @@ abstract class BCodeBodyBuilder extends BCodeSkelBuilder {
case _ =>
abort(s"Cannot instantiate $tpt of kind: $generatedType")
}
- case Apply(_, args) if app.hasAttachment[delambdafy.LambdaMetaFactoryCapable] =>
+ case Apply(fun, args) if app.hasAttachment[delambdafy.LambdaMetaFactoryCapable] =>
val attachment = app.attachments.get[delambdafy.LambdaMetaFactoryCapable].get
genLoadArguments(args, paramTKs(app))
genInvokeDynamicLambda(attachment.target, attachment.arity, attachment.functionalInterface)
+ generatedType = asmMethodType(fun.symbol).returnType
case Apply(fun @ _, List(expr)) if currentRun.runDefinitions.isBox(fun.symbol) =>
val nativeKind = tpeTK(expr)
@@ -843,7 +844,6 @@ abstract class BCodeBodyBuilder extends BCodeSkelBuilder {
*
* New (http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.10.1)
* - Requires consistent stack map frames. GenBCode always generates stack frames.
- * or higher.
* - In practice: the ASM library computes stack map frames for us (ClassWriter). Emitting
* correct frames after an ATHROW is probably complex, so ASM uses the following strategy:
* - Every time when generating an ATHROW, a new basic block is started.
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BCodeHelpers.scala b/src/compiler/scala/tools/nsc/backend/jvm/BCodeHelpers.scala
index 23e0a4e17a..0f381a4325 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BCodeHelpers.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BCodeHelpers.scala
@@ -693,7 +693,7 @@ abstract class BCodeHelpers extends BCodeIdiomatic with BytecodeWriters {
* cache = new java.util.HashMap()
* $deserializeLambdaCache$ = cache
* }
- * return scala.compat.java8.runtime.LambdaDeserializer.deserializeLambda(MethodHandles.lookup(), cache, l);
+ * return scala.runtime.LambdaDeserializer.deserializeLambda(MethodHandles.lookup(), cache, l);
* }
*/
def addLambdaDeserialize(clazz: Symbol, jclass: asm.ClassVisitor): Unit = {
@@ -715,7 +715,8 @@ abstract class BCodeHelpers extends BCodeIdiomatic with BytecodeWriters {
{
val mv = cw.visitMethod(ACC_PRIVATE + ACC_STATIC + ACC_SYNTHETIC, "$deserializeLambda$", "(Ljava/lang/invoke/SerializedLambda;)Ljava/lang/Object;", null, null)
mv.visitCode()
- mv.visitFieldInsn(GETSTATIC, clazz.javaBinaryName.encoded, "$deserializeLambdaCache$", "Ljava/util/Map;")
+ // javaBinaryName returns the internal name of a class. Also used in BTypesFromsymbols.classBTypeFromSymbol.
+ mv.visitFieldInsn(GETSTATIC, clazz.javaBinaryName.toString, "$deserializeLambdaCache$", "Ljava/util/Map;")
mv.visitVarInsn(ASTORE, 1)
mv.visitVarInsn(ALOAD, 1)
val l0 = new asm.Label()
@@ -725,13 +726,13 @@ abstract class BCodeHelpers extends BCodeIdiomatic with BytecodeWriters {
mv.visitMethodInsn(INVOKESPECIAL, "java/util/HashMap", "<init>", "()V", false)
mv.visitVarInsn(ASTORE, 1)
mv.visitVarInsn(ALOAD, 1)
- mv.visitFieldInsn(PUTSTATIC, clazz.javaBinaryName.encoded, "$deserializeLambdaCache$", "Ljava/util/Map;")
+ mv.visitFieldInsn(PUTSTATIC, clazz.javaBinaryName.toString, "$deserializeLambdaCache$", "Ljava/util/Map;")
mv.visitLabel(l0)
- mv.visitFrame(asm.Opcodes.F_APPEND,1, Array("java/util/Map"), 0, null)
+ mv.visitFieldInsn(GETSTATIC, "scala/runtime/LambdaDeserializer$", "MODULE$", "Lscala/runtime/LambdaDeserializer$;")
mv.visitMethodInsn(INVOKESTATIC, "java/lang/invoke/MethodHandles", "lookup", "()Ljava/lang/invoke/MethodHandles$Lookup;", false)
mv.visitVarInsn(ALOAD, 1)
mv.visitVarInsn(ALOAD, 0)
- mv.visitMethodInsn(INVOKESTATIC, "scala/compat/java8/runtime/LambdaDeserializer", "deserializeLambda", "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/util/Map;Ljava/lang/invoke/SerializedLambda;)Ljava/lang/Object;", false)
+ mv.visitMethodInsn(INVOKEVIRTUAL, "scala/runtime/LambdaDeserializer$", "deserializeLambda", "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/util/Map;Ljava/lang/invoke/SerializedLambda;)Ljava/lang/Object;", false)
mv.visitInsn(ARETURN)
mv.visitEnd()
}
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BCodeIdiomatic.scala b/src/compiler/scala/tools/nsc/backend/jvm/BCodeIdiomatic.scala
index a25f5cad63..535e1a8620 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BCodeIdiomatic.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BCodeIdiomatic.scala
@@ -436,7 +436,7 @@ abstract class BCodeIdiomatic extends SubComponent {
else { emitTypeBased(JCodeMethodN.returnOpcodes, tk) }
}
- /* Emits one of tableswitch or lookoupswitch.
+ /* Emits one of tableswitch or lookupswitch.
*
* can-multi-thread
*/
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BCodeSkelBuilder.scala b/src/compiler/scala/tools/nsc/backend/jvm/BCodeSkelBuilder.scala
index 0f67852804..a9b6a312e9 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BCodeSkelBuilder.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BCodeSkelBuilder.scala
@@ -140,7 +140,7 @@ abstract class BCodeSkelBuilder extends BCodeHelpers {
if (AsmUtils.traceClassEnabled && cnode.name.contains(AsmUtils.traceClassPattern))
AsmUtils.traceClass(cnode)
- if (settings.YoptInlinerEnabled) {
+ if (settings.YoptAddToBytecodeRepository) {
// The inliner needs to find all classes in the code repo, also those being compiled
byteCodeRepository.add(cnode, ByteCodeRepository.CompilationUnit)
}
@@ -153,9 +153,9 @@ abstract class BCodeSkelBuilder extends BCodeHelpers {
*/
private def initJClass(jclass: asm.ClassVisitor) {
- val ps = claszSymbol.info.parents
- val superClass: String = if (ps.isEmpty) ObjectReference.internalName else internalName(ps.head.typeSymbol)
- val interfaceNames = classBTypeFromSymbol(claszSymbol).info.get.interfaces map {
+ val bType = classBTypeFromSymbol(claszSymbol)
+ val superClass = bType.info.get.superClass.getOrElse(ObjectReference).internalName
+ val interfaceNames = bType.info.get.interfaces map {
case classBType =>
if (classBType.isNestedClass.get) { innerClassBufferASM += classBType }
classBType.internalName
@@ -443,7 +443,7 @@ abstract class BCodeSkelBuilder extends BCodeHelpers {
* which rethrows the caught exception once it's done with the cleanup code.
*
* A particular cleanup may in general contain LabelDefs. Care is needed when duplicating such jump-targets,
- * so as to preserve agreement wit the (also duplicated) jump-sources.
+ * so as to preserve agreement with the (also duplicated) jump-sources.
* This is achieved based on the bookkeeping provided by two maps:
* - `labelDefsAtOrUnder` lists all LabelDefs enclosed by a given Tree node (the key)
* - `labelDef` provides the LabelDef node whose symbol is used as key.
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BTypes.scala b/src/compiler/scala/tools/nsc/backend/jvm/BTypes.scala
index 176292669c..8720da84e8 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BTypes.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BTypes.scala
@@ -44,6 +44,8 @@ abstract class BTypes {
val inliner: Inliner[this.type]
+ val closureOptimizer: ClosureOptimizer[this.type]
+
val callGraph: CallGraph[this.type]
val backendReporting: BackendReporting
@@ -646,7 +648,7 @@ abstract class BTypes {
* JVMS 4.7.7: the attribute must be present "if and only if it represents a local class
* or an anonymous class" (i.e. not for member classes).
*
- * The attribute is mis-named, it should be called "EnclosingClass". It has to be defined for all
+ * The attribute is misnamed, it should be called "EnclosingClass". It has to be defined for all
* local and anonymous classes, no matter if there is an enclosing method or not. Accordingly, the
* "class" field (see below) must be always defined, while the "method" field may be null.
*
@@ -796,7 +798,7 @@ abstract class BTypes {
* 2. The ClassBType should be built from a classfile, but the class could not be found on the
* compilation classpath.
*
- * Note that all ClassBTypes required in a non-optimzied run are built during code generation from
+ * Note that all ClassBTypes required in a non-optimized run are built during code generation from
* the class symbols referenced by the ASTs, so they have a valid info. Therefore the backend
* often invokes `info.get` (which asserts the info to exist) when reading data from the ClassBType.
*
@@ -940,7 +942,7 @@ abstract class BTypes {
*/
def jvmWiseLUB(other: ClassBType): Either[NoClassBTypeInfo, ClassBType] = {
def isNotNullOrNothing(c: ClassBType) = !c.isNullType && !c.isNothingType
- assert(isNotNullOrNothing(this) && isNotNullOrNothing(other), s"jvmWiseLub for null or nothing: $this - $other")
+ assert(isNotNullOrNothing(this) && isNotNullOrNothing(other), s"jvmWiseLUB for null or nothing: $this - $other")
tryEither {
val res: ClassBType = (this.isInterface.orThrow, other.isInterface.orThrow) match {
@@ -965,7 +967,7 @@ abstract class BTypes {
firstCommonSuffix(this :: this.superClassesTransitive.orThrow, other :: other.superClassesTransitive.orThrow)
}
- assert(isNotNullOrNothing(res), s"jvmWiseLub computed: $res")
+ assert(isNotNullOrNothing(res), s"jvmWiseLUB computed: $res")
Right(res)
}
}
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BTypesFromSymbols.scala b/src/compiler/scala/tools/nsc/backend/jvm/BTypesFromSymbols.scala
index d68c916f09..45d9cc3ff3 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BTypesFromSymbols.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BTypesFromSymbols.scala
@@ -7,7 +7,7 @@ package scala.tools.nsc
package backend.jvm
import scala.tools.asm
-import scala.tools.nsc.backend.jvm.opt.{LocalOpt, CallGraph, Inliner, ByteCodeRepository}
+import scala.tools.nsc.backend.jvm.opt._
import scala.tools.nsc.backend.jvm.BTypes.{InlineInfo, MethodInlineInfo, InternalName}
import BackendReporting._
import scala.tools.nsc.settings.ScalaSettings
@@ -42,6 +42,8 @@ class BTypesFromSymbols[G <: Global](val global: G) extends BTypes {
val inliner: Inliner[this.type] = new Inliner(this)
+ val closureOptimizer: ClosureOptimizer[this.type] = new ClosureOptimizer(this)
+
val callGraph: CallGraph[this.type] = new CallGraph(this)
val backendReporting: BackendReporting = new BackendReportingImpl(global)
@@ -214,7 +216,18 @@ class BTypesFromSymbols[G <: Global](val global: G) extends BTypes {
}
private def setClassInfo(classSym: Symbol, classBType: ClassBType): ClassBType = {
- val superClassSym = if (classSym.isImplClass) ObjectClass else classSym.superClass
+ // Check for isImplClass: trait implementation classes have NoSymbol as superClass
+ // Check for hasAnnotationFlag for SI-9393: the classfile / java source parsers add
+ // scala.annotation.Annotation as superclass to java annotations. In reality, java
+ // annotation classfiles have superclass Object (like any interface classfile).
+ val superClassSym = if (classSym.isImplClass || classSym.hasJavaAnnotationFlag) ObjectClass else {
+ val sc = classSym.superClass
+ // SI-9393: Java annotation classes don't have the ABSTRACT/INTERFACE flag, so they appear
+ // (wrongly) as superclasses. Fix this for BTypes: the java annotation will appear as interface
+ // (handled by method implementedInterfaces), the superclass is set to Object.
+ if (sc.hasJavaAnnotationFlag) ObjectClass
+ else sc
+ }
assert(
if (classSym == ObjectClass)
superClassSym == NoSymbol
@@ -349,11 +362,19 @@ class BTypesFromSymbols[G <: Global](val global: G) extends BTypes {
val isTopLevel = innerClassSym.rawowner.isPackageClass
// impl classes are considered top-level, see comment in BTypes
if (isTopLevel || considerAsTopLevelImplementationArtifact(innerClassSym)) None
- else {
+ else if (innerClassSym.rawowner.isTerm) {
+ // This case should never be reached: the lambdalift phase mutates the rawowner field of all
+ // classes to be the enclosing class. SI-9392 shows an errant macro that leaves a reference
+ // to a local class symbol that no longer exists, which is not updated by lambdalift.
+ devWarning(innerClassSym.pos,
+ s"""The class symbol $innerClassSym with the term symbol ${innerClassSym.rawowner} as `rawowner` reached the backend.
+ |Most likely this indicates a stale reference to a non-existing class introduced by a macro, see SI-9392.""".stripMargin)
+ None
+ } else {
// See comment in BTypes, when is a class marked static in the InnerClass table.
val isStaticNestedClass = isOriginallyStaticOwner(innerClassSym.originalOwner)
- // After lambdalift (which is where we are), the rawowoner field contains the enclosing class.
+ // After lambdalift (which is where we are), the rawowner field contains the enclosing class.
val enclosingClass = {
// (1) Example java source: class C { static class D { } }
// The Scala compiler creates a class and a module symbol for C. Because D is a static
@@ -557,7 +578,7 @@ class BTypesFromSymbols[G <: Global](val global: G) extends BTypes {
if (sym.isBridge) ACC_BRIDGE | ACC_SYNTHETIC else 0,
if (sym.isArtifact) ACC_SYNTHETIC else 0,
if (sym.isClass && !sym.isInterface) ACC_SUPER else 0,
- if (sym.hasEnumFlag) ACC_ENUM else 0,
+ if (sym.hasJavaEnumFlag) ACC_ENUM else 0,
if (sym.isVarargsMethod) ACC_VARARGS else 0,
if (sym.hasFlag(symtab.Flags.SYNCHRONIZED)) ACC_SYNCHRONIZED else 0,
if (sym.isDeprecated) asm.Opcodes.ACC_DEPRECATED else 0
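As a side note on the SI-9393 change above: the claim that a Java annotation's classfile has superclass Object and lists java/lang/annotation/Annotation only among its interfaces can be checked directly with the compiler's shaded ASM tree API. The snippet below is an illustrative sketch, not part of the patch; it assumes scala.tools.asm is on the classpath and uses java.lang.annotation.Retention merely as an example annotation.

    import scala.tools.asm.ClassReader
    import scala.tools.asm.tree.ClassNode

    object AnnotationSuperclassCheck {
      def main(args: Array[String]): Unit = {
        // Read the classfile of a JDK annotation from the classpath.
        val in = classOf[java.lang.annotation.Retention].getResourceAsStream("Retention.class")
        val classNode = new ClassNode
        try new ClassReader(in).accept(classNode, 0) finally in.close()
        // Annotation classfiles look like interface classfiles:
        println(classNode.superName)  // java/lang/Object
        println(classNode.interfaces) // [java/lang/annotation/Annotation]
      }
    }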
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/BackendReporting.scala b/src/compiler/scala/tools/nsc/backend/jvm/BackendReporting.scala
index d641f708d2..b41d0de92f 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/BackendReporting.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/BackendReporting.scala
@@ -1,7 +1,7 @@
package scala.tools.nsc
package backend.jvm
-import scala.tools.asm.tree.{AbstractInsnNode, MethodNode}
+import scala.tools.asm.tree.{InvokeDynamicInsnNode, AbstractInsnNode, MethodNode}
import scala.tools.nsc.backend.jvm.BTypes.InternalName
import scala.reflect.internal.util.Position
import scala.tools.nsc.settings.ScalaSettings
@@ -246,6 +246,33 @@ object BackendReporting {
case class ResultingMethodTooLarge(calleeDeclarationClass: InternalName, name: String, descriptor: String,
callsiteClass: InternalName, callsiteName: String, callsiteDesc: String) extends CannotInlineWarning
+ case object UnknownInvokeDynamicInstruction extends OptimizerWarning {
+ override def toString = "The callee contains an InvokeDynamic instruction with an unknown bootstrap method (not a LambdaMetaFactory)."
+ def emitWarning(settings: ScalaSettings): Boolean = settings.YoptWarningEmitAtInlineFailed
+ }
+
+ /**
+ * Used in `rewriteClosureApplyInvocations` when a closure apply callsite cannot be rewritten
+ * to the closure body method.
+ */
+ sealed trait RewriteClosureApplyToClosureBodyFailed extends OptimizerWarning {
+ def pos: Position
+
+ override def emitWarning(settings: ScalaSettings): Boolean = this match {
+ case RewriteClosureAccessCheckFailed(_, cause) => cause.emitWarning(settings)
+ case RewriteClosureIllegalAccess(_, _) => settings.YoptWarningEmitAtInlineFailed
+ }
+
+ override def toString: String = this match {
+ case RewriteClosureAccessCheckFailed(_, cause) =>
+ s"Failed to rewrite the closure invocation to its implementation method:\n" + cause
+ case RewriteClosureIllegalAccess(_, callsiteClass) =>
+ s"The closure body invocation cannot be rewritten because the target method is not accessible in class $callsiteClass."
+ }
+ }
+ case class RewriteClosureAccessCheckFailed(pos: Position, cause: OptimizerWarning) extends RewriteClosureApplyToClosureBodyFailed
+ case class RewriteClosureIllegalAccess(pos: Position, callsiteClass: InternalName) extends RewriteClosureApplyToClosureBodyFailed
+
/**
* Used in the InlineInfo of a ClassBType, when some issue occurred obtaining the inline information.
*/
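The new closure-rewrite warnings follow the same pattern as the existing optimizer warnings: the sealed parent implements toString and emitWarning with a single match over its cases, so all message text and reporting policy stays in one place. A minimal, self-contained sketch of that pattern (all names below are illustrative, not compiler API):

    sealed trait ExampleWarning {
      // One match in the parent keeps all message text together; a case class does not
      // generate its synthetic toString when a concrete one is inherited.
      override def toString: String = this match {
        case AccessCheckFailed(cause)     => s"Failed to rewrite the closure invocation:\n$cause"
        case IllegalAccess(callsiteClass) => s"The body method is not accessible in class $callsiteClass."
      }
    }
    final case class AccessCheckFailed(cause: String) extends ExampleWarning
    final case class IllegalAccess(callsiteClass: String) extends ExampleWarning

    object WarningDemo extends App {
      println(AccessCheckFailed("callee not found")) // prints the message from the match above
    }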
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/GenASM.scala b/src/compiler/scala/tools/nsc/backend/jvm/GenASM.scala
index ccad50616c..7153c09377 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/GenASM.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/GenASM.scala
@@ -307,7 +307,7 @@ abstract class GenASM extends SubComponent with BytecodeWriters { self =>
if (sym.isBridge) ACC_BRIDGE | ACC_SYNTHETIC else 0,
if (sym.isArtifact) ACC_SYNTHETIC else 0,
if (sym.isClass && !sym.isInterface) ACC_SUPER else 0,
- if (sym.hasEnumFlag) ACC_ENUM else 0,
+ if (sym.hasJavaEnumFlag) ACC_ENUM else 0,
if (sym.isVarargsMethod) ACC_VARARGS else 0,
if (sym.hasFlag(Flags.SYNCHRONIZED)) ACC_SYNCHRONIZED else 0
)
@@ -492,8 +492,8 @@ abstract class GenASM extends SubComponent with BytecodeWriters { self =>
* generic classes or interfaces.
*
* @param superName the internal name of the super class. For interfaces,
- * the super class is {@link Object}. May be <tt>null</tt>, but
- * only for the {@link Object} class.
+ * the super class is [[Object]]. May be <tt>null</tt>, but
+ * only for the [[Object]] class.
*
* @param interfaces the internal names of the class's interfaces (see
* {@link Type#getInternalName() getInternalName}). May be
@@ -529,6 +529,10 @@ abstract class GenASM extends SubComponent with BytecodeWriters { self =>
case e: java.lang.RuntimeException if e.getMessage != null && (e.getMessage contains "too large!") =>
reporter.error(sym.pos,
s"Could not write class $jclassName because it exceeds JVM code size limits. ${e.getMessage}")
+ case e: java.io.IOException if e.getMessage != null && (e.getMessage contains "File name too long") =>
+ reporter.error(sym.pos, e.getMessage + "\n" +
+ "This can happen on some encrypted or legacy file systems. Please see SI-3623 for more details.")
+
}
}
@@ -3028,7 +3032,7 @@ abstract class GenASM extends SubComponent with BytecodeWriters { self =>
*
* Rationale for this normalization:
* test/files/run/private-inline.scala after -optimize is chock full of
- * BasicBlocks containing just JUMP(whereTo), where no exception handler straddles them.
+ * BasicBlocks containing just JUMP(whereto), where no exception handler straddles them.
* They should be collapsed by IMethod.normalize() but aren't.
* That was fine in FJBG times when by the time the exception table was emitted,
* it already contained "anchored" labels (ie instruction offsets were known)
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/GenBCode.scala b/src/compiler/scala/tools/nsc/backend/jvm/GenBCode.scala
index a33725ed34..00b4b8b667 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/GenBCode.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/GenBCode.scala
@@ -167,6 +167,11 @@ abstract class GenBCode extends BCodeSyncAndTry {
)
}
+ // shim for SBT, see https://github.com/sbt/sbt/issues/2076
+ // TODO put this closer to classfile writing once we have closure elimination
+ // TODO create a nicer public API to find out the correspondence between sourcefile and ultimate classfiles
+ currentUnit.icode += new icodes.IClass(cd.symbol)
+
// -------------- mirror class, if needed --------------
val mirrorC =
if (isTopLevelModuleClass(claszSymbol)) {
@@ -216,12 +221,17 @@ abstract class GenBCode extends BCodeSyncAndTry {
class Worker2 {
def runGlobalOptimizations(): Unit = {
import scala.collection.convert.decorateAsScala._
- q2.asScala foreach {
- case Item2(_, _, plain, _, _) =>
- // skip mirror / bean: wd don't inline into tem, and they are not used in the plain class
- if (plain != null) callGraph.addClass(plain)
+ if (settings.YoptBuildCallGraph) {
+ q2.asScala foreach {
+ case Item2(_, _, plain, _, _) =>
+ // skip mirror / bean: we don't inline into them, and they are not used in the plain class
+ if (plain != null) callGraph.addClass(plain)
+ }
}
- bTypes.inliner.runInliner()
+ if (settings.YoptInlinerEnabled)
+ bTypes.inliner.runInliner()
+ if (settings.YoptClosureElimination)
+ closureOptimizer.rewriteClosureApplyInvocations()
}
def localOptimizations(classNode: ClassNode): Unit = {
@@ -229,7 +239,7 @@ abstract class GenBCode extends BCodeSyncAndTry {
}
def run() {
- if (settings.YoptInlinerEnabled) runGlobalOptimizations()
+ runGlobalOptimizations()
while (true) {
val item = q2.poll
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/analysis/InstructionStackEffect.scala b/src/compiler/scala/tools/nsc/backend/jvm/analysis/InstructionStackEffect.scala
index 98e93c125b..8d8ea839e6 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/analysis/InstructionStackEffect.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/analysis/InstructionStackEffect.scala
@@ -94,7 +94,7 @@ object InstructionStackEffect {
val isSize2 = peekStack(0).getSize == 2
if (isSize2) t(1, 0) else t(2, 0)
- case DUP => t(0, 1)
+ case DUP => t(1, 2)
case DUP_X1 => t(2, 3)
@@ -104,7 +104,7 @@ object InstructionStackEffect {
case DUP2 =>
val isSize2 = peekStack(0).getSize == 2
- if (isSize2) t(0, 1) else t(0, 2)
+ if (isSize2) t(1, 2) else t(2, 4)
case DUP2_X1 =>
val isSize2 = peekStack(0).getSize == 2
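The corrected pairs matter because the new ProdConsAnalyzer (added later in this patch) reads the result of `t` as (values consumed, values produced) rather than as a net stack delta: DUP consumes the top value and produces two copies, and DUP2 consumes and produces either one category-2 value or two category-1 values. A stand-alone sketch of how such a pair is turned into frame-slot indices, mirroring inputValueSlots in ProdConsAnalyzer (the names here are illustrative):

    object StackSlotDemo extends App {
      // Frame slots are numbered locals first, then stack values.
      def consumedSlots(numLocals: Int, stackSize: Int, consumed: Int): Range = {
        val totalSlots = numLocals + stackSize
        (totalSlots - consumed) until totalSlots
      }
      // A DUP executed with 2 locals and 3 stack values reads exactly the top slot (index 4):
      assert(consumedSlots(numLocals = 2, stackSize = 3, consumed = 1) == (4 until 5))
    }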
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/analysis/ProdConsAnalyzer.scala b/src/compiler/scala/tools/nsc/backend/jvm/analysis/ProdConsAnalyzer.scala
new file mode 100644
index 0000000000..1c24acba03
--- /dev/null
+++ b/src/compiler/scala/tools/nsc/backend/jvm/analysis/ProdConsAnalyzer.scala
@@ -0,0 +1,478 @@
+/* NSC -- new Scala compiler
+ * Copyright 2005-2015 LAMP/EPFL
+ * @author Martin Odersky
+ */
+
+package scala.tools.nsc
+package backend.jvm
+package analysis
+
+import java.util
+
+import scala.annotation.switch
+import scala.collection.mutable
+import scala.tools.asm.{Type, MethodVisitor}
+import scala.tools.asm.Opcodes._
+import scala.tools.asm.tree._
+import scala.tools.asm.tree.analysis._
+import scala.tools.nsc.backend.jvm.BTypes.InternalName
+
+import opt.BytecodeUtils._
+
+import scala.collection.convert.decorateAsScala._
+
+/**
+ * This class provides additional queries over ASM's built-in `SourceValue` analysis.
+ *
+ * The analysis computes for each value in a frame a set of source instructions, which are the
+ * potential producers. Most instructions produce either nothing or a stack value. For example,
+ * a `LOAD` instruction is the producer of the value pushed onto the stack. The exception are
+ * `STORE` instructions, which produce a new value for a local variable slot, so they are used
+ * as producers for the value they stored.
+ *
+ * Note that pseudo-instructions are used as initial producers for parameters and local variables.
+ * See the documentation on class InitialProducer.
+ *
+ * This class implements the following queries over the data computed by the SourceValue analysis:
+ *
+ * - producersForValueAt(insn, slot)
+ * - consumersOfValueAt(insn, slot)
+ *
+ * - producersForInputsOf(insn)
+ * - consumersOfOutputsFrom(insn)
+ *
+ * - initialProducersForValueAt(insn, slot)
+ * - ultimateConsumersOfValueAt(insn, slot)
+ *
+ * - initialProducersForInputsOf(insn)
+ * - ultimateConsumersOfOutputsFrom(insn)
+ *
+ * The following operations are considered as copying operations:
+ * - xLOAD, xSTORE
+ * - DUP, DUP2, DUP_X1, DUP_X2, DUP2_X1, DUP2_X2
+ * - SWAP
+ * - CHECKCAST
+ *
+ * If ever needed, we could introduce a mode where primitive conversions (l2i) are considered as
+ * copying operations.
+ */
+class ProdConsAnalyzer(methodNode: MethodNode, classInternalName: InternalName) {
+
+ /* Timers for benchmarking ProdCons
+ import scala.reflect.internal.util.Statistics._
+ import ProdConsAnalyzer._
+ val analyzerTimer = newSubTimer(classInternalName + "#" + methodNode.name + " - analysis", prodConsAnalyzerTimer)
+ val consumersTimer = newSubTimer(classInternalName + "#" + methodNode.name + " - consumers", prodConsAnalyzerTimer)
+ */
+
+ val analyzer = new Analyzer(new InitialProducerSourceInterpreter)
+
+// val start = analyzerTimer.start()
+ analyzer.analyze(classInternalName, methodNode)
+// analyzerTimer.stop(start)
+// println(analyzerTimer.line)
+
+ def frameAt(insn: AbstractInsnNode) = analyzer.frameAt(insn, methodNode)
+
+ /**
+ * Returns the potential producer instructions of a (local or stack) value in the frame of `insn`.
+ * This method simply returns the producer information computed by the SourceValue analysis.
+ */
+ def producersForValueAt(insn: AbstractInsnNode, slot: Int): Set[AbstractInsnNode] = {
+ frameAt(insn).getValue(slot).insns.asScala.toSet
+ }
+
+ /**
+ * Returns the potential consumer instructions of a (local or stack) value in the frame of `insn`.
+ * This is the counterpart of `producersForValueAt`.
+ */
+ def consumersOfValueAt(insn: AbstractInsnNode, slot: Int): Set[AbstractInsnNode] = {
+ producersForValueAt(insn, slot).flatMap(prod => {
+ val outputNumber = outputValueSlots(prod).indexOf(slot)
+ _consumersOfOutputsFrom.get(prod).map(v => {
+ v(outputNumber)
+ }).getOrElse(Set.empty)
+ })
+ }
+
+ /**
+ * Returns the potential producer instructions of any of the values consumed by `insn`.
+ */
+ def producersForInputsOf(insn: AbstractInsnNode): Set[AbstractInsnNode] = {
+ inputValues(insn).iterator.flatMap(v => v.insns.asScala).toSet
+ }
+
+ def consumersOfOutputsFrom(insn: AbstractInsnNode): Set[AbstractInsnNode] =
+ _consumersOfOutputsFrom.get(insn).map(v => v.indices.flatMap(v.apply)(collection.breakOut): Set[AbstractInsnNode]).getOrElse(Set.empty)
+
+ /**
+ * Returns the potential initial producer instructions of a value in the frame of `insn`.
+ *
+ * Unlike `producersForValueAt`, producers are tracked through copying instructions such as STORE
+ * and LOAD. If the producer of the value is a LOAD, then the producers of the stored value(s) are
+ * returned instead.
+ */
+ def initialProducersForValueAt(insn: AbstractInsnNode, slot: Int): Set[AbstractInsnNode] = {
+ def initialProducers(insn: AbstractInsnNode, producedSlot: Int): Set[AbstractInsnNode] = {
+ if (isCopyOperation(insn)) {
+ val key = (insn, producedSlot)
+ _initialProducersCache.getOrElseUpdate(key, {
+ // prevent infinite recursion if an instruction is its own producer or consumer
+ // see cyclicProdCons in ProdConsAnalyzerTest
+ _initialProducersCache(key) = Set.empty
+ val (sourceValue, sourceValueSlot) = copyOperationSourceValue(insn, producedSlot)
+ sourceValue.insns.iterator.asScala.flatMap(initialProducers(_, sourceValueSlot)).toSet
+ })
+ } else {
+ Set(insn)
+ }
+ }
+ producersForValueAt(insn, slot).flatMap(initialProducers(_, slot))
+ }
+
+ /**
+ * Returns the potential ultimate consumers of a value in the frame of `insn`. Consumers are
+ * tracked through copying operations such as STORE and LOAD.
+ */
+ def ultimateConsumersOfValueAt(insn: AbstractInsnNode, slot: Int): Set[AbstractInsnNode] = {
+ def ultimateConsumers(insn: AbstractInsnNode, consumedSlot: Int): Set[AbstractInsnNode] = {
+ if (isCopyOperation(insn)) {
+ val key = (insn, consumedSlot)
+ _ultimateConsumersCache.getOrElseUpdate(key, {
+ // prevent infinite recursion if an instruction is its own producer or consumer
+ // see cyclicProdCons in ProdConsAnalyzerTest
+ _ultimateConsumersCache(key) = Set.empty
+ for {
+ producedSlot <- copyOperationProducedValueSlots(insn, consumedSlot)
+ consumer <- consumersOfValueAt(insn.getNext, producedSlot)
+ ultimateConsumer <- ultimateConsumers(consumer, producedSlot)
+ } yield ultimateConsumer
+ })
+ } else {
+ Set(insn)
+ }
+ }
+ consumersOfValueAt(insn, slot).flatMap(ultimateConsumers(_, slot))
+ }
+
+ def initialProducersForInputsOf(insn: AbstractInsnNode): Set[AbstractInsnNode] = {
+ inputValueSlots(insn).flatMap(slot => initialProducersForValueAt(insn, slot)).toSet
+ }
+
+ def ultimateConsumersOfOutputsFrom(insn: AbstractInsnNode): Set[AbstractInsnNode] = {
+ lazy val next = insn.getNext
+ outputValueSlots(insn).flatMap(slot => ultimateConsumersOfValueAt(next, slot)).toSet
+ }
+
+ private def isCopyOperation(insn: AbstractInsnNode): Boolean = {
+ isVarInstruction(insn) || {
+ (insn.getOpcode: @switch) match {
+ case DUP | DUP_X1 | DUP_X2 | DUP2 | DUP2_X1 | DUP2_X2 | SWAP | CHECKCAST => true
+ case _ => false
+ }
+ }
+ }
+
+ /**
+ * Returns the value and its frame slot that `copyOp` copies into `producedSlot`.
+ *
+ * Example:
+ * - copyOp = DUP_X1, assume it produces slots 2,3,4
+ * - producedSlot = 3
+ * - the result is the value at slot 2 in the frame of `copyOp`
+ */
+ private def copyOperationSourceValue(copyOp: AbstractInsnNode, producedSlot: Int): (SourceValue, Int) = {
+ val frame = frameAt(copyOp)
+
+ // Index of the produced value. Example: DUP_X1 produces 3 values, so producedIndex is 0, 1 or 2,
+ // where 0 corresponds to the lowest value on the stack.
+ def producedIndex(numConsumed: Int) = {
+ val numUsedSlotsBeforeCopy = frame.stackTop + 1
+ producedSlot - (numUsedSlotsBeforeCopy - numConsumed)
+ }
+
+ def stackValue(n: Int) = (frame.peekStack(n), frame.stackTop - n)
+
+ def dupX1Case = (producedIndex(2): @switch) match {
+ case 0 | 2 => stackValue(0)
+ case 1 => stackValue(1)
+ }
+
+ // Form 1 of dup_x2
+ def dupX2Case = (producedIndex(3): @switch) match {
+ case 0 | 3 => stackValue(0)
+ case 1 => stackValue(2)
+ case 2 => stackValue(1)
+ }
+
+ // Form 1 of dup2_x1
+ def dup2X1Case = (producedIndex(3): @switch) match {
+ case 0 | 3 => stackValue(1)
+ case 1 | 4 => stackValue(0)
+ case 2 => stackValue(2)
+ }
+
+ if (isLoad(copyOp)) {
+ val slot = copyOp.asInstanceOf[VarInsnNode].`var`
+ (frame.getLocal(slot), slot)
+ } else if (isStore(copyOp)) {
+ stackValue(0)
+ } else (copyOp.getOpcode: @switch) match {
+ case DUP =>
+ stackValue(0) // the current stack top is the source of both produced values
+
+ case DUP_X1 =>
+ dupX1Case
+
+ case DUP_X2 =>
+ if (frame.peekStack(1).getSize == 2) dupX1Case
+ else dupX2Case
+
+ case DUP2 =>
+ if (frame.peekStack(0).getSize == 2) stackValue(0)
+ else {
+ (producedIndex(2): @switch) match {
+ case 0 | 2 => stackValue(1)
+ case 1 | 3 => stackValue(0)
+ }
+ }
+
+ case DUP2_X1 =>
+ if (frame.peekStack(0).getSize == 2) dupX1Case
+ else dup2X1Case
+
+ case DUP2_X2 =>
+ val v1isSize2 = frame.peekStack(0).getSize == 2
+ if (v1isSize2) {
+ val v2isSize2 = frame.peekStack(1).getSize == 2
+ if (v2isSize2) dupX1Case // Form 4
+ else dupX2Case // Form 2
+ } else {
+ val v3isSize2 = frame.peekStack(2).getSize == 2
+ if (v3isSize2) dup2X1Case // Form 3
+ else {
+ // Form 1
+ (producedIndex(4): @switch) match {
+ case 0 | 4 => stackValue(1)
+ case 1 | 5 => stackValue(0)
+ case 2 => stackValue(3)
+ case 3 => stackValue(2)
+ }
+ }
+ }
+
+ case SWAP =>
+ if (producedIndex(2) == 0) stackValue(0)
+ else stackValue(1)
+
+ case CHECKCAST =>
+ stackValue(0)
+ }
+ }
+
+ /**
+ * Returns the value slots into which `copyOp` copies the value at `consumedSlot`.
+ *
+ * Example:
+ * - copyOp = DUP_X1, assume it consumes slots 2,3 and produces 2,3,4
+ * - if consumedSlot == 2, the result is Set(3)
+ * - if consumedSlot == 3, the result is Set(2, 4)
+ */
+ private def copyOperationProducedValueSlots(copyOp: AbstractInsnNode, consumedSlot: Int): Set[Int] = {
+ if (isStore(copyOp)) Set(copyOp.asInstanceOf[VarInsnNode].`var`)
+ else {
+ val nextFrame = frameAt(copyOp.getNext)
+ val top = nextFrame.stackTop
+
+ // Index of the consumed value. Example: DUP_X1 consumes two values, so consumedIndex is
+ // 0 or 1, where 0 corresponds to the lower value on the stack.
+ def consumedIndex(numProduced: Int) = {
+ val numUsedSlotsAfterCopy = top + 1
+ consumedSlot - (numUsedSlotsAfterCopy - numProduced)
+ }
+
+ def dupX1Case = (consumedIndex(3): @switch) match {
+ case 0 => Set(top - 1)
+ case 1 => Set(top - 2, top)
+ }
+
+ def dupX2Case = (consumedIndex(4): @switch) match {
+ case 0 => Set(top - 2)
+ case 1 => Set(top - 1)
+ case 2 => Set(top - 3, top)
+ }
+
+ def dup2X1Case = (consumedIndex(5): @switch) match {
+ case 0 => Set(top - 2)
+ case 1 => Set(top - 4, top - 1)
+ case 2 => Set(top - 3, top)
+ }
+
+ if (isLoad(copyOp)) Set(top)
+ else (copyOp.getOpcode: @switch) match {
+ case DUP =>
+ Set(top - 1, top)
+
+ case DUP_X1 =>
+ dupX1Case
+
+ case DUP_X2 =>
+ if (nextFrame.peekStack(1).getSize == 2) dupX1Case
+ else dupX2Case
+
+ case DUP2 =>
+ if (nextFrame.peekStack(0).getSize == 2) Set(top - 1, top)
+ else (consumedIndex(4): @switch) match {
+ case 0 => Set(top - 3, top - 1)
+ case 1 => Set(top - 2, top)
+ }
+
+ case DUP2_X1 =>
+ if (nextFrame.peekStack(0).getSize == 2) dupX1Case
+ else dup2X1Case
+
+ case DUP2_X2 =>
+ val v1isSize2 = nextFrame.peekStack(0).getSize == 2
+ if (v1isSize2) {
+ val v2isSize2 = nextFrame.peekStack(1).getSize == 2
+ if (v2isSize2) dupX1Case // Form 4
+ else dupX2Case // Form 2
+ } else {
+ val v3isSize2 = nextFrame.peekStack(2).getSize == 2
+ if (v3isSize2) dup2X1Case // Form 3
+ else {
+ // Form 1
+ (consumedIndex(6): @switch) match {
+ case 0 => Set(top - 3)
+ case 1 => Set(top - 2)
+ case 2 => Set(top - 5, top - 1)
+ case 3 => Set(top - 4, top)
+ }
+ }
+ }
+
+ case SWAP =>
+ if (consumedIndex(2) == 0) Set(top)
+ else Set(top - 1)
+
+ case CHECKCAST =>
+ Set(top)
+ }
+ }
+ }
+
+ /** Returns the frame values consumed by executing `insn`. */
+ private def inputValues(insn: AbstractInsnNode): Seq[SourceValue] = {
+ lazy val frame = frameAt(insn)
+ inputValueSlots(insn) map frame.getValue
+ }
+
+ /** Returns the frame slots holding the values consumed by executing `insn`. */
+ private def inputValueSlots(insn: AbstractInsnNode): Seq[Int] = {
+ if (insn.getOpcode == -1) return Seq.empty
+ if (isLoad(insn)) {
+ Seq(insn.asInstanceOf[VarInsnNode].`var`)
+ } else if (insn.getOpcode == IINC) {
+ Seq(insn.asInstanceOf[IincInsnNode].`var`)
+ } else {
+ val frame = frameAt(insn)
+ val stackEffect = InstructionStackEffect(insn, frame)
+ val stackSize = frame.getLocals + frame.getStackSize
+ (stackSize - stackEffect._1) until stackSize
+ }
+ }
+
+ /** Returns the frame slots holding the values produced by executing `insn`. */
+ private def outputValueSlots(insn: AbstractInsnNode): Seq[Int] = insn match {
+ case ParameterProducer(local) => Seq(local)
+ case UninitializedLocalProducer(local) => Seq(local)
+ case ExceptionProducer(frame) => Seq(frame.stackTop)
+ case _ =>
+ if (insn.getOpcode == -1) return Seq.empty
+ if (isStore(insn)) {
+ Seq(insn.asInstanceOf[VarInsnNode].`var`)
+ } else if (insn.getOpcode == IINC) {
+ Seq(insn.asInstanceOf[IincInsnNode].`var`)
+ } else {
+ val frame = frameAt(insn)
+ val stackEffect = InstructionStackEffect(insn, frame)
+ val nextFrame = frameAt(insn.getNext)
+ val stackSize = nextFrame.getLocals + nextFrame.getStackSize
+ (stackSize - stackEffect._2) until stackSize
+ }
+ }
+
+ /** For each instruction, a set of potential consumers of the produced values. */
+ private lazy val _consumersOfOutputsFrom: Map[AbstractInsnNode, Vector[Set[AbstractInsnNode]]] = {
+// val start = consumersTimer.start()
+ var res = Map.empty[AbstractInsnNode, Vector[Set[AbstractInsnNode]]]
+ for {
+ insn <- methodNode.instructions.iterator.asScala
+ frame = frameAt(insn)
+ i <- inputValueSlots(insn)
+ producer <- frame.getValue(i).insns.asScala
+ } {
+ val producedSlots = outputValueSlots(producer)
+ val currentConsumers = res.getOrElse(producer, Vector.fill(producedSlots.size)(Set.empty[AbstractInsnNode]))
+ val outputIndex = producedSlots.indexOf(i)
+ res = res.updated(producer, currentConsumers.updated(outputIndex, currentConsumers(outputIndex) + insn))
+ }
+// consumersTimer.stop(start)
+// println(consumersTimer.line)
+ res
+ }
+
+ private val _initialProducersCache: mutable.AnyRefMap[(AbstractInsnNode, Int), Set[AbstractInsnNode]] = mutable.AnyRefMap.empty
+ private val _ultimateConsumersCache: mutable.AnyRefMap[(AbstractInsnNode, Int), Set[AbstractInsnNode]] = mutable.AnyRefMap.empty
+}
+
+object ProdConsAnalyzer {
+ import scala.reflect.internal.util.Statistics._
+ val prodConsAnalyzerTimer = newTimer("Time in ProdConsAnalyzer", "jvm")
+}
+
+/**
+ * A class for pseudo-instructions representing the initial producers of local values that have
+ * no producer instruction in the method:
+ * - parameters, including `this`
+ * - uninitialized local variables
+ * - exception values in handlers
+ *
+ * The ASM built-in SourceValue analysis yields an empty producers set for such values. This leads
+ * to ambiguities. Example (in Java one can re-assign a parameter):
+ *
+ * int foo(int a) {
+ * if (a == 0) a = 1;
+ * return a;
+ * }
+ *
+ * In the first frame of the method, the SourceValue for parameter `a` gives an empty set of
+ * producer instructions.
+ *
+ * In the frame of the `IRETURN` instruction, the SourceValue for parameter `a` lists a single
+ * producer instruction: the `ISTORE 1`. This makes it look as if there was a single producer for
+ * `a`, where in fact it might still hold the parameter's initial value.
+ */
+abstract class InitialProducer extends AbstractInsnNode(-1) {
+ override def getType: Int = throw new UnsupportedOperationException
+ override def clone(labels: util.Map[LabelNode, LabelNode]): AbstractInsnNode = throw new UnsupportedOperationException
+ override def accept(cv: MethodVisitor): Unit = throw new UnsupportedOperationException
+}
+
+case class ParameterProducer(local: Int) extends InitialProducer
+case class UninitializedLocalProducer(local: Int) extends InitialProducer
+case class ExceptionProducer[V <: Value](handlerFrame: Frame[V]) extends InitialProducer
+
+class InitialProducerSourceInterpreter extends SourceInterpreter {
+ override def newParameterValue(isInstanceMethod: Boolean, local: Int, tp: Type): SourceValue = {
+ new SourceValue(tp.getSize, ParameterProducer(local))
+ }
+
+ override def newEmptyNonParameterLocalValue(local: Int): SourceValue = {
+ new SourceValue(1, UninitializedLocalProducer(local))
+ }
+
+ override def newExceptionValue(tryCatchBlockNode: TryCatchBlockNode, handlerFrame: Frame[_ <: Value], exceptionType: Type): SourceValue = {
+ new SourceValue(1, ExceptionProducer(handlerFrame))
+ }
+} \ No newline at end of file
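A hypothetical usage sketch of the analyzer added above (not part of the patch): it builds a toy method with ASM's tree API and asks for direct and initial producers. It assumes the compiler's shaded ASM (scala.tools.asm, whose SourceInterpreter carries the newParameterValue / newEmptyNonParameterLocalValue / newExceptionValue hooks used here) and this analysis package are on the classpath; the internal name "p/C" is arbitrary.

    import scala.tools.asm.Opcodes._
    import scala.tools.asm.tree._
    import scala.tools.nsc.backend.jvm.analysis.ProdConsAnalyzer

    object ProdConsDemo extends App {
      // static int demo(int a) { int b = a; return b; }
      val m = new MethodNode(ACC_PUBLIC | ACC_STATIC, "demo", "(I)I", null, null)
      val loadParam = new VarInsnNode(ILOAD, 0)
      val storeB    = new VarInsnNode(ISTORE, 1)
      val loadB     = new VarInsnNode(ILOAD, 1)
      val ret       = new InsnNode(IRETURN)
      List(loadParam, storeB, loadB, ret) foreach (insn => m.instructions.add(insn))
      m.maxLocals = 2
      m.maxStack = 1

      val prodCons = new ProdConsAnalyzer(m, "p/C")
      // The direct producer of local slot 1 at the second ILOAD is the ISTORE instruction.
      println(prodCons.producersForValueAt(loadB, slot = 1) == Set(storeB)) // true
      // Tracked through the LOAD/STORE copies, the IRETURN ultimately consumes the parameter.
      println(prodCons.initialProducersForInputsOf(ret))                    // Set(ParameterProducer(0))
    }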
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/ByteCodeRepository.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/ByteCodeRepository.scala
index dbf19744fa..a5b85e54e7 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/opt/ByteCodeRepository.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/ByteCodeRepository.scala
@@ -102,7 +102,7 @@ class ByteCodeRepository(val classPath: ClassFileLookup[AbstractFile], val isJav
}
/**
- * The method node for a method matching `name` and `descriptor`, accessed in class `classInternalName`.
+ * The method node for a method matching `name` and `descriptor`, accessed in class `ownerInternalNameOrArrayDescriptor`.
* The declaration of the method may be in one of the parents.
*
* @return The [[MethodNode]] of the requested method and the [[InternalName]] of its declaring
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/BytecodeUtils.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/BytecodeUtils.scala
index 9bd016f964..df8dcc690a 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/opt/BytecodeUtils.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/BytecodeUtils.scala
@@ -12,9 +12,8 @@ import scala.collection.mutable
import scala.reflect.internal.util.Collections._
import scala.tools.asm.commons.CodeSizeEvaluator
import scala.tools.asm.tree.analysis._
-import scala.tools.asm.{MethodWriter, ClassWriter, Label, Opcodes}
+import scala.tools.asm.{MethodWriter, ClassWriter, Label, Opcodes, Type}
import scala.tools.asm.tree._
-import scala.collection.convert.decorateAsScala._
import GenBCode._
import scala.collection.convert.decorateAsScala._
import scala.collection.convert.decorateAsJava._
@@ -73,11 +72,18 @@ object BytecodeUtils {
op >= Opcodes.IRETURN && op <= Opcodes.RETURN
}
- def isVarInstruction(instruction: AbstractInsnNode): Boolean = {
+ def isLoad(instruction: AbstractInsnNode): Boolean = {
+ val op = instruction.getOpcode
+ op >= Opcodes.ILOAD && op <= Opcodes.ALOAD
+ }
+
+ def isStore(instruction: AbstractInsnNode): Boolean = {
val op = instruction.getOpcode
- (op >= Opcodes.ILOAD && op <= Opcodes.ALOAD) || (op >= Opcodes.ISTORE && op <= Opcodes.ASTORE)
+ op >= Opcodes.ISTORE && op <= Opcodes.ASTORE
}
+ def isVarInstruction(instruction: AbstractInsnNode): Boolean = isLoad(instruction) || isStore(instruction)
+
def isExecutable(instruction: AbstractInsnNode): Boolean = instruction.getOpcode >= 0
def isConstructor(methodNode: MethodNode): Boolean = {
@@ -98,6 +104,8 @@ object BytecodeUtils {
def isStrictfpMethod(methodNode: MethodNode): Boolean = (methodNode.access & Opcodes.ACC_STRICT) != 0
+ def isReference(t: Type) = t.getSort == Type.OBJECT || t.getSort == Type.ARRAY
+
def nextExecutableInstruction(instruction: AbstractInsnNode, alsoKeep: AbstractInsnNode => Boolean = Set()): Option[AbstractInsnNode] = {
var result = instruction
do { result = result.getNext }
@@ -325,6 +333,26 @@ object BytecodeUtils {
}
/**
+ * This method is used by optimizer components to eliminate phantom values of instructions
+ * that load a value of type `Nothing$` or `Null$`. Such values on the stack don't interact well
+ * with stack map frames.
+ *
+ * For example, `opt.getOrElse(throw e)` is re-written to an invocation of the lambda body, a
+ * method with return type `Nothing$`. Similarly for `opt.getOrElse(null)` and `Null$`.
+ *
+ * During bytecode generation this is handled by BCodeBodyBuilder.adapt. See the comment in that
+ * method which explains the issue with such phantom values.
+ */
+ def fixLoadedNothingOrNullValue(loadedType: Type, loadInstr: AbstractInsnNode, methodNode: MethodNode, bTypes: BTypes): Unit = {
+ if (loadedType == bTypes.coreBTypes.RT_NOTHING.toASMType) {
+ methodNode.instructions.insert(loadInstr, new InsnNode(Opcodes.ATHROW))
+ } else if (loadedType == bTypes.coreBTypes.RT_NULL.toASMType) {
+ methodNode.instructions.insert(loadInstr, new InsnNode(Opcodes.ACONST_NULL))
+ methodNode.instructions.insert(loadInstr, new InsnNode(Opcodes.POP))
+ }
+ }
+
+ /**
* A wrapper to make ASM's Analyzer a bit easier to use.
*/
class AsmAnalyzer[V <: Value](methodNode: MethodNode, classInternalName: InternalName, interpreter: Interpreter[V] = new BasicInterpreter) {
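To make the effect of fixLoadedNothingOrNullValue concrete: after an instruction that pushes a scala.runtime.Nothing$ value it inserts an ATHROW, and after one that pushes a scala.runtime.Null$ value it drops the phantom value and pushes a plain null. The sketch below performs the same two inserts by hand on a made-up method (the class name "p/C" and the method name "nullBody" are hypothetical). Note that because InsnList.insert places each new instruction directly after the load, inserting ACONST_NULL first and POP second yields the order load; POP; ACONST_NULL.

    import scala.tools.asm.Opcodes._
    import scala.tools.asm.tree._

    object PhantomValueFix extends App {
      val m = new MethodNode(ACC_PUBLIC | ACC_STATIC, "demo", "()Ljava/lang/Object;", null, null)
      // A call whose (phantom) result type is scala.runtime.Null$:
      val call = new MethodInsnNode(INVOKESTATIC, "p/C", "nullBody", "()Lscala/runtime/Null$;", false)
      m.instructions.add(call)
      m.instructions.add(new InsnNode(ARETURN))

      // What fixLoadedNothingOrNullValue does for a Null$ value:
      m.instructions.insert(call, new InsnNode(ACONST_NULL))
      m.instructions.insert(call, new InsnNode(POP))
      // Resulting sequence: INVOKESTATIC nullBody; POP; ACONST_NULL; ARETURN
    }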
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/CallGraph.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/CallGraph.scala
index 0932564b1f..96455c0e38 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/opt/CallGraph.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/CallGraph.scala
@@ -9,8 +9,9 @@ package opt
import scala.reflect.internal.util.{NoPosition, Position}
import scala.tools.asm.tree.analysis.{Value, Analyzer, BasicInterpreter}
-import scala.tools.asm.{Opcodes, Type}
+import scala.tools.asm.{Opcodes, Type, Handle}
import scala.tools.asm.tree._
+import scala.collection.concurrent
import scala.collection.convert.decorateAsScala._
import scala.tools.nsc.backend.jvm.BTypes.InternalName
import scala.tools.nsc.backend.jvm.BackendReporting._
@@ -21,14 +22,25 @@ import BytecodeUtils._
class CallGraph[BT <: BTypes](val btypes: BT) {
import btypes._
- val callsites: collection.concurrent.Map[MethodInsnNode, Callsite] = recordPerRunCache(collection.concurrent.TrieMap.empty[MethodInsnNode, Callsite])
+ val callsites: concurrent.Map[MethodInsnNode, Callsite] = recordPerRunCache(concurrent.TrieMap.empty)
+
+ val closureInstantiations: concurrent.Map[InvokeDynamicInsnNode, ClosureInstantiation] = recordPerRunCache(concurrent.TrieMap.empty)
def addClass(classNode: ClassNode): Unit = {
- for (m <- classNode.methods.asScala; callsite <- analyzeCallsites(m, classBTypeFromClassNode(classNode)))
- callsites(callsite.callsiteInstruction) = callsite
+ val classType = classBTypeFromClassNode(classNode)
+ for {
+ m <- classNode.methods.asScala
+ (calls, closureInits) = analyzeCallsites(m, classType)
+ } {
+ calls foreach (callsite => callsites(callsite.callsiteInstruction) = callsite)
+ closureInits foreach (lmf => closureInstantiations(lmf.indy) = ClosureInstantiation(lmf, m, classType))
+ }
}
- def analyzeCallsites(methodNode: MethodNode, definingClass: ClassBType): List[Callsite] = {
+ /**
+ * Returns a list of callsites in the method, plus a list of closure instantiation indy instructions.
+ */
+ def analyzeCallsites(methodNode: MethodNode, definingClass: ClassBType): (List[Callsite], List[LambdaMetaFactoryCall]) = {
case class CallsiteInfo(safeToInline: Boolean, safeToRewrite: Boolean,
annotatedInline: Boolean, annotatedNoInline: Boolean,
@@ -116,7 +128,10 @@ class CallGraph[BT <: BTypes](val btypes: BT) {
case _ => false
}
- methodNode.instructions.iterator.asScala.collect({
+ val callsites = new collection.mutable.ListBuffer[Callsite]
+ val closureInstantiations = new collection.mutable.ListBuffer[LambdaMetaFactoryCall]
+
+ methodNode.instructions.iterator.asScala foreach {
case call: MethodInsnNode =>
val callee: Either[OptimizerWarning, Callee] = for {
(method, declarationClass) <- byteCodeRepository.methodNode(call.owner, call.name, call.desc): Either[OptimizerWarning, (MethodNode, InternalName)]
@@ -147,7 +162,7 @@ class CallGraph[BT <: BTypes](val btypes: BT) {
receiverNotNullByAnalysis(call, numArgs)
}
- Callsite(
+ callsites += Callsite(
callsiteInstruction = call,
callsiteMethod = methodNode,
callsiteClass = definingClass,
@@ -157,7 +172,14 @@ class CallGraph[BT <: BTypes](val btypes: BT) {
receiverKnownNotNull = receiverNotNull,
callsitePosition = callsitePositions.getOrElse(call, NoPosition)
)
- }).toList
+
+ case LambdaMetaFactoryCall(indy, samMethodType, implMethod, instantiatedMethodType) =>
+ closureInstantiations += LambdaMetaFactoryCall(indy, samMethodType, implMethod, instantiatedMethodType)
+
+ case _ =>
+ }
+
+ (callsites.toList, closureInstantiations.toList)
}
/**
@@ -201,7 +223,7 @@ class CallGraph[BT <: BTypes](val btypes: BT) {
* @param calleeDeclarationClass The class in which the callee is declared
* @param safeToInline True if the callee can be safely inlined: it cannot be overridden,
* and the inliner settings (project / global) allow inlining it.
- * @param safeToRewrite True if the callee the interface method of a concrete trait method
+ * @param safeToRewrite True if the callee is the interface method of a concrete trait method
* that can be safely re-written to the static implementation method.
* @param annotatedInline True if the callee is annotated @inline
* @param annotatedNoInline True if the callee is annotated @noinline
@@ -214,4 +236,82 @@ class CallGraph[BT <: BTypes](val btypes: BT) {
calleeInfoWarning: Option[CalleeInfoWarning]) {
assert(!(safeToInline && safeToRewrite), s"A callee of ${callee.name} can be either safeToInline or safeToRewrite, but not both.")
}
+
+ final case class ClosureInstantiation(lambdaMetaFactoryCall: LambdaMetaFactoryCall, ownerMethod: MethodNode, ownerClass: ClassBType) {
+ override def toString = s"ClosureInstantiation($lambdaMetaFactoryCall, ${ownerMethod.name + ownerMethod.desc}, $ownerClass)"
+ }
+ final case class LambdaMetaFactoryCall(indy: InvokeDynamicInsnNode, samMethodType: Type, implMethod: Handle, instantiatedMethodType: Type)
+
+ object LambdaMetaFactoryCall {
+ private val lambdaMetaFactoryInternalName: InternalName = "java/lang/invoke/LambdaMetafactory"
+
+ private val metafactoryHandle = {
+ val metafactoryMethodName: String = "metafactory"
+ val metafactoryDesc: String = "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodType;Ljava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"
+ new Handle(Opcodes.H_INVOKESTATIC, lambdaMetaFactoryInternalName, metafactoryMethodName, metafactoryDesc)
+ }
+
+ private val altMetafactoryHandle = {
+ val altMetafactoryMethodName: String = "altMetafactory"
+ val altMetafactoryDesc: String = "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;[Ljava/lang/Object;)Ljava/lang/invoke/CallSite;"
+ new Handle(Opcodes.H_INVOKESTATIC, lambdaMetaFactoryInternalName, altMetafactoryMethodName, altMetafactoryDesc)
+ }
+
+ def unapply(insn: AbstractInsnNode): Option[(InvokeDynamicInsnNode, Type, Handle, Type)] = insn match {
+ case indy: InvokeDynamicInsnNode if indy.bsm == metafactoryHandle || indy.bsm == altMetafactoryHandle =>
+ indy.bsmArgs match {
+ case Array(samMethodType: Type, implMethod: Handle, instantiatedMethodType: Type, xs@_*) => // xs binding because IntelliJ gets confused about _@_*
+ // LambdaMetaFactory performs a number of automatic adaptations when invoking the lambda
+ // implementation method (casting, boxing, unboxing, and primitive widening, see Javadoc).
+ //
+ // The closure optimizer supports only one of those adaptations: it will cast arguments
+ // to the correct type when re-writing a closure call to the body method. Example:
+ //
+ // val fun: String => String = l => l
+ // val l = List("")
+ // fun(l.head)
+ //
+ // The samMethodType of Function1 is `(Object)Object`, while the instantiatedMethodType
+ // is `(String)String`. The return type of `List.head` is `Object`.
+ //
+ // The implMethod has the signature `C$anonfun(String)String`.
+ //
+ // At the closure callsite, we have an `INVOKEINTERFACE Function1.apply (Object)Object`,
+ // so the object returned by `List.head` can be directly passed into the call (no cast).
+ //
+ // The closure object will cast the object to String before passing it to the implMethod.
+ //
+ // When re-writing the closure callsite to the implMethod, we have to insert a cast.
+ //
+ // The check below ensures that
+ // (1) the implMethod type has the expected signature (captured types plus argument types
+ // from instantiatedMethodType)
+ // (2) the receiver of the implMethod matches the first captured type
+ // (3) all parameters that are not the same in samMethodType and instantiatedMethodType
+ // are reference types, so that we can insert casts to perform the same adaptation
+ // that the closure object would.
+
+ val isStatic = implMethod.getTag == Opcodes.H_INVOKESTATIC
+ val indyParamTypes = Type.getArgumentTypes(indy.desc)
+ val instantiatedMethodArgTypes = instantiatedMethodType.getArgumentTypes
+ val expectedImplMethodType = {
+ val paramTypes = (if (isStatic) indyParamTypes else indyParamTypes.tail) ++ instantiatedMethodArgTypes
+ Type.getMethodType(instantiatedMethodType.getReturnType, paramTypes: _*)
+ }
+
+ val isIndyLambda = (
+ Type.getType(implMethod.getDesc) == expectedImplMethodType // (1)
+ && (isStatic || implMethod.getOwner == indyParamTypes(0).getInternalName) // (2)
+ && samMethodType.getArgumentTypes.corresponds(instantiatedMethodArgTypes)((samArgType, instArgType) =>
+ samArgType == instArgType || isReference(samArgType) && isReference(instArgType)) // (3)
+ )
+
+ if (isIndyLambda) Some((indy, samMethodType, implMethod, instantiatedMethodType))
+ else None
+
+ case _ => None
+ }
+ case _ => None
+ }
+ }
}
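A hedged sketch of how the new LambdaMetaFactoryCall extractor can be used outside of analyzeCallsites (the helper name lambdaIndysIn and its parameters are made up, not compiler API): because the extractor is a member of CallGraph, it comes into scope by importing from a concrete callGraph instance.

    import scala.collection.convert.decorateAsScala._
    import scala.tools.asm.tree.{InvokeDynamicInsnNode, MethodNode}
    import scala.tools.nsc.backend.jvm.BTypes
    import scala.tools.nsc.backend.jvm.opt.CallGraph

    def lambdaIndysIn(callGraph: CallGraph[_ <: BTypes], methodNode: MethodNode): List[InvokeDynamicInsnNode] = {
      import callGraph._
      methodNode.instructions.iterator.asScala.collect({
        // Matches only indy instructions whose bootstrap method is the JDK LambdaMetafactory
        // and whose bootstrap arguments pass the shape checks (1)-(3) documented above.
        case LambdaMetaFactoryCall(indy, _, _, _) => indy
      }).toList
    }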
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/ClosureOptimizer.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/ClosureOptimizer.scala
new file mode 100644
index 0000000000..92b9b34006
--- /dev/null
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/ClosureOptimizer.scala
@@ -0,0 +1,373 @@
+/* NSC -- new Scala compiler
+ * Copyright 2005-2015 LAMP/EPFL
+ * @author Martin Odersky
+ */
+
+package scala.tools.nsc
+package backend.jvm
+package opt
+
+import scala.annotation.switch
+import scala.collection.immutable
+import scala.reflect.internal.util.NoPosition
+import scala.tools.asm.{Type, Opcodes}
+import scala.tools.asm.tree._
+import scala.tools.nsc.backend.jvm.BTypes.InternalName
+import scala.tools.nsc.backend.jvm.analysis.ProdConsAnalyzer
+import BytecodeUtils._
+import BackendReporting._
+import Opcodes._
+import scala.tools.nsc.backend.jvm.opt.ByteCodeRepository.CompilationUnit
+import scala.collection.convert.decorateAsScala._
+
+class ClosureOptimizer[BT <: BTypes](val btypes: BT) {
+ import btypes._
+ import callGraph._
+
+ /**
+ * If a closure is allocated and invoked within the same method, re-write the invocation to the
+ * closure body method.
+ *
+ * Note that the closure body method (generated by delambdafy:method) takes additional parameters
+ * for the values captured by the closure. The bytecode is transformed from
+ *
+ * [generate captured values]
+ * [closure init, capturing values]
+ * [...]
+ * [load closure object]
+ * [generate closure invocation arguments]
+ * [invoke closure.apply]
+ *
+ * to
+ *
+ * [generate captured values]
+ * [store captured values into new locals]
+ * [load the captured values from locals] // a future optimization will eliminate the closure
+ * [closure init, capturing values] // instantiation if the closure object becomes unused
+ * [...]
+ * [load closure object]
+ * [generate closure invocation arguments]
+ * [store argument values into new locals]
+ * [drop the closure object]
+ * [load captured values from locals]
+ * [load argument values from locals]
+ * [invoke the closure body method]
+ */
+ def rewriteClosureApplyInvocations(): Unit = {
+ implicit object closureInitOrdering extends Ordering[ClosureInstantiation] {
+ override def compare(x: ClosureInstantiation, y: ClosureInstantiation): Int = {
+ val cls = x.ownerClass.internalName compareTo y.ownerClass.internalName
+ if (cls != 0) return cls
+
+ val mName = x.ownerMethod.name compareTo y.ownerMethod.name
+ if (mName != 0) return mName
+
+ val mDesc = x.ownerMethod.desc compareTo y.ownerMethod.desc
+ if (mDesc != 0) return mDesc
+
+ def pos(inst: ClosureInstantiation) = inst.ownerMethod.instructions.indexOf(inst.lambdaMetaFactoryCall.indy)
+ pos(x) - pos(y)
+ }
+ }
+
+ // Grouping the closure instantiations by method allows running the ProdConsAnalyzer only once per
+ // method. Also sort the instantiations: If there are multiple closure instantiations in a method,
+ // closure invocations need to be re-written in a consistent order for bytecode stability. The local
+ // variable slots for storing captured values depend on the order of rewriting.
+ val closureInstantiationsByMethod: Map[MethodNode, immutable.TreeSet[ClosureInstantiation]] = {
+ closureInstantiations.values.groupBy(_.ownerMethod).mapValues(immutable.TreeSet.empty ++ _)
+ }
+
+ // For each closure instantiation, a list of callsites of the closure that can be re-written
+ // If a callsite cannot be rewritten, for example because the lambda body method is not accessible,
+ // a warning is returned instead.
+ val callsitesToRewrite: List[(ClosureInstantiation, List[Either[RewriteClosureApplyToClosureBodyFailed, (MethodInsnNode, Int)]])] = {
+ closureInstantiationsByMethod.iterator.flatMap({
+ case (methodNode, closureInits) =>
+ // A lazy val to ensure the analysis only runs if necessary (the value is passed by name to `closureCallsites`)
+ lazy val prodCons = new ProdConsAnalyzer(methodNode, closureInits.head.ownerClass.internalName)
+ closureInits.iterator.map(init => (init, closureCallsites(init, prodCons)))
+ }).toList // mapping to a list (not a map) to keep the sorting of closureInstantiationsByMethod
+ }
+
+ // Rewrite all closure callsites (or issue inliner warnings for those that cannot be rewritten)
+ for ((closureInit, callsites) <- callsitesToRewrite) {
+ // Local variables that hold the captured values and the closure invocation arguments.
+ // They are lazy vals to ensure that locals for captured values are only allocated if there's
+ // actually a callsite to rewrite (and not only warnings to be issued).
+ lazy val (localsForCapturedValues, argumentLocalsList) = localsForClosureRewrite(closureInit)
+ for (callsite <- callsites) callsite match {
+ case Left(warning) =>
+ backendReporting.inlinerWarning(warning.pos, warning.toString)
+
+ case Right((invocation, stackHeight)) =>
+ rewriteClosureApplyInvocation(closureInit, invocation, stackHeight, localsForCapturedValues, argumentLocalsList)
+ }
+ }
+ }
+
+ /**
+ * Insert instructions to store the values captured by a closure instantiation into local variables,
+ * and load the values back to the stack.
+ *
+ * Returns the list of locals holding those captured values, and a list of locals that should be
+ * used at the closure invocation callsite to store the arguments passed to the closure invocation.
+ */
+ private def localsForClosureRewrite(closureInit: ClosureInstantiation): (LocalsList, LocalsList) = {
+ val ownerMethod = closureInit.ownerMethod
+ val captureLocals = storeCaptures(closureInit)
+
+ // allocate locals for storing the arguments of the closure apply callsites.
+ // if there are multiple callsites, the same locals are re-used.
+ val argTypes = closureInit.lambdaMetaFactoryCall.samMethodType.getArgumentTypes
+ val firstArgLocal = ownerMethod.maxLocals
+
+ // The comment in the unapply method of `LambdaMetaFactoryCall` explains why we have to introduce
+ // casts for arguments that have different types in samMethodType and instantiatedMethodType.
+ val castLoadTypes = {
+ val instantiatedMethodType = closureInit.lambdaMetaFactoryCall.instantiatedMethodType
+ (argTypes, instantiatedMethodType.getArgumentTypes).zipped map {
+ case (samArgType, instantiatedArgType) if samArgType != instantiatedArgType =>
+ // the LambdaMetaFactoryCall extractor ensures that the two types are reference types,
+ // so we don't end up casting primitive values.
+ Some(instantiatedArgType)
+ case _ =>
+ None
+ }
+ }
+ val argLocals = LocalsList.fromTypes(firstArgLocal, argTypes, castLoadTypes)
+ ownerMethod.maxLocals = firstArgLocal + argLocals.size
+
+ (captureLocals, argLocals)
+ }
+
+ /**
+ * Find all callsites of a closure within the method where the closure is allocated.
+ */
+ private def closureCallsites(closureInit: ClosureInstantiation, prodCons: => ProdConsAnalyzer): List[Either[RewriteClosureApplyToClosureBodyFailed, (MethodInsnNode, Int)]] = {
+ val ownerMethod = closureInit.ownerMethod
+ val ownerClass = closureInit.ownerClass
+ val lambdaBodyHandle = closureInit.lambdaMetaFactoryCall.implMethod
+
+ ownerMethod.instructions.iterator.asScala.collect({
+ case invocation: MethodInsnNode if isSamInvocation(invocation, closureInit, prodCons) =>
+ // TODO: This is maybe over-cautious.
+ // We are checking if the closure body method is accessible at the closure callsite.
+ // If the closure allocation has access to the body method, then the callsite (in the same
+ // method as the allocation) should have access too.
+ val bodyAccessible: Either[OptimizerWarning, Boolean] = for {
+ (bodyMethodNode, declClass) <- byteCodeRepository.methodNode(lambdaBodyHandle.getOwner, lambdaBodyHandle.getName, lambdaBodyHandle.getDesc): Either[OptimizerWarning, (MethodNode, InternalName)]
+ isAccessible <- inliner.memberIsAccessible(bodyMethodNode.access, classBTypeFromParsedClassfile(declClass), classBTypeFromParsedClassfile(lambdaBodyHandle.getOwner), ownerClass)
+ } yield {
+ isAccessible
+ }
+
+ def pos = callGraph.callsites.get(invocation).map(_.callsitePosition).getOrElse(NoPosition)
+ val stackSize: Either[RewriteClosureApplyToClosureBodyFailed, Int] = bodyAccessible match {
+ case Left(w) => Left(RewriteClosureAccessCheckFailed(pos, w))
+ case Right(false) => Left(RewriteClosureIllegalAccess(pos, ownerClass.internalName))
+ case _ => Right(prodCons.frameAt(invocation).getStackSize)
+ }
+
+ stackSize.right.map((invocation, _))
+ }).toList
+ }
+
+ private def isSamInvocation(invocation: MethodInsnNode, closureInit: ClosureInstantiation, prodCons: => ProdConsAnalyzer): Boolean = {
+ val indy = closureInit.lambdaMetaFactoryCall.indy
+ if (invocation.getOpcode == INVOKESTATIC) false
+ else {
+ def closureIsReceiver = {
+ val invocationFrame = prodCons.frameAt(invocation)
+ val receiverSlot = {
+ val numArgs = Type.getArgumentTypes(invocation.desc).length
+ invocationFrame.stackTop - numArgs
+ }
+ val receiverProducers = prodCons.initialProducersForValueAt(invocation, receiverSlot)
+ receiverProducers.size == 1 && receiverProducers.head == indy
+ }
+
+ invocation.name == indy.name && {
+ val indySamMethodDesc = closureInit.lambdaMetaFactoryCall.samMethodType.getDescriptor
+ indySamMethodDesc == invocation.desc
+ } &&
+ closureIsReceiver // most expensive check last
+ }
+ }
+
+ private def rewriteClosureApplyInvocation(closureInit: ClosureInstantiation, invocation: MethodInsnNode, stackHeight: Int, localsForCapturedValues: LocalsList, argumentLocalsList: LocalsList): Unit = {
+ val ownerMethod = closureInit.ownerMethod
+ val lambdaBodyHandle = closureInit.lambdaMetaFactoryCall.implMethod
+
+ // store arguments
+ insertStoreOps(invocation, ownerMethod, argumentLocalsList)
+
+ // drop the closure from the stack
+ ownerMethod.instructions.insertBefore(invocation, new InsnNode(POP))
+
+ // load captured values and arguments
+ insertLoadOps(invocation, ownerMethod, localsForCapturedValues)
+ insertLoadOps(invocation, ownerMethod, argumentLocalsList)
+
+ // update maxStack
+ val capturesStackSize = localsForCapturedValues.size
+ val invocationStackHeight = stackHeight + capturesStackSize - 1 // -1 because the closure is gone
+ if (invocationStackHeight > ownerMethod.maxStack)
+ ownerMethod.maxStack = invocationStackHeight
+
+ // replace the callsite with a new call to the body method
+ val bodyOpcode = (lambdaBodyHandle.getTag: @switch) match {
+ case H_INVOKEVIRTUAL => INVOKEVIRTUAL
+ case H_INVOKESTATIC => INVOKESTATIC
+ case H_INVOKESPECIAL => INVOKESPECIAL
+ case H_INVOKEINTERFACE => INVOKEINTERFACE
+ case H_NEWINVOKESPECIAL =>
+ val insns = ownerMethod.instructions
+ insns.insertBefore(invocation, new TypeInsnNode(NEW, lambdaBodyHandle.getOwner))
+ insns.insertBefore(invocation, new InsnNode(DUP))
+ INVOKESPECIAL
+ }
+ val isInterface = bodyOpcode == INVOKEINTERFACE
+ val bodyInvocation = new MethodInsnNode(bodyOpcode, lambdaBodyHandle.getOwner, lambdaBodyHandle.getName, lambdaBodyHandle.getDesc, isInterface)
+ ownerMethod.instructions.insertBefore(invocation, bodyInvocation)
+
+ val returnType = Type.getReturnType(lambdaBodyHandle.getDesc)
+ fixLoadedNothingOrNullValue(returnType, bodyInvocation, ownerMethod, btypes) // see comment of that method
+
+ ownerMethod.instructions.remove(invocation)
+
+ // update the call graph
+ val originalCallsite = callGraph.callsites.remove(invocation)
+
+ // the method node is needed for building the call graph entry
+ val bodyMethod = byteCodeRepository.methodNode(lambdaBodyHandle.getOwner, lambdaBodyHandle.getName, lambdaBodyHandle.getDesc)
+ def bodyMethodIsBeingCompiled = byteCodeRepository.classNodeAndSource(lambdaBodyHandle.getOwner).map(_._2 == CompilationUnit).getOrElse(false)
+ val bodyMethodCallsite = Callsite(
+ callsiteInstruction = bodyInvocation,
+ callsiteMethod = ownerMethod,
+ callsiteClass = closureInit.ownerClass,
+ callee = bodyMethod.map({
+ case (bodyMethodNode, bodyMethodDeclClass) => Callee(
+ callee = bodyMethodNode,
+ calleeDeclarationClass = classBTypeFromParsedClassfile(bodyMethodDeclClass),
+ safeToInline = compilerSettings.YoptInlineGlobal || bodyMethodIsBeingCompiled,
+ safeToRewrite = false, // the lambda body method is not a trait interface method
+ annotatedInline = false,
+ annotatedNoInline = false,
+ calleeInfoWarning = None)
+ }),
+ argInfos = Nil,
+ callsiteStackHeight = invocationStackHeight,
+ receiverKnownNotNull = true, // see below (*)
+ callsitePosition = originalCallsite.map(_.callsitePosition).getOrElse(NoPosition)
+ )
+ // (*) The documentation in class LambdaMetafactory says:
+ // "if implMethod corresponds to an instance method, the first capture argument
+ // (corresponding to the receiver) must be non-null"
+ // Explanation: If the lambda body method is non-static, the receiver is a captured
+ // value. It can only be captured within some instance method, so we know it's non-null.
+ callGraph.callsites(bodyInvocation) = bodyMethodCallsite
+ }
+
+ /**
+ * Stores the values captured by a closure creation into fresh local variables, and loads the
+ * values back onto the stack. Returns the list of locals holding the captured values.
+ */
+ private def storeCaptures(closureInit: ClosureInstantiation): LocalsList = {
+ val indy = closureInit.lambdaMetaFactoryCall.indy
+ val capturedTypes = Type.getArgumentTypes(indy.desc)
+ val firstCaptureLocal = closureInit.ownerMethod.maxLocals
+
+ // This could be optimized: in many cases the captured values are produced by LOAD instructions.
+ // If the variable is not modified within the method, we could avoid introducing yet another
+ // local. On the other hand, further optimizations (copy propagation, remove unused locals) will
+ // clean it up.
+
+ // Captured variables don't need to be cast when loaded at the callsite (castLoadTypes are None).
+ // This is checked in `isClosureInstantiation`: the types of the captured variables in the indy
+ // instruction match exactly the corresponding parameter types in the body method.
+ val localsForCaptures = LocalsList.fromTypes(firstCaptureLocal, capturedTypes, castLoadTypes = _ => None)
+ closureInit.ownerMethod.maxLocals = firstCaptureLocal + localsForCaptures.size
+
+ insertStoreOps(indy, closureInit.ownerMethod, localsForCaptures)
+ insertLoadOps(indy, closureInit.ownerMethod, localsForCaptures)
+
+ localsForCaptures
+ }
+
+ /**
+ * Insert store operations in front of the `before` instruction to copy stack values into the
+ * locals denoted by `localsList`.
+ *
+ * The lowest stack value is stored in the head of the locals list, so the last local is stored first.
+ */
+ private def insertStoreOps(before: AbstractInsnNode, methodNode: MethodNode, localsList: LocalsList) =
+ insertLocalValueOps(before, methodNode, localsList, store = true)
+
+ /**
+ * Insert load operations in front of the `before` instruction to copy the local values denoted
+ * by `localsList` onto the stack.
+ *
+ * The head of the locals list will be the lowest value on the stack, so the first local is loaded first.
+ */
+ private def insertLoadOps(before: AbstractInsnNode, methodNode: MethodNode, localsList: LocalsList) =
+ insertLocalValueOps(before, methodNode, localsList, store = false)
+
+ private def insertLocalValueOps(before: AbstractInsnNode, methodNode: MethodNode, localsList: LocalsList, store: Boolean): Unit = {
+ // If `store` is true, the first instruction needs to store into the last local of the `localsList`.
+ // Load instructions on the other hand are emitted in the order of the list.
+ // To avoid reversing the list, we use `insert(previousInstr)` for stores and `insertBefore(before)` for loads.
+ lazy val previous = before.getPrevious
+ for (l <- localsList.locals) {
+ val varOp = new VarInsnNode(if (store) l.storeOpcode else l.loadOpcode, l.local)
+ if (store) methodNode.instructions.insert(previous, varOp)
+ else methodNode.instructions.insertBefore(before, varOp)
+ if (!store) for (castType <- l.castLoadedValue)
+ methodNode.instructions.insert(varOp, new TypeInsnNode(CHECKCAST, castType.getInternalName))
+ }
+ }
+
+ /**
+ * A list of local variables. Each local stores information about its type, see class [[Local]].
+ */
+ case class LocalsList(locals: List[Local]) {
+ val size = locals.iterator.map(_.size).sum
+ }
+
+ object LocalsList {
+ /**
+ * A list of local variables starting at `firstLocal` that can hold values of the types in the
+ * `types` parameter.
+ *
+ * For example, `fromTypes(3, Array(Int, Long, String))` returns
+ * Local(3, intOpOffset) ::
+ * Local(4, longOpOffset) :: // note that this local occupies two slots, the next is at 6
+ * Local(6, refOpOffset) ::
+ * Nil
+ */
+ def fromTypes(firstLocal: Int, types: Array[Type], castLoadTypes: Int => Option[Type]): LocalsList = {
+ var sizeTwoOffset = 0
+ val locals: List[Local] = types.indices.map(i => {
+ // The ASM method `type.getOpcode` returns the opcode for operating on a value of `type`.
+ val offset = types(i).getOpcode(ILOAD) - ILOAD
+ val local = Local(firstLocal + i + sizeTwoOffset, offset, castLoadTypes(i))
+ if (local.size == 2) sizeTwoOffset += 1
+ local
+ })(collection.breakOut)
+ LocalsList(locals)
+ }
+ }
+
+ /**
+ * Stores a local variable index and the opcode offset required for operating on that variable.
+ *
+ * The xLOAD / xSTORE opcodes are in the following sequence: I, L, F, D, A, so the offset for
+ * a local variable holding a reference (`A`) is 4. See also method `getOpcode` in [[scala.tools.asm.Type]].
+ */
+ case class Local(local: Int, opcodeOffset: Int, castLoadedValue: Option[Type]) {
+ def size = if (loadOpcode == LLOAD || loadOpcode == DLOAD) 2 else 1
+
+ def loadOpcode = ILOAD + opcodeOffset
+ def storeOpcode = ISTORE + opcodeOffset
+ }
+}
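For a source-level picture of what rewriteClosureApplyInvocations targets, consider the illustrative method below, assuming the backend emits an invokedynamic-based closure for the lambda (the delambdafy:method scheme). The closure is allocated by an invokedynamic and both invocations of it happen in the same method, so each Function1.apply callsite can be rewritten into a direct call of the lambda body method, with the captured value loaded from the local introduced by storeCaptures. The class and method names are made up; the exact name and shape of the generated body method are compiler-internal.

    class Greeter {
      def greet(name: String, suffix: String): String = {
        // Closure instantiation: captures `suffix`; the lambda body is lifted to a method of
        // the enclosing class that takes the captured `suffix` as an extra parameter.
        val f: String => String = s => s + suffix
        // Both apply callsites sit in the same method as the instantiation, so after the
        // rewrite they call the body method directly; the Function1 object may then become
        // dead and eligible for elimination by a later optimization.
        f(f(name))
      }
    }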
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/Inliner.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/Inliner.scala
index b4f091b37f..8477f5461a 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/opt/Inliner.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/Inliner.scala
@@ -9,6 +9,7 @@ package opt
import scala.annotation.tailrec
import scala.tools.asm
+import asm.Handle
import asm.Opcodes._
import asm.tree._
import scala.collection.convert.decorateAsScala._
@@ -26,7 +27,8 @@ class Inliner[BT <: BTypes](val btypes: BT) {
def eliminateUnreachableCodeAndUpdateCallGraph(methodNode: MethodNode, definingClass: InternalName): Unit = {
localOpt.minimalRemoveUnreachableCode(methodNode, definingClass) foreach {
- case invocation: MethodInsnNode => callGraph.callsites.remove(invocation)
+ case invocation: MethodInsnNode => callGraph.callsites.remove(invocation)
+ case indy: InvokeDynamicInsnNode => callGraph.closureInstantiations.remove(indy)
case _ =>
}
}
@@ -432,7 +434,7 @@ class Inliner[BT <: BTypes](val btypes: BT) {
callsiteMethod.localVariables.addAll(cloneLocalVariableNodes(callee, labelsMap, callee.name + "_").asJava)
callsiteMethod.tryCatchBlocks.addAll(cloneTryCatchBlockNodes(callee, labelsMap).asJava)
- // Add all invocation instructions that were inlined to the call graph
+ // Add all invocation instructions and closure instantiations that were inlined to the call graph
callee.instructions.iterator().asScala foreach {
case originalCallsiteIns: MethodInsnNode =>
callGraph.callsites.get(originalCallsiteIns) match {
@@ -452,6 +454,15 @@ class Inliner[BT <: BTypes](val btypes: BT) {
case None =>
}
+ case indy: InvokeDynamicInsnNode =>
+ callGraph.closureInstantiations.get(indy) match {
+ case Some(closureInit) =>
+ val newIndy = instructionMap(indy).asInstanceOf[InvokeDynamicInsnNode]
+ callGraph.closureInstantiations(newIndy) = ClosureInstantiation(closureInit.lambdaMetaFactoryCall.copy(indy = newIndy), callsiteMethod, callsiteClass)
+
+ case None =>
+ }
+
case _ =>
}
// Remove the elided invocation from the call graph
@@ -529,98 +540,97 @@ class Inliner[BT <: BTypes](val btypes: BT) {
}
/**
- * Returns the first instruction in the `instructions` list that would cause a
- * [[java.lang.IllegalAccessError]] when inlined into the `destinationClass`.
- *
- * If validity of some instruction could not be checked because an error occurred, the instruction
- * is returned together with a warning message that describes the problem.
+ * Check if a type is accessible to some class, as defined in JVMS 5.4.4.
+ * (A1) C is public
+ * (A2) C and D are members of the same run-time package
*/
- def findIllegalAccess(instructions: InsnList, calleeDeclarationClass: ClassBType, destinationClass: ClassBType): Option[(AbstractInsnNode, Option[OptimizerWarning])] = {
-
- /**
- * Check if a type is accessible to some class, as defined in JVMS 5.4.4.
- * (A1) C is public
- * (A2) C and D are members of the same run-time package
- */
- def classIsAccessible(accessed: BType, from: ClassBType = destinationClass): Either[OptimizerWarning, Boolean] = (accessed: @unchecked) match {
- // TODO: A2 requires "same run-time package", which seems to be package + classloader (JMVS 5.3.). is the below ok?
- case c: ClassBType => c.isPublic.map(_ || c.packageInternalName == from.packageInternalName)
- case a: ArrayBType => classIsAccessible(a.elementType, from)
- case _: PrimitiveBType => Right(true)
- }
+ def classIsAccessible(accessed: BType, from: ClassBType): Either[OptimizerWarning, Boolean] = (accessed: @unchecked) match {
+ // TODO: A2 requires "same run-time package", which seems to be package + classloader (JVMS 5.3). Is the below ok?
+ case c: ClassBType => c.isPublic.map(_ || c.packageInternalName == from.packageInternalName)
+ case a: ArrayBType => classIsAccessible(a.elementType, from)
+ case _: PrimitiveBType => Right(true)
+ }
- /**
- * Check if a member reference is accessible from the [[destinationClass]], as defined in the
- * JVMS 5.4.4. Note that the class name in a field / method reference is not necessarily the
- * class in which the member is declared:
- *
- * class A { def f = 0 }; class B extends A { f }
- *
- * The INVOKEVIRTUAL instruction uses a method reference "B.f ()I". Therefore this method has
- * two parameters:
- *
- * @param memberDeclClass The class in which the member is declared (A)
- * @param memberRefClass The class used in the member reference (B)
- *
- * (B0) JVMS 5.4.3.2 / 5.4.3.3: when resolving a member of class C in D, the class C is resolved
- * first. According to 5.4.3.1, this requires C to be accessible in D.
- *
- * JVMS 5.4.4 summary: A field or method R is accessible to a class D (destinationClass) iff
- * (B1) R is public
- * (B2) R is protected, declared in C (memberDeclClass) and D is a subclass of C.
- * If R is not static, R must contain a symbolic reference to a class T (memberRefClass),
- * such that T is either a subclass of D, a superclass of D, or D itself.
- * Also (P) needs to be satisfied.
- * (B3) R is either protected or has default access and declared by a class in the same
- * run-time package as D.
- * If R is protected, also (P) needs to be satisfied.
- * (B4) R is private and is declared in D.
- *
- * (P) When accessing a protected instance member, the target object on the stack (the receiver)
- * has to be a subtype of D (destinationClass). This is enforced by classfile verification
- * (https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.10.1.8).
- *
- * TODO: we cannot currently implement (P) because we don't have the necessary information
- * available. Once we have a type propagation analysis implemented, we can extract the receiver
- * type from there (https://github.com/scala-opt/scala/issues/13).
- */
- def memberIsAccessible(memberFlags: Int, memberDeclClass: ClassBType, memberRefClass: ClassBType): Either[OptimizerWarning, Boolean] = {
- // TODO: B3 requires "same run-time package", which seems to be package + classloader (JMVS 5.3.). is the below ok?
- def samePackageAsDestination = memberDeclClass.packageInternalName == destinationClass.packageInternalName
- def targetObjectConformsToDestinationClass = false // needs type propagation analysis, see above
-
- def memberIsAccessibleImpl = {
- val key = (ACC_PUBLIC | ACC_PROTECTED | ACC_PRIVATE) & memberFlags
- key match {
- case ACC_PUBLIC => // B1
- Right(true)
-
- case ACC_PROTECTED => // B2
- val isStatic = (ACC_STATIC & memberFlags) != 0
- tryEither {
- val condB2 = destinationClass.isSubtypeOf(memberDeclClass).orThrow && {
- isStatic || memberRefClass.isSubtypeOf(destinationClass).orThrow || destinationClass.isSubtypeOf(memberRefClass).orThrow
- }
- Right(
- (condB2 || samePackageAsDestination /* B3 (protected) */) &&
- (isStatic || targetObjectConformsToDestinationClass) // (P)
- )
+ /**
+ * Check if a member reference is accessible from the [[destinationClass]], as defined in the
+ * JVMS 5.4.4. Note that the class name in a field / method reference is not necessarily the
+ * class in which the member is declared:
+ *
+ * class A { def f = 0 }; class B extends A { f }
+ *
+ * The INVOKEVIRTUAL instruction uses a method reference "B.f ()I". Therefore this method has
+ * two parameters:
+ *
+ * @param memberDeclClass The class in which the member is declared (A)
+ * @param memberRefClass The class used in the member reference (B)
+ *
+ * (B0) JVMS 5.4.3.2 / 5.4.3.3: when resolving a member of class C in D, the class C is resolved
+ * first. According to 5.4.3.1, this requires C to be accessible in D.
+ *
+ * JVMS 5.4.4 summary: A field or method R is accessible to a class D (destinationClass) iff
+ * (B1) R is public
+ * (B2) R is protected, declared in C (memberDeclClass) and D is a subclass of C.
+ * If R is not static, R must contain a symbolic reference to a class T (memberRefClass),
+ * such that T is either a subclass of D, a superclass of D, or D itself.
+ * Also (P) needs to be satisfied.
+ * (B3) R is either protected or has default access and declared by a class in the same
+ * run-time package as D.
+ * If R is protected, also (P) needs to be satisfied.
+ * (B4) R is private and is declared in D.
+ *
+ * (P) When accessing a protected instance member, the target object on the stack (the receiver)
+ * has to be a subtype of D (destinationClass). This is enforced by classfile verification
+ * (https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.10.1.8).
+ *
+ * TODO: we cannot currently implement (P) because we don't have the necessary information
+ * available. Once we have a type propagation analysis implemented, we can extract the receiver
+ * type from there (https://github.com/scala-opt/scala/issues/13).
+ */
+ def memberIsAccessible(memberFlags: Int, memberDeclClass: ClassBType, memberRefClass: ClassBType, from: ClassBType): Either[OptimizerWarning, Boolean] = {
+ // TODO: B3 requires "same run-time package", which seems to be package + classloader (JVMS 5.3). Is the below ok?
+ def samePackageAsDestination = memberDeclClass.packageInternalName == from.packageInternalName
+ def targetObjectConformsToDestinationClass = false // needs type propagation analysis, see above
+
+ def memberIsAccessibleImpl = {
+ val key = (ACC_PUBLIC | ACC_PROTECTED | ACC_PRIVATE) & memberFlags
+ key match {
+ case ACC_PUBLIC => // B1
+ Right(true)
+
+ case ACC_PROTECTED => // B2
+ val isStatic = (ACC_STATIC & memberFlags) != 0
+ tryEither {
+ val condB2 = from.isSubtypeOf(memberDeclClass).orThrow && {
+ isStatic || memberRefClass.isSubtypeOf(from).orThrow || from.isSubtypeOf(memberRefClass).orThrow
}
+ Right(
+ (condB2 || samePackageAsDestination /* B3 (protected) */) &&
+ (isStatic || targetObjectConformsToDestinationClass) // (P)
+ )
+ }
- case 0 => // B3 (default access)
- Right(samePackageAsDestination)
+ case 0 => // B3 (default access)
+ Right(samePackageAsDestination)
- case ACC_PRIVATE => // B4
- Right(memberDeclClass == destinationClass)
- }
+ case ACC_PRIVATE => // B4
+ Right(memberDeclClass == from)
}
+ }
- classIsAccessible(memberDeclClass) match { // B0
- case Right(true) => memberIsAccessibleImpl
- case r => r
- }
+ classIsAccessible(memberDeclClass, from) match { // B0
+ case Right(true) => memberIsAccessibleImpl
+ case r => r
}
+ }
+ /**
+ * Returns the first instruction in the `instructions` list that would cause a
+ * [[java.lang.IllegalAccessError]] when inlined into the `destinationClass`.
+ *
+ * If validity of some instruction could not be checked because an error occurred, the instruction
+ * is returned together with a warning message that describes the problem.
+ */
+ def findIllegalAccess(instructions: InsnList, calleeDeclarationClass: ClassBType, destinationClass: ClassBType): Option[(AbstractInsnNode, Option[OptimizerWarning])] = {
/**
* Check if `instruction` can be transplanted to `destinationClass`.
*
@@ -637,18 +647,18 @@ class Inliner[BT <: BTypes](val btypes: BT) {
// NEW, ANEWARRAY, CHECKCAST or INSTANCEOF. For these instructions, the reference
// "must be a symbolic reference to a class, array, or interface type" (JVMS 6), so
// it can be an internal name, or a full array descriptor.
- classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(ti.desc))
+ classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(ti.desc), destinationClass)
case ma: MultiANewArrayInsnNode =>
// "a symbolic reference to a class, array, or interface type"
- classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(ma.desc))
+ classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(ma.desc), destinationClass)
case fi: FieldInsnNode =>
val fieldRefClass = classBTypeFromParsedClassfile(fi.owner)
for {
(fieldNode, fieldDeclClassNode) <- byteCodeRepository.fieldNode(fieldRefClass.internalName, fi.name, fi.desc): Either[OptimizerWarning, (FieldNode, InternalName)]
fieldDeclClass = classBTypeFromParsedClassfile(fieldDeclClassNode)
- res <- memberIsAccessible(fieldNode.access, fieldDeclClass, fieldRefClass)
+ res <- memberIsAccessible(fieldNode.access, fieldDeclClass, fieldRefClass, destinationClass)
} yield {
res
}
@@ -664,7 +674,7 @@ class Inliner[BT <: BTypes](val btypes: BT) {
Right(destinationClass == calleeDeclarationClass)
case _ => // INVOKEVIRTUAL, INVOKESTATIC, INVOKEINTERFACE and INVOKESPECIAL of constructors
- memberIsAccessible(methodFlags, methodDeclClass, methodRefClass)
+ memberIsAccessible(methodFlags, methodDeclClass, methodRefClass, destinationClass)
}
}
@@ -678,12 +688,70 @@ class Inliner[BT <: BTypes](val btypes: BT) {
}
}
- case ivd: InvokeDynamicInsnNode =>
- // TODO @lry check necessary conditions to inline an indy, instead of giving up
- Right(false)
+ case _: InvokeDynamicInsnNode if destinationClass == calleeDeclarationClass =>
+ // within the same class, any indy instruction can be inlined
+ Right(true)
+
+ // does the InvokeDynamicInsnNode call LambdaMetaFactory?
+ case LambdaMetaFactoryCall(_, _, implMethod, _) =>
+ // an indy instr points to a "call site specifier" (CSP) [1]
+ // - a reference to a bootstrap method [2]
+ // - bootstrap method name
+ // - references to constant arguments, which can be:
+ // - constant (string, long, int, float, double)
+ // - class
+ // - method type (without name)
+ // - method handle
+ // - a method name+type
+ //
+ // execution [3]
+ // - resolve the CSP, yielding the bootstrap method handle, the static args and the name+type
+ // - resolution entails accessibility checking [4]
+ // - execute the `invoke` method of the bootstrap method handle (which is signature polymorphic, check its javadoc)
+ // - the descriptor for the call is made up from the actual arguments on the stack:
+ // - the first parameters are "MethodHandles.Lookup, String, MethodType", then the types of the constant arguments,
+ // - the return type is CallSite
+ // - the values for the call are
+ // - the bootstrap method handle of the CSP is the receiver
+ // - the Lookup object for the class in which the callsite occurs (obtained as if by calling MethodHandles.lookup())
+ // - the method name of the CSP
+ // - the method type of the CSP
+ // - the constants of the CSP (primitives are not boxed)
+ // - the resulting `CallSite` object
+ // - has as `type` the method type of the CSP
+ // - is popped from the operand stack
+ // - the `invokeExact` method (signature polymorphic!) of the `target` method handle of the CallSite is invoked
+ // - the method descriptor is that of the CSP
+ // - the receiver is the target of the CallSite
+ // - the other argument values are those that were on the operand stack at the indy instruction (indyLambda: the captured values)
+ //
+ // [1] http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.4.10
+ // [2] http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.23
+ // [3] http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-6.html#jvms-6.5.invokedynamic
+ // [4] http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-5.html#jvms-5.4.3
+
+ // We cannot generically check if an `invokedynamic` instruction can be safely inlined into
+ // a different class, that depends on the bootstrap method. The Lookup object passed to the
+ // bootstrap method is a capability to access private members of the callsite class. We can
+ // only move the invokedynamic to a new class if we know that the bootstrap method doesn't
+ // use this capability for otherwise non-accessible members.
+ // In the case of indyLambda, it depends on the visibility of the implMethod handle. If
+ // the implMethod is public, lambdaMetaFactory doesn't use the Lookup object's extended
+ // capability, and we can safely inline the instruction into a different class.
+
+ val methodRefClass = classBTypeFromParsedClassfile(implMethod.getOwner)
+ for {
+ (methodNode, methodDeclClassNode) <- byteCodeRepository.methodNode(methodRefClass.internalName, implMethod.getName, implMethod.getDesc): Either[OptimizerWarning, (MethodNode, InternalName)]
+ methodDeclClass = classBTypeFromParsedClassfile(methodDeclClassNode)
+ res <- memberIsAccessible(methodNode.access, methodDeclClass, methodRefClass, destinationClass)
+ } yield {
+ res
+ }
+
+ case _: InvokeDynamicInsnNode => Left(UnknownInvokeDynamicInstruction)
case ci: LdcInsnNode => ci.cst match {
- case t: asm.Type => classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(t.getInternalName))
+ case t: asm.Type => classIsAccessible(bTypeForDescriptorOrInternalNameFromClassfile(t.getInternalName), destinationClass)
case _ => Right(true)
}
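
A hedged sketch (not part of this patch) of the JVMS 5.4.4 summary (B1-B4) that `memberIsAccessible` above implements, reduced to a pure function over access flags: the subtype and same-run-time-package queries are passed in as plain booleans and the (P) receiver check is ignored. The object and parameter names are invented.

import scala.tools.asm.Opcodes._

object accessDemo {
  def memberAccessibleFrom(memberFlags: Int,
                           declaredInDestination: Boolean,
                           destinationIsSubclassOfDecl: Boolean,
                           sameRuntimePackage: Boolean): Boolean =
    (memberFlags & (ACC_PUBLIC | ACC_PROTECTED | ACC_PRIVATE)) match {
      case ACC_PUBLIC    => true                                              // B1
      case ACC_PROTECTED => destinationIsSubclassOfDecl || sameRuntimePackage // B2 / B3 (protected)
      case ACC_PRIVATE   => declaredInDestination                             // B4
      case _             => sameRuntimePackage                                // B3 (default access)
    }

  def main(args: Array[String]): Unit = {
    // private member not declared in the destination class: not accessible
    println(memberAccessibleFrom(ACC_PRIVATE, declaredInDestination = false,
      destinationIsSubclassOfDecl = false, sameRuntimePackage = true)) // false
  }
}
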
diff --git a/src/compiler/scala/tools/nsc/backend/jvm/opt/LocalOpt.scala b/src/compiler/scala/tools/nsc/backend/jvm/opt/LocalOpt.scala
index bd5bab28b5..4132710a96 100644
--- a/src/compiler/scala/tools/nsc/backend/jvm/opt/LocalOpt.scala
+++ b/src/compiler/scala/tools/nsc/backend/jvm/opt/LocalOpt.scala
@@ -31,7 +31,7 @@ import scala.tools.nsc.backend.jvm.opt.BytecodeUtils._
* catch block, and the recursive invocation is not necessary.
*
* simplify jumps
- * - various simplifications, see doc domments of individual optimizations
+ * - various simplifications, see doc comments of individual optimizations
* + changing or eliminating jumps may render some code unreachable, therefore "simplify jumps" is
* executed in a loop with "unreachable code"
*
@@ -495,7 +495,7 @@ object LocalOptImpls {
* Replace jumps to a sequence of GOTO instructions by a jump to the final destination.
*
* Jump l; [any ops]; l: GOTO m; [any ops]; m: GOTO n; [any ops]; n: NotGOTO; [...]
- * => Jump n; [rest unchaned]
+ * => Jump n; [rest unchanged]
*
* If there's a loop of GOTOs, the initial jump is replaced by one of the labels in the loop.
*/
diff --git a/src/compiler/scala/tools/nsc/backend/opt/ConstantOptimization.scala b/src/compiler/scala/tools/nsc/backend/opt/ConstantOptimization.scala
index 0e6ee76eb2..fb1799e092 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/ConstantOptimization.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/ConstantOptimization.scala
@@ -170,9 +170,11 @@ abstract class ConstantOptimization extends SubComponent {
// out all the possibilities
case Impossible(possible2) => (possible -- possible2).nonEmpty
})
- def mightNotEqual(other: Contents): Boolean = (this ne other) && (other match {
- // two Possibles might not be equal if either has possible members that the other doesn't
- case Possible(possible2) => (possible -- possible2).nonEmpty || (possible2 -- possible).nonEmpty
+ def mightNotEqual(other: Contents): Boolean = (other match {
+ case Possible(possible2) =>
+ // two Possibles must be equal if each is known to hold the same single value
+ val mustEqual = possible.size == 1 && possible == possible2
+ !mustEqual
case Impossible(_) => true
})
}
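
A hedged sketch (not part of this patch) of the corrected mightNotEqual reasoning, with the Possible contents modelled as plain Sets; `mightNotEqualDemo` is an invented name.

object mightNotEqualDemo {
  def mightNotEqual(possible: Set[Int], possible2: Set[Int]): Boolean = {
    // the two abstract values are guaranteed equal only if each is the same single value
    val mustEqual = possible.size == 1 && possible == possible2
    !mustEqual
  }

  def main(args: Array[String]): Unit = {
    println(mightNotEqual(Set(1), Set(1)))       // false: both are known to be 1
    println(mightNotEqual(Set(1), Set(1, 2)))    // true: the second value might be 2
    println(mightNotEqual(Set(1, 2), Set(1, 2))) // true: e.g. 1 on the left, 2 on the right
  }
}
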
diff --git a/src/compiler/scala/tools/nsc/backend/opt/InlineExceptionHandlers.scala b/src/compiler/scala/tools/nsc/backend/opt/InlineExceptionHandlers.scala
index 425c10d153..9f6883f03f 100644
--- a/src/compiler/scala/tools/nsc/backend/opt/InlineExceptionHandlers.scala
+++ b/src/compiler/scala/tools/nsc/backend/opt/InlineExceptionHandlers.scala
@@ -343,7 +343,7 @@ abstract class InlineExceptionHandlers extends SubComponent {
/**
* This function takes care of duplicating the basic block code for inlining the handler
*
- * Note: This function does not duplicate the same basic block twice. It wil contain a map of the duplicated
+ * Note: This function does not duplicate the same basic block twice. It will contain a map of the duplicated
* basic blocks
*/
private def duplicateExceptionHandlerCache(handler: BasicBlock) =
diff --git a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
index 9708cba281..eb25eb6e06 100644
--- a/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
+++ b/src/compiler/scala/tools/nsc/javac/JavaParsers.scala
@@ -370,7 +370,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
flags |= Flags.FINAL
in.nextToken()
case DEFAULT =>
- flags |= Flags.DEFAULTMETHOD
+ flags |= Flags.JAVA_DEFAULTMETHOD
in.nextToken()
case NATIVE =>
addAnnot(NativeAttr)
@@ -489,8 +489,8 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
val vparams = formalParams()
if (!isVoid) rtpt = optArrayBrackets(rtpt)
optThrows()
- val isStatic = mods hasFlag Flags.STATIC
- val bodyOk = !inInterface || ((mods hasFlag Flags.DEFAULTMETHOD) || isStatic)
+ val isConcreteInterfaceMethod = !inInterface || (mods hasFlag Flags.JAVA_DEFAULTMETHOD) || (mods hasFlag Flags.STATIC)
+ val bodyOk = !(mods1 hasFlag Flags.DEFERRED) && isConcreteInterfaceMethod
val body =
if (bodyOk && in.token == LBRACE) {
methodBody()
@@ -509,7 +509,9 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
EmptyTree
}
}
- if (inInterface && !isStatic) mods1 |= Flags.DEFERRED
+ // for abstract methods (of classes), the `DEFERRED` flag is already set.
+ // here we also set it for interface methods that are not static and not default.
+ if (!isConcreteInterfaceMethod) mods1 |= Flags.DEFERRED
List {
atPos(pos) {
DefDef(mods1, name.toTermName, tparams, List(vparams), rtpt, body)
@@ -749,7 +751,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
val (statics, body) = typeBody(AT, name)
val templ = makeTemplate(annotationParents, body)
addCompanionObject(statics, atPos(pos) {
- ClassDef(mods, name, List(), templ)
+ ClassDef(mods | Flags.JAVA_ANNOTATION, name, List(), templ)
})
}
@@ -807,7 +809,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
if (hasAbstractMember) Flags.ABSTRACT else 0l
}
addCompanionObject(consts ::: statics ::: predefs, atPos(pos) {
- ClassDef(mods | Flags.ENUM | finalFlag | abstractFlag, name, List(),
+ ClassDef(mods | Flags.JAVA_ENUM | finalFlag | abstractFlag, name, List(),
makeTemplate(superclazz :: interfaces, body))
})
}
@@ -828,7 +830,7 @@ trait JavaParsers extends ast.parser.ParsersCommon with JavaScanners {
skipAhead()
accept(RBRACE)
}
- ValDef(Modifiers(Flags.ENUM | Flags.STABLE | Flags.JAVA | Flags.STATIC), name.toTermName, enumType, blankExpr)
+ ValDef(Modifiers(Flags.JAVA_ENUM | Flags.STABLE | Flags.JAVA | Flags.STATIC), name.toTermName, enumType, blankExpr)
}
(res, hasClassBody)
}
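
A hedged sketch (not part of this patch) of the decision made above for Java interface members, using plain booleans instead of the parser's flag sets; the names are invented.

object javaMethodBodyDemo {
  // a method may have a body only if it is not abstract and, when declared in an
  // interface, only if it is a default or static method
  def bodyAllowed(inInterface: Boolean, isDefault: Boolean, isStatic: Boolean, isAbstract: Boolean): Boolean = {
    val concreteInterfaceMethod = !inInterface || isDefault || isStatic
    !isAbstract && concreteInterfaceMethod
  }

  def main(args: Array[String]): Unit = {
    println(bodyAllowed(inInterface = true, isDefault = true,  isStatic = false, isAbstract = false)) // true
    println(bodyAllowed(inInterface = true, isDefault = false, isStatic = false, isAbstract = false)) // false: plain interface method, stays DEFERRED
  }
}
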
diff --git a/src/compiler/scala/tools/nsc/plugins/Plugin.scala b/src/compiler/scala/tools/nsc/plugins/Plugin.scala
index 1a5529140c..dd17750cd4 100644
--- a/src/compiler/scala/tools/nsc/plugins/Plugin.scala
+++ b/src/compiler/scala/tools/nsc/plugins/Plugin.scala
@@ -158,8 +158,8 @@ object Plugin {
def loop(qs: List[Path]): Try[PluginDescription] = qs match {
case Nil => Failure(new MissingPluginException(ps))
case p :: rest =>
- if (p.isDirectory) loadDescriptionFromFile(p.toDirectory / PluginXML)
- else if (p.isFile) loadDescriptionFromJar(p.toFile)
+ if (p.isDirectory) loadDescriptionFromFile(p.toDirectory / PluginXML) orElse loop(rest)
+ else if (p.isFile) loadDescriptionFromJar(p.toFile) orElse loop(rest)
else loop(rest)
}
loop(ps)
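
A hedged sketch (not part of this patch) of the fallback pattern introduced above, where a failing candidate no longer aborts the search but falls through to the remaining paths; plain strings stand in for Path and PluginDescription.

import scala.util.{Failure, Success, Try}

object pluginLookupDemo {
  def load(candidate: String): Try[String] =
    if (candidate.endsWith(".jar")) Success(s"descriptor from $candidate")
    else Failure(new RuntimeException(s"no descriptor in $candidate"))

  def loop(candidates: List[String]): Try[String] = candidates match {
    case Nil       => Failure(new RuntimeException("missing plugin"))
    case p :: rest => load(p) orElse loop(rest) // keep searching instead of failing on the first miss
  }

  def main(args: Array[String]): Unit =
    println(loop(List("dir-without-descriptor", "scalac-plugin.jar"))) // Success(descriptor from scalac-plugin.jar)
}
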
diff --git a/src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala b/src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala
index 5bf611a7b0..4bf92fd1fb 100644
--- a/src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala
+++ b/src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala
@@ -11,8 +11,7 @@ import java.io.{ BufferedReader, IOException, PrintWriter }
import scala.reflect.internal.util._
import StringOps._
-/**
- * This class implements a Reporter that displays messages on a text console.
+/** This class implements a Reporter that displays messages on a text console.
*/
class ConsoleReporter(val settings: Settings, reader: BufferedReader, writer: PrintWriter) extends AbstractReporter {
def this(settings: Settings) = this(settings, Console.in, new PrintWriter(Console.err, true))
@@ -85,5 +84,7 @@ class ConsoleReporter(val settings: Settings, reader: BufferedReader, writer: Pr
}
}
- override def flush() { writer.flush() }
+ override def flush() = writer.flush()
+
+ override def finish() = printSummary()
}
diff --git a/src/compiler/scala/tools/nsc/reporters/Reporter.scala b/src/compiler/scala/tools/nsc/reporters/Reporter.scala
index 3d688efae1..bd438f0e75 100644
--- a/src/compiler/scala/tools/nsc/reporters/Reporter.scala
+++ b/src/compiler/scala/tools/nsc/reporters/Reporter.scala
@@ -20,7 +20,7 @@ abstract class Reporter extends scala.reflect.internal.Reporter {
/** Informational messages. If `!force`, they may be suppressed. */
final def info(pos: Position, msg: String, force: Boolean): Unit = info0(pos, msg, INFO, force)
- /** For sending a message which should not be labeled as a warning/error,
+ /** For sending a message which should not be labelled as a warning/error,
* but also shouldn't require -verbose to be visible.
*/
def echo(msg: String): Unit = info(NoPosition, msg, force = true)
diff --git a/src/compiler/scala/tools/nsc/settings/ScalaSettings.scala b/src/compiler/scala/tools/nsc/settings/ScalaSettings.scala
index c83cc28e2a..3422167d02 100644
--- a/src/compiler/scala/tools/nsc/settings/ScalaSettings.scala
+++ b/src/compiler/scala/tools/nsc/settings/ScalaSettings.scala
@@ -22,13 +22,9 @@ trait ScalaSettings extends AbsScalaSettings
/** Set of settings */
protected[scala] lazy val allSettings = mutable.HashSet[Setting]()
- /** Against my better judgment, giving in to martin here and allowing
- * CLASSPATH to be used automatically. So for the user-specified part
- * of the classpath:
- *
- * - If -classpath or -cp is given, it is that
- * - Otherwise, if CLASSPATH is set, it is that
- * - If neither of those, then "." is used.
+ /** The user class path, specified by `-classpath` or `-cp`,
+ * defaults to the value of CLASSPATH env var if it is set, as in Java,
+ * or else to `"."` for the current user directory.
*/
protected def defaultClasspath = sys.env.getOrElse("CLASSPATH", ".")
@@ -134,8 +130,9 @@ trait ScalaSettings extends AbsScalaSettings
val Xshowobj = StringSetting ("-Xshow-object", "object", "Show internal representation of object.", "")
val showPhases = BooleanSetting ("-Xshow-phases", "Print a synopsis of compiler phases.")
val sourceReader = StringSetting ("-Xsource-reader", "classname", "Specify a custom method for reading source files.", "")
+ val reporter = StringSetting ("-Xreporter", "classname", "Specify a custom reporter for compiler messages.", "scala.tools.nsc.reporters.ConsoleReporter")
val strictInference = BooleanSetting ("-Xstrict-inference", "Don't infer known-unsound types")
- val source = ScalaVersionSetting ("-Xsource", "version", "Treat compiler input as Scala source for the specified version, see SI-8126.", initial = ScalaVersion("2.11"))
+ val source = ScalaVersionSetting ("-Xsource", "version", "Treat compiler input as Scala source for the specified version, see SI-8126.", initial = ScalaVersion("2.12"))
val XnoPatmatAnalysis = BooleanSetting ("-Xno-patmat-analysis", "Don't perform exhaustivity/unreachability analysis. Also, ignore @switch annotation.")
val XfullLubs = BooleanSetting ("-Xfull-lubs", "Retains pre 2.10 behavior of less aggressive truncation of least upper bounds.")
@@ -143,7 +140,7 @@ trait ScalaSettings extends AbsScalaSettings
// XML parsing options
object XxmlSettings extends MultiChoiceEnumeration {
val coalescing = Choice("coalescing", "Convert PCData to Text and coalesce sibling nodes")
- def isCoalescing = (Xxml contains coalescing) || (!isScala212 && !Xxml.isSetByUser)
+ def isCoalescing = Xxml contains coalescing
}
val Xxml = MultiChoiceSetting(
name = "-Xxml",
@@ -234,6 +231,7 @@ trait ScalaSettings extends AbsScalaSettings
val emptyLabels = Choice("empty-labels", "Eliminate and collapse redundant labels in the bytecode.")
val compactLocals = Choice("compact-locals", "Eliminate empty slots in the sequence of local variables.")
val nullnessTracking = Choice("nullness-tracking", "Track nullness / non-nullness of local variables and apply optimizations.")
+ val closureElimination = Choice("closure-elimination" , "Rewrite closure invocations to the implementation method and eliminate closures.")
val inlineProject = Choice("inline-project", "Inline only methods defined in the files being compiled.")
val inlineGlobal = Choice("inline-global", "Inline methods from any source, including classfiles on the compile classpath.")
@@ -242,7 +240,7 @@ trait ScalaSettings extends AbsScalaSettings
private val defaultChoices = List(unreachableCode)
val lDefault = Choice("l:default", "Enable default optimizations: "+ defaultChoices.mkString(","), expandsTo = defaultChoices)
- private val methodChoices = List(unreachableCode, simplifyJumps, emptyLineNumbers, emptyLabels, compactLocals, nullnessTracking)
+ private val methodChoices = List(unreachableCode, simplifyJumps, emptyLineNumbers, emptyLabels, compactLocals, nullnessTracking, closureElimination)
val lMethod = Choice("l:method", "Enable intra-method optimizations: "+ methodChoices.mkString(","), expandsTo = methodChoices)
private val projectChoices = List(lMethod, inlineProject)
@@ -265,11 +263,15 @@ trait ScalaSettings extends AbsScalaSettings
def YoptEmptyLabels = Yopt.contains(YoptChoices.emptyLabels)
def YoptCompactLocals = Yopt.contains(YoptChoices.compactLocals)
def YoptNullnessTracking = Yopt.contains(YoptChoices.nullnessTracking)
+ def YoptClosureElimination = Yopt.contains(YoptChoices.closureElimination)
def YoptInlineProject = Yopt.contains(YoptChoices.inlineProject)
def YoptInlineGlobal = Yopt.contains(YoptChoices.inlineGlobal)
def YoptInlinerEnabled = YoptInlineProject || YoptInlineGlobal
+ def YoptBuildCallGraph = YoptInlinerEnabled || YoptClosureElimination
+ def YoptAddToBytecodeRepository = YoptInlinerEnabled || YoptClosureElimination
+
val YoptInlineHeuristics = ChoiceSetting(
name = "-Yopt-inline-heuristics",
helpArg = "strategy",
@@ -382,6 +384,23 @@ trait ScalaSettings extends AbsScalaSettings
val Normal = "normal"
val Discard = "discard"
}
+
+ def conflictWarning: Option[String] = {
+ def oldOptimiseFlagsInGenBCode: Option[String] = {
+ val optFlags: List[Setting] = if (optimise.value) List(optimise) else optimiseSettings.filter(_.value)
+ if (isBCodeActive && optFlags.nonEmpty) {
+ val msg = s"""Compiler settings for the 2.11 optimizer (${optFlags.map(_.name).mkString(", ")}) are incompatible with -Ybackend:GenBCode (which is the default in 2.12).
+ |The optimizer settings are ignored. See -Yopt:help for enabling the new optimizer in 2.12.""".stripMargin
+ Some(msg)
+ } else
+ None
+ }
+
+ List(oldOptimiseFlagsInGenBCode /*, moreToCome */).flatten match {
+ case Nil => None
+ case warnings => Some("Conflicting compiler settings were detected. Some settings will be ignored.\n" + warnings.mkString("\n"))
+ }
+ }
}
object ClassPathRepresentationType {
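
A hedged sketch (not part of this patch) of the aggregation pattern used by conflictWarning above, collecting any number of optional warnings into a single message; the sample data is invented.

object conflictWarningDemo {
  def aggregate(warnings: List[Option[String]]): Option[String] =
    warnings.flatten match {
      case Nil => None
      case ws  => Some("Conflicting compiler settings were detected. Some settings will be ignored.\n" + ws.mkString("\n"))
    }

  def main(args: Array[String]): Unit = {
    println(aggregate(List(None)))                               // None
    println(aggregate(List(Some("-optimise is ignored"), None))) // Some(Conflicting compiler settings ...)
  }
}
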
diff --git a/src/compiler/scala/tools/nsc/settings/ScalaVersion.scala b/src/compiler/scala/tools/nsc/settings/ScalaVersion.scala
index 7e67b7bec6..0b051ef89d 100644
--- a/src/compiler/scala/tools/nsc/settings/ScalaVersion.scala
+++ b/src/compiler/scala/tools/nsc/settings/ScalaVersion.scala
@@ -77,10 +77,7 @@ object ScalaVersion {
def apply(versionString: String, errorHandler: String => Unit): ScalaVersion = {
def error() = errorHandler(
- s"There was a problem parsing ${versionString}. " +
- "Versions should be in the form major[.minor[.revision]] " +
- "where each part is a positive number, as in 2.10.1. " +
- "The minor and revision parts are optional."
+ s"Bad version (${versionString}) not major[.minor[.revision[-suffix]]]"
)
def toInt(s: String) = s match {
@@ -97,6 +94,7 @@ object ScalaVersion {
versionString match {
case "none" => NoScalaVersion
+ case "" => NoScalaVersion
case "any" => AnyScalaVersion
case vpat(majorS, minorS, revS, buildS) =>
SpecificScalaVersion(toInt(majorS), toInt(minorS), toInt(revS), toBuild(buildS))
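
A hedged sketch (not part of this patch) of how version strings are expected to parse after this change, assuming scala-compiler is on the classpath; the expected results are noted approximately in comments.

import scala.tools.nsc.settings.ScalaVersion

object versionDemo {
  def main(args: Array[String]): Unit = {
    println(ScalaVersion("2.12")) // a SpecificScalaVersion for 2.12.0
    println(ScalaVersion("none")) // the NoScalaVersion sentinel
    println(ScalaVersion(""))     // also NoScalaVersion: the empty string is newly accepted here
  }
}
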
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
index 660028eab8..99e61d2482 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
@@ -84,6 +84,9 @@ abstract class ClassfileParser {
protected final def u2(): Int = in.nextChar.toInt
protected final def u4(): Int = in.nextInt
+ protected final def s1(): Int = in.nextByte.toInt // sign-extend the byte to int
+ protected final def s2(): Int = (in.nextByte.toInt << 8) | u1 // sign-extend and shift the first byte, or with the unsigned second byte
+
private def readInnerClassFlags() = readClassFlags()
private def readClassFlags() = JavaAccFlags classFlags u2
private def readMethodFlags() = JavaAccFlags methodFlags u2
@@ -284,7 +287,7 @@ abstract class ClassfileParser {
def getType(index: Int): Type = getType(null, index)
def getType(sym: Symbol, index: Int): Type = sigToType(sym, getExternalName(index))
- def getSuperClass(index: Int): Symbol = if (index == 0) AnyClass else getClassSymbol(index)
+ def getSuperClass(index: Int): Symbol = if (index == 0) AnyClass else getClassSymbol(index) // the only classfile that is allowed to have `0` in the super_class is java/lang/Object (see jvm spec)
private def createConstant(index: Int): Constant = {
val start = starts(index)
@@ -862,7 +865,7 @@ abstract class ClassfileParser {
srcfile0 = settings.outputDirs.srcFilesFor(in.file, srcpath).find(_.exists)
case tpnme.CodeATTR =>
if (sym.owner.isInterface) {
- sym setFlag DEFAULTMETHOD
+ sym setFlag JAVA_DEFAULTMETHOD
log(s"$sym in ${sym.owner} is a java8+ default method.")
}
in.skip(attrLen)
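
A hedged sketch (not part of this patch) of why the new s1/s2 readers sign-extend: a bipush operand byte 0xFF denotes -1, not 255, and sipush behaves analogously for two bytes; the names below are invented.

object signExtendDemo {
  def u1(b: Byte): Int = b & 0xFF // unsigned read: 0xFF -> 255
  def s1(b: Byte): Int = b.toInt  // signed read:   0xFF -> -1
  def s2(hi: Byte, lo: Byte): Int = (hi.toInt << 8) | (lo & 0xFF)

  def main(args: Array[String]): Unit = {
    println((u1(0xFF.toByte), s1(0xFF.toByte))) // (255,-1): bipush needs the signed value
    println(s2(0xFF.toByte, 0x38.toByte))       // -200, the signed 16-bit value 0xFF38
  }
}
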
diff --git a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
index 438a71061e..b2f5a4119d 100644
--- a/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
+++ b/src/compiler/scala/tools/nsc/symtab/classfile/ICodeReader.scala
@@ -326,8 +326,8 @@ abstract class ICodeReader extends ClassfileParser {
case JVM.dconst_0 => code emit CONSTANT(Constant(0.0))
case JVM.dconst_1 => code emit CONSTANT(Constant(1.0))
- case JVM.bipush => code.emit(CONSTANT(Constant(u1))); size += 1
- case JVM.sipush => code.emit(CONSTANT(Constant(u2))); size += 2
+ case JVM.bipush => code.emit(CONSTANT(Constant(s1))); size += 1
+ case JVM.sipush => code.emit(CONSTANT(Constant(s2))); size += 2
case JVM.ldc => code.emit(CONSTANT(pool.getConstant(u1))); size += 1
case JVM.ldc_w => code.emit(CONSTANT(pool.getConstant(u2))); size += 2
case JVM.ldc2_w => code.emit(CONSTANT(pool.getConstant(u2))); size += 2
@@ -466,7 +466,7 @@ abstract class ICodeReader extends ClassfileParser {
size += 2
val local = code.getLocal(u1, INT)
code.emit(LOAD_LOCAL(local))
- code.emit(CONSTANT(Constant(u1)))
+ code.emit(CONSTANT(Constant(s1)))
code.emit(CALL_PRIMITIVE(Arithmetic(ADD, INT)))
code.emit(STORE_LOCAL(local))
diff --git a/src/compiler/scala/tools/nsc/transform/Constructors.scala b/src/compiler/scala/tools/nsc/transform/Constructors.scala
index 86685d46de..7c66bda46b 100644
--- a/src/compiler/scala/tools/nsc/transform/Constructors.scala
+++ b/src/compiler/scala/tools/nsc/transform/Constructors.scala
@@ -165,11 +165,19 @@ abstract class Constructors extends Statics with Transform with ast.TreeDSL {
return
}
+ // Note: elision of the outer reference is based on a class-wise analysis: if a class might have subclasses,
+ // it doesn't work. For example, `LocalParent` retains the outer reference in:
+ //
+ // class Outer { def test = {class LocalParent; class LocalChild extends LocalParent } }
+ //
+ // See run/t9408.scala for related test cases.
+ val isEffectivelyFinal = clazz.isEffectivelyFinal
def isParamCandidateForElision(sym: Symbol) = (sym.isParamAccessor && sym.isPrivateLocal)
- def isOuterCandidateForElision(sym: Symbol) = (sym.isOuterAccessor && sym.owner.isEffectivelyFinal && !sym.isOverridingSymbol)
+ def isOuterCandidateForElision(sym: Symbol) = (sym.isOuterAccessor && isEffectivelyFinal && !sym.isOverridingSymbol)
- val paramCandidatesForElision: Set[ /*Field*/ Symbol] = (clazz.info.decls.toSet filter isParamCandidateForElision)
- val outerCandidatesForElision: Set[ /*Method*/ Symbol] = (clazz.info.decls.toSet filter isOuterCandidateForElision)
+ val decls = clazz.info.decls.toSet
+ val paramCandidatesForElision: Set[ /*Field*/ Symbol] = (decls filter isParamCandidateForElision)
+ val outerCandidatesForElision: Set[ /*Method*/ Symbol] = (decls filter isOuterCandidateForElision)
omittables ++= paramCandidatesForElision
omittables ++= outerCandidatesForElision
diff --git a/src/compiler/scala/tools/nsc/transform/LazyVals.scala b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
index df622d4d1d..b6695efb0b 100644
--- a/src/compiler/scala/tools/nsc/transform/LazyVals.scala
+++ b/src/compiler/scala/tools/nsc/transform/LazyVals.scala
@@ -168,7 +168,7 @@ abstract class LazyVals extends Transform with TypingTransformers with ast.TreeD
/** Add the bitmap definitions to the rhs of a method definition.
* If the rhs has been tail-call transformed, insert the bitmap
* definitions inside the top-level label definition, so that each
- * iteration has the lazy values un-initialized. Otherwise add them
+ * iteration has the lazy values uninitialized. Otherwise add them
* at the very beginning of the method.
*/
private def addBitmapDefs(methSym: Symbol, rhs: Tree): Tree = {
diff --git a/src/compiler/scala/tools/nsc/transform/Mixin.scala b/src/compiler/scala/tools/nsc/transform/Mixin.scala
index 11f9483f77..a079a76ce7 100644
--- a/src/compiler/scala/tools/nsc/transform/Mixin.scala
+++ b/src/compiler/scala/tools/nsc/transform/Mixin.scala
@@ -79,9 +79,9 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
/** Does this field require an initialized bit?
* Note: fields of classes inheriting DelayedInit are not checked.
- * This is because the they are neither initialized in the constructor
+ * This is because they are neither initialized in the constructor
* nor do they have a setter (not if they are vals anyway). The usual
- * logic for setting bitmaps does therefor not work for such fields.
+ * logic for setting bitmaps does therefore not work for such fields.
* That's why they are excluded.
* Note: The `checkinit` option does not check if transient fields are initialized.
*/
@@ -1122,7 +1122,7 @@ abstract class Mixin extends InfoTransform with ast.TreeDSL {
if (scope exists (_.isLazy)) {
val map = mutable.Map[Symbol, Set[Symbol]]() withDefaultValue Set()
// check what fields can be nulled for
- for ((field, users) <- singleUseFields(templ); lazyFld <- users)
+ for ((field, users) <- singleUseFields(templ); lazyFld <- users if !lazyFld.accessed.hasAnnotation(TransientAttr))
map(lazyFld) += field
map.toMap
diff --git a/src/compiler/scala/tools/nsc/transform/UnCurry.scala b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
index 65316e4f00..72e2174bf8 100644
--- a/src/compiler/scala/tools/nsc/transform/UnCurry.scala
+++ b/src/compiler/scala/tools/nsc/transform/UnCurry.scala
@@ -433,7 +433,7 @@ abstract class UnCurry extends InfoTransform
val sym = tree.symbol
- // true if the taget is a lambda body that's been lifted into a method
+ // true if the target is a lambda body that's been lifted into a method
def isLiftedLambdaBody(target: Tree) = target.symbol.isLocalToBlock && target.symbol.isArtifact && target.symbol.name.containsName(nme.ANON_FUN_NAME)
val result = (
diff --git a/src/compiler/scala/tools/nsc/transform/patmat/MatchAnalysis.scala b/src/compiler/scala/tools/nsc/transform/patmat/MatchAnalysis.scala
index a11906ace1..00de77a8d4 100644
--- a/src/compiler/scala/tools/nsc/transform/patmat/MatchAnalysis.scala
+++ b/src/compiler/scala/tools/nsc/transform/patmat/MatchAnalysis.scala
@@ -138,7 +138,7 @@ trait TreeAndTypeAnalysis extends Debugging {
if(grouped) {
def enumerateChildren(sym: Symbol) = {
- sym.children.toList
+ sym.sealedChildren.toList
.sortBy(_.sealedSortName)
.filterNot(x => x.isSealed && x.isAbstractClass && !isPrimitiveValueClass(x))
}
diff --git a/src/compiler/scala/tools/nsc/transform/patmat/MatchCodeGen.scala b/src/compiler/scala/tools/nsc/transform/patmat/MatchCodeGen.scala
index 06b39b035a..1642613b9b 100644
--- a/src/compiler/scala/tools/nsc/transform/patmat/MatchCodeGen.scala
+++ b/src/compiler/scala/tools/nsc/transform/patmat/MatchCodeGen.scala
@@ -134,7 +134,7 @@ trait MatchCodeGen extends Interface {
trait OptimizedCodegen extends CodegenCore with TypedSubstitution with MatchMonadInterface {
override def codegen: AbsCodegen = optimizedCodegen
- // when we know we're targetting Option, do some inlining the optimizer won't do
+ // when we know we're targeting Option, do some inlining the optimizer won't do
// for example, `o.flatMap(f)` becomes `if(o == None) None else f(o.get)`, similarly for orElse and guard
// this is a special instance of the advanced inlining optimization that takes a method call on
// an object of a type that only has two concrete subclasses, and inlines both bodies, guarded by an if to distinguish the two cases
diff --git a/src/compiler/scala/tools/nsc/transform/patmat/MatchOptimization.scala b/src/compiler/scala/tools/nsc/transform/patmat/MatchOptimization.scala
index b3aef8a20e..cca8d2dbb8 100644
--- a/src/compiler/scala/tools/nsc/transform/patmat/MatchOptimization.scala
+++ b/src/compiler/scala/tools/nsc/transform/patmat/MatchOptimization.scala
@@ -286,8 +286,8 @@ trait MatchOptimization extends MatchTreeMaking with MatchAnalysis {
else Apply(Ident(defaultLabel), Nil)
val guardedBody = same.foldRight(jumpToDefault){
- // the last case may be un-guarded (we know it's the last one since fold's accum == jumpToDefault)
- // --> replace jumpToDefault by the un-guarded case's body
+ // the last case may be unguarded (we know it's the last one since fold's accum == jumpToDefault)
+ // --> replace jumpToDefault by the unguarded case's body
case (CaseDef(_, EmptyTree, b), `jumpToDefault`) => b
case (cd@CaseDef(_, g, b), els) if isGuardedCase(cd) => If(g, b, els)
}
@@ -322,7 +322,7 @@ trait MatchOptimization extends MatchTreeMaking with MatchAnalysis {
var remainingCases = cases
val collapsed = scala.collection.mutable.ListBuffer.empty[CaseDef]
- // when some of collapsed cases (except for the default case itself) did not include an un-guarded case
+ // when some of collapsed cases (except for the default case itself) did not include an unguarded case
// we'll need to emit a labeldef for the default case
var needDefault = false
diff --git a/src/compiler/scala/tools/nsc/transform/patmat/MatchWarnings.scala b/src/compiler/scala/tools/nsc/transform/patmat/MatchWarnings.scala
index 9e9372f709..8beb1837ad 100644
--- a/src/compiler/scala/tools/nsc/transform/patmat/MatchWarnings.scala
+++ b/src/compiler/scala/tools/nsc/transform/patmat/MatchWarnings.scala
@@ -40,7 +40,7 @@ trait MatchWarnings {
}
}
- // Issue better warnings than "unreachable code" when people mis-use
+ // Issue better warnings than "unreachable code" when people misuse
// variable patterns thinking they bind to existing identifiers.
//
// Possible TODO: more deeply nested variable patterns, like
diff --git a/src/compiler/scala/tools/nsc/typechecker/Checkable.scala b/src/compiler/scala/tools/nsc/typechecker/Checkable.scala
index fc632e0d0d..2b6a4c763a 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Checkable.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Checkable.scala
@@ -44,7 +44,7 @@ import scala.language.postfixOps
* which is essentially the intersection of X and |P|, where |P| is
* the erasure of P. If XR <: P, then no warning is emitted.
*
- * We evaluate "X with conform to P" by checking `X <: P_wild, where
+ * We evaluate "X with conform to P" by checking `X <: P_wild`, where
* P_wild is the result of substituting wildcard types in place of
* pattern type variables. This is intentionally stricter than
* (X matchesPattern P), see SI-8597 for motivating test cases.
@@ -212,8 +212,8 @@ trait Checkable {
)
/** Are all children of these symbols pairwise irreconcilable? */
def allChildrenAreIrreconcilable(sym1: Symbol, sym2: Symbol) = (
- sym1.children.toList forall (c1 =>
- sym2.children.toList forall (c2 =>
+ sym1.sealedChildren.toList forall (c1 =>
+ sym2.sealedChildren.toList forall (c2 =>
areIrreconcilableAsParents(c1, c2)
)
)
diff --git a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
index c80aaea160..b0bd9977a8 100644
--- a/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/ContextErrors.scala
@@ -550,23 +550,18 @@ trait ContextErrors {
def ModuleUsingCompanionClassDefaultArgsErrror(tree: Tree) =
NormalTypeError(tree, "module extending its companion class cannot use default constructor arguments")
- def NotEnoughArgsError(tree: Tree, fun0: Tree, missing0: List[Symbol]) = {
- def notEnoughArgumentsMsg(fun: Tree, missing: List[Symbol]) = {
- val suffix = {
- if (missing.isEmpty) ""
- else {
- val keep = missing take 3 map (_.name)
- ".\nUnspecified value parameter%s %s".format(
- if (missing.tail.isEmpty) "" else "s",
- if ((missing drop 3).nonEmpty) (keep :+ "...").mkString(", ")
- else keep.mkString("", ", ", ".")
- )
- }
+ def NotEnoughArgsError(tree: Tree, fun: Tree, missing: List[Symbol]) = {
+ val notEnoughArgumentsMsg = {
+ val suffix = if (missing.isEmpty) "" else {
+ val keep = missing take 3 map (_.name)
+ val ess = if (missing.tail.isEmpty) "" else "s"
+ f".%nUnspecified value parameter$ess ${
+ keep.mkString("", ", ", if ((missing drop 3).nonEmpty) "..." else ".")
+ }"
}
-
- "not enough arguments for " + treeSymTypeMsg(fun) + suffix
+ s"not enough arguments for ${ treeSymTypeMsg(fun) }$suffix"
}
- NormalTypeError(tree, notEnoughArgumentsMsg(fun0, missing0))
+ NormalTypeError(tree, notEnoughArgumentsMsg)
}
//doTypedApply - patternMode
@@ -632,12 +627,16 @@ trait ContextErrors {
//adapt
def MissingArgsForMethodTpeError(tree: Tree, meth: Symbol) = {
+ val f = meth.name
+ val paf = s"$f(${ meth.asMethod.paramLists map (_ map (_ => "_") mkString ",") mkString ")(" })"
+ val advice = s"""
+ |Unapplied methods are only converted to functions when a function type is expected.
+ |You can make this conversion explicit by writing `$f _` or `$paf` instead of `$f`.""".stripMargin
val message =
if (meth.isMacro) MacroTooFewArgumentListsMessage
- else "missing arguments for " + meth.fullLocationString + (
- if (meth.isConstructor) ""
- else ";\nfollow this method with `_' if you want to treat it as a partially applied function"
- )
+ else s"""missing argument list for ${meth.fullLocationString}${
+ if (!meth.isConstructor) advice else ""
+ }"""
issueNormalTypeError(tree, message)
setError(tree)
}
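
A hedged sketch (not part of this patch) of how the partially-applied-function hint above is assembled from a method's parameter lists, using plain data instead of compiler Symbols; the names are invented.

object missingArgsAdviceDemo {
  def paf(name: String, paramLists: List[List[String]]): String =
    s"$name(${ paramLists.map(_.map(_ => "_").mkString(",")).mkString(")(") })"

  def main(args: Array[String]): Unit = {
    println(paf("f", List(List("x", "y"), List("z")))) // f(_,_)(_)
    println(paf("g", List(List("a"))))                 // g(_)
  }
}
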
diff --git a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
index 7c931600e5..2ccf375c45 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Contexts.scala
@@ -24,7 +24,8 @@ trait Contexts { self: Analyzer =>
object NoContext
extends Context(EmptyTree, NoSymbol, EmptyScope, NoCompilationUnit,
- null) { // We can't pass the uninitialized `this`. Instead, we treat null specially in `Context#outer`
+ // We can't pass the uninitialized `this`. Instead, we treat null specially in `Context#outer`
+ null) {
enclClass = this
enclMethod = this
@@ -48,12 +49,11 @@ trait Contexts { self: Analyzer =>
def ambiguousDefnAndImport(owner: Symbol, imp: ImportInfo) =
LookupAmbiguous(s"it is both defined in $owner and imported subsequently by \n$imp")
- private lazy val startContext = {
- NoContext.make(
+ private lazy val startContext = NoContext.make(
Template(List(), noSelfType, List()) setSymbol global.NoSymbol setType global.NoType,
rootMirror.RootClass,
- rootMirror.RootClass.info.decls)
- }
+ rootMirror.RootClass.info.decls
+ )
private lazy val allUsedSelectors =
mutable.Map[ImportInfo, Set[ImportSelector]]() withDefaultValue Set()
@@ -168,13 +168,13 @@ trait Contexts { self: Analyzer =>
* fine grained control is needed based on the kind of error; ambiguity errors are often
* suppressed during exploratory typing, such as determining whether `a == b` in an argument
* position is an assignment or a named argument, when `Inferencer#isApplicableSafe` type checks
- * applications with and without an expected type, or whtn `Typer#tryTypedApply` tries to fit arguments to
+ * applications with and without an expected type, or when `Typer#tryTypedApply` tries to fit arguments to
* a function type with/without implicit views.
*
- * When the error policies entails error/warning buffering, the mutable [[ReportBuffer]] records
+ * When the error policies entail error/warning buffering, the mutable [[ReportBuffer]] records
* everything that is issued. It is important to note, that child Contexts created with `make`
* "inherit" the very same `ReportBuffer` instance, whereas children spawned through `makeSilent`
- * receive an separate, fresh buffer.
+ * receive a separate, fresh buffer.
*
* @param tree Tree associated with this context
* @param owner The current owner
@@ -802,6 +802,14 @@ trait Contexts { self: Analyzer =>
(e ne null) && (e.owner == scope) && (!settings.isScala212 || e.sym.exists)
})
+ /** Do something with the symbols with name `name` imported via the import in `imp`,
+ * if any such symbol is accessible from this context and is a qualifying implicit.
+ */
+ private def withQualifyingImplicitAlternatives(imp: ImportInfo, name: Name, pre: Type)(f: Symbol => Unit) = for {
+ sym <- importedAccessibleSymbol(imp, name, requireExplicit = false, record = false).alternatives
+ if isQualifyingImplicit(name, sym, pre, imported = true)
+ } f(sym)
+
private def collectImplicits(syms: Scope, pre: Type, imported: Boolean = false): List[ImplicitInfo] =
for (sym <- syms.toList if isQualifyingImplicit(sym.name, sym, pre, imported)) yield
new ImplicitInfo(sym.name, pre, sym)
@@ -819,9 +827,9 @@ trait Contexts { self: Analyzer =>
case ImportSelector(from, _, to, _) :: sels1 =>
var impls = collect(sels1) filter (info => info.name != from)
if (to != nme.WILDCARD) {
- for (sym <- importedAccessibleSymbol(imp, to).alternatives)
- if (isQualifyingImplicit(to, sym, pre, imported = true))
- impls = new ImplicitInfo(to, pre, sym) :: impls
+ withQualifyingImplicitAlternatives(imp, to, pre) { sym =>
+ impls = new ImplicitInfo(to, pre, sym) :: impls
+ }
}
impls
}
@@ -946,11 +954,8 @@ trait Contexts { self: Analyzer =>
/** The symbol with name `name` imported via the import in `imp`,
* if any such symbol is accessible from this context.
*/
- def importedAccessibleSymbol(imp: ImportInfo, name: Name): Symbol =
- importedAccessibleSymbol(imp, name, requireExplicit = false)
-
- private def importedAccessibleSymbol(imp: ImportInfo, name: Name, requireExplicit: Boolean): Symbol =
- imp.importedSymbol(name, requireExplicit) filter (s => isAccessible(s, imp.qual.tpe, superAccess = false))
+ private def importedAccessibleSymbol(imp: ImportInfo, name: Name, requireExplicit: Boolean, record: Boolean): Symbol =
+ imp.importedSymbol(name, requireExplicit, record) filter (s => isAccessible(s, imp.qual.tpe, superAccess = false))
private def requiresQualifier(s: Symbol) = (
s.owner.isClass
@@ -1057,7 +1062,7 @@ trait Contexts { self: Analyzer =>
def imp2Explicit = imp2 isExplicitImport name
def lookupImport(imp: ImportInfo, requireExplicit: Boolean) =
- importedAccessibleSymbol(imp, name, requireExplicit) filter qualifies
+ importedAccessibleSymbol(imp, name, requireExplicit, record = true) filter qualifies
// Java: A single-type-import declaration d in a compilation unit c of package p
// that imports a type named n shadows, throughout c, the declarations of:
@@ -1353,7 +1358,6 @@ trait Contexts { self: Analyzer =>
protected def handleError(pos: Position, msg: String): Unit = onTreeCheckerError(pos, msg)
}
-
class ImportInfo(val tree: Import, val depth: Int) {
def pos = tree.pos
def posOf(sel: ImportSelector) = tree.pos withPoint sel.namePos
@@ -1369,19 +1373,20 @@ trait Contexts { self: Analyzer =>
def isExplicitImport(name: Name): Boolean =
tree.selectors exists (_.rename == name.toTermName)
- /** The symbol with name `name` imported from import clause `tree`.
- */
- def importedSymbol(name: Name): Symbol = importedSymbol(name, requireExplicit = false)
+ /** The symbol with name `name` imported from import clause `tree`. */
+ def importedSymbol(name: Name): Symbol = importedSymbol(name, requireExplicit = false, record = true)
- private def recordUsage(sel: ImportSelector, result: Symbol) {
- def posstr = pos.source.file.name + ":" + posOf(sel).line
- def resstr = if (tree.symbol.hasCompleteInfo) s"(qual=$qual, $result)" else s"(expr=${tree.expr}, ${result.fullLocationString})"
- debuglog(s"In $this at $posstr, selector '${selectorString(sel)}' resolved to $resstr")
+ private def recordUsage(sel: ImportSelector, result: Symbol): Unit = {
+ debuglog(s"In $this at ${ pos.source.file.name }:${ posOf(sel).line }, selector '${ selectorString(sel)
+ }' resolved to ${
+ if (tree.symbol.hasCompleteInfo) s"(qual=$qual, $result)"
+ else s"(expr=${tree.expr}, ${result.fullLocationString})"
+ }")
allUsedSelectors(this) += sel
}
/** If requireExplicit is true, wildcard imports are not considered. */
- def importedSymbol(name: Name, requireExplicit: Boolean): Symbol = {
+ def importedSymbol(name: Name, requireExplicit: Boolean, record: Boolean): Symbol = {
var result: Symbol = NoSymbol
var renamed = false
var selectors = tree.selectors
@@ -1398,7 +1403,7 @@ trait Contexts { self: Analyzer =>
if (result == NoSymbol)
selectors = selectors.tail
}
- if (settings.warnUnusedImport && selectors.nonEmpty && result != NoSymbol && pos != NoPosition)
+ if (record && settings.warnUnusedImport && selectors.nonEmpty && result != NoSymbol && pos != NoPosition)
recordUsage(current, result)
// Harden against the fallout from bugs like SI-6745
@@ -1495,7 +1500,7 @@ object ContextMode {
final val TypeConstructorAllowed: ContextMode = 1 << 16
/** TODO: The "sticky modes" are EXPRmode, PATTERNmode, TYPEmode.
- * To mimick the sticky mode behavior, when captain stickyfingers
+ * To mimic the sticky mode behavior, when captain stickyfingers
* comes around we need to propagate those modes but forget the other
* context modes which were once mode bits; those being so far the
* ones listed here.
diff --git a/src/compiler/scala/tools/nsc/typechecker/Implicits.scala b/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
index 3274c86072..73e454bf47 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Implicits.scala
@@ -110,10 +110,10 @@ trait Implicits {
* Ignore their constr field! The list of type constraints returned along with each tree specifies the constraints that
* must be met by the corresponding type parameter in `tpars` (for the returned implicit view to be valid).
*
- * @arg tp from-type for the implicit conversion
- * @arg context search implicits here
- * @arg tpars symbols that should be considered free type variables
- * (implicit search should not try to solve them, just track their constraints)
+ * @param tp from-type for the implicit conversion
+ * @param context search implicits here
+ * @param tpars symbols that should be considered free type variables
+ * (implicit search should not try to solve them, just track their constraints)
*/
def allViewsFrom(tp: Type, context: Context, tpars: List[Symbol]): List[(SearchResult, List[TypeConstraint])] = {
// my untouchable typevars are better than yours (they can't be constrained by them)
@@ -324,8 +324,10 @@ trait Implicits {
*/
class ImplicitSearch(tree: Tree, pt: Type, isView: Boolean, context0: Context, pos0: Position = NoPosition) extends Typer(context0) with ImplicitsContextErrors {
val searchId = implicitSearchId()
- private def typingLog(what: String, msg: => String) =
- typingStack.printTyping(tree, f"[search #$searchId] $what $msg")
+ private def typingLog(what: String, msg: => String) = {
+ if (printingOk(tree))
+ typingStack.printTyping(f"[search #$searchId] $what $msg")
+ }
import infer._
if (Statistics.canEnable) Statistics.incCounter(implicitSearchCount)
@@ -846,7 +848,7 @@ trait Implicits {
errors.collectFirst { case err: DivergentImplicitTypeError => err } foreach saveDivergent
if (search.isDivergent && divergentError.isEmpty) {
- // Divergence triggered by `i` at this level of the implicit serach. We haven't
+ // Divergence triggered by `i` at this level of the implicit search. We haven't
// seen divergence so far, we won't issue this error just yet, and instead temporarily
// treat `i` as a failed candidate.
saveDivergent(DivergentImplicitTypeError(tree, pt, i.sym))
@@ -1358,7 +1360,7 @@ trait Implicits {
val succstart = if (stats) Statistics.startTimer(oftypeSucceedNanos) else null
// SI-6667, never search companions after an ambiguous error in in-scope implicits
- val wasAmbigious = result.isAmbiguousFailure
+ val wasAmbiguous = result.isAmbiguousFailure
// TODO: encapsulate
val previousErrs = context.reporter.errors
@@ -1368,7 +1370,7 @@ trait Implicits {
// `materializeImplicit` does some preprocessing for `pt`
// is it only meant for manifests/tags or we need to do the same for `implicitsOfExpectedType`?
- if (result.isFailure && !wasAmbigious)
+ if (result.isFailure && !wasAmbiguous)
result = searchImplicit(implicitsOfExpectedType, isLocalToCallsite = false)
if (result.isFailure)
@@ -1408,7 +1410,7 @@ trait Implicits {
}
if (result.isFailure && settings.debug) // debuglog is not inlined for some reason
- log("no implicits found for "+pt+" "+pt.typeSymbol.info.baseClasses+" "+implicitsOfExpectedType)
+ log(s"no implicits found for ${pt} ${pt.typeSymbol.info.baseClasses} ${implicitsOfExpectedType}")
result
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/Infer.scala b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
index a4c794e8cf..a5fdbb5148 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Infer.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Infer.scala
@@ -1117,7 +1117,7 @@ trait Infer extends Checkable {
// this is quite nasty: it destructively changes the info of the syms of e.g., method type params
// (see #3692, where the type param T's bounds were set to > : T <: T, so that parts looped)
- // the changes are rolled back by restoreTypeBounds, but might be unintentially observed in the mean time
+ // the changes are rolled back by restoreTypeBounds, but might be unintentionally observed in the mean time
def instantiateTypeVar(tvar: TypeVar) {
val tparam = tvar.origin.typeSymbol
val TypeBounds(lo0, hi0) = tparam.info.bounds
@@ -1375,7 +1375,7 @@ trait Infer extends Checkable {
* Otherwise, if there is no best alternative, error.
*
* @param argtpes0 contains the argument types. If an argument is named, as
- * "a = 3", the corresponding type is `NamedType("a", Int)'. If the name
+ * "a = 3", the corresponding type is `NamedType("a", Int)`. If the name
* of some NamedType does not exist in an alternative's parameter names,
* the type is replaced by `Unit`, i.e. the argument is treated as an
* assignment expression.
diff --git a/src/compiler/scala/tools/nsc/typechecker/Macros.scala b/src/compiler/scala/tools/nsc/typechecker/Macros.scala
index 10aefae20b..99dd81c7e2 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Macros.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Macros.scala
@@ -85,9 +85,9 @@ trait Macros extends MacroRuntimes with Traces with Helpers {
*/
case class MacroImplBinding(
// Is this macro impl a bundle (a trait extending *box.Macro) or a vanilla def?
- val isBundle: Boolean,
+ isBundle: Boolean,
// Is this macro impl blackbox (i.e. having blackbox.Context in its signature)?
- val isBlackbox: Boolean,
+ isBlackbox: Boolean,
// Java class name of the class that contains the macro implementation
// is used to load the corresponding object with Java reflection
className: String,
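The MacroImplBinding hunk only removes redundant `val` modifiers: parameters of a case class constructor are already public vals, so the explicit keyword changes nothing. A tiny illustration with made-up names:

  case class Binding(isBundle: Boolean, className: String)  // fields are already public vals
  val b = Binding(isBundle = true, className = "Foo")
  println(b.isBundle)  // accessible without writing `val` in the parameter list
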
diff --git a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
index f90e32ce8a..f3856db552 100644
--- a/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/MethodSynthesis.scala
@@ -436,7 +436,7 @@ trait MethodSynthesis {
if (tree.symbol.owner.isTrait || hasUnitType(basisSym)) rhs1
else gen.mkAssignAndReturn(basisSym, rhs1)
)
- derivedSym setPos tree.pos // cannot set it at createAndEnterSymbol because basisSym can possible stil have NoPosition
+ derivedSym setPos tree.pos // cannot set it at createAndEnterSymbol because basisSym can possibly still have NoPosition
val ddefRes = DefDef(derivedSym, new ChangeOwnerAndModuleClassTraverser(basisSym, derivedSym)(body))
// ValDef will have its position focused whereas DefDef will have original correct rangepos
// ideally positions would be correct at the creation time but lazy vals are really a special case
diff --git a/src/compiler/scala/tools/nsc/typechecker/Namers.scala b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
index 77c49a862a..4ad81b60ae 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Namers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Namers.scala
@@ -145,8 +145,8 @@ trait Namers extends MethodSynthesis {
// while Scala's enum constants live directly in the class.
// We don't check for clazz.superClass == JavaEnumClass, because this causes an illegal
// cyclic reference error. See the commit message for details.
- if (context.unit.isJava) owner.companionClass.hasEnumFlag else owner.hasEnumFlag
- vd.mods.hasAllFlags(ENUM | STABLE | STATIC) && ownerHasEnumFlag
+ if (context.unit.isJava) owner.companionClass.hasJavaEnumFlag else owner.hasJavaEnumFlag
+ vd.mods.hasAllFlags(JAVA_ENUM | STABLE | STATIC) && ownerHasEnumFlag
}
def setPrivateWithin[T <: Symbol](tree: Tree, sym: T, mods: Modifiers): T =
@@ -204,7 +204,7 @@ trait Namers extends MethodSynthesis {
}
// FIXME - this logic needs to be thoroughly explained
- // and justified. I know it's wrong with repect to package
+ // and justified. I know it's wrong with respect to package
// objects, but I think it's also wrong in other ways.
protected def conflict(newS: Symbol, oldS: Symbol) = (
( !oldS.isSourceMethod
@@ -1106,7 +1106,7 @@ trait Namers extends MethodSynthesis {
* As a first side effect, this method assigns a MethodType constructed using this
* return type to `meth`. This allows omitting the result type for recursive methods.
*
- * As another side effect, this method also assigns paramter types from the overridden
+ * As another side effect, this method also assigns parameter types from the overridden
* method to parameters of `meth` that have missing types (the parser accepts missing
* parameter types under -Yinfer-argument-types).
*/
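The Namers hunk renames the enum-related flags (ENUM becomes JAVA_ENUM, hasEnumFlag becomes hasJavaEnumFlag); the test itself remains an "all of these flags are set" bitmask check. A generic sketch of what a hasAllFlags-style test computes, using invented flag constants rather than the real values in scala.reflect.internal.Flags:

  // Hypothetical flag bits, for illustration only.
  val JAVA_ENUM = 1L << 0
  val STABLE    = 1L << 1
  val STATIC    = 1L << 2

  def hasAllFlags(flags: Long, mask: Long): Boolean = (flags & mask) == mask

  val enumConstant = JAVA_ENUM | STABLE | STATIC
  println(hasAllFlags(enumConstant, JAVA_ENUM | STABLE | STATIC))        // true
  println(hasAllFlags(JAVA_ENUM | STATIC, JAVA_ENUM | STABLE | STATIC))  // false: STABLE missing
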
diff --git a/src/compiler/scala/tools/nsc/typechecker/PatternTypers.scala b/src/compiler/scala/tools/nsc/typechecker/PatternTypers.scala
index 8a66c7d274..a702b3cdf5 100644
--- a/src/compiler/scala/tools/nsc/typechecker/PatternTypers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/PatternTypers.scala
@@ -305,7 +305,7 @@ trait PatternTypers {
// clearing the type is necessary so that ref will be stabilized; see bug 881
val fun1 = typedPos(fun.pos)(Apply(Select(fun.clearType(), unapplyMethod), unapplyArgTree :: Nil))
- def makeTypedUnApply() = {
+ def makeTypedUnapply() = {
// the union of the expected type and the inferred type of the argument to unapply
val glbType = glb(ensureFullyDefined(pt) :: unapplyArg.tpe_* :: Nil)
val wrapInTypeTest = canRemedy && !(fun1.symbol.owner isNonBottomSubClass ClassTagClass)
@@ -325,7 +325,7 @@ trait PatternTypers {
if (isBlackbox(unapplyMethod)) duplErrorTree(BlackboxExtractorExpansion(tree))
else duplErrorTree(WrongShapeExtractorExpansion(tree))
} else
- makeTypedUnApply()
+ makeTypedUnapply()
}
def wrapClassTagUnapply(uncheckedPattern: Tree, classTagExtractor: Tree, pt: Type): Tree = {
diff --git a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
index 4b30b4e436..90ac1f466d 100644
--- a/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
@@ -421,7 +421,7 @@ abstract class RefChecks extends InfoTransform with scala.reflect.internal.trans
overrideError("cannot be used here - classes can only override abstract types")
} else if (other.isEffectivelyFinal) { // (1.2)
overrideError("cannot override final member")
- } else if (!other.isDeferredOrDefault && !other.hasFlag(DEFAULTMETHOD) && !member.isAnyOverride && !member.isSynthetic) { // (*)
+ } else if (!other.isDeferredOrJavaDefault && !other.hasFlag(JAVA_DEFAULTMETHOD) && !member.isAnyOverride && !member.isSynthetic) { // (*)
// (*) Synthetic exclusion for (at least) default getters, fixes SI-5178. We cannot assign the OVERRIDE flag to
// the default getter: one default getter might sometimes override, sometimes not. Example in comment on ticket.
if (isNeitherInClass && !(other.owner isSubClass member.owner))
@@ -604,7 +604,7 @@ abstract class RefChecks extends InfoTransform with scala.reflect.internal.trans
def checkNoAbstractMembers(): Unit = {
// Avoid spurious duplicates: first gather any missing members.
def memberList = clazz.info.nonPrivateMembersAdmitting(VBRIDGE)
- val (missing, rest) = memberList partition (m => m.isDeferredNotDefault && !ignoreDeferred(m))
+ val (missing, rest) = memberList partition (m => m.isDeferredNotJavaDefault && !ignoreDeferred(m))
// Group missing members by the name of the underlying symbol,
// to consolidate getters and setters.
val grouped = missing groupBy (sym => analyzer.underlyingSymbol(sym).name)
@@ -1134,13 +1134,13 @@ abstract class RefChecks extends InfoTransform with scala.reflect.internal.trans
t hasSymbolWhich (_.accessedOrSelf == valOrDef.symbol)
case _ => false
}
- val trivialInifiniteLoop = (
+ val trivialInfiniteLoop = (
!valOrDef.isErroneous
&& !valOrDef.symbol.isValueParameter
&& valOrDef.symbol.paramss.isEmpty
&& callsSelf
)
- if (trivialInifiniteLoop)
+ if (trivialInfiniteLoop)
reporter.warning(valOrDef.rhs.pos, s"${valOrDef.symbol.fullLocationString} does nothing other than call itself recursively")
}
@@ -1511,7 +1511,8 @@ abstract class RefChecks extends InfoTransform with scala.reflect.internal.trans
sym.isSourceMethod &&
sym.isCase &&
sym.name == nme.apply &&
- isClassTypeAccessible(tree)
+ isClassTypeAccessible(tree) &&
+ !tree.tpe.resultType.typeSymbol.primaryConstructor.isLessAccessibleThan(tree.symbol)
if (doTransform) {
tree foreach {
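The extra condition added to doTransform keeps the case-apply rewrite from replacing Foo.apply(args) with new Foo(args) when the primary constructor is less accessible than apply; otherwise the rewrite could turn valid code into an access error. A user-level sketch of the situation, assuming Scala 2.x behaviour where the synthetic apply of a case class stays public even if the constructor is private:

  object AccessDemo {
    case class Secret private (value: Int)

    object Secret {
      def fromPositive(v: Int): Option[Secret] =
        if (v > 0) Some(Secret(v)) else None  // the companion may use the private constructor
    }

    val s = Secret(42)         // compiles: apply is public
    // val t = new Secret(42)  // would not compile here, so apply must not be rewritten to `new`
  }
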
diff --git a/src/compiler/scala/tools/nsc/typechecker/StdAttachments.scala b/src/compiler/scala/tools/nsc/typechecker/StdAttachments.scala
index ea44b9dc39..92b0719ba3 100644
--- a/src/compiler/scala/tools/nsc/typechecker/StdAttachments.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/StdAttachments.scala
@@ -132,7 +132,7 @@ trait StdAttachments {
/** Marks the tree as a macro impl reference, which is a naked reference to a method.
*
* This is necessary for typechecking macro impl references (see `DefaultMacroCompiler.defaultResolveMacroImpl`),
- * because otherwise typing a naked reference will result in the "follow this method with `_' if you want to
+ * because otherwise typing a naked reference will result in the "follow this method with `_` if you want to
* treat it as a partially applied function" errors.
*
* This mark suppresses adapt except for when the annottee is a macro application.
diff --git a/src/compiler/scala/tools/nsc/typechecker/Tags.scala b/src/compiler/scala/tools/nsc/typechecker/Tags.scala
index 57dc74d2a0..56127f4026 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Tags.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Tags.scala
@@ -36,7 +36,7 @@ trait Tags {
* @param allowMaterialization If true (default) then the resolver is allowed to launch materialization macros when there's no class tag in scope.
* If false then materialization macros are prohibited from running.
*
- * @returns Tree that represents an `scala.reflect.ClassTag` for `tp` if everything is okay.
+ * @return Tree that represents an `scala.reflect.ClassTag` for `tp` if everything is okay.
* EmptyTree if the result contains unresolved (i.e. not spliced) type parameters and abstract type members.
* EmptyTree if `allowMaterialization` is false, and there is no class tag in scope.
*/
@@ -57,7 +57,7 @@ trait Tags {
* @param allowMaterialization If true (default) then the resolver is allowed to launch materialization macros when there's no type tag in scope.
* If false then materialization macros are prohibited from running.
*
- * @returns Tree that represents a `scala.reflect.TypeTag` for `tp` if everything is okay.
+ * @return Tree that represents a `scala.reflect.TypeTag` for `tp` if everything is okay.
* EmptyTree if `concrete` is true and the result contains unresolved (i.e. not spliced) type parameters and abstract type members.
* EmptyTree if `allowMaterialization` is false, and there is no array tag in scope.
*/
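resolveClassTag and resolveTypeTag sit behind the implicit search that user code triggers simply by asking for a tag. A small, self-contained usage example relying only on standard library API:

  import scala.reflect.{ClassTag, classTag}

  def filled[T: ClassTag](n: Int, elem: T): Array[T] = Array.fill(n)(elem)

  println(classTag[Int])                 // ClassTag materialized by implicit search
  println(filled(3, "x").mkString(","))  // x,x,x -- uses the implicitly found ClassTag[String]
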
diff --git a/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala b/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
index 059981aa37..5f2643cb25 100644
--- a/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/TypeDiagnostics.scala
@@ -309,6 +309,7 @@ trait TypeDiagnostics {
// save the name because it will be mutated until it has been
// distinguished from the other types in the same error message
private val savedName = sym.name
+ private var postQualifiedWith: List[Symbol] = Nil
def restoreName() = sym.name = savedName
def modifyName(f: String => String) = sym setName newTypeName(f(sym.name.toString))
@@ -317,12 +318,12 @@ trait TypeDiagnostics {
*/
def qualifyDefaultNamespaces() = {
val intersect = Set(trueOwner, aliasOwner) intersect UnqualifiedOwners
- if (intersect.nonEmpty) preQualify()
+ if (intersect.nonEmpty && tp.typeSymbolDirect.name == tp.typeSymbol.name) preQualify()
}
// functions to manipulate the name
def preQualify() = modifyName(trueOwner.fullName + "." + _)
- def postQualify() = modifyName(_ + "(in " + trueOwner + ")")
+ def postQualify() = if (!(postQualifiedWith contains trueOwner)) { postQualifiedWith ::= trueOwner; modifyName(_ + "(in " + trueOwner + ")") }
def typeQualify() = if (sym.isTypeParameterOrSkolem) postQualify()
def nameQualify() = if (trueOwner.isPackageClass) preQualify() else postQualify()
diff --git a/src/compiler/scala/tools/nsc/typechecker/Typers.scala b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
index 2dfecbaea1..8228adc20e 100644
--- a/src/compiler/scala/tools/nsc/typechecker/Typers.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/Typers.scala
@@ -1694,6 +1694,8 @@ trait Typers extends Adaptations with Tags with TypersTracking with PatternTyper
psym addChild context.owner
else
pending += ParentSealedInheritanceError(parent, psym)
+ if (psym.isLocalToBlock && !phase.erasedTypes)
+ psym addChild context.owner
val parentTypeOfThis = parent.tpe.dealias.typeOfThis
if (!(selfType <:< parentTypeOfThis) &&
@@ -2712,7 +2714,7 @@ trait Typers extends Adaptations with Tags with TypersTracking with PatternTyper
*
* If 'T' is not fully defined, it is inferred by type checking
* `apply$body` without a result type before type checking the block.
- * The method's inferred result type is used instead of T`. [See test/files/pos/sammy_poly.scala]
+ * The method's inferred result type is used instead of `T`. [See test/files/pos/sammy_poly.scala]
*
* The `apply` method is identified by the argument `sam`; `S` corresponds to the argument `samClassTp`,
* and `resPt` is derived from `samClassTp` -- it may be fully defined, or not...
@@ -4564,7 +4566,7 @@ trait Typers extends Adaptations with Tags with TypersTracking with PatternTyper
typed1(atPos(tree.pos)(Block(stats, Apply(expr, args) setPos tree.pos.makeTransparent)), mode, pt)
case Apply(fun, args) =>
normalTypedApply(tree, fun, args) match {
- case ArrayInstantiation(tree1) => typed(tree1, mode, pt)
+ case ArrayInstantiation(tree1) => if (tree1.isErrorTyped) tree1 else typed(tree1, mode, pt)
case Apply(Select(fun, nme.apply), _) if treeInfo.isSuperConstrCall(fun) => TooManyArgumentListsForConstructor(tree) //SI-5696
case tree1 => tree1
}
diff --git a/src/compiler/scala/tools/nsc/typechecker/TypersTracking.scala b/src/compiler/scala/tools/nsc/typechecker/TypersTracking.scala
index 550fd4e68d..37fbb73b85 100644
--- a/src/compiler/scala/tools/nsc/typechecker/TypersTracking.scala
+++ b/src/compiler/scala/tools/nsc/typechecker/TypersTracking.scala
@@ -159,7 +159,7 @@ trait TypersTracking {
// Some trees which are typed with mind-numbing frequency and
// which add nothing by being printed. Did () type to Unit? Let's
// gamble on yes.
- private def printingOk(t: Tree) = printTypings && (settings.debug.value || !noPrint(t))
+ def printingOk(t: Tree) = printTypings && (settings.debug.value || !noPrint(t))
def noPrintTyping(t: Tree) = (t.tpe ne null) || !printingOk(t)
def noPrintAdapt(tree1: Tree, tree2: Tree) = !printingOk(tree1) || (
(tree1.tpe == tree2.tpe)
diff --git a/src/compiler/scala/tools/nsc/util/DocStrings.scala b/src/compiler/scala/tools/nsc/util/DocStrings.scala
index 4ff7067a21..501546b8f6 100755
--- a/src/compiler/scala/tools/nsc/util/DocStrings.scala
+++ b/src/compiler/scala/tools/nsc/util/DocStrings.scala
@@ -37,7 +37,7 @@ object DocStrings {
/** Returns index of string `str` after `start` skipping longest
* sequence of space and tab characters, possibly also containing
* a single `*` character or the `/``**` sequence.
- * @pre start == str.length || str(start) == `\n'
+ * @pre start == str.length || str(start) == `\n`
*/
def skipLineLead(str: String, start: Int): Int =
if (start == str.length) start
@@ -49,7 +49,7 @@ object DocStrings {
else idx
}
- /** Skips to next occurrence of `\n' or to the position after the `/``**` sequence following index `start`.
+ /** Skips to next occurrence of `\n` or to the position after the `/``**` sequence following index `start`.
*/
def skipToEol(str: String, start: Int): Int =
if (start + 2 < str.length && (str charAt start) == '/' && (str charAt (start + 1)) == '*' && (str charAt (start + 2)) == '*') start + 3
diff --git a/src/compiler/scala/tools/util/PathResolver.scala b/src/compiler/scala/tools/util/PathResolver.scala
index f122437b63..09c6c9d744 100644
--- a/src/compiler/scala/tools/util/PathResolver.scala
+++ b/src/compiler/scala/tools/util/PathResolver.scala
@@ -254,17 +254,7 @@ abstract class PathResolverBase[BaseClassPathType <: ClassFileLookup[AbstractFil
* TODO: we should refactor this as a separate -bootstrap option to have a clean implementation, no? */
def sourcePath = if (!settings.isScaladoc) cmdLineOrElse("sourcepath", Defaults.scalaSourcePath) else ""
- /** Against my better judgment, giving in to martin here and allowing
- * CLASSPATH to be used automatically. So for the user-specified part
- * of the classpath:
- *
- * - If -classpath or -cp is given, it is that
- * - Otherwise, if CLASSPATH is set, it is that
- * - If neither of those, then "." is used.
- */
- def userClassPath =
- if (!settings.classpath.isDefault) settings.classpath.value
- else sys.env.getOrElse("CLASSPATH", ".")
+ def userClassPath = settings.classpath.value // default is specified by settings and can be overridden there
import classPathFactory._
diff --git a/src/eclipse/partest/.classpath b/src/eclipse/partest/.classpath
index 7e2f119193..de4acf8aa0 100644
--- a/src/eclipse/partest/.classpath
+++ b/src/eclipse/partest/.classpath
@@ -5,7 +5,7 @@
<classpathentry combineaccessrules="false" kind="src" path="/repl"/>
<classpathentry kind="var" path="M2_REPO/com/googlecode/java-diff-utils/diffutils/1.3.0/diffutils-1.3.0.jar"/>
<classpathentry kind="var" path="M2_REPO/org/scala-sbt/test-interface/1.0/test-interface-1.0.jar"/>
- <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-partest_2.11/1.0.0/scala-partest_2.11-1.0.0.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-partest_2.12.0-M1/1.0.7/scala-partest_2.12.0-M1-1.0.7.jar"/>
<classpathentry kind="var" path="SCALA_BASEDIR/lib/ant/ant.jar"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry combineaccessrules="false" kind="src" path="/scala-compiler"/>
diff --git a/src/eclipse/repl/.classpath b/src/eclipse/repl/.classpath
index cbaabb9af1..14f7e16670 100644
--- a/src/eclipse/repl/.classpath
+++ b/src/eclipse/repl/.classpath
@@ -2,7 +2,8 @@
<classpath>
<classpathentry kind="src" path="repl"/>
<classpathentry combineaccessrules="false" kind="src" path="/asm"/>
- <classpathentry kind="var" path="M2_REPO/jline/jline/2.12/jline-2.12.jar"/>
+ <classpathentry kind="var" path="M2_REPO/jline/jline/2.12.1/jline-2.12.1.jar"/>
+ <!-- <classpathentry kind="var" path="SCALA_BASEDIR/build/deps/repl/jline-2.12.jar"/> -->
<classpathentry combineaccessrules="false" kind="src" path="/scala-compiler"/>
<classpathentry combineaccessrules="false" kind="src" path="/scala-library"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
diff --git a/src/eclipse/scaladoc/.classpath b/src/eclipse/scaladoc/.classpath
index ee6427176a..2b8282cfb3 100644
--- a/src/eclipse/scaladoc/.classpath
+++ b/src/eclipse/scaladoc/.classpath
@@ -6,8 +6,8 @@
<classpathentry combineaccessrules="false" kind="src" path="/scala-compiler"/>
<classpathentry combineaccessrules="false" kind="src" path="/scala-library"/>
<classpathentry combineaccessrules="false" kind="src" path="/partest-extras"/>
- <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-xml_2.11/1.0.2/scala-xml_2.11-1.0.2.jar"/>
- <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-parser-combinators_2.11/1.0.1/scala-parser-combinators_2.11-1.0.1.jar"/>
- <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-partest_2.11/1.0.0/scala-partest_2.11-1.0.0.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-xml_2.12.0-M1/1.0.4/scala-xml_2.12.0-M1-1.0.4"/>
+ <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-parser-combinators_2.12.0-M1/1.0.4/scala-parser-combinators_2.12.0-M1-1.0.4.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/scala-lang/modules/scala-partest_2.12.0-M1/1.0.7/scala-partest_2.12.0-M1-1.0.7.jar"/>
<classpathentry kind="output" path="build-quick-scaladoc"/>
</classpath>
diff --git a/src/eclipse/test-junit/.classpath b/src/eclipse/test-junit/.classpath
index 8a599bd8c7..710d33b030 100644
--- a/src/eclipse/test-junit/.classpath
+++ b/src/eclipse/test-junit/.classpath
@@ -2,7 +2,7 @@
<classpath>
<classpathentry kind="src" path="test-junit"/>
<classpathentry kind="var" path="SCALA_BASEDIR/lib/ant/ant.jar"/>
- <classpathentry kind="var" path="M2_REPO/junit/junit/4.10/junit-4.10.jar"/>
+ <classpathentry kind="var" path="M2_REPO/junit/junit/4.11/junit-4.11.jar"/>
<classpathentry combineaccessrules="false" kind="src" path="/reflect"/>
<classpathentry combineaccessrules="false" kind="src" path="/scala-library"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
deleted file mode 100644
index 9bd378c61c..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinPool.java
+++ /dev/null
@@ -1,3762 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.AbstractExecutorService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.TimeUnit;
-
-/**
- * @since 1.8
- * @author Doug Lea
- */
-@Deprecated
-/*public*/ abstract class CountedCompleter<T> extends ForkJoinTask<T> {
- private static final long serialVersionUID = 5232453752276485070L;
-
- /** This task's completer, or null if none */
- final CountedCompleter<?> completer;
- /** The number of pending tasks until completion */
- volatile int pending;
-
- /**
- * Creates a new CountedCompleter with the given completer
- * and initial pending count.
- *
- * @param completer this task's completer, or {@code null} if none
- * @param initialPendingCount the initial pending count
- */
- protected CountedCompleter(CountedCompleter<?> completer,
- int initialPendingCount) {
- this.completer = completer;
- this.pending = initialPendingCount;
- }
-
- /**
- * Creates a new CountedCompleter with the given completer
- * and an initial pending count of zero.
- *
- * @param completer this task's completer, or {@code null} if none
- */
- protected CountedCompleter(CountedCompleter<?> completer) {
- this.completer = completer;
- }
-
- /**
- * Creates a new CountedCompleter with no completer
- * and an initial pending count of zero.
- */
- protected CountedCompleter() {
- this.completer = null;
- }
-
- /**
- * The main computation performed by this task.
- */
- public abstract void compute();
-
- /**
- * Performs an action when method {@link #tryComplete} is invoked
- * and the pending count is zero, or when the unconditional
- * method {@link #complete} is invoked. By default, this method
- * does nothing. You can distinguish cases by checking the
- * identity of the given caller argument. If not equal to {@code
- * this}, then it is typically a subtask that may contain results
- * (and/or links to other results) to combine.
- *
- * @param caller the task invoking this method (which may
- * be this task itself)
- */
- public void onCompletion(CountedCompleter<?> caller) {
- }
-
- /**
- * Performs an action when method {@link #completeExceptionally}
- * is invoked or method {@link #compute} throws an exception, and
- * this task has not otherwise already completed normally. On
- * entry to this method, this task {@link
- * ForkJoinTask#isCompletedAbnormally}. The return value of this
- * method controls further propagation: If {@code true} and this
- * task has a completer, then this completer is also completed
- * exceptionally. The default implementation of this method does
- * nothing except return {@code true}.
- *
- * @param ex the exception
- * @param caller the task invoking this method (which may
- * be this task itself)
- * @return true if this exception should be propagated to this
- * task's completer, if one exists
- */
- public boolean onExceptionalCompletion(Throwable ex, CountedCompleter<?> caller) {
- return true;
- }
-
- /**
- * Returns the completer established in this task's constructor,
- * or {@code null} if none.
- *
- * @return the completer
- */
- public final CountedCompleter<?> getCompleter() {
- return completer;
- }
-
- /**
- * Returns the current pending count.
- *
- * @return the current pending count
- */
- public final int getPendingCount() {
- return pending;
- }
-
- /**
- * Sets the pending count to the given value.
- *
- * @param count the count
- */
- public final void setPendingCount(int count) {
- pending = count;
- }
-
- /**
- * Adds (atomically) the given value to the pending count.
- *
- * @param delta the value to add
- */
- public final void addToPendingCount(int delta) {
- int c; // note: can replace with intrinsic in jdk8
- do {} while (!U.compareAndSwapInt(this, PENDING, c = pending, c+delta));
- }
-
- /**
- * Sets (atomically) the pending count to the given count only if
- * it currently holds the given expected value.
- *
- * @param expected the expected value
- * @param count the new value
- * @return true if successful
- */
- public final boolean compareAndSetPendingCount(int expected, int count) {
- return U.compareAndSwapInt(this, PENDING, expected, count);
- }
-
- /**
- * If the pending count is nonzero, (atomically) decrements it.
- *
- * @return the initial (undecremented) pending count holding on entry
- * to this method
- */
- public final int decrementPendingCountUnlessZero() {
- int c;
- do {} while ((c = pending) != 0 &&
- !U.compareAndSwapInt(this, PENDING, c, c - 1));
- return c;
- }
-
- /**
- * Returns the root of the current computation; i.e., this
- * task if it has no completer, else its completer's root.
- *
- * @return the root of the current computation
- */
- public final CountedCompleter<?> getRoot() {
- CountedCompleter<?> a = this, p;
- while ((p = a.completer) != null)
- a = p;
- return a;
- }
-
- /**
- * If the pending count is nonzero, decrements the count;
- * otherwise invokes {@link #onCompletion} and then similarly
- * tries to complete this task's completer, if one exists,
- * else marks this task as complete.
- */
- public final void tryComplete() {
- CountedCompleter<?> a = this, s = a;
- for (int c;;) {
- if ((c = a.pending) == 0) {
- a.onCompletion(s);
- if ((a = (s = a).completer) == null) {
- s.quietlyComplete();
- return;
- }
- }
- else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
- return;
- }
- }
-
- /**
- * Equivalent to {@link #tryComplete} but does not invoke {@link
- * #onCompletion} along the completion path: If the pending count
- * is nonzero, decrements the count; otherwise, similarly tries to
- * complete this task's completer, if one exists, else marks this
- * task as complete. This method may be useful in cases where
- * {@code onCompletion} should not, or need not, be invoked for
- * each completer in a computation.
- */
- public final void propagateCompletion() {
- CountedCompleter<?> a = this, s = a;
- for (int c;;) {
- if ((c = a.pending) == 0) {
- if ((a = (s = a).completer) == null) {
- s.quietlyComplete();
- return;
- }
- }
- else if (U.compareAndSwapInt(a, PENDING, c, c - 1))
- return;
- }
- }
-
- /**
- * Regardless of pending count, invokes {@link #onCompletion},
- * marks this task as complete and further triggers {@link
- * #tryComplete} on this task's completer, if one exists. The
- * given rawResult is used as an argument to {@link #setRawResult}
- * before invoking {@link #onCompletion} or marking this task as
- * complete; its value is meaningful only for classes overriding
- * {@code setRawResult}.
- *
- * <p>This method may be useful when forcing completion as soon as
- * any one (versus all) of several subtask results are obtained.
- * However, in the common (and recommended) case in which {@code
- * setRawResult} is not overridden, this effect can be obtained
- * more simply using {@code quietlyCompleteRoot();}.
- *
- * @param rawResult the raw result
- */
- public void complete(T rawResult) {
- CountedCompleter<?> p;
- setRawResult(rawResult);
- onCompletion(this);
- quietlyComplete();
- if ((p = completer) != null)
- p.tryComplete();
- }
-
-
- /**
- * If this task's pending count is zero, returns this task;
- * otherwise decrements its pending count and returns {@code
- * null}. This method is designed to be used with {@link
- * #nextComplete} in completion traversal loops.
- *
- * @return this task, if pending count was zero, else {@code null}
- */
- public final CountedCompleter<?> firstComplete() {
- for (int c;;) {
- if ((c = pending) == 0)
- return this;
- else if (U.compareAndSwapInt(this, PENDING, c, c - 1))
- return null;
- }
- }
-
- /**
- * If this task does not have a completer, invokes {@link
- * ForkJoinTask#quietlyComplete} and returns {@code null}. Or, if
- * this task's pending count is non-zero, decrements its pending
- * count and returns {@code null}. Otherwise, returns the
- * completer. This method can be used as part of a completion
- * traversal loop for homogeneous task hierarchies:
- *
- * <pre> {@code
- * for (CountedCompleter<?> c = firstComplete();
- * c != null;
- * c = c.nextComplete()) {
- * // ... process c ...
- * }}</pre>
- *
- * @return the completer, or {@code null} if none
- */
- public final CountedCompleter<?> nextComplete() {
- CountedCompleter<?> p;
- if ((p = completer) != null)
- return p.firstComplete();
- else {
- quietlyComplete();
- return null;
- }
- }
-
- /**
- * Equivalent to {@code getRoot().quietlyComplete()}.
- */
- public final void quietlyCompleteRoot() {
- for (CountedCompleter<?> a = this, p;;) {
- if ((p = a.completer) == null) {
- a.quietlyComplete();
- return;
- }
- a = p;
- }
- }
-
- /**
- * Supports ForkJoinTask exception propagation.
- */
- void internalPropagateException(Throwable ex) {
- CountedCompleter<?> a = this, s = a;
- while (a.onExceptionalCompletion(ex, s) &&
- (a = (s = a).completer) != null && a.status >= 0)
- a.recordExceptionalCompletion(ex);
- }
-
- /**
- * Implements execution conventions for CountedCompleters.
- */
- protected final boolean exec() {
- compute();
- return false;
- }
-
- /**
- * Returns the result of the computation. By default
- * returns {@code null}, which is appropriate for {@code Void}
- * actions, but in other cases should be overridden, almost
- * always to return a field or function of a field that
- * holds the result upon completion.
- *
- * @return the result of the computation
- */
- public T getRawResult() { return null; }
-
- /**
- * A method that result-bearing CountedCompleters may optionally
- * use to help maintain result data. By default, does nothing.
- * Overrides are not recommended. However, if this method is
- * overridden to update existing objects or fields, then it must
- * in general be defined to be thread-safe.
- */
- protected void setRawResult(T t) { }
-
- // Unsafe mechanics
- private static final sun.misc.Unsafe U;
- private static final long PENDING;
- static {
- try {
- U = getUnsafe();
- PENDING = U.objectFieldOffset
- (CountedCompleter.class.getDeclaredField("pending"));
- } catch (Exception e) {
- throw new Error(e);
- }
- }
-
- /**
- * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
- * Replace with a simple call to Unsafe.getUnsafe when integrating
- * into a jdk.
- *
- * @return a sun.misc.Unsafe
- */
- private static sun.misc.Unsafe getUnsafe() {
- return scala.concurrent.util.Unsafe.instance;
- }
-}
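The CountedCompleter documentation deleted above revolves around one idea: a shared pending count that finished subtasks decrement, with the completion action firing exactly once when the count reaches zero. A rough Scala sketch of that pattern using plain java.util.concurrent primitives (an illustration only, not a substitute for the removed class):

  import java.util.concurrent.atomic.AtomicInteger

  // Runs `action` exactly once, after `parts` tasks have each reported completion.
  final class PendingCount(parts: Int, action: () => Unit) {
    private val pending = new AtomicInteger(parts)
    def taskFinished(): Unit =
      if (pending.decrementAndGet() == 0) action()  // the last finisher triggers completion
  }

  val done = new PendingCount(3, () => println("all subtasks complete"))
  (1 to 3).foreach { i =>
    new Thread(new Runnable {
      def run(): Unit = { println(s"subtask $i"); done.taskFinished() }
    }).start()
  }
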
-
-/**
- * An {@link ExecutorService} for running {@link ForkJoinTask}s.
- * A {@code ForkJoinPool} provides the entry point for submissions
- * from non-{@code ForkJoinTask} clients, as well as management and
- * monitoring operations.
- *
- * <p>A {@code ForkJoinPool} differs from other kinds of {@link
- * ExecutorService} mainly by virtue of employing
- * <em>work-stealing</em>: all threads in the pool attempt to find and
- * execute tasks submitted to the pool and/or created by other active
- * tasks (eventually blocking waiting for work if none exist). This
- * enables efficient processing when most tasks spawn other subtasks
- * (as do most {@code ForkJoinTask}s), as well as when many small
- * tasks are submitted to the pool from external clients. Especially
- * when setting <em>asyncMode</em> to true in constructors, {@code
- * ForkJoinPool}s may also be appropriate for use with event-style
- * tasks that are never joined.
- *
- * <p>A static {@link #commonPool()} is available and appropriate for
- * most applications. The common pool is used by any ForkJoinTask that
- * is not explicitly submitted to a specified pool. Using the common
- * pool normally reduces resource usage (its threads are slowly
- * reclaimed during periods of non-use, and reinstated upon subsequent
- * use).
- *
- * <p>For applications that require separate or custom pools, a {@code
- * ForkJoinPool} may be constructed with a given target parallelism
- * level; by default, equal to the number of available processors. The
- * pool attempts to maintain enough active (or available) threads by
- * dynamically adding, suspending, or resuming internal worker
- * threads, even if some tasks are stalled waiting to join
- * others. However, no such adjustments are guaranteed in the face of
- * blocked I/O or other unmanaged synchronization. The nested {@link
- * ManagedBlocker} interface enables extension of the kinds of
- * synchronization accommodated.
- *
- * <p>In addition to execution and lifecycle control methods, this
- * class provides status check methods (for example
- * {@link #getStealCount}) that are intended to aid in developing,
- * tuning, and monitoring fork/join applications. Also, method
- * {@link #toString} returns indications of pool state in a
- * convenient form for informal monitoring.
- *
- * <p>As is the case with other ExecutorServices, there are three
- * main task execution methods summarized in the following table.
- * These are designed to be used primarily by clients not already
- * engaged in fork/join computations in the current pool. The main
- * forms of these methods accept instances of {@code ForkJoinTask},
- * but overloaded forms also allow mixed execution of plain {@code
- * Runnable}- or {@code Callable}- based activities as well. However,
- * tasks that are already executing in a pool should normally instead
- * use the within-computation forms listed in the table unless using
- * async event-style tasks that are not usually joined, in which case
- * there is little difference among choice of methods.
- *
- * <table BORDER CELLPADDING=3 CELLSPACING=1>
- * <tr>
- * <td></td>
- * <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
- * <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
- * </tr>
- * <tr>
- * <td> <b>Arrange async execution</td>
- * <td> {@link #execute(ForkJoinTask)}</td>
- * <td> {@link ForkJoinTask#fork}</td>
- * </tr>
- * <tr>
- * <td> <b>Await and obtain result</td>
- * <td> {@link #invoke(ForkJoinTask)}</td>
- * <td> {@link ForkJoinTask#invoke}</td>
- * </tr>
- * <tr>
- * <td> <b>Arrange exec and obtain Future</td>
- * <td> {@link #submit(ForkJoinTask)}</td>
- * <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
- * </tr>
- * </table>
- *
- * <p>The common pool is by default constructed with default
- * parameters, but these may be controlled by setting three {@link
- * System#getProperty system properties} with prefix {@code
- * java.util.concurrent.ForkJoinPool.common}: {@code parallelism} --
- * an integer greater than zero, {@code threadFactory} -- the class
- * name of a {@link ForkJoinWorkerThreadFactory}, and {@code
- * exceptionHandler} -- the class name of a {@link
- * java.lang.Thread.UncaughtExceptionHandler
- * Thread.UncaughtExceptionHandler}. Upon any error in establishing
- * these settings, default parameters are used.
- *
- * <p><b>Implementation notes</b>: This implementation restricts the
- * maximum number of running threads to 32767. Attempts to create
- * pools with greater than the maximum number result in
- * {@code IllegalArgumentException}.
- *
- * <p>This implementation rejects submitted tasks (that is, by throwing
- * {@link RejectedExecutionException}) only when the pool is shut down
- * or internal resources have been exhausted.
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public class ForkJoinPool extends AbstractExecutorService {
-
- /*
- * Implementation Overview
- *
- * This class and its nested classes provide the main
- * functionality and control for a set of worker threads:
- * Submissions from non-FJ threads enter into submission queues.
- * Workers take these tasks and typically split them into subtasks
- * that may be stolen by other workers. Preference rules give
- * first priority to processing tasks from their own queues (LIFO
- * or FIFO, depending on mode), then to randomized FIFO steals of
- * tasks in other queues.
- *
- * WorkQueues
- * ==========
- *
- * Most operations occur within work-stealing queues (in nested
- * class WorkQueue). These are special forms of Deques that
- * support only three of the four possible end-operations -- push,
- * pop, and poll (aka steal), under the further constraints that
- * push and pop are called only from the owning thread (or, as
- * extended here, under a lock), while poll may be called from
- * other threads. (If you are unfamiliar with them, you probably
- * want to read Herlihy and Shavit's book "The Art of
- * Multiprocessor programming", chapter 16 describing these in
- * more detail before proceeding.) The main work-stealing queue
- * design is roughly similar to those in the papers "Dynamic
- * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
- * (http://research.sun.com/scalable/pubs/index.html) and
- * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
- * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
- * The main differences ultimately stem from GC requirements that
- * we null out taken slots as soon as we can, to maintain as small
- * a footprint as possible even in programs generating huge
- * numbers of tasks. To accomplish this, we shift the CAS
- * arbitrating pop vs poll (steal) from being on the indices
- * ("base" and "top") to the slots themselves. So, both a
- * successful pop and poll mainly entail a CAS of a slot from
- * non-null to null. Because we rely on CASes of references, we
- * do not need tag bits on base or top. They are simple ints as
- * used in any circular array-based queue (see for example
- * ArrayDeque). Updates to the indices must still be ordered in a
- * way that guarantees that top == base means the queue is empty,
- * but otherwise may err on the side of possibly making the queue
- * appear nonempty when a push, pop, or poll have not fully
- * committed. Note that this means that the poll operation,
- * considered individually, is not wait-free. One thief cannot
- * successfully continue until another in-progress one (or, if
- * previously empty, a push) completes. However, in the
- * aggregate, we ensure at least probabilistic non-blockingness.
- * If an attempted steal fails, a thief always chooses a different
- * random victim target to try next. So, in order for one thief to
- * progress, it suffices for any in-progress poll or new push on
- * any empty queue to complete. (This is why we normally use
- * method pollAt and its variants that try once at the apparent
- * base index, else consider alternative actions, rather than
- * method poll.)
- *
- * This approach also enables support of a user mode in which local
- * task processing is in FIFO, not LIFO order, simply by using
- * poll rather than pop. This can be useful in message-passing
- * frameworks in which tasks are never joined. However neither
- * mode considers affinities, loads, cache localities, etc, so
- * rarely provide the best possible performance on a given
- * machine, but portably provide good throughput by averaging over
- * these factors. (Further, even if we did try to use such
- * information, we do not usually have a basis for exploiting it.
- * For example, some sets of tasks profit from cache affinities,
- * but others are harmed by cache pollution effects.)
- *
- * WorkQueues are also used in a similar way for tasks submitted
- * to the pool. We cannot mix these tasks in the same queues used
- * for work-stealing (this would contaminate lifo/fifo
- * processing). Instead, we randomly associate submission queues
- * with submitting threads, using a form of hashing. The
- * ThreadLocal Submitter class contains a value initially used as
- * a hash code for choosing existing queues, but may be randomly
- * repositioned upon contention with other submitters. In
- * essence, submitters act like workers except that they are
- * restricted to executing local tasks that they submitted (or in
- * the case of CountedCompleters, others with the same root task).
- * However, because most shared/external queue operations are more
- * expensive than internal, and because, at steady state, external
- * submitters will compete for CPU with workers, ForkJoinTask.join
- * and related methods disable them from repeatedly helping to
- * process tasks if all workers are active. Insertion of tasks in
- * shared mode requires a lock (mainly to protect in the case of
- * resizing) but we use only a simple spinlock (using bits in
- * field qlock), because submitters encountering a busy queue move
- * on to try or create other queues -- they block only when
- * creating and registering new queues.
- *
- * Management
- * ==========
- *
- * The main throughput advantages of work-stealing stem from
- * decentralized control -- workers mostly take tasks from
- * themselves or each other. We cannot negate this in the
- * implementation of other management responsibilities. The main
- * tactic for avoiding bottlenecks is packing nearly all
- * essentially atomic control state into two volatile variables
- * that are by far most often read (not written) as status and
- * consistency checks.
- *
- * Field "ctl" contains 64 bits holding all the information needed
- * to atomically decide to add, inactivate, enqueue (on an event
- * queue), dequeue, and/or re-activate workers. To enable this
- * packing, we restrict maximum parallelism to (1<<15)-1 (which is
- * far in excess of normal operating range) to allow ids, counts,
- * and their negations (used for thresholding) to fit into 16bit
- * fields.
- *
- * Field "plock" is a form of sequence lock with a saturating
- * shutdown bit (similarly for per-queue "qlocks"), mainly
- * protecting updates to the workQueues array, as well as to
- * enable shutdown. When used as a lock, it is normally only very
- * briefly held, so is nearly always available after at most a
- * brief spin, but we use a monitor-based backup strategy to
- * block when needed.
- *
- * Recording WorkQueues. WorkQueues are recorded in the
- * "workQueues" array that is created upon first use and expanded
- * if necessary. Updates to the array while recording new workers
- * and unrecording terminated ones are protected from each other
- * by a lock but the array is otherwise concurrently readable, and
- * accessed directly. To simplify index-based operations, the
- * array size is always a power of two, and all readers must
- * tolerate null slots. Worker queues are at odd indices. Shared
- * (submission) queues are at even indices, up to a maximum of 64
- * slots, to limit growth even if array needs to expand to add
- * more workers. Grouping them together in this way simplifies and
- * speeds up task scanning.
- *
- * All worker thread creation is on-demand, triggered by task
- * submissions, replacement of terminated workers, and/or
- * compensation for blocked workers. However, all other support
- * code is set up to work with other policies. To ensure that we
- * do not hold on to worker references that would prevent GC, ALL
- * accesses to workQueues are via indices into the workQueues
- * array (which is one source of some of the messy code
- * constructions here). In essence, the workQueues array serves as
- * a weak reference mechanism. Thus for example the wait queue
- * field of ctl stores indices, not references. Access to the
- * workQueues in associated methods (for example signalWork) must
- * both index-check and null-check the IDs. All such accesses
- * ignore bad IDs by returning out early from what they are doing,
- * since this can only be associated with termination, in which
- * case it is OK to give up. All uses of the workQueues array
- * also check that it is non-null (even if previously
- * non-null). This allows nulling during termination, which is
- * currently not necessary, but remains an option for
- * resource-revocation-based shutdown schemes. It also helps
- * reduce JIT issuance of uncommon-trap code, which tends to
- * unnecessarily complicate control flow in some methods.
- *
- * Event Queuing. Unlike HPC work-stealing frameworks, we cannot
- * let workers spin indefinitely scanning for tasks when none can
- * be found immediately, and we cannot start/resume workers unless
- * there appear to be tasks available. On the other hand, we must
- * quickly prod them into action when new tasks are submitted or
- * generated. In many usages, ramp-up time to activate workers is
- * the main limiting factor in overall performance (this is
- * compounded at program start-up by JIT compilation and
- * allocation). So we try to streamline this as much as possible.
- * We park/unpark workers after placing in an event wait queue
- * when they cannot find work. This "queue" is actually a simple
- * Treiber stack, headed by the "id" field of ctl, plus a 15bit
- * counter value (that reflects the number of times a worker has
- * been inactivated) to avoid ABA effects (we need only as many
- * version numbers as worker threads). Successors are held in
- * field WorkQueue.nextWait. Queuing deals with several intrinsic
- * races, mainly that a task-producing thread can miss seeing (and
- * signalling) another thread that gave up looking for work but
- * has not yet entered the wait queue. We solve this by requiring
- * a full sweep of all workers (via repeated calls to method
- * scan()) both before and after a newly waiting worker is added
- * to the wait queue. During a rescan, the worker might release
- * some other queued worker rather than itself, which has the same
- * net effect. Because enqueued workers may actually be rescanning
- * rather than waiting, we set and clear the "parker" field of
- * WorkQueues to reduce unnecessary calls to unpark. (This
- * requires a secondary recheck to avoid missed signals.) Note
- * the unusual conventions about Thread.interrupts surrounding
- * parking and other blocking: Because interrupts are used solely
- * to alert threads to check termination, which is checked anyway
- * upon blocking, we clear status (using Thread.interrupted)
- * before any call to park, so that park does not immediately
- * return due to status being set via some other unrelated call to
- * interrupt in user code.
- *
- * Signalling. We create or wake up workers only when there
- * appears to be at least one task they might be able to find and
- * execute. However, many other threads may notice the same task
- * and each signal to wake up a thread that might take it. So in
- * general, pools will be over-signalled. When a submission is
- * added or another worker adds a task to a queue that has fewer
- * than two tasks, they signal waiting workers (or trigger
- * creation of new ones if fewer than the given parallelism level
- * -- signalWork), and may leave a hint to the unparked worker to
- * help signal others upon wakeup). These primary signals are
- * buttressed by others (see method helpSignal) whenever other
- * threads scan for work or do not have a task to process. On
- * most platforms, signalling (unpark) overhead time is noticeably
- * long, and the time between signalling a thread and it actually
- * making progress can be very noticeably long, so it is worth
- * offloading these delays from critical paths as much as
- * possible.
- *
- * Trimming workers. To release resources after periods of lack of
- * use, a worker starting to wait when the pool is quiescent will
- * time out and terminate if the pool has remained quiescent for a
- * given period -- a short period if there are more threads than
- * parallelism, longer as the number of threads decreases. This
- * will slowly propagate, eventually terminating all workers after
- * periods of non-use.
- *
- * Shutdown and Termination. A call to shutdownNow atomically sets
- * a plock bit and then (non-atomically) sets each worker's
- * qlock status, cancels all unprocessed tasks, and wakes up
- * all waiting workers. Detecting whether termination should
- * commence after a non-abrupt shutdown() call requires more work
- * and bookkeeping. We need consensus about quiescence (i.e., that
- * there is no more work). The active count provides a primary
- * indication but non-abrupt shutdown still requires a rechecking
- * scan for any workers that are inactive but not queued.
- *
- * Joining Tasks
- * =============
- *
- * Any of several actions may be taken when one worker is waiting
- * to join a task stolen (or always held) by another. Because we
- * are multiplexing many tasks on to a pool of workers, we can't
- * just let them block (as in Thread.join). We also cannot just
- * reassign the joiner's run-time stack with another and replace
- * it later, which would be a form of "continuation", that even if
- * possible is not necessarily a good idea since we sometimes need
- * both an unblocked task and its continuation to progress.
- * Instead we combine two tactics:
- *
- * Helping: Arranging for the joiner to execute some task that it
- * would be running if the steal had not occurred.
- *
- * Compensating: Unless there are already enough live threads,
- * method tryCompensate() may create or re-activate a spare
- * thread to compensate for blocked joiners until they unblock.
- *
- * A third form (implemented in tryRemoveAndExec) amounts to
- * helping a hypothetical compensator: If we can readily tell that
- * a possible action of a compensator is to steal and execute the
- * task being joined, the joining thread can do so directly,
- * without the need for a compensation thread (although at the
- * expense of larger run-time stacks, but the tradeoff is
- * typically worthwhile).
- *
- * The ManagedBlocker extension API can't use helping so relies
- * only on compensation in method awaitBlocker.
- *
- * The algorithm in tryHelpStealer entails a form of "linear"
- * helping: Each worker records (in field currentSteal) the most
- * recent task it stole from some other worker. Plus, it records
- * (in field currentJoin) the task it is currently actively
- * joining. Method tryHelpStealer uses these markers to try to
- * find a worker to help (i.e., steal back a task from and execute
- * it) that could hasten completion of the actively joined task.
- * In essence, the joiner executes a task that would be on its own
- * local deque had the to-be-joined task not been stolen. This may
- * be seen as a conservative variant of the approach in Wagner &
- * Calder "Leapfrogging: a portable technique for implementing
- * efficient futures" SIGPLAN Notices, 1993
- * (http://portal.acm.org/citation.cfm?id=155354). It differs in
- * that: (1) We only maintain dependency links across workers upon
- * steals, rather than use per-task bookkeeping. This sometimes
- * requires a linear scan of workQueues array to locate stealers,
- * but often doesn't because stealers leave hints (that may become
- * stale/wrong) of where to locate them. It is only a hint
- * because a worker might have had multiple steals and the hint
- * records only one of them (usually the most current). Hinting
- * isolates cost to when it is needed, rather than adding to
- * per-task overhead. (2) It is "shallow", ignoring nesting and
- * potentially cyclic mutual steals. (3) It is intentionally
- * racy: field currentJoin is updated only while actively joining,
- * which means that we miss links in the chain during long-lived
- * tasks, GC stalls etc (which is OK since blocking in such cases
- * is usually a good idea). (4) We bound the number of attempts
- * to find work (see MAX_HELP) and fall back to suspending the
- * worker and if necessary replacing it with another.
- *
- * Helping actions for CountedCompleters are much simpler: Method
- * helpComplete can take and execute any task with the same root
- * as the task being waited on. However, this still entails some
- * traversal of completer chains, so is less efficient than using
- * CountedCompleters without explicit joins.
- *
- * It is impossible to keep exactly the target parallelism number
- * of threads running at any given time. Determining the
- * existence of conservatively safe helping targets, the
- * availability of already-created spares, and the apparent need
- * to create new spares are all racy, so we rely on multiple
- * retries of each. Compensation in the apparent absence of
- * helping opportunities is challenging to control on JVMs, where
- * GC and other activities can stall progress of tasks that in
- * turn stall out many other dependent tasks, without us being
- * able to determine whether they will ever require compensation.
- * Even though work-stealing otherwise encounters little
- * degradation in the presence of more threads than cores,
- * aggressively adding new threads in such cases entails risk of
- * unwanted positive feedback control loops in which more threads
- * cause more dependent stalls (as well as delayed progress of
- * unblocked threads to the point that we know they are available)
- * leading to more situations requiring more threads, and so
- * on. This aspect of control can be seen as an (analytically
- * intractable) game with an opponent that may choose the worst
- * (for us) active thread to stall at any time. We take several
- * precautions to bound losses (and thus bound gains), mainly in
- * methods tryCompensate and awaitJoin.
- *
- * Common Pool
- * ===========
- *
- * The static common Pool always exists after static
- * initialization. Since it (or any other created pool) need
- * never be used, we minimize initial construction overhead and
- * footprint to the setup of about a dozen fields, with no nested
- * allocation. Most bootstrapping occurs within method
- * fullExternalPush during the first submission to the pool.
- *
- * When external threads submit to the common pool, they can
- * perform some subtask processing (see externalHelpJoin and
- * related methods). We do not need to record whether these
- * submissions are to the common pool -- if not, externalHelpJoin
- * returns quickly (at the most helping to signal some common pool
- * workers). These submitters would otherwise be blocked waiting
- * for completion, so the extra effort (with liberally sprinkled
- * task status checks) in inapplicable cases amounts to an odd
- * form of limited spin-wait before blocking in ForkJoinTask.join.
- *
- * Style notes
- * ===========
- *
- * There is a lot of representation-level coupling among classes
- * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The
- * fields of WorkQueue maintain data structures managed by
- * ForkJoinPool, so are directly accessed. There is little point
- * trying to reduce this, since any associated future changes in
- * representations will need to be accompanied by algorithmic
- * changes anyway. Several methods intrinsically sprawl because
- * they must accumulate sets of consistent reads of volatiles held
- * in local variables. Methods signalWork() and scan() are the
- * main bottlenecks, so are especially heavily
- * micro-optimized/mangled. There are lots of inline assignments
- * (of form "while ((local = field) != 0)") which are usually the
- * simplest way to ensure the required read orderings (which are
- * sometimes critical). This leads to a "C"-like style of listing
- * declarations of these locals at the heads of methods or blocks.
- * There are several occurrences of the unusual "do {} while
- * (!cas...)" which is the simplest way to force an update of a
- * CAS'ed variable. There are also other coding oddities (including
- * several unnecessary-looking hoisted null checks) that help
- * some methods perform reasonably even when interpreted (not
- * compiled).
- *
- * The order of declarations in this file is:
- * (1) Static utility functions
- * (2) Nested (static) classes
- * (3) Static fields
- * (4) Fields, along with constants used when unpacking some of them
- * (5) Internal control methods
- * (6) Callbacks and other support for ForkJoinTask methods
- * (7) Exported methods
- * (8) Static block initializing statics in minimally dependent order
- */
-
- // Static utilities
-
- /**
- * If there is a security manager, makes sure caller has
- * permission to modify threads.
- */
- private static void checkPermission() {
- SecurityManager security = System.getSecurityManager();
- if (security != null)
- security.checkPermission(modifyThreadPermission);
- }
-
- // Nested classes
-
- /**
- * Factory for creating new {@link ForkJoinWorkerThread}s.
- * A {@code ForkJoinWorkerThreadFactory} must be defined and used
- * for {@code ForkJoinWorkerThread} subclasses that extend base
- * functionality or initialize threads with different contexts.
- */
- public static interface ForkJoinWorkerThreadFactory {
- /**
- * Returns a new worker thread operating in the given pool.
- *
- * @param pool the pool this thread works in
- * @throws NullPointerException if the pool is null
- */
- public ForkJoinWorkerThread newThread(ForkJoinPool pool);
- }
-
- /**
- * Default ForkJoinWorkerThreadFactory implementation; creates a
- * new ForkJoinWorkerThread.
- */
- static final class DefaultForkJoinWorkerThreadFactory
- implements ForkJoinWorkerThreadFactory {
- public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
- return new ForkJoinWorkerThread(pool);
- }
- }
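-
- /*
-  * Illustrative sketch (not part of the original file): the kind of
-  * factory described by the ForkJoinWorkerThreadFactory javadoc above,
-  * applying per-thread setup before returning the worker. The class
-  * name and the priority choice are assumptions made only for
-  * illustration.
-  */
- static final class LowPriorityForkJoinWorkerThreadFactory
-     implements ForkJoinWorkerThreadFactory {
-     public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
-         ForkJoinWorkerThread wt = new ForkJoinWorkerThread(pool);
-         wt.setPriority(Thread.MIN_PRIORITY); // example per-thread setup
-         return wt;
-     }
- }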
-
- /**
- * Per-thread records for threads that submit to pools. Currently
- * holds only pseudo-random seed / index that is used to choose
- * submission queues in method externalPush. In the future, this may
- * also incorporate a means to implement different task rejection
- * and resubmission policies.
- *
- * Seeds for submitters and workers/workQueues work in basically
- * the same way but are initialized and updated using slightly
- * different mechanics. Both are initialized using the same
- * approach as in class ThreadLocal, where successive values are
- * unlikely to collide with previous values. Seeds are then
- * randomly modified upon collisions using xorshifts, which
- * requires a non-zero seed.
- */
- static final class Submitter {
- int seed;
- Submitter(int s) { seed = s; }
- }
-
- /**
- * Class for artificial tasks that are used to replace the target
- * of local joins if they are removed from an interior queue slot
- * in WorkQueue.tryRemoveAndExec. We don't need the proxy to
- * actually do anything beyond having a unique identity.
- */
- static final class EmptyTask extends ForkJoinTask<Void> {
- private static final long serialVersionUID = -7721805057305804111L;
- EmptyTask() { status = ForkJoinTask.NORMAL; } // force done
- public final Void getRawResult() { return null; }
- public final void setRawResult(Void x) {}
- public final boolean exec() { return true; }
- }
-
- /**
- * Queues supporting work-stealing as well as external task
- * submission. See above for main rationale and algorithms.
- * Implementation relies heavily on "Unsafe" intrinsics
- * and selective use of "volatile":
- *
- * Field "base" is the index (mod array.length) of the least valid
- * queue slot, which is always the next position to steal (poll)
- * from if nonempty. Reads and writes require volatile orderings
- * but not CAS, because updates are only performed after slot
- * CASes.
- *
- * Field "top" is the index (mod array.length) of the next queue
- * slot to push to or pop from. It is written only by owner thread
- * for push, or under lock for external/shared push, and accessed
- * by other threads only after reading (volatile) base. Both top
- * and base are allowed to wrap around on overflow, but (top -
- * base) (or more commonly -(base - top) to force volatile read of
- * base before top) still estimates size. The lock ("qlock") is
- * forced to -1 on termination, causing all further lock attempts
- * to fail. (Note: we don't need CAS for termination state because
- * upon pool shutdown, all shared-queues will stop being used
- * anyway.) Nearly all lock bodies are set up so that exceptions
- * within lock bodies are "impossible" (modulo JVM errors that
- * would cause failure anyway.)
- *
- * The array slots are read and written using the emulation of
- * volatiles/atomics provided by Unsafe. Insertions must in
- * general use putOrderedObject as a form of releasing store to
- * ensure that all writes to the task object are ordered before
- * its publication in the queue. All removals entail a CAS to
- * null. The array is always a power of two. To ensure safety of
- * Unsafe array operations, all accesses perform explicit null
- * checks and implicit bounds checks via power-of-two masking.
- *
- * In addition to basic queuing support, this class contains
- * fields described elsewhere to control execution. It turns out
- * to work better memory-layout-wise to include them in this class
- * rather than a separate class.
- *
- * Performance on most platforms is very sensitive to placement of
- * instances of both WorkQueues and their arrays -- we absolutely
- * do not want multiple WorkQueue instances or multiple queue
- * arrays sharing cache lines. (It would be best for queue objects
- * and their arrays to share, but there is nothing available to
- * help arrange that). Unfortunately, because they are recorded
- * in a common array, WorkQueue instances are often moved to be
- * adjacent by garbage collectors. To reduce impact, we use field
- * padding that works OK on common platforms; this effectively
- * trades off slightly slower average field access for the sake of
- * avoiding really bad worst-case access. (Until better JVM
- * support is in place, this padding is dependent on transient
- * properties of JVM field layout rules.) We also take care in
- * allocating, sizing and resizing the array. Non-shared queue
- * arrays are initialized by workers before use. Others are
- * allocated on first use.
- */
- static final class WorkQueue {
- /**
- * Capacity of work-stealing queue array upon initialization.
- * Must be a power of two; at least 4, but should be larger to
- * reduce or eliminate cacheline sharing among queues.
- * Currently, it is much larger, as a partial workaround for
- * the fact that JVMs often place arrays in locations that
- * share GC bookkeeping (especially cardmarks) such that
- * per-write accesses encounter serious memory contention.
- */
- static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
-
- /**
- * Maximum size for queue arrays. Must be a power of two less
- * than or equal to 1 << (31 - width of array entry) to ensure
- * lack of wraparound of index calculations, but defined to a
- * value a bit less than this to help users trap runaway
- * programs before saturating systems.
- */
- static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
-
- // Heuristic padding to ameliorate unfortunate memory placements
- volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
-
- int seed; // for random scanning; initialize nonzero
- volatile int eventCount; // encoded inactivation count; < 0 if inactive
- int nextWait; // encoded record of next event waiter
- int hint; // steal or signal hint (index)
- int poolIndex; // index of this queue in pool (or 0)
- final int mode; // 0: lifo, > 0: fifo, < 0: shared
- int nsteals; // number of steals
- volatile int qlock; // 1: locked, -1: terminate; else 0
- volatile int base; // index of next slot for poll
- int top; // index of next slot for push
- ForkJoinTask<?>[] array; // the elements (initially unallocated)
- final ForkJoinPool pool; // the containing pool (may be null)
- final ForkJoinWorkerThread owner; // owning thread or null if shared
- volatile Thread parker; // == owner during call to park; else null
- volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
- ForkJoinTask<?> currentSteal; // current non-local task being executed
-
- volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
- volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
-
- WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
- int seed) {
- this.pool = pool;
- this.owner = owner;
- this.mode = mode;
- this.seed = seed;
- // Place indices in the center of array (that is not yet allocated)
- base = top = INITIAL_QUEUE_CAPACITY >>> 1;
- }
-
- /**
- * Returns the approximate number of tasks in the queue.
- */
- final int queueSize() {
- int n = base - top; // non-owner callers must read base first
- return (n >= 0) ? 0 : -n; // ignore transient negative
- }
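-
- // Worked example of the index wraparound noted in the class comment
- // above (values chosen only for illustration): if top has wrapped to
- // Integer.MIN_VALUE + 2 while base is still Integer.MAX_VALUE - 1,
- // then in 32-bit arithmetic base - top == -4, so queueSize() still
- // reports the 4 queued tasks even though both indices have overflowed.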
-
- /**
- * Provides a more accurate estimate of whether this queue has
- * any tasks than does queueSize, by checking whether a
- * near-empty queue has at least one unclaimed task.
- */
- final boolean isEmpty() {
- ForkJoinTask<?>[] a; int m, s;
- int n = base - (s = top);
- return (n >= 0 ||
- (n == -1 &&
- ((a = array) == null ||
- (m = a.length - 1) < 0 ||
- U.getObject
- (a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
- }
-
- /**
- * Pushes a task. Call only by owner in unshared queues. (The
- * shared-queue version is embedded in method externalPush.)
- *
- * @param task the task. Caller must ensure non-null.
- * @throws RejectedExecutionException if array cannot be resized
- */
- final void push(ForkJoinTask<?> task) {
- ForkJoinTask<?>[] a; ForkJoinPool p;
- int s = top, m, n;
- if ((a = array) != null) { // ignore if queue removed
- int j = (((m = a.length - 1) & s) << ASHIFT) + ABASE;
- U.putOrderedObject(a, j, task);
- if ((n = (top = s + 1) - base) <= 2) {
- if ((p = pool) != null)
- p.signalWork(this);
- }
- else if (n >= m)
- growArray();
- }
- }
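-
- // Worked example of the Unsafe slot addressing used above (ABASE and
- // the array index scale are platform dependent; the values here are
- // assumptions for illustration only): with a 4-byte reference scale,
- // ASHIFT == 2, so for an assumed ABASE of 16 and an 8-element array,
- // logical index 5 maps to byte offset ((5 & 7) << 2) + 16 == 36,
- // i.e. the same slot as a plain a[5] access, with the power-of-two
- // mask supplying the implicit bounds check.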
-
- /**
- * Initializes or doubles the capacity of array. Call either
- * by owner or with lock held -- it is OK for base, but not
- * top, to move while resizings are in progress.
- */
- final ForkJoinTask<?>[] growArray() {
- ForkJoinTask<?>[] oldA = array;
- int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
- if (size > MAXIMUM_QUEUE_CAPACITY)
- throw new RejectedExecutionException("Queue capacity exceeded");
- int oldMask, t, b;
- ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
- if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
- (t = top) - (b = base) > 0) {
- int mask = size - 1;
- do {
- ForkJoinTask<?> x;
- int oldj = ((b & oldMask) << ASHIFT) + ABASE;
- int j = ((b & mask) << ASHIFT) + ABASE;
- x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
- if (x != null &&
- U.compareAndSwapObject(oldA, oldj, x, null))
- U.putObjectVolatile(a, j, x);
- } while (++b != t);
- }
- return a;
- }
-
- /**
- * Takes next task, if one exists, in LIFO order. Call only
- * by owner in unshared queues.
- */
- final ForkJoinTask<?> pop() {
- ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
- if ((a = array) != null && (m = a.length - 1) >= 0) {
- for (int s; (s = top - 1) - base >= 0;) {
- long j = ((m & s) << ASHIFT) + ABASE;
- if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
- break;
- if (U.compareAndSwapObject(a, j, t, null)) {
- top = s;
- return t;
- }
- }
- }
- return null;
- }
-
- /**
- * Takes a task in FIFO order if b is base of queue and a task
- * can be claimed without contention. Specialized versions
- * appear in ForkJoinPool methods scan and tryHelpStealer.
- */
- final ForkJoinTask<?> pollAt(int b) {
- ForkJoinTask<?> t; ForkJoinTask<?>[] a;
- if ((a = array) != null) {
- int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
- if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
- base == b &&
- U.compareAndSwapObject(a, j, t, null)) {
- base = b + 1;
- return t;
- }
- }
- return null;
- }
-
- /**
- * Takes next task, if one exists, in FIFO order.
- */
- final ForkJoinTask<?> poll() {
- ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
- while ((b = base) - top < 0 && (a = array) != null) {
- int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
- t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
- if (t != null) {
- if (base == b &&
- U.compareAndSwapObject(a, j, t, null)) {
- base = b + 1;
- return t;
- }
- }
- else if (base == b) {
- if (b + 1 == top)
- break;
- Thread.yield(); // wait for lagging update (very rare)
- }
- }
- return null;
- }
-
- /**
- * Takes next task, if one exists, in order specified by mode.
- */
- final ForkJoinTask<?> nextLocalTask() {
- return mode == 0 ? pop() : poll();
- }
-
- /**
- * Returns next task, if one exists, in order specified by mode.
- */
- final ForkJoinTask<?> peek() {
- ForkJoinTask<?>[] a = array; int m;
- if (a == null || (m = a.length - 1) < 0)
- return null;
- int i = mode == 0 ? top - 1 : base;
- int j = ((i & m) << ASHIFT) + ABASE;
- return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
- }
-
- /**
- * Pops the given task only if it is at the current top.
- * (A shared version is available only via FJP.tryExternalUnpush)
- */
- final boolean tryUnpush(ForkJoinTask<?> t) {
- ForkJoinTask<?>[] a; int s;
- if ((a = array) != null && (s = top) != base &&
- U.compareAndSwapObject
- (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
- top = s;
- return true;
- }
- return false;
- }
-
- /**
- * Removes and cancels all known tasks, ignoring any exceptions.
- */
- final void cancelAll() {
- ForkJoinTask.cancelIgnoringExceptions(currentJoin);
- ForkJoinTask.cancelIgnoringExceptions(currentSteal);
- for (ForkJoinTask<?> t; (t = poll()) != null; )
- ForkJoinTask.cancelIgnoringExceptions(t);
- }
-
- /**
- * Computes next value for random probes. Scans don't require
- * a very high quality generator, but also not a crummy one.
- * Marsaglia xor-shift is cheap and works well enough. Note:
- * This is manually inlined in its usages in ForkJoinPool to
- * avoid writes inside busy scan loops.
- */
- final int nextSeed() {
- int r = seed;
- r ^= r << 13;
- r ^= r >>> 17;
- return seed = r ^= r << 5;
- }
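-
- // Worked example (value chosen only for illustration): starting from
- // seed == 1, the three shifts above produce 0x42021 (270369); the
- // update is an invertible linear transform that maps 0 only to 0, so
- // a nonzero seed stays nonzero, as the Submitter comment requires.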
-
- // Specialized execution methods
-
- /**
- * Pops and runs tasks until empty.
- */
- private void popAndExecAll() {
- // A bit faster than repeated pop calls
- ForkJoinTask<?>[] a; int m, s; long j; ForkJoinTask<?> t;
- while ((a = array) != null && (m = a.length - 1) >= 0 &&
- (s = top - 1) - base >= 0 &&
- (t = ((ForkJoinTask<?>)
- U.getObject(a, j = ((m & s) << ASHIFT) + ABASE)))
- != null) {
- if (U.compareAndSwapObject(a, j, t, null)) {
- top = s;
- t.doExec();
- }
- }
- }
-
- /**
- * Polls and runs tasks until empty.
- */
- private void pollAndExecAll() {
- for (ForkJoinTask<?> t; (t = poll()) != null;)
- t.doExec();
- }
-
- /**
- * If present, removes from queue and executes the given task,
- * or any other cancelled task. Returns (true) on any CAS
- * or consistency check failure so caller can retry.
- *
- * @return false if no progress can be made, else true
- */
- final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
- boolean stat = true, removed = false, empty = true;
- ForkJoinTask<?>[] a; int m, s, b, n;
- if ((a = array) != null && (m = a.length - 1) >= 0 &&
- (n = (s = top) - (b = base)) > 0) {
- for (ForkJoinTask<?> t;;) { // traverse from s to b
- int j = ((--s & m) << ASHIFT) + ABASE;
- t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
- if (t == null) // inconsistent length
- break;
- else if (t == task) {
- if (s + 1 == top) { // pop
- if (!U.compareAndSwapObject(a, j, task, null))
- break;
- top = s;
- removed = true;
- }
- else if (base == b) // replace with proxy
- removed = U.compareAndSwapObject(a, j, task,
- new EmptyTask());
- break;
- }
- else if (t.status >= 0)
- empty = false;
- else if (s + 1 == top) { // pop and throw away
- if (U.compareAndSwapObject(a, j, t, null))
- top = s;
- break;
- }
- if (--n == 0) {
- if (!empty && base == b)
- stat = false;
- break;
- }
- }
- }
- if (removed)
- task.doExec();
- return stat;
- }
-
- /**
- * Polls for and executes the given task or any other task in
- * its CountedCompleter computation.
- */
- final boolean pollAndExecCC(ForkJoinTask<?> root) {
- ForkJoinTask<?>[] a; int b; Object o;
- outer: while ((b = base) - top < 0 && (a = array) != null) {
- long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
- if ((o = U.getObject(a, j)) == null ||
- !(o instanceof CountedCompleter))
- break;
- for (CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;;) {
- if (r == root) {
- if (base == b &&
- U.compareAndSwapObject(a, j, t, null)) {
- base = b + 1;
- t.doExec();
- return true;
- }
- else
- break; // restart
- }
- if ((r = r.completer) == null)
- break outer; // not part of root computation
- }
- }
- return false;
- }
-
- /**
- * Executes a top-level task and any local tasks remaining
- * after execution.
- */
- final void runTask(ForkJoinTask<?> t) {
- if (t != null) {
- (currentSteal = t).doExec();
- currentSteal = null;
- ++nsteals;
- if (base - top < 0) { // process remaining local tasks
- if (mode == 0)
- popAndExecAll();
- else
- pollAndExecAll();
- }
- }
- }
-
- /**
- * Executes a non-top-level (stolen) task.
- */
- final void runSubtask(ForkJoinTask<?> t) {
- if (t != null) {
- ForkJoinTask<?> ps = currentSteal;
- (currentSteal = t).doExec();
- currentSteal = ps;
- }
- }
-
- /**
- * Returns true if owned and not known to be blocked.
- */
- final boolean isApparentlyUnblocked() {
- Thread wt; Thread.State s;
- return (eventCount >= 0 &&
- (wt = owner) != null &&
- (s = wt.getState()) != Thread.State.BLOCKED &&
- s != Thread.State.WAITING &&
- s != Thread.State.TIMED_WAITING);
- }
-
- // Unsafe mechanics
- private static final sun.misc.Unsafe U;
- private static final long QLOCK;
- private static final int ABASE;
- private static final int ASHIFT;
- static {
- try {
- U = getUnsafe();
- Class<?> k = WorkQueue.class;
- Class<?> ak = ForkJoinTask[].class;
- QLOCK = U.objectFieldOffset
- (k.getDeclaredField("qlock"));
- ABASE = U.arrayBaseOffset(ak);
- int scale = U.arrayIndexScale(ak);
- if ((scale & (scale - 1)) != 0)
- throw new Error("data type scale not a power of two");
- ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
- } catch (Exception e) {
- throw new Error(e);
- }
- }
- }
-
- // static fields (initialized in static initializer below)
-
- /**
- * Creates a new ForkJoinWorkerThread. This factory is used unless
- * overridden in ForkJoinPool constructors.
- */
- public static final ForkJoinWorkerThreadFactory
- defaultForkJoinWorkerThreadFactory;
-
- /**
- * Per-thread submission bookkeeping. Shared across all pools
- * to reduce ThreadLocal pollution and because random motion
- * to avoid contention in one pool is likely to hold for others.
- * Lazily initialized on first submission (but null-checked
- * in other contexts to avoid unnecessary initialization).
- */
- static final ThreadLocal<Submitter> submitters;
-
- /**
- * Permission required for callers of methods that may start or
- * kill threads.
- */
- private static final RuntimePermission modifyThreadPermission;
-
- /**
- * Common (static) pool. Non-null for public use unless a static
- * construction exception, but internal usages null-check on use
- * to paranoically avoid potential initialization circularities
- * as well as to simplify generated code.
- */
- static final ForkJoinPool common;
-
- /**
- * Common pool parallelism. Must equal common.parallelism.
- */
- static final int commonParallelism;
-
- /**
- * Sequence number for creating workerNamePrefix.
- */
- private static int poolNumberSequence;
-
- /**
- * Returns the next sequence number. We don't expect this to
- * ever contend, so use simple builtin sync.
- */
- private static final synchronized int nextPoolId() {
- return ++poolNumberSequence;
- }
-
- // static constants
-
- /**
- * Initial timeout value (in nanoseconds) for the thread
- * triggering quiescence to park waiting for new work. On timeout,
- * the thread will instead try to shrink the number of
- * workers. The value should be large enough to avoid overly
- * aggressive shrinkage during most transient stalls (long GCs
- * etc).
- */
- private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec
-
- /**
- * Timeout value when there are more threads than parallelism level
- */
- private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L;
-
- /**
- * Tolerance for idle timeouts, to cope with timer undershoots
- */
- private static final long TIMEOUT_SLOP = 2000000L;
-
- /**
- * The maximum stolen->joining link depth allowed in method
- * tryHelpStealer. Must be a power of two. Depths for legitimate
- * chains are unbounded, but we use a fixed constant to avoid
- * (otherwise unchecked) cycles and to bound staleness of
- * traversal parameters at the expense of sometimes blocking when
- * we could be helping.
- */
- private static final int MAX_HELP = 64;
-
- /**
- * Increment for seed generators. See class ThreadLocal for
- * explanation.
- */
- private static final int SEED_INCREMENT = 0x61c88647;
-
- /*
- * Bits and masks for control variables
- *
- * Field ctl is a long packed with:
- * AC: Number of active running workers minus target parallelism (16 bits)
- * TC: Number of total workers minus target parallelism (16 bits)
- * ST: true if pool is terminating (1 bit)
- * EC: the wait count of top waiting thread (15 bits)
- * ID: poolIndex of top of Treiber stack of waiters (16 bits)
- *
- * When convenient, we can extract the upper 32 bits of counts and
- * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
- * (int)ctl. The ec field is never accessed alone, but always
- * together with id and st. The offsets of counts by the target
- * parallelism and the positionings of fields make it possible to
- * perform the most common checks via sign tests of fields: When
- * ac is negative, there are not enough active workers, when tc is
- * negative, there are not enough total workers, and when e is
- * negative, the pool is terminating. To deal with these possibly
- * negative fields, we use casts in and out of "short" and/or
- * signed shifts to maintain signedness.
- *
- * When a thread is queued (inactivated), its eventCount field is
- * set negative, which is the only way to tell if a worker is
- * prevented from executing tasks, even though it must continue to
- * scan for them to avoid queuing races. Note however that
- * eventCount updates lag releases so usage requires care.
- *
- * Field plock is an int packed with:
- * SHUTDOWN: true if shutdown is enabled (1 bit)
- * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits)
- * SIGNAL: set when threads may be waiting on the lock (1 bit)
- *
- * The sequence number enables simple consistency checks:
- * Staleness of read-only operations on the workQueues array can
- * be checked by comparing plock before vs after the reads.
- */
-
- // bit positions/shifts for fields
- private static final int AC_SHIFT = 48;
- private static final int TC_SHIFT = 32;
- private static final int ST_SHIFT = 31;
- private static final int EC_SHIFT = 16;
-
- // bounds
- private static final int SMASK = 0xffff; // short bits
- private static final int MAX_CAP = 0x7fff; // max #workers - 1
- private static final int EVENMASK = 0xfffe; // even short bits
- private static final int SQMASK = 0x007e; // max 64 (even) slots
- private static final int SHORT_SIGN = 1 << 15;
- private static final int INT_SIGN = 1 << 31;
-
- // masks
- private static final long STOP_BIT = 0x0001L << ST_SHIFT;
- private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
- private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
-
- // units for incrementing and decrementing
- private static final long TC_UNIT = 1L << TC_SHIFT;
- private static final long AC_UNIT = 1L << AC_SHIFT;
-
- // masks and units for dealing with u = (int)(ctl >>> 32)
- private static final int UAC_SHIFT = AC_SHIFT - 32;
- private static final int UTC_SHIFT = TC_SHIFT - 32;
- private static final int UAC_MASK = SMASK << UAC_SHIFT;
- private static final int UTC_MASK = SMASK << UTC_SHIFT;
- private static final int UAC_UNIT = 1 << UAC_SHIFT;
- private static final int UTC_UNIT = 1 << UTC_SHIFT;
-
- // masks and units for dealing with e = (int)ctl
- private static final int E_MASK = 0x7fffffff; // no STOP_BIT
- private static final int E_SEQ = 1 << EC_SHIFT;
-
- // plock bits
- private static final int SHUTDOWN = 1 << 31;
- private static final int PL_LOCK = 2;
- private static final int PL_SIGNAL = 1;
- private static final int PL_SPINS = 1 << 8;
-
- // access mode for WorkQueue
- static final int LIFO_QUEUE = 0;
- static final int FIFO_QUEUE = 1;
- static final int SHARED_QUEUE = -1;
-
- // bounds for #steps in scan loop -- must be a power of 2 minus 1
- private static final int MIN_SCAN = 0x1ff; // cover estimation slop
- private static final int MAX_SCAN = 0x1ffff; // 4 * max workers
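-
- // Worked example of the ctl packing described above (the parallelism
- // value is chosen only for illustration): with a target parallelism
- // of 4 and no workers yet created or active, AC == TC == -4, so
- // ctl == (0xfffcL << AC_SHIFT) | (0xfffcL << TC_SHIFT)
- //     == 0xfffcfffc00000000L.
- // Then (int)(ctl >> AC_SHIFT) == -4 (not enough active workers),
- // (short)(ctl >>> TC_SHIFT) == -4 (not enough total workers), and
- // u == (int)(ctl >>> 32) == 0xfffcfffc is negative, the sign test
- // used throughout to detect a worker deficit.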
-
- // Instance fields
-
- /*
- * Field layout of this class tends to matter more than one would
- * like. Runtime layout order is only loosely related to
- * declaration order and may differ across JVMs, but the following
- * empirically works OK on current JVMs.
- */
-
- // Heuristic padding to ameliorate unfortunate memory placements
- volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
-
- volatile long stealCount; // collects worker counts
- volatile long ctl; // main pool control
- volatile int plock; // shutdown status and seqLock
- volatile int indexSeed; // worker/submitter index seed
- final int config; // mode and parallelism level
- WorkQueue[] workQueues; // main registry
- final ForkJoinWorkerThreadFactory factory;
- final Thread.UncaughtExceptionHandler ueh; // per-worker UEH
- final String workerNamePrefix; // to create worker name string
-
- volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
- volatile Object pad18, pad19, pad1a, pad1b;
-
- /**
- * Acquires the plock lock to protect worker array and related
- * updates. This method is called only if an initial CAS on plock
- * fails. This acts as a spinlock for normal cases, but falls back
- * to builtin monitor to block when (rarely) needed. This would be
- * a terrible idea for a highly contended lock, but works fine as
- * a more conservative alternative to a pure spinlock.
- */
- private int acquirePlock() {
- int spins = PL_SPINS, r = 0, ps, nps;
- for (;;) {
- if (((ps = plock) & PL_LOCK) == 0 &&
- U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK))
- return nps;
- else if (r == 0) { // randomize spins if possible
- Thread t = Thread.currentThread(); WorkQueue w; Submitter z;
- if ((t instanceof ForkJoinWorkerThread) &&
- (w = ((ForkJoinWorkerThread)t).workQueue) != null)
- r = w.seed;
- else if ((z = submitters.get()) != null)
- r = z.seed;
- else
- r = 1;
- }
- else if (spins >= 0) {
- r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift
- if (r >= 0)
- --spins;
- }
- else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) {
- synchronized (this) {
- if ((plock & PL_SIGNAL) != 0) {
- try {
- wait();
- } catch (InterruptedException ie) {
- try {
- Thread.currentThread().interrupt();
- } catch (SecurityException ignore) {
- }
- }
- }
- else
- notifyAll();
- }
- }
- }
- }
-
- /**
- * Unlocks and signals any thread waiting for plock. Called only
- * when CAS of seq value for unlock fails.
- */
- private void releasePlock(int ps) {
- plock = ps;
- synchronized (this) { notifyAll(); }
- }
-
- /**
- * Tries to create and start one worker if fewer workers than the
- * target parallelism level exist. Adjusts counts etc. on failure.
- */
- private void tryAddWorker() {
- long c; int u;
- while ((u = (int)((c = ctl) >>> 32)) < 0 &&
- (u & SHORT_SIGN) != 0 && (int)c == 0) {
- long nc = (long)(((u + UTC_UNIT) & UTC_MASK) |
- ((u + UAC_UNIT) & UAC_MASK)) << 32;
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- ForkJoinWorkerThreadFactory fac;
- Throwable ex = null;
- ForkJoinWorkerThread wt = null;
- try {
- if ((fac = factory) != null &&
- (wt = fac.newThread(this)) != null) {
- wt.start();
- break;
- }
- } catch (Throwable e) {
- ex = e;
- }
- deregisterWorker(wt, ex);
- break;
- }
- }
- }
-
- // Registering and deregistering workers
-
- /**
- * Callback from ForkJoinWorkerThread to establish and record its
- * WorkQueue. To avoid scanning bias due to packing entries in
- * front of the workQueues array, we treat the array as a simple
- * power-of-two hash table using per-thread seed as hash,
- * expanding as needed.
- *
- * @param wt the worker thread
- * @return the worker's queue
- */
- final WorkQueue registerWorker(ForkJoinWorkerThread wt) {
- Thread.UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps;
- wt.setDaemon(true);
- if ((handler = ueh) != null)
- wt.setUncaughtExceptionHandler(handler);
- do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed,
- s += SEED_INCREMENT) ||
- s == 0); // skip 0
- WorkQueue w = new WorkQueue(this, wt, config >>> 16, s);
- if (((ps = plock) & PL_LOCK) != 0 ||
- !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
- ps = acquirePlock();
- int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
- try {
- if ((ws = workQueues) != null) { // skip if shutting down
- int n = ws.length, m = n - 1;
- int r = (s << 1) | 1; // use odd-numbered indices
- if (ws[r &= m] != null) { // collision
- int probes = 0; // step by approx half size
- int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2;
- while (ws[r = (r + step) & m] != null) {
- if (++probes >= n) {
- workQueues = ws = Arrays.copyOf(ws, n <<= 1);
- m = n - 1;
- probes = 0;
- }
- }
- }
- w.eventCount = w.poolIndex = r; // volatile write orders
- ws[r] = w;
- }
- } finally {
- if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
- releasePlock(nps);
- }
- wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex)));
- return w;
- }
-
- /**
- * Final callback from terminating worker, as well as upon failure
- * to construct or start a worker. Removes record of worker from
- * array, and adjusts counts. If pool is shutting down, tries to
- * complete termination.
- *
- * @param wt the worker thread or null if construction failed
- * @param ex the exception causing failure, or null if none
- */
- final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
- WorkQueue w = null;
- if (wt != null && (w = wt.workQueue) != null) {
- int ps;
- w.qlock = -1; // ensure set
- long ns = w.nsteals, sc; // collect steal count
- do {} while (!U.compareAndSwapLong(this, STEALCOUNT,
- sc = stealCount, sc + ns));
- if (((ps = plock) & PL_LOCK) != 0 ||
- !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
- ps = acquirePlock();
- int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
- try {
- int idx = w.poolIndex;
- WorkQueue[] ws = workQueues;
- if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w)
- ws[idx] = null;
- } finally {
- if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
- releasePlock(nps);
- }
- }
-
- long c; // adjust ctl counts
- do {} while (!U.compareAndSwapLong
- (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) |
- ((c - TC_UNIT) & TC_MASK) |
- (c & ~(AC_MASK|TC_MASK)))));
-
- if (!tryTerminate(false, false) && w != null && w.array != null) {
- w.cancelAll(); // cancel remaining tasks
- WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e;
- while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) {
- if (e > 0) { // activate or create replacement
- if ((ws = workQueues) == null ||
- (i = e & SMASK) >= ws.length ||
- (v = ws[i]) == null)
- break;
- long nc = (((long)(v.nextWait & E_MASK)) |
- ((long)(u + UAC_UNIT) << 32));
- if (v.eventCount != (e | INT_SIGN))
- break;
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- v.eventCount = (e + E_SEQ) & E_MASK;
- if ((p = v.parker) != null)
- U.unpark(p);
- break;
- }
- }
- else {
- if ((short)u < 0)
- tryAddWorker();
- break;
- }
- }
- }
- if (ex == null) // help clean refs on way out
- ForkJoinTask.helpExpungeStaleExceptions();
- else // rethrow
- ForkJoinTask.rethrow(ex);
- }
-
- // Submissions
-
- /**
- * Unless shutting down, adds the given task to a submission queue
- * at submitter's current queue index (modulo submission
- * range). Only the most common path is directly handled in this
- * method. All others are relayed to fullExternalPush.
- *
- * @param task the task. Caller must ensure non-null.
- */
- final void externalPush(ForkJoinTask<?> task) {
- WorkQueue[] ws; WorkQueue q; Submitter z; int m; ForkJoinTask<?>[] a;
- if ((z = submitters.get()) != null && plock > 0 &&
- (ws = workQueues) != null && (m = (ws.length - 1)) >= 0 &&
- (q = ws[m & z.seed & SQMASK]) != null &&
- U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock
- int b = q.base, s = q.top, n, an;
- if ((a = q.array) != null && (an = a.length) > (n = s + 1 - b)) {
- int j = (((an - 1) & s) << ASHIFT) + ABASE;
- U.putOrderedObject(a, j, task);
- q.top = s + 1; // push on to deque
- q.qlock = 0;
- if (n <= 2)
- signalWork(q);
- return;
- }
- q.qlock = 0;
- }
- fullExternalPush(task);
- }
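-
- // Worked example of the submission-slot hashing above (array size and
- // seed are chosen only for illustration): with workQueues.length == 16
- // (m == 15) and a submitter seed of 37, the slot index is
- // m & 37 & SQMASK == 15 & 37 & 0x7e == 4 -- always an even index,
- // keeping external submissions out of the odd slots that
- // registerWorker assigns to workers.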
-
- /**
- * Full version of externalPush. This method is called, among
- * other times, upon the first submission of the first task to the
- * pool, so must perform secondary initialization. It also
- * detects first submission by an external thread by looking up
- * its ThreadLocal, and creates a new shared queue if the one at
- * index is empty or contended. The plock lock body must be
- * exception-free (so no try/finally) so we optimistically
- * allocate new queues outside the lock and throw them away if
- * (very rarely) not needed.
- *
- * Secondary initialization occurs when plock is zero, to create
- * workQueue array and set plock to a valid value. This lock body
- * must also be exception-free. Because the plock seq value can
- * eventually wrap around zero, this method harmlessly fails to
- * reinitialize if workQueues exists, while still advancing plock.
- */
- private void fullExternalPush(ForkJoinTask<?> task) {
- int r = 0; // random index seed
- for (Submitter z = submitters.get();;) {
- WorkQueue[] ws; WorkQueue q; int ps, m, k;
- if (z == null) {
- if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed,
- r += SEED_INCREMENT) && r != 0)
- submitters.set(z = new Submitter(r));
- }
- else if (r == 0) { // move to a different index
- r = z.seed;
- r ^= r << 13; // same xorshift as WorkQueues
- r ^= r >>> 17;
- z.seed = r ^ (r << 5);
- }
- else if ((ps = plock) < 0)
- throw new RejectedExecutionException();
- else if (ps == 0 || (ws = workQueues) == null ||
- (m = ws.length - 1) < 0) { // initialize workQueues
- int p = config & SMASK; // find power of two table size
- int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots
- n |= n >>> 1; n |= n >>> 2; n |= n >>> 4;
- n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1;
- WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ?
- new WorkQueue[n] : null);
- if (((ps = plock) & PL_LOCK) != 0 ||
- !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
- ps = acquirePlock();
- if (((ws = workQueues) == null || ws.length == 0) && nws != null)
- workQueues = nws;
- int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
- if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
- releasePlock(nps);
- }
- else if ((q = ws[k = r & m & SQMASK]) != null) {
- if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) {
- ForkJoinTask<?>[] a = q.array;
- int s = q.top;
- boolean submitted = false;
- try { // locked version of push
- if ((a != null && a.length > s + 1 - q.base) ||
- (a = q.growArray()) != null) { // must presize
- int j = (((a.length - 1) & s) << ASHIFT) + ABASE;
- U.putOrderedObject(a, j, task);
- q.top = s + 1;
- submitted = true;
- }
- } finally {
- q.qlock = 0; // unlock
- }
- if (submitted) {
- signalWork(q);
- return;
- }
- }
- r = 0; // move on failure
- }
- else if (((ps = plock) & PL_LOCK) == 0) { // create new queue
- q = new WorkQueue(this, null, SHARED_QUEUE, r);
- if (((ps = plock) & PL_LOCK) != 0 ||
- !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
- ps = acquirePlock();
- if ((ws = workQueues) != null && k < ws.length && ws[k] == null)
- ws[k] = q;
- int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN);
- if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
- releasePlock(nps);
- }
- else
- r = 0; // try elsewhere while lock held
- }
- }
-
- // Maintaining ctl counts
-
- /**
- * Increments active count; mainly called upon return from blocking.
- */
- final void incrementActiveCount() {
- long c;
- do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT));
- }
-
- /**
- * Tries to create or activate a worker if too few are active.
- *
- * @param q the (non-null) queue holding tasks to be signalled
- */
- final void signalWork(WorkQueue q) {
- int hint = q.poolIndex;
- long c; int e, u, i, n; WorkQueue[] ws; WorkQueue w; Thread p;
- while ((u = (int)((c = ctl) >>> 32)) < 0) {
- if ((e = (int)c) > 0) {
- if ((ws = workQueues) != null && ws.length > (i = e & SMASK) &&
- (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
- long nc = (((long)(w.nextWait & E_MASK)) |
- ((long)(u + UAC_UNIT) << 32));
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- w.hint = hint;
- w.eventCount = (e + E_SEQ) & E_MASK;
- if ((p = w.parker) != null)
- U.unpark(p);
- break;
- }
- if (q.top - q.base <= 0)
- break;
- }
- else
- break;
- }
- else {
- if ((short)u < 0)
- tryAddWorker();
- break;
- }
- }
- }
-
- // Scanning for tasks
-
- /**
- * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
- */
- final void runWorker(WorkQueue w) {
- w.growArray(); // allocate queue
- do { w.runTask(scan(w)); } while (w.qlock >= 0);
- }
-
- /**
- * Scans for and, if found, returns one task, else possibly
- * inactivates the worker. This method operates on single reads of
- * volatile state and is designed to be re-invoked continuously,
- * in part because it returns upon detecting inconsistencies,
- * contention, or state changes that indicate possible success on
- * re-invocation.
- *
- * The scan searches for tasks across queues (starting at a random
- * index, and relying on registerWorker to irregularly scatter
- * them within array to avoid bias), checking each at least twice.
- * The scan terminates upon either finding a non-empty queue, or
- * completing the sweep. If the worker is not inactivated, it
- * takes and returns a task from this queue. Otherwise, if not
- * activated, it signals workers (that may include itself) and
- * returns so caller can retry. Also returns if the
- * worker array may have changed during an empty scan. On failure
- * to find a task, we take one of the following actions, after
- * which the caller will retry calling this method unless
- * terminated.
- *
- * * If pool is terminating, terminate the worker.
- *
- * * If not already enqueued, try to inactivate and enqueue the
- * worker on wait queue. Or, if inactivating has caused the pool
- * to be quiescent, relay to idleAwaitWork to possibly shrink
- * pool.
- *
- * * If already enqueued and none of the above apply, possibly
- * park awaiting signal, else lingering to help scan and signal.
- *
- * * If a non-empty queue discovered or left as a hint,
- * help wake up other workers before return.
- *
- * @param w the worker (via its WorkQueue)
- * @return a task or null if none found
- */
- private final ForkJoinTask<?> scan(WorkQueue w) {
- WorkQueue[] ws; int m;
- int ps = plock; // read plock before ws
- if (w != null && (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
- int ec = w.eventCount; // ec is negative if inactive
- int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5;
- w.hint = -1; // update seed and clear hint
- int j = ((m + m + 1) | MIN_SCAN) & MAX_SCAN;
- do {
- WorkQueue q; ForkJoinTask<?>[] a; int b;
- if ((q = ws[(r + j) & m]) != null && (b = q.base) - q.top < 0 &&
- (a = q.array) != null) { // probably nonempty
- int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
- ForkJoinTask<?> t = (ForkJoinTask<?>)
- U.getObjectVolatile(a, i);
- if (q.base == b && ec >= 0 && t != null &&
- U.compareAndSwapObject(a, i, t, null)) {
- if ((q.base = b + 1) - q.top < 0)
- signalWork(q);
- return t; // taken
- }
- else if ((ec < 0 || j < m) && (int)(ctl >> AC_SHIFT) <= 0) {
- w.hint = (r + j) & m; // help signal below
- break; // cannot take
- }
- }
- } while (--j >= 0);
-
- int h, e, ns; long c, sc; WorkQueue q;
- if ((ns = w.nsteals) != 0) {
- if (U.compareAndSwapLong(this, STEALCOUNT,
- sc = stealCount, sc + ns))
- w.nsteals = 0; // collect steals and rescan
- }
- else if (plock != ps) // consistency check
- ; // skip
- else if ((e = (int)(c = ctl)) < 0)
- w.qlock = -1; // pool is terminating
- else {
- if ((h = w.hint) < 0) {
- if (ec >= 0) { // try to enqueue/inactivate
- long nc = (((long)ec |
- ((c - AC_UNIT) & (AC_MASK|TC_MASK))));
- w.nextWait = e; // link and mark inactive
- w.eventCount = ec | INT_SIGN;
- if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc))
- w.eventCount = ec; // unmark on CAS failure
- else if ((int)(c >> AC_SHIFT) == 1 - (config & SMASK))
- idleAwaitWork(w, nc, c);
- }
- else if (w.eventCount < 0 && ctl == c) {
- Thread wt = Thread.currentThread();
- Thread.interrupted(); // clear status
- U.putObject(wt, PARKBLOCKER, this);
- w.parker = wt; // emulate LockSupport.park
- if (w.eventCount < 0) // recheck
- U.park(false, 0L); // block
- w.parker = null;
- U.putObject(wt, PARKBLOCKER, null);
- }
- }
- if ((h >= 0 || (h = w.hint) >= 0) &&
- (ws = workQueues) != null && h < ws.length &&
- (q = ws[h]) != null) { // signal others before retry
- WorkQueue v; Thread p; int u, i, s;
- for (int n = (config & SMASK) - 1;;) {
- int idleCount = (w.eventCount < 0) ? 0 : -1;
- if (((s = idleCount - q.base + q.top) <= n &&
- (n = s) <= 0) ||
- (u = (int)((c = ctl) >>> 32)) >= 0 ||
- (e = (int)c) <= 0 || m < (i = e & SMASK) ||
- (v = ws[i]) == null)
- break;
- long nc = (((long)(v.nextWait & E_MASK)) |
- ((long)(u + UAC_UNIT) << 32));
- if (v.eventCount != (e | INT_SIGN) ||
- !U.compareAndSwapLong(this, CTL, c, nc))
- break;
- v.hint = h;
- v.eventCount = (e + E_SEQ) & E_MASK;
- if ((p = v.parker) != null)
- U.unpark(p);
- if (--n <= 0)
- break;
- }
- }
- }
- }
- return null;
- }
-
- /**
- * If inactivating worker w has caused the pool to become
- * quiescent, checks for pool termination, and, so long as this is
- * not the only worker, waits for event for up to a given
- * duration. On timeout, if ctl has not changed, terminates the
- * worker, which will in turn wake up another worker to possibly
- * repeat this process.
- *
- * @param w the calling worker
- * @param currentCtl the ctl value triggering possible quiescence
- * @param prevCtl the ctl value to restore if thread is terminated
- */
- private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) {
- if (w != null && w.eventCount < 0 &&
- !tryTerminate(false, false) && (int)prevCtl != 0 &&
- ctl == currentCtl) {
- int dc = -(short)(currentCtl >>> TC_SHIFT);
- long parkTime = dc < 0 ? FAST_IDLE_TIMEOUT: (dc + 1) * IDLE_TIMEOUT;
- long deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP;
- Thread wt = Thread.currentThread();
- while (ctl == currentCtl) {
- Thread.interrupted(); // timed variant of version in scan()
- U.putObject(wt, PARKBLOCKER, this);
- w.parker = wt;
- if (ctl == currentCtl)
- U.park(false, parkTime);
- w.parker = null;
- U.putObject(wt, PARKBLOCKER, null);
- if (ctl != currentCtl)
- break;
- if (deadline - System.nanoTime() <= 0L &&
- U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) {
- w.eventCount = (w.eventCount + E_SEQ) | E_MASK;
- w.hint = -1;
- w.qlock = -1; // shrink
- break;
- }
- }
- }
- }
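-
- // Worked example of the park-time selection above (parallelism values
- // chosen only for illustration): with a target parallelism of 8, the
- // TC field of currentCtl is 0 when exactly 8 workers exist, so dc == 0
- // and parkTime == IDLE_TIMEOUT (2 seconds); with 10 workers TC == 2,
- // so dc == -2 and the shorter FAST_IDLE_TIMEOUT (200 milliseconds)
- // applies, shrinking excess threads more quickly.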
-
- /**
- * Scans through queues looking for work while joining a task; if
- * any present, signals. May return early if more signalling is
- * detectably unneeded.
- *
- * @param task return early if done
- * @param origin an index to start scan
- */
- private void helpSignal(ForkJoinTask<?> task, int origin) {
- WorkQueue[] ws; WorkQueue w; Thread p; long c; int m, u, e, i, s;
- if (task != null && task.status >= 0 &&
- (u = (int)(ctl >>> 32)) < 0 && (u >> UAC_SHIFT) < 0 &&
- (ws = workQueues) != null && (m = ws.length - 1) >= 0) {
- outer: for (int k = origin, j = m; j >= 0; --j) {
- WorkQueue q = ws[k++ & m];
- for (int n = m;;) { // limit to at most m signals
- if (task.status < 0)
- break outer;
- if (q == null ||
- ((s = -q.base + q.top) <= n && (n = s) <= 0))
- break;
- if ((u = (int)((c = ctl) >>> 32)) >= 0 ||
- (e = (int)c) <= 0 || m < (i = e & SMASK) ||
- (w = ws[i]) == null)
- break outer;
- long nc = (((long)(w.nextWait & E_MASK)) |
- ((long)(u + UAC_UNIT) << 32));
- if (w.eventCount != (e | INT_SIGN))
- break outer;
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- w.eventCount = (e + E_SEQ) & E_MASK;
- if ((p = w.parker) != null)
- U.unpark(p);
- if (--n <= 0)
- break;
- }
- }
- }
- }
- }
-
- /**
- * Tries to locate and execute tasks for a stealer of the given
- * task, or in turn one of its stealers. Traces currentSteal ->
- * currentJoin links looking for a thread working on a descendant
- * of the given task and with a non-empty queue to steal back and
- * execute tasks from. The first call to this method upon a
- * waiting join will often entail scanning/search, (which is OK
- * because the joiner has nothing better to do), but this method
- * leaves hints in workers to speed up subsequent calls. The
- * implementation is very branchy to cope with potential
- * inconsistencies or loops encountering chains that are stale,
- * unknown, or so long that they are likely cyclic.
- *
- * @param joiner the joining worker
- * @param task the task to join
- * @return 0 if no progress can be made, negative if task
- * known complete, else positive
- */
- private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) {
- int stat = 0, steps = 0; // bound to avoid cycles
- if (joiner != null && task != null) { // hoist null checks
- restart: for (;;) {
- ForkJoinTask<?> subtask = task; // current target
- for (WorkQueue j = joiner, v;;) { // v is stealer of subtask
- WorkQueue[] ws; int m, s, h;
- if ((s = task.status) < 0) {
- stat = s;
- break restart;
- }
- if ((ws = workQueues) == null || (m = ws.length - 1) <= 0)
- break restart; // shutting down
- if ((v = ws[h = (j.hint | 1) & m]) == null ||
- v.currentSteal != subtask) {
- for (int origin = h;;) { // find stealer
- if (((h = (h + 2) & m) & 15) == 1 &&
- (subtask.status < 0 || j.currentJoin != subtask))
- continue restart; // occasional staleness check
- if ((v = ws[h]) != null &&
- v.currentSteal == subtask) {
- j.hint = h; // save hint
- break;
- }
- if (h == origin)
- break restart; // cannot find stealer
- }
- }
- for (;;) { // help stealer or descend to its stealer
- ForkJoinTask[] a; int b;
- if (subtask.status < 0) // surround probes with
- continue restart; // consistency checks
- if ((b = v.base) - v.top < 0 && (a = v.array) != null) {
- int i = (((a.length - 1) & b) << ASHIFT) + ABASE;
- ForkJoinTask<?> t =
- (ForkJoinTask<?>)U.getObjectVolatile(a, i);
- if (subtask.status < 0 || j.currentJoin != subtask ||
- v.currentSteal != subtask)
- continue restart; // stale
- stat = 1; // apparent progress
- if (t != null && v.base == b &&
- U.compareAndSwapObject(a, i, t, null)) {
- v.base = b + 1; // help stealer
- joiner.runSubtask(t);
- }
- else if (v.base == b && ++steps == MAX_HELP)
- break restart; // v apparently stalled
- }
- else { // empty -- try to descend
- ForkJoinTask<?> next = v.currentJoin;
- if (subtask.status < 0 || j.currentJoin != subtask ||
- v.currentSteal != subtask)
- continue restart; // stale
- else if (next == null || ++steps == MAX_HELP)
- break restart; // dead-end or maybe cyclic
- else {
- subtask = next;
- j = v;
- break;
- }
- }
- }
- }
- }
- }
- return stat;
- }
-
- /**
- * Analog of tryHelpStealer for CountedCompleters. Tries to steal
- * and run tasks within the target's computation.
- *
- * @param task the task to join
- * @param mode if shared, exit upon completing any task
- * if all workers are active
- */
- private int helpComplete(ForkJoinTask<?> task, int mode) {
- WorkQueue[] ws; WorkQueue q; int m, n, s, u;
- if (task != null && (ws = workQueues) != null &&
- (m = ws.length - 1) >= 0) {
- for (int j = 1, origin = j;;) {
- if ((s = task.status) < 0)
- return s;
- if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) {
- origin = j;
- if (mode == SHARED_QUEUE &&
- ((u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0))
- break;
- }
- else if ((j = (j + 2) & m) == origin)
- break;
- }
- }
- return 0;
- }
-
- /**
- * Tries to decrement active count (sometimes implicitly) and
- * possibly release or create a compensating worker in preparation
- * for blocking. Fails on contention or termination. Otherwise,
- * adds a new thread if no idle workers are available and pool
- * may become starved.
- */
- final boolean tryCompensate() {
- int pc = config & SMASK, e, i, tc; long c;
- WorkQueue[] ws; WorkQueue w; Thread p;
- if ((ws = workQueues) != null && (e = (int)(c = ctl)) >= 0) {
- if (e != 0 && (i = e & SMASK) < ws.length &&
- (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) {
- long nc = ((long)(w.nextWait & E_MASK) |
- (c & (AC_MASK|TC_MASK)));
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- w.eventCount = (e + E_SEQ) & E_MASK;
- if ((p = w.parker) != null)
- U.unpark(p);
- return true; // replace with idle worker
- }
- }
- else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 &&
- (int)(c >> AC_SHIFT) + pc > 1) {
- long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
- if (U.compareAndSwapLong(this, CTL, c, nc))
- return true; // no compensation
- }
- else if (tc + pc < MAX_CAP) {
- long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
- if (U.compareAndSwapLong(this, CTL, c, nc)) {
- ForkJoinWorkerThreadFactory fac;
- Throwable ex = null;
- ForkJoinWorkerThread wt = null;
- try {
- if ((fac = factory) != null &&
- (wt = fac.newThread(this)) != null) {
- wt.start();
- return true;
- }
- } catch (Throwable rex) {
- ex = rex;
- }
- deregisterWorker(wt, ex); // clean up and return false
- }
- }
- }
- return false;
- }
-
- /**
- * Helps and/or blocks until the given task is done.
- *
- * @param joiner the joining worker
- * @param task the task
- * @return task status on exit
- */
- final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) {
- int s = 0;
- if (joiner != null && task != null && (s = task.status) >= 0) {
- ForkJoinTask<?> prevJoin = joiner.currentJoin;
- joiner.currentJoin = task;
- do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
- joiner.tryRemoveAndExec(task)); // process local tasks
- if (s >= 0 && (s = task.status) >= 0) {
- helpSignal(task, joiner.poolIndex);
- if ((s = task.status) >= 0 &&
- (task instanceof CountedCompleter))
- s = helpComplete(task, LIFO_QUEUE);
- }
- while (s >= 0 && (s = task.status) >= 0) {
- if ((!joiner.isEmpty() || // try helping
- (s = tryHelpStealer(joiner, task)) == 0) &&
- (s = task.status) >= 0) {
- helpSignal(task, joiner.poolIndex);
- if ((s = task.status) >= 0 && tryCompensate()) {
- if (task.trySetSignal() && (s = task.status) >= 0) {
- synchronized (task) {
- if (task.status >= 0) {
- try { // see ForkJoinTask
- task.wait(); // for explanation
- } catch (InterruptedException ie) {
- }
- }
- else
- task.notifyAll();
- }
- }
- long c; // re-activate
- do {} while (!U.compareAndSwapLong
- (this, CTL, c = ctl, c + AC_UNIT));
- }
- }
- }
- joiner.currentJoin = prevJoin;
- }
- return s;
- }
-
- /**
- * Stripped-down variant of awaitJoin used by timed joins. Tries
- * to help join only while there is continuous progress. (Caller
- * will then enter a timed wait.)
- *
- * @param joiner the joining worker
- * @param task the task
- */
- final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) {
- int s;
- if (joiner != null && task != null && (s = task.status) >= 0) {
- ForkJoinTask<?> prevJoin = joiner.currentJoin;
- joiner.currentJoin = task;
- do {} while ((s = task.status) >= 0 && !joiner.isEmpty() &&
- joiner.tryRemoveAndExec(task));
- if (s >= 0 && (s = task.status) >= 0) {
- helpSignal(task, joiner.poolIndex);
- if ((s = task.status) >= 0 &&
- (task instanceof CountedCompleter))
- s = helpComplete(task, LIFO_QUEUE);
- }
- if (s >= 0 && joiner.isEmpty()) {
- do {} while (task.status >= 0 &&
- tryHelpStealer(joiner, task) > 0);
- }
- joiner.currentJoin = prevJoin;
- }
- }
-
- /**
- * Returns a (probably) non-empty steal queue, if one is found
- * during a scan, else null. This method must be retried by
- * caller if, by the time it tries to use the queue, it is empty.
- * @param r a (random) seed for scanning
- */
- private WorkQueue findNonEmptyStealQueue(int r) {
- for (;;) {
- int ps = plock, m; WorkQueue[] ws; WorkQueue q;
- if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) {
- for (int j = (m + 1) << 2; j >= 0; --j) {
- if ((q = ws[(((r + j) << 1) | 1) & m]) != null &&
- q.base - q.top < 0)
- return q;
- }
- }
- if (plock == ps)
- return null;
- }
- }
-
- /**
- * Runs tasks until {@code isQuiescent()}. We piggyback on
- * active count ctl maintenance, but rather than blocking
- * when tasks cannot be found, we rescan until all others cannot
- * find tasks either.
- */
- final void helpQuiescePool(WorkQueue w) {
- for (boolean active = true;;) {
- long c; WorkQueue q; ForkJoinTask<?> t; int b;
- while ((t = w.nextLocalTask()) != null) {
- if (w.base - w.top < 0)
- signalWork(w);
- t.doExec();
- }
- if ((q = findNonEmptyStealQueue(w.nextSeed())) != null) {
- if (!active) { // re-establish active count
- active = true;
- do {} while (!U.compareAndSwapLong
- (this, CTL, c = ctl, c + AC_UNIT));
- }
- if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
- if (q.base - q.top < 0)
- signalWork(q);
- w.runSubtask(t);
- }
- }
- else if (active) { // decrement active count without queuing
- long nc = (c = ctl) - AC_UNIT;
- if ((int)(nc >> AC_SHIFT) + (config & SMASK) == 0)
- return; // bypass decrement-then-increment
- if (U.compareAndSwapLong(this, CTL, c, nc))
- active = false;
- }
- else if ((int)((c = ctl) >> AC_SHIFT) + (config & SMASK) == 0 &&
- U.compareAndSwapLong(this, CTL, c, c + AC_UNIT))
- return;
- }
- }
-
- /**
- * Gets and removes a local or stolen task for the given worker.
- *
- * @return a task, if available
- */
- final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
- for (ForkJoinTask<?> t;;) {
- WorkQueue q; int b;
- if ((t = w.nextLocalTask()) != null)
- return t;
- if ((q = findNonEmptyStealQueue(w.nextSeed())) == null)
- return null;
- if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) {
- if (q.base - q.top < 0)
- signalWork(q);
- return t;
- }
- }
- }
-
- /**
- * Returns a cheap heuristic guide for task partitioning when
- * programmers, frameworks, tools, or languages have little or no
- * idea about task granularity. In essence by offering this
- * method, we ask users only about tradeoffs in overhead vs
- * expected throughput and its variance, rather than how finely to
- * partition tasks.
- *
- * In a steady state strict (tree-structured) computation, each
- * thread makes available for stealing enough tasks for other
- * threads to remain active. Inductively, if all threads play by
- * the same rules, each thread should make available only a
- * constant number of tasks.
- *
- * The minimum useful constant is just 1. But using a value of 1
- * would require immediate replenishment upon each steal to
- * maintain enough tasks, which is infeasible. Further,
- * partitionings/granularities of offered tasks should minimize
- * steal rates, which in general means that threads nearer the top
- * of computation tree should generate more than those nearer the
- * bottom. In perfect steady state, each thread is at
- * approximately the same level of computation tree. However,
- * producing extra tasks amortizes the uncertainty of progress and
- * diffusion assumptions.
- *
- * So, users will want to use values larger (but not much larger)
- * than 1 to both smooth over transient shortages and hedge
- * against uneven progress; as traded off against the cost of
- * extra task overhead. We leave the user to pick a threshold
- * value to compare with the results of this call to guide
- * decisions, but recommend values such as 3.
- *
- * When all threads are active, it is on average OK to estimate
- * surplus strictly locally. In steady-state, if one thread is
- * maintaining say 2 surplus tasks, then so are others. So we can
- * just use estimated queue length. However, this strategy alone
- * leads to serious mis-estimates in some non-steady-state
- * conditions (ramp-up, ramp-down, other stalls). We can detect
- * many of these by further considering the number of "idle"
- * threads, which are known to have zero queued tasks, and
- * compensate by a factor of (#idle/#active) threads.
- *
- * Note: The approximation of #busy workers as #active workers is
- * not very good under current signalling scheme, and should be
- * improved.
- */
- static int getSurplusQueuedTaskCount() {
- Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
- if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) {
- int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).config & SMASK;
- int n = (q = wt.workQueue).top - q.base;
- int a = (int)(pool.ctl >> AC_SHIFT) + p;
- return n - (a > (p >>>= 1) ? 0 :
- a > (p >>>= 1) ? 1 :
- a > (p >>>= 1) ? 2 :
- a > (p >>>= 1) ? 4 :
- 8);
- }
- return 0;
- }
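
The javadoc above recommends comparing the result against a small threshold such as 3 but leaves the exact pattern to callers. A minimal, hypothetical sketch (the SplitAction name and the leaf work are invented for illustration, and it assumes the usual public ForkJoinTask.getSurplusQueuedTaskCount() wrapper over this internal method):

    // Sketch: subdivide only while few surplus tasks are queued, using the
    // threshold of about 3 suggested above. Names here are illustrative.
    class SplitAction extends RecursiveAction {
        final long[] data; final int lo, hi;
        SplitAction(long[] data, int lo, int hi) {
            this.data = data; this.lo = lo; this.hi = hi;
        }
        protected void compute() {
            if (hi - lo > 1 && ForkJoinTask.getSurplusQueuedTaskCount() <= 3) {
                int mid = (lo + hi) >>> 1;
                invokeAll(new SplitAction(data, lo, mid),
                          new SplitAction(data, mid, hi));
            } else {
                for (int i = lo; i < hi; ++i)
                    data[i] += 1;              // stand-in for the real leaf work
            }
        }
    }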
-
- // Termination
-
- /**
- * Possibly initiates and/or completes termination. The caller
- * triggering termination runs three passes through workQueues:
- * (0) Setting termination status, followed by wakeups of queued
- * workers; (1) cancelling all tasks; (2) interrupting lagging
- * threads (likely in external tasks, but possibly also blocked in
- * joins). Each pass repeats previous steps because of potential
- * lagging thread creation.
- *
- * @param now if true, unconditionally terminate, else only
- * if no work and no active workers
- * @param enable if true, enable shutdown when next possible
- * @return true if now terminating or terminated
- */
- private boolean tryTerminate(boolean now, boolean enable) {
- int ps;
- if (this == common) // cannot shut down
- return false;
- if ((ps = plock) >= 0) { // enable by setting plock
- if (!enable)
- return false;
- if ((ps & PL_LOCK) != 0 ||
- !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK))
- ps = acquirePlock();
- int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN;
- if (!U.compareAndSwapInt(this, PLOCK, ps, nps))
- releasePlock(nps);
- }
- for (long c;;) {
- if (((c = ctl) & STOP_BIT) != 0) { // already terminating
- if ((short)(c >>> TC_SHIFT) == -(config & SMASK)) {
- synchronized (this) {
- notifyAll(); // signal when 0 workers
- }
- }
- return true;
- }
- if (!now) { // check if idle & no tasks
- WorkQueue[] ws; WorkQueue w;
- if ((int)(c >> AC_SHIFT) != -(config & SMASK))
- return false;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; ++i) {
- if ((w = ws[i]) != null) {
- if (!w.isEmpty()) { // signal unprocessed tasks
- signalWork(w);
- return false;
- }
- if ((i & 1) != 0 && w.eventCount >= 0)
- return false; // unqueued inactive worker
- }
- }
- }
- }
- if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) {
- for (int pass = 0; pass < 3; ++pass) {
- WorkQueue[] ws; WorkQueue w; Thread wt;
- if ((ws = workQueues) != null) {
- int n = ws.length;
- for (int i = 0; i < n; ++i) {
- if ((w = ws[i]) != null) {
- w.qlock = -1;
- if (pass > 0) {
- w.cancelAll();
- if (pass > 1 && (wt = w.owner) != null) {
- if (!wt.isInterrupted()) {
- try {
- wt.interrupt();
- } catch (Throwable ignore) {
- }
- }
- U.unpark(wt);
- }
- }
- }
- }
- // Wake up workers parked on event queue
- int i, e; long cc; Thread p;
- while ((e = (int)(cc = ctl) & E_MASK) != 0 &&
- (i = e & SMASK) < n && i >= 0 &&
- (w = ws[i]) != null) {
- long nc = ((long)(w.nextWait & E_MASK) |
- ((cc + AC_UNIT) & AC_MASK) |
- (cc & (TC_MASK|STOP_BIT)));
- if (w.eventCount == (e | INT_SIGN) &&
- U.compareAndSwapLong(this, CTL, cc, nc)) {
- w.eventCount = (e + E_SEQ) & E_MASK;
- w.qlock = -1;
- if ((p = w.parker) != null)
- U.unpark(p);
- }
- }
- }
- }
- }
- }
- }
-
- // external operations on common pool
-
- /**
- * Returns common pool queue for a thread that has submitted at
- * least one task.
- */
- static WorkQueue commonSubmitterQueue() {
- ForkJoinPool p; WorkQueue[] ws; int m; Submitter z;
- return ((z = submitters.get()) != null &&
- (p = common) != null &&
- (ws = p.workQueues) != null &&
- (m = ws.length - 1) >= 0) ?
- ws[m & z.seed & SQMASK] : null;
- }
-
- /**
- * Tries to pop the given task from submitter's queue in common pool.
- */
- static boolean tryExternalUnpush(ForkJoinTask<?> t) {
- ForkJoinPool p; WorkQueue[] ws; WorkQueue q; Submitter z;
- ForkJoinTask<?>[] a; int m, s;
- if (t != null &&
- (z = submitters.get()) != null &&
- (p = common) != null &&
- (ws = p.workQueues) != null &&
- (m = ws.length - 1) >= 0 &&
- (q = ws[m & z.seed & SQMASK]) != null &&
- (s = q.top) != q.base &&
- (a = q.array) != null) {
- long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
- if (U.getObject(a, j) == t &&
- U.compareAndSwapInt(q, QLOCK, 0, 1)) {
- if (q.array == a && q.top == s && // recheck
- U.compareAndSwapObject(a, j, t, null)) {
- q.top = s - 1;
- q.qlock = 0;
- return true;
- }
- q.qlock = 0;
- }
- }
- return false;
- }
-
- /**
- * Tries to pop and run local tasks within the same computation
- * as the given root. On failure, tries to help complete from
- * other queues via helpComplete.
- */
- private void externalHelpComplete(WorkQueue q, ForkJoinTask<?> root) {
- ForkJoinTask<?>[] a; int m;
- if (q != null && (a = q.array) != null && (m = (a.length - 1)) >= 0 &&
- root != null && root.status >= 0) {
- for (;;) {
- int s, u; Object o; CountedCompleter<?> task = null;
- if ((s = q.top) - q.base > 0) {
- long j = ((m & (s - 1)) << ASHIFT) + ABASE;
- if ((o = U.getObject(a, j)) != null &&
- (o instanceof CountedCompleter)) {
- CountedCompleter<?> t = (CountedCompleter<?>)o, r = t;
- do {
- if (r == root) {
- if (U.compareAndSwapInt(q, QLOCK, 0, 1)) {
- if (q.array == a && q.top == s &&
- U.compareAndSwapObject(a, j, t, null)) {
- q.top = s - 1;
- task = t;
- }
- q.qlock = 0;
- }
- break;
- }
- } while ((r = r.completer) != null);
- }
- }
- if (task != null)
- task.doExec();
- if (root.status < 0 ||
- (u = (int)(ctl >>> 32)) >= 0 || (u >> UAC_SHIFT) >= 0)
- break;
- if (task == null) {
- helpSignal(root, q.poolIndex);
- if (root.status >= 0)
- helpComplete(root, SHARED_QUEUE);
- break;
- }
- }
- }
- }
-
- /**
- * Tries to help execute or signal availability of the given task
- * from submitter's queue in common pool.
- */
- static void externalHelpJoin(ForkJoinTask<?> t) {
- // Some hard-to-avoid overlap with tryExternalUnpush
- ForkJoinPool p; WorkQueue[] ws; WorkQueue q, w; Submitter z;
- ForkJoinTask<?>[] a; int m, s, n;
- if (t != null &&
- (z = submitters.get()) != null &&
- (p = common) != null &&
- (ws = p.workQueues) != null &&
- (m = ws.length - 1) >= 0 &&
- (q = ws[m & z.seed & SQMASK]) != null &&
- (a = q.array) != null) {
- int am = a.length - 1;
- if ((s = q.top) != q.base) {
- long j = ((am & (s - 1)) << ASHIFT) + ABASE;
- if (U.getObject(a, j) == t &&
- U.compareAndSwapInt(q, QLOCK, 0, 1)) {
- if (q.array == a && q.top == s &&
- U.compareAndSwapObject(a, j, t, null)) {
- q.top = s - 1;
- q.qlock = 0;
- t.doExec();
- }
- else
- q.qlock = 0;
- }
- }
- if (t.status >= 0) {
- if (t instanceof CountedCompleter)
- p.externalHelpComplete(q, t);
- else
- p.helpSignal(t, q.poolIndex);
- }
- }
- }
-
- // Exported methods
-
- // Constructors
-
- /**
- * Creates a {@code ForkJoinPool} with parallelism equal to {@link
- * java.lang.Runtime#availableProcessors}, using the {@linkplain
- * #defaultForkJoinWorkerThreadFactory default thread factory},
- * no UncaughtExceptionHandler, and non-async LIFO processing mode.
- *
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}{@code ("modifyThread")}
- */
- public ForkJoinPool() {
- this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
- defaultForkJoinWorkerThreadFactory, null, false);
- }
-
- /**
- * Creates a {@code ForkJoinPool} with the indicated parallelism
- * level, the {@linkplain
- * #defaultForkJoinWorkerThreadFactory default thread factory},
- * no UncaughtExceptionHandler, and non-async LIFO processing mode.
- *
- * @param parallelism the parallelism level
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero, or greater than implementation limit
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}{@code ("modifyThread")}
- */
- public ForkJoinPool(int parallelism) {
- this(parallelism, defaultForkJoinWorkerThreadFactory, null, false);
- }
-
- /**
- * Creates a {@code ForkJoinPool} with the given parameters.
- *
- * @param parallelism the parallelism level. For default value,
- * use {@link java.lang.Runtime#availableProcessors}.
- * @param factory the factory for creating new threads. For default value,
- * use {@link #defaultForkJoinWorkerThreadFactory}.
- * @param handler the handler for internal worker threads that
- * terminate due to unrecoverable errors encountered while executing
- * tasks. For default value, use {@code null}.
- * @param asyncMode if true,
- * establishes local first-in-first-out scheduling mode for forked
- * tasks that are never joined. This mode may be more appropriate
- * than default locally stack-based mode in applications in which
- * worker threads only process event-style asynchronous tasks.
- * For default value, use {@code false}.
- * @throws IllegalArgumentException if parallelism less than or
- * equal to zero, or greater than implementation limit
- * @throws NullPointerException if the factory is null
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}{@code ("modifyThread")}
- */
- public ForkJoinPool(int parallelism,
- ForkJoinWorkerThreadFactory factory,
- Thread.UncaughtExceptionHandler handler,
- boolean asyncMode) {
- checkPermission();
- if (factory == null)
- throw new NullPointerException();
- if (parallelism <= 0 || parallelism > MAX_CAP)
- throw new IllegalArgumentException();
- this.factory = factory;
- this.ueh = handler;
- this.config = parallelism | (asyncMode ? (FIFO_QUEUE << 16) : 0);
- long np = (long)(-parallelism); // offset ctl counts
- this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
- int pn = nextPoolId();
- StringBuilder sb = new StringBuilder("ForkJoinPool-");
- sb.append(Integer.toString(pn));
- sb.append("-worker-");
- this.workerNamePrefix = sb.toString();
- }
-
- /**
- * Constructor for common pool, suitable only for static initialization.
- * Basically the same as above, but uses smallest possible initial footprint.
- */
- ForkJoinPool(int parallelism, long ctl,
- ForkJoinWorkerThreadFactory factory,
- Thread.UncaughtExceptionHandler handler) {
- this.config = parallelism;
- this.ctl = ctl;
- this.factory = factory;
- this.ueh = handler;
- this.workerNamePrefix = "ForkJoinPool.commonPool-worker-";
- }
-
- /**
- * Returns the common pool instance. This pool is statically
- * constructed; its run state is unaffected by attempts to {@link
- * #shutdown} or {@link #shutdownNow}. However this pool and any
- * ongoing processing are automatically terminated upon program
- * {@link System#exit}. Any program that relies on asynchronous
- * task processing to complete before program termination should
- * invoke {@code commonPool().}{@link #awaitQuiescence}, before
- * exit.
- *
- * @return the common pool instance
- * @since 1.8
- */
- public static ForkJoinPool commonPool() {
- // assert common != null : "static init error";
- return common;
- }
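
A brief, hypothetical sketch of the quiesce-before-exit advice in the javadoc above (the background Runnable is a placeholder):

    import java.util.concurrent.TimeUnit;
    import scala.concurrent.forkjoin.ForkJoinPool;

    public class QuiesceBeforeExit {
        public static void main(String[] args) {
            // Fire-and-forget work on the common pool; it would otherwise be
            // abandoned when the JVM exits.
            ForkJoinPool.commonPool().execute(new Runnable() {
                public void run() { System.out.println("background work"); }
            });
            // Wait (bounded) for the common pool to become quiescent, as the
            // javadoc above recommends; returns false if the timeout elapses first.
            ForkJoinPool.commonPool().awaitQuiescence(1, TimeUnit.MINUTES);
        }
    }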
-
- // Execution methods
-
- /**
- * Performs the given task, returning its result upon completion.
- * If the computation encounters an unchecked Exception or Error,
- * it is rethrown as the outcome of this invocation. Rethrown
- * exceptions behave in the same way as regular exceptions, but,
- * when possible, contain stack traces (as displayed for example
- * using {@code ex.printStackTrace()}) of both the current thread
- * as well as the thread actually encountering the exception;
- * minimally only the latter.
- *
- * @param task the task
- * @return the task's result
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public <T> T invoke(ForkJoinTask<T> task) {
- if (task == null)
- throw new NullPointerException();
- externalPush(task);
- return task.join();
- }
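
As a concrete illustration of the submit-and-join behavior described above, a minimal recursive-sum sketch (SumTask and the 1000-element cutoff are invented for this example):

    // Sketch: invoke() pushes the root task externally and joins its result,
    // rethrowing any unchecked exception the computation produced.
    class SumTask extends RecursiveTask<Long> {
        final int from, to;                       // half-open range [from, to)
        SumTask(int from, int to) { this.from = from; this.to = to; }
        protected Long compute() {
            if (to - from <= 1000) {              // small enough: compute directly
                long s = 0L;
                for (int i = from; i < to; ++i) s += i;
                return s;
            }
            int mid = (from + to) >>> 1;
            SumTask left = new SumTask(from, mid);
            left.fork();                          // run the left half asynchronously
            long right = new SumTask(mid, to).compute();
            return right + left.join();           // join innermost-first
        }
    }
    // e.g. long total = new ForkJoinPool().invoke(new SumTask(0, 1000000));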
-
- /**
- * Arranges for (asynchronous) execution of the given task.
- *
- * @param task the task
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public void execute(ForkJoinTask<?> task) {
- if (task == null)
- throw new NullPointerException();
- externalPush(task);
- }
-
- // AbstractExecutorService methods
-
- /**
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public void execute(Runnable task) {
- if (task == null)
- throw new NullPointerException();
- ForkJoinTask<?> job;
- if (task instanceof ForkJoinTask<?>) // avoid re-wrap
- job = (ForkJoinTask<?>) task;
- else
- job = new ForkJoinTask.AdaptedRunnableAction(task);
- externalPush(job);
- }
-
- /**
- * Submits a ForkJoinTask for execution.
- *
- * @param task the task to submit
- * @return the task
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
- if (task == null)
- throw new NullPointerException();
- externalPush(task);
- return task;
- }
-
- /**
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public <T> ForkJoinTask<T> submit(Callable<T> task) {
- ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task);
- externalPush(job);
- return job;
- }
-
- /**
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public <T> ForkJoinTask<T> submit(Runnable task, T result) {
- ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result);
- externalPush(job);
- return job;
- }
-
- /**
- * @throws NullPointerException if the task is null
- * @throws RejectedExecutionException if the task cannot be
- * scheduled for execution
- */
- public ForkJoinTask<?> submit(Runnable task) {
- if (task == null)
- throw new NullPointerException();
- ForkJoinTask<?> job;
- if (task instanceof ForkJoinTask<?>) // avoid re-wrap
- job = (ForkJoinTask<?>) task;
- else
- job = new ForkJoinTask.AdaptedRunnableAction(task);
- externalPush(job);
- return job;
- }
-
- /**
- * @throws NullPointerException {@inheritDoc}
- * @throws RejectedExecutionException {@inheritDoc}
- */
- public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
- // In previous versions of this class, this method constructed
- // a task to run ForkJoinTask.invokeAll, but now external
- // invocation of multiple tasks is at least as efficient.
- ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
-
- boolean done = false;
- try {
- for (Callable<T> t : tasks) {
- ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t);
- futures.add(f);
- externalPush(f);
- }
- for (int i = 0, size = futures.size(); i < size; i++)
- ((ForkJoinTask<?>)futures.get(i)).quietlyJoin();
- done = true;
- return futures;
- } finally {
- if (!done)
- for (int i = 0, size = futures.size(); i < size; i++)
- futures.get(i).cancel(false);
- }
- }
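
A small, hypothetical caller of the bulk submission path described in the comment above (the two callables are placeholders):

    static void runAll(ForkJoinPool pool) throws Exception {
        List<Callable<Integer>> jobs = java.util.Arrays.asList(
            new Callable<Integer>() { public Integer call() { return 1; } },
            new Callable<Integer>() { public Integer call() { return 2; } });
        // Each job is pushed externally and quietly joined by invokeAll above.
        for (Future<Integer> f : pool.invokeAll(jobs))
            System.out.println(f.get());
    }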
-
- /**
- * Returns the factory used for constructing new workers.
- *
- * @return the factory used for constructing new workers
- */
- public ForkJoinWorkerThreadFactory getFactory() {
- return factory;
- }
-
- /**
- * Returns the handler for internal worker threads that terminate
- * due to unrecoverable errors encountered while executing tasks.
- *
- * @return the handler, or {@code null} if none
- */
- public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() {
- return ueh;
- }
-
- /**
- * Returns the targeted parallelism level of this pool.
- *
- * @return the targeted parallelism level of this pool
- */
- public int getParallelism() {
- return config & SMASK;
- }
-
- /**
- * Returns the targeted parallelism level of the common pool.
- *
- * @return the targeted parallelism level of the common pool
- * @since 1.8
- */
- public static int getCommonPoolParallelism() {
- return commonParallelism;
- }
-
- /**
- * Returns the number of worker threads that have started but not
- * yet terminated. The result returned by this method may differ
- * from {@link #getParallelism} when threads are created to
- * maintain parallelism when others are cooperatively blocked.
- *
- * @return the number of worker threads
- */
- public int getPoolSize() {
- return (config & SMASK) + (short)(ctl >>> TC_SHIFT);
- }
-
- /**
- * Returns {@code true} if this pool uses local first-in-first-out
- * scheduling mode for forked tasks that are never joined.
- *
- * @return {@code true} if this pool uses async mode
- */
- public boolean getAsyncMode() {
- return (config >>> 16) == FIFO_QUEUE;
- }
-
- /**
- * Returns an estimate of the number of worker threads that are
- * not blocked waiting to join tasks or for other managed
- * synchronization. This method may overestimate the
- * number of running threads.
- *
- * @return the number of worker threads
- */
- public int getRunningThreadCount() {
- int rc = 0;
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 1; i < ws.length; i += 2) {
- if ((w = ws[i]) != null && w.isApparentlyUnblocked())
- ++rc;
- }
- }
- return rc;
- }
-
- /**
- * Returns an estimate of the number of threads that are currently
- * stealing or executing tasks. This method may overestimate the
- * number of active threads.
- *
- * @return the number of active threads
- */
- public int getActiveThreadCount() {
- int r = (config & SMASK) + (int)(ctl >> AC_SHIFT);
- return (r <= 0) ? 0 : r; // suppress momentarily negative values
- }
-
- /**
- * Returns {@code true} if all worker threads are currently idle.
- * An idle worker is one that cannot obtain a task to execute
- * because none are available to steal from other threads, and
- * there are no pending submissions to the pool. This method is
- * conservative; it might not return {@code true} immediately upon
- * idleness of all threads, but will eventually become true if
- * threads remain inactive.
- *
- * @return {@code true} if all threads are currently idle
- */
- public boolean isQuiescent() {
- return (int)(ctl >> AC_SHIFT) + (config & SMASK) == 0;
- }
-
- /**
- * Returns an estimate of the total number of tasks stolen from
- * one thread's work queue by another. The reported value
- * underestimates the actual total number of steals when the pool
- * is not quiescent. This value may be useful for monitoring and
- * tuning fork/join programs: in general, steal counts should be
- * high enough to keep threads busy, but low enough to avoid
- * overhead and contention across threads.
- *
- * @return the number of steals
- */
- public long getStealCount() {
- long count = stealCount;
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 1; i < ws.length; i += 2) {
- if ((w = ws[i]) != null)
- count += w.nsteals;
- }
- }
- return count;
- }
-
- /**
- * Returns an estimate of the total number of tasks currently held
- * in queues by worker threads (but not including tasks submitted
- * to the pool that have not begun executing). This value is only
- * an approximation, obtained by iterating across all threads in
- * the pool. This method may be useful for tuning task
- * granularities.
- *
- * @return the number of queued tasks
- */
- public long getQueuedTaskCount() {
- long count = 0;
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 1; i < ws.length; i += 2) {
- if ((w = ws[i]) != null)
- count += w.queueSize();
- }
- }
- return count;
- }
-
- /**
- * Returns an estimate of the number of tasks submitted to this
- * pool that have not yet begun executing. This method may take
- * time proportional to the number of submissions.
- *
- * @return the number of queued submissions
- */
- public int getQueuedSubmissionCount() {
- int count = 0;
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; i += 2) {
- if ((w = ws[i]) != null)
- count += w.queueSize();
- }
- }
- return count;
- }
-
- /**
- * Returns {@code true} if there are any tasks submitted to this
- * pool that have not yet begun executing.
- *
- * @return {@code true} if there are any queued submissions
- */
- public boolean hasQueuedSubmissions() {
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; i += 2) {
- if ((w = ws[i]) != null && !w.isEmpty())
- return true;
- }
- }
- return false;
- }
-
- /**
- * Removes and returns the next unexecuted submission if one is
- * available. This method may be useful in extensions to this
- * class that re-assign work in systems with multiple pools.
- *
- * @return the next submission, or {@code null} if none
- */
- protected ForkJoinTask<?> pollSubmission() {
- WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; i += 2) {
- if ((w = ws[i]) != null && (t = w.poll()) != null)
- return t;
- }
- }
- return null;
- }
-
- /**
- * Removes all available unexecuted submitted and forked tasks
- * from scheduling queues and adds them to the given collection,
- * without altering their execution status. These may include
- * artificially generated or wrapped tasks. This method is
- * designed to be invoked only when the pool is known to be
- * quiescent. Invocations at other times may not remove all
- * tasks. A failure encountered while attempting to add elements
- * to collection {@code c} may result in elements being in
- * neither, either or both collections when the associated
- * exception is thrown. The behavior of this operation is
- * undefined if the specified collection is modified while the
- * operation is in progress.
- *
- * @param c the collection to transfer elements into
- * @return the number of elements transferred
- */
- protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
- int count = 0;
- WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; ++i) {
- if ((w = ws[i]) != null) {
- while ((t = w.poll()) != null) {
- c.add(t);
- ++count;
- }
- }
- }
- }
- return count;
- }
-
- /**
- * Returns a string identifying this pool, as well as its state,
- * including indications of run state, parallelism level, and
- * worker and task counts.
- *
- * @return a string identifying this pool, as well as its state
- */
- public String toString() {
- // Use a single pass through workQueues to collect counts
- long qt = 0L, qs = 0L; int rc = 0;
- long st = stealCount;
- long c = ctl;
- WorkQueue[] ws; WorkQueue w;
- if ((ws = workQueues) != null) {
- for (int i = 0; i < ws.length; ++i) {
- if ((w = ws[i]) != null) {
- int size = w.queueSize();
- if ((i & 1) == 0)
- qs += size;
- else {
- qt += size;
- st += w.nsteals;
- if (w.isApparentlyUnblocked())
- ++rc;
- }
- }
- }
- }
- int pc = (config & SMASK);
- int tc = pc + (short)(c >>> TC_SHIFT);
- int ac = pc + (int)(c >> AC_SHIFT);
- if (ac < 0) // ignore transient negative
- ac = 0;
- String level;
- if ((c & STOP_BIT) != 0)
- level = (tc == 0) ? "Terminated" : "Terminating";
- else
- level = plock < 0 ? "Shutting down" : "Running";
- return super.toString() +
- "[" + level +
- ", parallelism = " + pc +
- ", size = " + tc +
- ", active = " + ac +
- ", running = " + rc +
- ", steals = " + st +
- ", tasks = " + qt +
- ", submissions = " + qs +
- "]";
- }
-
- /**
- * Possibly initiates an orderly shutdown in which previously
- * submitted tasks are executed, but no new tasks will be
- * accepted. Invocation has no effect on execution state if this
- * is the {@link #commonPool()}, and no additional effect if
- * already shut down. Tasks that are in the process of being
- * submitted concurrently during the course of this method may or
- * may not be rejected.
- *
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}{@code ("modifyThread")}
- */
- public void shutdown() {
- checkPermission();
- tryTerminate(false, true);
- }
-
- /**
- * Possibly attempts to cancel and/or stop all tasks, and reject
- * all subsequently submitted tasks. Invocation has no effect on
- * execution state if this is the {@link #commonPool()}, and no
- * additional effect if already shut down. Otherwise, tasks that
- * are in the process of being submitted or executed concurrently
- * during the course of this method may or may not be
- * rejected. This method cancels both existing and unexecuted
- * tasks, in order to permit termination in the presence of task
- * dependencies. So the method always returns an empty list
- * (unlike the case for some other Executors).
- *
- * @return an empty list
- * @throws SecurityException if a security manager exists and
- * the caller is not permitted to modify threads
- * because it does not hold {@link
- * java.lang.RuntimePermission}{@code ("modifyThread")}
- */
- public List<Runnable> shutdownNow() {
- checkPermission();
- tryTerminate(true, true);
- return Collections.emptyList();
- }
-
- /**
- * Returns {@code true} if all tasks have completed following shut down.
- *
- * @return {@code true} if all tasks have completed following shut down
- */
- public boolean isTerminated() {
- long c = ctl;
- return ((c & STOP_BIT) != 0L &&
- (short)(c >>> TC_SHIFT) == -(config & SMASK));
- }
-
- /**
- * Returns {@code true} if the process of termination has
- * commenced but not yet completed. This method may be useful for
- * debugging. A return of {@code true} reported a sufficient
- * period after shutdown may indicate that submitted tasks have
- * ignored or suppressed interruption, or are waiting for I/O,
- * causing this executor not to properly terminate. (See the
- * advisory notes for class {@link ForkJoinTask} stating that
- * tasks should not normally entail blocking operations. But if
- * they do, they must abort them on interrupt.)
- *
- * @return {@code true} if terminating but not yet terminated
- */
- public boolean isTerminating() {
- long c = ctl;
- return ((c & STOP_BIT) != 0L &&
- (short)(c >>> TC_SHIFT) != -(config & SMASK));
- }
-
- /**
- * Returns {@code true} if this pool has been shut down.
- *
- * @return {@code true} if this pool has been shut down
- */
- public boolean isShutdown() {
- return plock < 0;
- }
-
- /**
- * Blocks until all tasks have completed execution after a
- * shutdown request, or the timeout occurs, or the current thread
- * is interrupted, whichever happens first. Because the {@link
- * #commonPool()} never terminates until program shutdown, when
- * applied to the common pool, this method is equivalent to {@link
- * #awaitQuiescence} but always returns {@code false}.
- *
- * @param timeout the maximum time to wait
- * @param unit the time unit of the timeout argument
- * @return {@code true} if this executor terminated and
- * {@code false} if the timeout elapsed before termination
- * @throws InterruptedException if interrupted while waiting
- */
- public boolean awaitTermination(long timeout, TimeUnit unit)
- throws InterruptedException {
- if (Thread.interrupted())
- throw new InterruptedException();
- if (this == common) {
- awaitQuiescence(timeout, unit);
- return false;
- }
- long nanos = unit.toNanos(timeout);
- if (isTerminated())
- return true;
- long startTime = System.nanoTime();
- boolean terminated = false;
- synchronized (this) {
- for (long waitTime = nanos, millis = 0L;;) {
- if (terminated = isTerminated() ||
- waitTime <= 0L ||
- (millis = unit.toMillis(waitTime)) <= 0L)
- break;
- wait(millis);
- waitTime = nanos - (System.nanoTime() - startTime);
- }
- }
- return terminated;
- }
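
A conventional orderly-shutdown sketch for a private (non-common) pool, combining shutdown, awaitTermination and shutdownNow as documented above; the timeouts are arbitrary:

    static void shutdownAndWait(ForkJoinPool pool) throws InterruptedException {
        pool.shutdown();                                     // stop accepting new tasks
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {  // let queued work drain
            pool.shutdownNow();                              // cancel remaining tasks
            pool.awaitTermination(5, TimeUnit.SECONDS);      // brief final wait
        }
    }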
-
- /**
- * If called by a ForkJoinTask operating in this pool, equivalent
- * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
- * waits and/or attempts to assist performing tasks until this
- * pool {@link #isQuiescent} or the indicated timeout elapses.
- *
- * @param timeout the maximum time to wait
- * @param unit the time unit of the timeout argument
- * @return {@code true} if quiescent; {@code false} if the
- * timeout elapsed.
- */
- public boolean awaitQuiescence(long timeout, TimeUnit unit) {
- long nanos = unit.toNanos(timeout);
- ForkJoinWorkerThread wt;
- Thread thread = Thread.currentThread();
- if ((thread instanceof ForkJoinWorkerThread) &&
- (wt = (ForkJoinWorkerThread)thread).pool == this) {
- helpQuiescePool(wt.workQueue);
- return true;
- }
- long startTime = System.nanoTime();
- WorkQueue[] ws;
- int r = 0, m;
- boolean found = true;
- while (!isQuiescent() && (ws = workQueues) != null &&
- (m = ws.length - 1) >= 0) {
- if (!found) {
- if ((System.nanoTime() - startTime) > nanos)
- return false;
- Thread.yield(); // cannot block
- }
- found = false;
- for (int j = (m + 1) << 2; j >= 0; --j) {
- ForkJoinTask<?> t; WorkQueue q; int b;
- if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) {
- found = true;
- if ((t = q.pollAt(b)) != null) {
- if (q.base - q.top < 0)
- signalWork(q);
- t.doExec();
- }
- break;
- }
- }
- }
- return true;
- }
-
- /**
- * Waits and/or attempts to assist performing tasks indefinitely
- * until the {@link #commonPool()} {@link #isQuiescent}.
- */
- static void quiesceCommonPool() {
- common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
- }
-
- /**
- * Interface for extending managed parallelism for tasks running
- * in {@link ForkJoinPool}s.
- *
- * <p>A {@code ManagedBlocker} provides two methods. Method
- * {@code isReleasable} must return {@code true} if blocking is
- * not necessary. Method {@code block} blocks the current thread
- * if necessary (perhaps internally invoking {@code isReleasable}
- * before actually blocking). These actions are performed by any
- * thread invoking {@link ForkJoinPool#managedBlock}. The
- * unusual methods in this API accommodate synchronizers that may,
- * but don't usually, block for long periods. Similarly, they
- * allow more efficient internal handling of cases in which
- * additional workers may be, but usually are not, needed to
- * ensure sufficient parallelism. Toward this end,
- * implementations of method {@code isReleasable} must be amenable
- * to repeated invocation.
- *
- * <p>For example, here is a ManagedBlocker based on a
- * ReentrantLock:
- * <pre> {@code
- * class ManagedLocker implements ManagedBlocker {
- * final ReentrantLock lock;
- * boolean hasLock = false;
- * ManagedLocker(ReentrantLock lock) { this.lock = lock; }
- * public boolean block() {
- * if (!hasLock)
- * lock.lock();
- * return true;
- * }
- * public boolean isReleasable() {
- * return hasLock || (hasLock = lock.tryLock());
- * }
- * }}</pre>
- *
- * <p>Here is a class that possibly blocks waiting for an
- * item on a given queue:
- * <pre> {@code
- * class QueueTaker<E> implements ManagedBlocker {
- * final BlockingQueue<E> queue;
- * volatile E item = null;
- * QueueTaker(BlockingQueue<E> q) { this.queue = q; }
- * public boolean block() throws InterruptedException {
- * if (item == null)
- * item = queue.take();
- * return true;
- * }
- * public boolean isReleasable() {
- * return item != null || (item = queue.poll()) != null;
- * }
- * public E getItem() { // call after pool.managedBlock completes
- * return item;
- * }
- * }}</pre>
- */
- @Deprecated
- public static interface ManagedBlocker {
- /**
- * Possibly blocks the current thread, for example waiting for
- * a lock or condition.
- *
- * @return {@code true} if no additional blocking is necessary
- * (i.e., if isReleasable would return true)
- * @throws InterruptedException if interrupted while waiting
- * (the method is not required to do so, but is allowed to)
- */
- boolean block() throws InterruptedException;
-
- /**
- * Returns {@code true} if blocking is unnecessary.
- */
- boolean isReleasable();
- }
-
- /**
- * Blocks in accord with the given blocker. If the current thread
- * is a {@link ForkJoinWorkerThread}, this method possibly
- * arranges for a spare thread to be activated if necessary to
- * ensure sufficient parallelism while the current thread is blocked.
- *
- * <p>If the caller is not a {@link ForkJoinTask}, this method is
- * behaviorally equivalent to
- * <pre> {@code
- * while (!blocker.isReleasable())
- * if (blocker.block())
- * return;
- * }</pre>
- *
- * If the caller is a {@code ForkJoinTask}, then the pool may
- * first be expanded to ensure parallelism, and later adjusted.
- *
- * @param blocker the blocker
- * @throws InterruptedException if blocker.block did so
- */
- public static void managedBlock(ManagedBlocker blocker)
- throws InterruptedException {
- Thread t = Thread.currentThread();
- if (t instanceof ForkJoinWorkerThread) {
- ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
- while (!blocker.isReleasable()) { // variant of helpSignal
- WorkQueue[] ws; WorkQueue q; int m, u;
- if ((ws = p.workQueues) != null && (m = ws.length - 1) >= 0) {
- for (int i = 0; i <= m; ++i) {
- if (blocker.isReleasable())
- return;
- if ((q = ws[i]) != null && q.base - q.top < 0) {
- p.signalWork(q);
- if ((u = (int)(p.ctl >>> 32)) >= 0 ||
- (u >> UAC_SHIFT) >= 0)
- break;
- }
- }
- }
- if (p.tryCompensate()) {
- try {
- do {} while (!blocker.isReleasable() &&
- !blocker.block());
- } finally {
- p.incrementActiveCount();
- }
- break;
- }
- }
- }
- else {
- do {} while (!blocker.isReleasable() &&
- !blocker.block());
- }
- }
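
A hypothetical caller of managedBlock, reusing the QueueTaker sketch from the ManagedBlocker javadoc above; while the caller blocks inside a worker thread, the pool may activate a spare worker to preserve parallelism:

    static <E> E takeManaged(java.util.concurrent.BlockingQueue<E> queue)
            throws InterruptedException {
        QueueTaker<E> taker = new QueueTaker<E>(queue);  // from the javadoc example
        ForkJoinPool.managedBlock(taker);                // may add a spare worker
        return taker.getItem();
    }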
-
- // AbstractExecutorService overrides. These rely on undocumented
- // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
- // implement RunnableFuture.
-
- protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
- return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
- }
-
- protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
- return new ForkJoinTask.AdaptedCallable<T>(callable);
- }
-
- // Unsafe mechanics
- private static final sun.misc.Unsafe U;
- private static final long CTL;
- private static final long PARKBLOCKER;
- private static final int ABASE;
- private static final int ASHIFT;
- private static final long STEALCOUNT;
- private static final long PLOCK;
- private static final long INDEXSEED;
- private static final long QLOCK;
-
- static {
- // initialize field offsets for CAS etc
- try {
- U = getUnsafe();
- Class<?> k = ForkJoinPool.class;
- CTL = U.objectFieldOffset
- (k.getDeclaredField("ctl"));
- STEALCOUNT = U.objectFieldOffset
- (k.getDeclaredField("stealCount"));
- PLOCK = U.objectFieldOffset
- (k.getDeclaredField("plock"));
- INDEXSEED = U.objectFieldOffset
- (k.getDeclaredField("indexSeed"));
- Class<?> tk = Thread.class;
- PARKBLOCKER = U.objectFieldOffset
- (tk.getDeclaredField("parkBlocker"));
- Class<?> wk = WorkQueue.class;
- QLOCK = U.objectFieldOffset
- (wk.getDeclaredField("qlock"));
- Class<?> ak = ForkJoinTask[].class;
- ABASE = U.arrayBaseOffset(ak);
- int scale = U.arrayIndexScale(ak);
- if ((scale & (scale - 1)) != 0)
- throw new Error("data type scale not a power of two");
- ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
- } catch (Exception e) {
- throw new Error(e);
- }
-
- submitters = new ThreadLocal<Submitter>();
- ForkJoinWorkerThreadFactory fac = defaultForkJoinWorkerThreadFactory =
- new DefaultForkJoinWorkerThreadFactory();
- modifyThreadPermission = new RuntimePermission("modifyThread");
-
- /*
- * Establish common pool parameters. For extra caution,
- * computations to set up common pool state are here; the
- * constructor just assigns these values to fields.
- */
-
- int par = 0;
- Thread.UncaughtExceptionHandler handler = null;
- try { // TBD: limit or report ignored exceptions?
- String pp = System.getProperty
- ("java.util.concurrent.ForkJoinPool.common.parallelism");
- String hp = System.getProperty
- ("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
- String fp = System.getProperty
- ("java.util.concurrent.ForkJoinPool.common.threadFactory");
- if (fp != null)
- fac = ((ForkJoinWorkerThreadFactory)ClassLoader.
- getSystemClassLoader().loadClass(fp).newInstance());
- if (hp != null)
- handler = ((Thread.UncaughtExceptionHandler)ClassLoader.
- getSystemClassLoader().loadClass(hp).newInstance());
- if (pp != null)
- par = Integer.parseInt(pp);
- } catch (Exception ignore) {
- }
-
- if (par <= 0)
- par = Runtime.getRuntime().availableProcessors();
- if (par > MAX_CAP)
- par = MAX_CAP;
- commonParallelism = par;
- long np = (long)(-par); // precompute initial ctl value
- long ct = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
-
- common = new ForkJoinPool(par, ct, fac, handler);
- }
-
- /**
- * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
- * Replace with a simple call to Unsafe.getUnsafe when integrating
- * into a jdk.
- *
- * @return a sun.misc.Unsafe
- */
- private static sun.misc.Unsafe getUnsafe() {
- return scala.concurrent.util.Unsafe.instance;
- }
-}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
deleted file mode 100644
index b4f5c24ca9..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinTask.java
+++ /dev/null
@@ -1,1493 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.List;
-import java.util.RandomAccess;
-import java.lang.ref.WeakReference;
-import java.lang.ref.ReferenceQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.locks.ReentrantLock;
-import java.lang.reflect.Constructor;
-
-/**
- * Abstract base class for tasks that run within a {@link ForkJoinPool}.
- * A {@code ForkJoinTask} is a thread-like entity that is much
- * lighter weight than a normal thread. Huge numbers of tasks and
- * subtasks may be hosted by a small number of actual threads in a
- * ForkJoinPool, at the price of some usage limitations.
- *
- * <p>A "main" {@code ForkJoinTask} begins execution when it is
- * explicitly submitted to a {@link ForkJoinPool}, or, if not already
- * engaged in a ForkJoin computation, commenced in the {@link
- * ForkJoinPool#commonPool()} via {@link #fork}, {@link #invoke}, or
- * related methods. Once started, it will usually in turn start other
- * subtasks. As indicated by the name of this class, many programs
- * using {@code ForkJoinTask} employ only methods {@link #fork} and
- * {@link #join}, or derivatives such as {@link
- * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also
- * provides a number of other methods that can come into play in
- * advanced usages, as well as extension mechanics that allow support
- * of new forms of fork/join processing.
- *
- * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
- * The efficiency of {@code ForkJoinTask}s stems from a set of
- * restrictions (that are only partially statically enforceable)
- * reflecting their main use as computational tasks calculating pure
- * functions or operating on purely isolated objects. The primary
- * coordination mechanisms are {@link #fork}, that arranges
- * asynchronous execution, and {@link #join}, that doesn't proceed
- * until the task's result has been computed. Computations should
- * ideally avoid {@code synchronized} methods or blocks, and should
- * minimize other blocking synchronization apart from joining other
- * tasks or using synchronizers such as Phasers that are advertised to
- * cooperate with fork/join scheduling. Subdividable tasks should also
- * not perform blocking I/O, and should ideally access variables that
- * are completely independent of those accessed by other running
- * tasks. These guidelines are loosely enforced by not permitting
- * checked exceptions such as {@code IOExceptions} to be
- * thrown. However, computations may still encounter unchecked
- * exceptions, that are rethrown to callers attempting to join
- * them. These exceptions may additionally include {@link
- * RejectedExecutionException} stemming from internal resource
- * exhaustion, such as failure to allocate internal task
- * queues. Rethrown exceptions behave in the same way as regular
- * exceptions, but, when possible, contain stack traces (as displayed
- * for example using {@code ex.printStackTrace()}) of both the thread
- * that initiated the computation as well as the thread actually
- * encountering the exception; minimally only the latter.
- *
- * <p>It is possible to define and use ForkJoinTasks that may block,
- * but doing so requires three further considerations: (1) Completion
- * of few if any <em>other</em> tasks should be dependent on a task
- * that blocks on external synchronization or I/O. Event-style async
- * tasks that are never joined (for example, those subclassing {@link
- * CountedCompleter}) often fall into this category. (2) To minimize
- * resource impact, tasks should be small; ideally performing only the
- * (possibly) blocking action. (3) Unless the {@link
- * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
- * blocked tasks is known to be less than the pool's {@link
- * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
- * enough threads will be available to ensure progress or good
- * performance.
- *
- * <p>The primary method for awaiting completion and extracting
- * results of a task is {@link #join}, but there are several variants:
- * The {@link Future#get} methods support interruptible and/or timed
- * waits for completion and report results using {@code Future}
- * conventions. Method {@link #invoke} is semantically
- * equivalent to {@code fork(); join()} but always attempts to begin
- * execution in the current thread. The "<em>quiet</em>" forms of
- * these methods do not extract results or report exceptions. These
- * may be useful when a set of tasks are being executed, and you need
- * to delay processing of results or exceptions until all complete.
- * Method {@code invokeAll} (available in multiple versions)
- * performs the most common form of parallel invocation: forking a set
- * of tasks and joining them all.
- *
- * <p>In the most typical usages, a fork-join pair act like a call
- * (fork) and return (join) from a parallel recursive function. As is
- * the case with other forms of recursive calls, returns (joins)
- * should be performed innermost-first. For example, {@code a.fork();
- * b.fork(); b.join(); a.join();} is likely to be substantially more
- * efficient than joining {@code a} before {@code b}.
- *
- * <p>The execution status of tasks may be queried at several levels
- * of detail: {@link #isDone} is true if a task completed in any way
- * (including the case where a task was cancelled without executing);
- * {@link #isCompletedNormally} is true if a task completed without
- * cancellation or encountering an exception; {@link #isCancelled} is
- * true if the task was cancelled (in which case {@link #getException}
- * returns a {@link java.util.concurrent.CancellationException}); and
- * {@link #isCompletedAbnormally} is true if a task was either
- * cancelled or encountered an exception, in which case {@link
- * #getException} will return either the encountered exception or
- * {@link java.util.concurrent.CancellationException}.
- *
- * <p>The ForkJoinTask class is not usually directly subclassed.
- * Instead, you subclass one of the abstract classes that support a
- * particular style of fork/join processing, typically {@link
- * RecursiveAction} for most computations that do not return results,
- * {@link RecursiveTask} for those that do, and {@link
- * CountedCompleter} for those in which completed actions trigger
- * other actions. Normally, a concrete ForkJoinTask subclass declares
- * fields comprising its parameters, established in a constructor, and
- * then defines a {@code compute} method that somehow uses the control
- * methods supplied by this base class.
- *
- * <p>Method {@link #join} and its variants are appropriate for use
- * only when completion dependencies are acyclic; that is, the
- * parallel computation can be described as a directed acyclic graph
- * (DAG). Otherwise, executions may encounter a form of deadlock as
- * tasks cyclically wait for each other. However, this framework
- * supports other methods and techniques (for example the use of
- * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
- * may be of use in constructing custom subclasses for problems that
- * are not statically structured as DAGs. To support such usages a
- * ForkJoinTask may be atomically <em>tagged</em> with a {@code short}
- * value using {@link #setForkJoinTaskTag} or {@link
- * #compareAndSetForkJoinTaskTag} and checked using {@link
- * #getForkJoinTaskTag}. The ForkJoinTask implementation does not use
- * these {@code protected} methods or tags for any purpose, but they
- * may be of use in the construction of specialized subclasses. For
- * example, parallel graph traversals can use the supplied methods to
- * avoid revisiting nodes/tasks that have already been processed.
- * (Method names for tagging are bulky in part to encourage definition
- * of methods that reflect their usage patterns.)
- *
- * <p>Most base support methods are {@code final}, to prevent
- * overriding of implementations that are intrinsically tied to the
- * underlying lightweight task scheduling framework. Developers
- * creating new basic styles of fork/join processing should minimally
- * implement {@code protected} methods {@link #exec}, {@link
- * #setRawResult}, and {@link #getRawResult}, while also introducing
- * an abstract computational method that can be implemented in its
- * subclasses, possibly relying on other {@code protected} methods
- * provided by this class.
- *
- * <p>ForkJoinTasks should perform relatively small amounts of
- * computation. Large tasks should be split into smaller subtasks,
- * usually via recursive decomposition. As a very rough rule of thumb,
- * a task should perform more than 100 and less than 10000 basic
- * computational steps, and should avoid indefinite looping. If tasks
- * are too big, then parallelism cannot improve throughput. If too
- * small, then memory and internal task maintenance overhead may
- * overwhelm processing.
- *
- * <p>This class provides {@code adapt} methods for {@link Runnable}
- * and {@link Callable}, that may be of use when mixing execution of
- * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
- * of this form, consider using a pool constructed in <em>asyncMode</em>.
- *
- * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
- * used in extensions such as remote execution frameworks. It is
- * sensible to serialize tasks only before or after, but not during,
- * execution. Serialization is not relied on during execution itself.
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
-
- /*
- * See the internal documentation of class ForkJoinPool for a
- * general implementation overview. ForkJoinTasks are mainly
- * responsible for maintaining their "status" field amidst relays
- * to methods in ForkJoinWorkerThread and ForkJoinPool.
- *
- * The methods of this class are more-or-less layered into
- * (1) basic status maintenance
- * (2) execution and awaiting completion
- * (3) user-level methods that additionally report results.
- * This is sometimes hard to see because this file orders exported
- * methods in a way that flows well in javadocs.
- */
-
- /*
- * The status field holds run control status bits packed into a
- * single int to minimize footprint and to ensure atomicity (via
- * CAS). Status is initially zero, and takes on nonnegative
- * values until completed, upon which status (anded with
- * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks
- * undergoing blocking waits by other threads have the SIGNAL bit
- * set. Completion of a stolen task with SIGNAL set awakens any
- * waiters via notifyAll. Even though suboptimal for some
- * purposes, we use basic builtin wait/notify to take advantage of
- * "monitor inflation" in JVMs that we would otherwise need to
- * emulate to avoid adding further per-task bookkeeping overhead.
- * We want these monitors to be "fat", i.e., not use biasing or
- * thin-lock techniques, so use some odd coding idioms that tend
- * to avoid them, mainly by arranging that every synchronized
- * block performs a wait, notifyAll or both.
- *
- * These control bits occupy only (some of) the upper half (16
- * bits) of status field. The lower bits are used for user-defined
- * tags.
- */
-
- /** The run status of this task */
- volatile int status; // accessed directly by pool and workers
- static final int DONE_MASK = 0xf0000000; // mask out non-completion bits
- static final int NORMAL = 0xf0000000; // must be negative
- static final int CANCELLED = 0xc0000000; // must be < NORMAL
- static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED
- static final int SIGNAL = 0x00010000; // must be >= 1 << 16
- static final int SMASK = 0x0000ffff; // short bits for tags
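
The packed layout described above can be read back with a couple of one-liners; the helpers below are illustrative only and are not part of this file:

    // Illustrative decoding of the packed status word: the completion kind
    // lives in the DONE_MASK bits, the user-defined tag in the low 16 bits.
    static boolean isCancelledStatus(int status) {
        return (status & DONE_MASK) == CANCELLED;
    }
    static short tagOf(int status) {
        return (short)(status & SMASK);
    }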
-
- /**
- * Marks completion and wakes up threads waiting to join this
- * task.
- *
- * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
- * @return completion status on exit
- */
- private int setCompletion(int completion) {
- for (int s;;) {
- if ((s = status) < 0)
- return s;
- if (U.compareAndSwapInt(this, STATUS, s, s | completion)) {
- if ((s >>> 16) != 0)
- synchronized (this) { notifyAll(); }
- return completion;
- }
- }
- }
-
- /**
- * Primary execution method for stolen tasks. Unless done, calls
- * exec and records status if completed, but doesn't wait for
- * completion otherwise.
- *
- * @return status on exit from this method
- */
- final int doExec() {
- int s; boolean completed;
- if ((s = status) >= 0) {
- try {
- completed = exec();
- } catch (Throwable rex) {
- return setExceptionalCompletion(rex);
- }
- if (completed)
- s = setCompletion(NORMAL);
- }
- return s;
- }
-
- /**
- * Tries to set SIGNAL status unless already completed. Used by
- * ForkJoinPool. Other variants are directly incorporated into
- * externalAwaitDone etc.
- *
- * @return true if successful
- */
- final boolean trySetSignal() {
- int s = status;
- return s >= 0 && U.compareAndSwapInt(this, STATUS, s, s | SIGNAL);
- }
-
- /**
- * Blocks a non-worker-thread until completion.
- * @return status upon completion
- */
- private int externalAwaitDone() {
- int s;
- ForkJoinPool.externalHelpJoin(this);
- boolean interrupted = false;
- while ((s = status) >= 0) {
- if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
- synchronized (this) {
- if (status >= 0) {
- try {
- wait();
- } catch (InterruptedException ie) {
- interrupted = true;
- }
- }
- else
- notifyAll();
- }
- }
- }
- if (interrupted)
- Thread.currentThread().interrupt();
- return s;
- }
-
- /**
- * Blocks a non-worker-thread until completion or interruption.
- */
- private int externalInterruptibleAwaitDone() throws InterruptedException {
- int s;
- if (Thread.interrupted())
- throw new InterruptedException();
- ForkJoinPool.externalHelpJoin(this);
- while ((s = status) >= 0) {
- if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
- synchronized (this) {
- if (status >= 0)
- wait();
- else
- notifyAll();
- }
- }
- }
- return s;
- }
-
-
- /**
- * Implementation for join, get, quietlyJoin. Directly handles
- * only cases of already-completed, external wait, and
- * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin.
- *
- * @return status upon completion
- */
- private int doJoin() {
- int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w;
- return (s = status) < 0 ? s :
- ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
- (w = (wt = (ForkJoinWorkerThread)t).workQueue).
- tryUnpush(this) && (s = doExec()) < 0 ? s :
- wt.pool.awaitJoin(w, this) :
- externalAwaitDone();
- }
-
- /**
- * Implementation for invoke, quietlyInvoke.
- *
- * @return status upon completion
- */
- private int doInvoke() {
- int s; Thread t; ForkJoinWorkerThread wt;
- return (s = doExec()) < 0 ? s :
- ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
- (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, this) :
- externalAwaitDone();
- }
-
- // Exception table support
-
- /**
- * Table of exceptions thrown by tasks, to enable reporting by
- * callers. Because exceptions are rare, we don't directly keep
- * them with task objects, but instead use a weak ref table. Note
- * that cancellation exceptions don't appear in the table, but are
- * instead recorded as status values.
- *
- * Note: These statics are initialized below in static block.
- */
- private static final ExceptionNode[] exceptionTable;
- private static final ReentrantLock exceptionTableLock;
- private static final ReferenceQueue<Object> exceptionTableRefQueue;
-
- /**
- * Fixed capacity for exceptionTable.
- */
- private static final int EXCEPTION_MAP_CAPACITY = 32;
-
- /**
- * Key-value nodes for exception table. The chained hash table
- * uses identity comparisons, full locking, and weak references
- * for keys. The table has a fixed capacity because it only
- * maintains task exceptions long enough for joiners to access
- * them, so should never become very large for sustained
- * periods. However, since we do not know when the last joiner
- * completes, we must use weak references and expunge them. We do
- * so on each operation (hence full locking). Also, some thread in
- * any ForkJoinPool will call helpExpungeStaleExceptions when its
- * pool becomes isQuiescent.
- */
- @Deprecated
- static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
- final Throwable ex;
- ExceptionNode next;
- final long thrower; // use id not ref to avoid weak cycles
- ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
- super(task, exceptionTableRefQueue);
- this.ex = ex;
- this.next = next;
- this.thrower = Thread.currentThread().getId();
- }
- }
-
- /**
- * Records exception and sets status.
- *
- * @return status on exit
- */
- final int recordExceptionalCompletion(Throwable ex) {
- int s;
- if ((s = status) >= 0) {
- int h = System.identityHashCode(this);
- final ReentrantLock lock = exceptionTableLock;
- lock.lock();
- try {
- expungeStaleExceptions();
- ExceptionNode[] t = exceptionTable;
- int i = h & (t.length - 1);
- for (ExceptionNode e = t[i]; ; e = e.next) {
- if (e == null) {
- t[i] = new ExceptionNode(this, ex, t[i]);
- break;
- }
- if (e.get() == this) // already present
- break;
- }
- } finally {
- lock.unlock();
- }
- s = setCompletion(EXCEPTIONAL);
- }
- return s;
- }
-
- /**
- * Records exception and possibly propagates.
- *
- * @return status on exit
- */
- private int setExceptionalCompletion(Throwable ex) {
- int s = recordExceptionalCompletion(ex);
- if ((s & DONE_MASK) == EXCEPTIONAL)
- internalPropagateException(ex);
- return s;
- }
-
- /**
- * Hook for exception propagation support for tasks with completers.
- */
- void internalPropagateException(Throwable ex) {
- }
-
- /**
- * Cancels, ignoring any exceptions thrown by cancel. Used during
- * worker and pool shutdown. Cancel is spec'ed not to throw any
- * exceptions, but if it does anyway, we have no recourse during
- * shutdown, so guard against this case.
- */
- static final void cancelIgnoringExceptions(ForkJoinTask<?> t) {
- if (t != null && t.status >= 0) {
- try {
- t.cancel(false);
- } catch (Throwable ignore) {
- }
- }
- }
-
- /**
- * Removes exception node and clears status.
- */
- private void clearExceptionalCompletion() {
- int h = System.identityHashCode(this);
- final ReentrantLock lock = exceptionTableLock;
- lock.lock();
- try {
- ExceptionNode[] t = exceptionTable;
- int i = h & (t.length - 1);
- ExceptionNode e = t[i];
- ExceptionNode pred = null;
- while (e != null) {
- ExceptionNode next = e.next;
- if (e.get() == this) {
- if (pred == null)
- t[i] = next;
- else
- pred.next = next;
- break;
- }
- pred = e;
- e = next;
- }
- expungeStaleExceptions();
- status = 0;
- } finally {
- lock.unlock();
- }
- }
-
- /**
- * Returns a rethrowable exception for the given task, if
- * available. To provide accurate stack traces, if the exception
- * was not thrown by the current thread, we try to create a new
- * exception of the same type as the one thrown, but with the
- * recorded exception as its cause. If there is no such
- * constructor, we instead try to use a no-arg constructor,
- * followed by initCause, to the same effect. If none of these
- * apply, or any fail due to other exceptions, we return the
- * recorded exception, which is still correct, although it may
- * contain a misleading stack trace.
- *
- * @return the exception, or null if none
- */
- private Throwable getThrowableException() {
- if ((status & DONE_MASK) != EXCEPTIONAL)
- return null;
- int h = System.identityHashCode(this);
- ExceptionNode e;
- final ReentrantLock lock = exceptionTableLock;
- lock.lock();
- try {
- expungeStaleExceptions();
- ExceptionNode[] t = exceptionTable;
- e = t[h & (t.length - 1)];
- while (e != null && e.get() != this)
- e = e.next;
- } finally {
- lock.unlock();
- }
- Throwable ex;
- if (e == null || (ex = e.ex) == null)
- return null;
- if (false && e.thrower != Thread.currentThread().getId()) {
- Class<? extends Throwable> ec = ex.getClass();
- try {
- Constructor<?> noArgCtor = null;
- Constructor<?>[] cs = ec.getConstructors();// public ctors only
- for (int i = 0; i < cs.length; ++i) {
- Constructor<?> c = cs[i];
- Class<?>[] ps = c.getParameterTypes();
- if (ps.length == 0)
- noArgCtor = c;
- else if (ps.length == 1 && ps[0] == Throwable.class)
- return (Throwable)(c.newInstance(ex));
- }
- if (noArgCtor != null) {
- Throwable wx = (Throwable)(noArgCtor.newInstance());
- wx.initCause(ex);
- return wx;
- }
- } catch (Exception ignore) {
- }
- }
- return ex;
- }
-
- /**
- * Poll stale refs and remove them. Call only while holding lock.
- */
- private static void expungeStaleExceptions() {
- for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
- if (x instanceof ExceptionNode) {
- ForkJoinTask<?> key = ((ExceptionNode)x).get();
- ExceptionNode[] t = exceptionTable;
- int i = System.identityHashCode(key) & (t.length - 1);
- ExceptionNode e = t[i];
- ExceptionNode pred = null;
- while (e != null) {
- ExceptionNode next = e.next;
- if (e == x) {
- if (pred == null)
- t[i] = next;
- else
- pred.next = next;
- break;
- }
- pred = e;
- e = next;
- }
- }
- }
- }
-
- /**
- * If lock is available, poll stale refs and remove them.
- * Called from ForkJoinPool when pools become quiescent.
- */
- static final void helpExpungeStaleExceptions() {
- final ReentrantLock lock = exceptionTableLock;
- if (lock.tryLock()) {
- try {
- expungeStaleExceptions();
- } finally {
- lock.unlock();
- }
- }
- }
-
- /**
- * A version of "sneaky throw" to relay exceptions
- */
- static void rethrow(final Throwable ex) {
- if (ex != null) {
- if (ex instanceof Error)
- throw (Error)ex;
- if (ex instanceof RuntimeException)
- throw (RuntimeException)ex;
- ForkJoinTask.<RuntimeException>uncheckedThrow(ex);
- }
- }
-
- /**
- * The sneaky part of sneaky throw, relying on generics
- * limitations to evade compiler complaints about rethrowing
- * unchecked exceptions
- */
- @SuppressWarnings("unchecked") static <T extends Throwable>
- void uncheckedThrow(Throwable t) throws T {
- if (t != null)
- throw (T)t; // rely on vacuous cast
- }
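The rethrow/uncheckedThrow pair above works only because the cast to T is erased at run time, and instantiating T as RuntimeException makes the throws clause vacuous at the call site, so checked throwables can be relayed without wrapping. A minimal standalone sketch of the same idiom (the class and method names below are illustrative, not part of this file):

    // A self-contained illustration of the "sneaky throw" idiom used above.
    public class SneakyThrow {
        @SuppressWarnings("unchecked")
        static <T extends Throwable> void sneakyThrow(Throwable t) throws T {
            throw (T) t;                 // cast is erased; no runtime check occurs
        }

        static void rethrow(Throwable t) {
            // The explicit type argument RuntimeException makes the throws clause
            // vacuous for callers, so checked exceptions pass through unwrapped.
            SneakyThrow.<RuntimeException>sneakyThrow(t);
        }

        public static void main(String[] args) {
            try {
                rethrow(new java.io.IOException("checked, but not wrapped"));
            } catch (Throwable ex) {
                System.out.println("caught: " + ex);   // prints the IOException itself
            }
        }
    }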
-
- /**
- * Throws exception, if any, associated with the given status.
- */
- private void reportException(int s) {
- if (s == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL)
- rethrow(getThrowableException());
- }
-
- // public methods
-
- /**
- * Arranges to asynchronously execute this task in the pool the
- * current task is running in, if applicable, or using the {@link
- * ForkJoinPool#commonPool()} if not {@link #inForkJoinPool}. While
- * it is not necessarily enforced, it is a usage error to fork a
- * task more than once unless it has completed and been
- * reinitialized. Subsequent modifications to the state of this
- * task or any data it operates on are not necessarily
- * consistently observable by any thread other than the one
- * executing it unless preceded by a call to {@link #join} or
- * related methods, or a call to {@link #isDone} returning {@code
- * true}.
- *
- * @return {@code this}, to simplify usage
- */
- public final ForkJoinTask<V> fork() {
- Thread t;
- if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
- ((ForkJoinWorkerThread)t).workQueue.push(this);
- else
- ForkJoinPool.common.externalPush(this);
- return this;
- }
-
- /**
- * Returns the result of the computation when it {@link #isDone is
- * done}. This method differs from {@link #get()} in that
- * abnormal completion results in {@code RuntimeException} or
- * {@code Error}, not {@code ExecutionException}, and that
- * interrupts of the calling thread do <em>not</em> cause the
- * method to abruptly return by throwing {@code
- * InterruptedException}.
- *
- * @return the computed result
- */
- public final V join() {
- int s;
- if ((s = doJoin() & DONE_MASK) != NORMAL)
- reportException(s);
- return getRawResult();
- }
-
- /**
- * Commences performing this task, awaits its completion if
- * necessary, and returns its result, or throws an (unchecked)
- * {@code RuntimeException} or {@code Error} if the underlying
- * computation did so.
- *
- * @return the computed result
- */
- public final V invoke() {
- int s;
- if ((s = doInvoke() & DONE_MASK) != NORMAL)
- reportException(s);
- return getRawResult();
- }
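fork(), join() and invoke() above are the core of the usual work-splitting idiom. A small sketch of that pattern, written against java.util.concurrent (the JDK classes this deprecated scala.concurrent.forkjoin copy mirrors) and assuming the common pool:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    // The canonical fork/join example: fork one subtask, compute the other
    // inline, then join. invoke() on the pool runs the root task and waits.
    class Fib extends RecursiveTask<Long> {
        final int n;
        Fib(int n) { this.n = n; }

        @Override protected Long compute() {
            if (n <= 1) return (long) n;
            Fib f1 = new Fib(n - 1);
            f1.fork();                      // execute asynchronously in this pool
            long f2 = new Fib(n - 2).compute();
            return f1.join() + f2;          // join() reports failures as unchecked exceptions
        }

        public static void main(String[] args) {
            System.out.println(ForkJoinPool.commonPool().invoke(new Fib(30)));   // 832040
        }
    }

Forking one half and computing the other inline keeps a subtask available for stealing while avoiding one task object per split.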
-
- /**
- * Forks the given tasks, returning when {@code isDone} holds for
- * each task or an (unchecked) exception is encountered, in which
- * case the exception is rethrown. If more than one task
- * encounters an exception, then this method throws any one of
- * these exceptions. If any task encounters an exception, the
- * other may be cancelled. However, the execution status of
- * individual tasks is not guaranteed upon exceptional return. The
- * status of each task may be obtained using {@link
- * #getException()} and related methods to check if they have been
- * cancelled, completed normally or exceptionally, or left
- * unprocessed.
- *
- * @param t1 the first task
- * @param t2 the second task
- * @throws NullPointerException if any task is null
- */
- public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
- int s1, s2;
- t2.fork();
- if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL)
- t1.reportException(s1);
- if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL)
- t2.reportException(s2);
- }
-
- /**
- * Forks the given tasks, returning when {@code isDone} holds for
- * each task or an (unchecked) exception is encountered, in which
- * case the exception is rethrown. If more than one task
- * encounters an exception, then this method throws any one of
- * these exceptions. If any task encounters an exception, others
- * may be cancelled. However, the execution status of individual
- * tasks is not guaranteed upon exceptional return. The status of
- * each task may be obtained using {@link #getException()} and
- * related methods to check if they have been cancelled, completed
- * normally or exceptionally, or left unprocessed.
- *
- * @param tasks the tasks
- * @throws NullPointerException if any task is null
- */
- public static void invokeAll(ForkJoinTask<?>... tasks) {
- Throwable ex = null;
- int last = tasks.length - 1;
- for (int i = last; i >= 0; --i) {
- ForkJoinTask<?> t = tasks[i];
- if (t == null) {
- if (ex == null)
- ex = new NullPointerException();
- }
- else if (i != 0)
- t.fork();
- else if (t.doInvoke() < NORMAL && ex == null)
- ex = t.getException();
- }
- for (int i = 1; i <= last; ++i) {
- ForkJoinTask<?> t = tasks[i];
- if (t != null) {
- if (ex != null)
- t.cancel(false);
- else if (t.doJoin() < NORMAL)
- ex = t.getException();
- }
- }
- if (ex != null)
- rethrow(ex);
- }
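The invokeAll overloads above package the fork-both, join-both pattern together with the exception behaviour described in the javadoc. A sketch of a typical use, again against the equivalent java.util.concurrent API:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;
    import java.util.concurrent.RecursiveAction;

    // Splits an array fill into two halves with invokeAll and waits for both;
    // if either half throws, the exception is rethrown from invokeAll.
    class FillAction extends RecursiveAction {
        final int[] data; final int lo, hi;
        FillAction(int[] data, int lo, int hi) { this.data = data; this.lo = lo; this.hi = hi; }

        @Override protected void compute() {
            if (hi - lo <= 1024) {
                for (int i = lo; i < hi; i++) data[i] = i * i;   // sequential leaf
            } else {
                int mid = (lo + hi) >>> 1;
                ForkJoinTask.invokeAll(new FillAction(data, lo, mid),
                                       new FillAction(data, mid, hi));
            }
        }

        public static void main(String[] args) {
            int[] data = new int[10_000];
            ForkJoinPool.commonPool().invoke(new FillAction(data, 0, data.length));
            System.out.println(data[100]);   // 10000
        }
    }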
-
- /**
- * Forks all tasks in the specified collection, returning when
- * {@code isDone} holds for each task or an (unchecked) exception
- * is encountered, in which case the exception is rethrown. If
- * more than one task encounters an exception, then this method
- * throws any one of these exceptions. If any task encounters an
- * exception, others may be cancelled. However, the execution
- * status of individual tasks is not guaranteed upon exceptional
- * return. The status of each task may be obtained using {@link
- * #getException()} and related methods to check if they have been
- * cancelled, completed normally or exceptionally, or left
- * unprocessed.
- *
- * @param tasks the collection of tasks
- * @return the tasks argument, to simplify usage
- * @throws NullPointerException if tasks or any element are null
- */
- public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
- if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
- invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
- return tasks;
- }
- @SuppressWarnings("unchecked")
- List<? extends ForkJoinTask<?>> ts =
- (List<? extends ForkJoinTask<?>>) tasks;
- Throwable ex = null;
- int last = ts.size() - 1;
- for (int i = last; i >= 0; --i) {
- ForkJoinTask<?> t = ts.get(i);
- if (t == null) {
- if (ex == null)
- ex = new NullPointerException();
- }
- else if (i != 0)
- t.fork();
- else if (t.doInvoke() < NORMAL && ex == null)
- ex = t.getException();
- }
- for (int i = 1; i <= last; ++i) {
- ForkJoinTask<?> t = ts.get(i);
- if (t != null) {
- if (ex != null)
- t.cancel(false);
- else if (t.doJoin() < NORMAL)
- ex = t.getException();
- }
- }
- if (ex != null)
- rethrow(ex);
- return tasks;
- }
-
- /**
- * Attempts to cancel execution of this task. This attempt will
- * fail if the task has already completed or could not be
- * cancelled for some other reason. If successful, and this task
- * has not started when {@code cancel} is called, execution of
- * this task is suppressed. After this method returns
- * successfully, unless there is an intervening call to {@link
- * #reinitialize}, subsequent calls to {@link #isCancelled},
- * {@link #isDone}, and {@code cancel} will return {@code true}
- * and calls to {@link #join} and related methods will result in
- * {@code CancellationException}.
- *
- * <p>This method may be overridden in subclasses, but if so, must
- * still ensure that these properties hold. In particular, the
- * {@code cancel} method itself must not throw exceptions.
- *
- * <p>This method is designed to be invoked by <em>other</em>
- * tasks. To terminate the current task, you can just return or
- * throw an unchecked exception from its computation method, or
- * invoke {@link #completeExceptionally}.
- *
- * @param mayInterruptIfRunning this value has no effect in the
- * default implementation because interrupts are not used to
- * control cancellation.
- *
- * @return {@code true} if this task is now cancelled
- */
- public boolean cancel(boolean mayInterruptIfRunning) {
- return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED;
- }
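As a quick illustration of the contract above: cancelling a task that has never been forked suppresses its execution, isCancelled()/isDone() then report true, and join() throws CancellationException. Sketch using the JDK API (adapt() is used only to obtain a concrete task):

    import java.util.concurrent.CancellationException;
    import java.util.concurrent.ForkJoinTask;

    public class CancelDemo {
        public static void main(String[] args) {
            ForkJoinTask<?> task = ForkJoinTask.adapt(() -> System.out.println("never runs"));
            boolean cancelled = task.cancel(false);
            System.out.println(cancelled + " " + task.isCancelled() + " " + task.isDone());
            try {
                task.join();
            } catch (CancellationException expected) {
                System.out.println("join threw CancellationException, as specified");
            }
        }
    }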
-
- public final boolean isDone() {
- return status < 0;
- }
-
- public final boolean isCancelled() {
- return (status & DONE_MASK) == CANCELLED;
- }
-
- /**
- * Returns {@code true} if this task threw an exception or was cancelled.
- *
- * @return {@code true} if this task threw an exception or was cancelled
- */
- public final boolean isCompletedAbnormally() {
- return status < NORMAL;
- }
-
- /**
- * Returns {@code true} if this task completed without throwing an
- * exception and was not cancelled.
- *
- * @return {@code true} if this task completed without throwing an
- * exception and was not cancelled
- */
- public final boolean isCompletedNormally() {
- return (status & DONE_MASK) == NORMAL;
- }
-
- /**
- * Returns the exception thrown by the base computation, or a
- * {@code CancellationException} if cancelled, or {@code null} if
- * none or if the method has not yet completed.
- *
- * @return the exception, or {@code null} if none
- */
- public final Throwable getException() {
- int s = status & DONE_MASK;
- return ((s >= NORMAL) ? null :
- (s == CANCELLED) ? new CancellationException() :
- getThrowableException());
- }
-
- /**
- * Completes this task abnormally, and if not already aborted or
- * cancelled, causes it to throw the given exception upon
- * {@code join} and related operations. This method may be used
- * to induce exceptions in asynchronous tasks, or to force
- * completion of tasks that would not otherwise complete. Its use
- * in other situations is discouraged. This method is
- * overridable, but overridden versions must invoke {@code super}
- * implementation to maintain guarantees.
- *
- * @param ex the exception to throw. If this exception is not a
- * {@code RuntimeException} or {@code Error}, the actual exception
- * thrown will be a {@code RuntimeException} with cause {@code ex}.
- */
- public void completeExceptionally(Throwable ex) {
- setExceptionalCompletion((ex instanceof RuntimeException) ||
- (ex instanceof Error) ? ex :
- new RuntimeException(ex));
- }
-
- /**
- * Completes this task and, if not already aborted or cancelled,
- * returns the given value as the result of subsequent
- * invocations of {@code join} and related operations. This method
- * may be used to provide results for asynchronous tasks, or to
- * provide alternative handling for tasks that would not otherwise
- * complete normally. Its use in other situations is
- * discouraged. This method is overridable, but overridden
- * versions must invoke {@code super} implementation to maintain
- * guarantees.
- *
- * @param value the result value for this task
- */
- public void complete(V value) {
- try {
- setRawResult(value);
- } catch (Throwable rex) {
- setExceptionalCompletion(rex);
- return;
- }
- setCompletion(NORMAL);
- }
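complete() and completeExceptionally() let code outside the computation supply a task's outcome. A sketch of a task whose exec() never completes it, so the result must be provided externally (names are illustrative; the JDK ForkJoinTask API is assumed):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    // exec() returns false ("not necessarily complete"), so joiners wait until
    // some other code calls complete() with the result.
    class ExternallyCompleted extends ForkJoinTask<String> {
        private String result;
        @Override public String getRawResult()          { return result; }
        @Override protected void setRawResult(String v) { result = v; }
        @Override protected boolean exec()              { return false; }  // completion comes later

        public static void main(String[] args) {
            ExternallyCompleted task = new ExternallyCompleted();
            ForkJoinPool.commonPool().submit(task);
            task.complete("supplied from outside");   // subsequent join()s see this value
            System.out.println(task.join());
        }
    }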
-
- /**
- * Completes this task normally without setting a value. The most
- * recent value established by {@link #setRawResult} (or {@code
- * null} by default) will be returned as the result of subsequent
- * invocations of {@code join} and related operations.
- *
- * @since 1.8
- */
- public final void quietlyComplete() {
- setCompletion(NORMAL);
- }
-
- /**
- * Waits if necessary for the computation to complete, and then
- * retrieves its result.
- *
- * @return the computed result
- * @throws CancellationException if the computation was cancelled
- * @throws ExecutionException if the computation threw an
- * exception
- * @throws InterruptedException if the current thread is not a
- * member of a ForkJoinPool and was interrupted while waiting
- */
- public final V get() throws InterruptedException, ExecutionException {
- int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
- doJoin() : externalInterruptibleAwaitDone();
- Throwable ex;
- if ((s &= DONE_MASK) == CANCELLED)
- throw new CancellationException();
- if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
- throw new ExecutionException(ex);
- return getRawResult();
- }
-
- /**
- * Waits if necessary for at most the given time for the computation
- * to complete, and then retrieves its result, if available.
- *
- * @param timeout the maximum time to wait
- * @param unit the time unit of the timeout argument
- * @return the computed result
- * @throws CancellationException if the computation was cancelled
- * @throws ExecutionException if the computation threw an
- * exception
- * @throws InterruptedException if the current thread is not a
- * member of a ForkJoinPool and was interrupted while waiting
- * @throws TimeoutException if the wait timed out
- */
- public final V get(long timeout, TimeUnit unit)
- throws InterruptedException, ExecutionException, TimeoutException {
- if (Thread.interrupted())
- throw new InterruptedException();
- // Messy in part because we measure in nanosecs, but wait in millisecs
- int s; long ms;
- long ns = unit.toNanos(timeout);
- if ((s = status) >= 0 && ns > 0L) {
- long deadline = System.nanoTime() + ns;
- ForkJoinPool p = null;
- ForkJoinPool.WorkQueue w = null;
- Thread t = Thread.currentThread();
- if (t instanceof ForkJoinWorkerThread) {
- ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
- p = wt.pool;
- w = wt.workQueue;
- p.helpJoinOnce(w, this); // no retries on failure
- }
- else
- ForkJoinPool.externalHelpJoin(this);
- boolean canBlock = false;
- boolean interrupted = false;
- try {
- while ((s = status) >= 0) {
- if (w != null && w.qlock < 0)
- cancelIgnoringExceptions(this);
- else if (!canBlock) {
- if (p == null || p.tryCompensate())
- canBlock = true;
- }
- else {
- if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L &&
- U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) {
- synchronized (this) {
- if (status >= 0) {
- try {
- wait(ms);
- } catch (InterruptedException ie) {
- if (p == null)
- interrupted = true;
- }
- }
- else
- notifyAll();
- }
- }
- if ((s = status) < 0 || interrupted ||
- (ns = deadline - System.nanoTime()) <= 0L)
- break;
- }
- }
- } finally {
- if (p != null && canBlock)
- p.incrementActiveCount();
- }
- if (interrupted)
- throw new InterruptedException();
- }
- if ((s &= DONE_MASK) != NORMAL) {
- Throwable ex;
- if (s == CANCELLED)
- throw new CancellationException();
- if (s != EXCEPTIONAL)
- throw new TimeoutException();
- if ((ex = getThrowableException()) != null)
- throw new ExecutionException(ex);
- }
- return getRawResult();
- }
-
- /**
- * Joins this task, without returning its result or throwing its
- * exception. This method may be useful when processing
- * collections of tasks when some have been cancelled or otherwise
- * known to have aborted.
- */
- public final void quietlyJoin() {
- doJoin();
- }
-
- /**
- * Commences performing this task and awaits its completion if
- * necessary, without returning its result or throwing its
- * exception.
- */
- public final void quietlyInvoke() {
- doInvoke();
- }
-
- /**
- * Possibly executes tasks until the pool hosting the current task
- * {@link ForkJoinPool#isQuiescent is quiescent}. This method may
- * be of use in designs in which many tasks are forked, but none
- * are explicitly joined, instead executing them until all are
- * processed.
- */
- public static void helpQuiesce() {
- Thread t;
- if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
- ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t;
- wt.pool.helpQuiescePool(wt.workQueue);
- }
- else
- ForkJoinPool.quiesceCommonPool();
- }
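helpQuiesce() supports the fork-many, join-none style mentioned above. A sketch, assuming the JDK common pool:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;
    import java.util.concurrent.RecursiveAction;
    import java.util.concurrent.atomic.AtomicInteger;

    // Forks many small tasks without joining any of them, then calls
    // helpQuiesce() to keep executing tasks until the pool is quiescent.
    class FanOut extends RecursiveAction {
        static final AtomicInteger done = new AtomicInteger();

        @Override protected void compute() {
            for (int i = 0; i < 100; i++) {
                ForkJoinTask.adapt(() -> { done.incrementAndGet(); }).fork();   // fire and forget
            }
            ForkJoinTask.helpQuiesce();   // process forked tasks until all are finished
        }

        public static void main(String[] args) {
            ForkJoinPool.commonPool().invoke(new FanOut());
            System.out.println(done.get());   // 100
        }
    }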
-
- /**
- * Resets the internal bookkeeping state of this task, allowing a
- * subsequent {@code fork}. This method allows repeated reuse of
- * this task, but only if reuse occurs when this task has either
- * never been forked, or has been forked, then completed and all
- * outstanding joins of this task have also completed. Effects
- * under any other usage conditions are not guaranteed.
- * This method may be useful when executing
- * pre-constructed trees of subtasks in loops.
- *
- * <p>Upon completion of this method, {@code isDone()} reports
- * {@code false}, and {@code getException()} reports {@code
- * null}. However, the value returned by {@code getRawResult} is
- * unaffected. To clear this value, you can invoke {@code
- * setRawResult(null)}.
- */
- public void reinitialize() {
- if ((status & DONE_MASK) == EXCEPTIONAL)
- clearExceptionalCompletion();
- else
- status = 0;
- }
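A sketch of the reuse pattern this javadoc describes: invoke a pre-constructed task, then reinitialize() it before the next iteration (JDK API assumed):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    // Reuses one task object across several invocations: after each completed
    // invoke() the task is reinitialize()d so it may be run again.
    class CountingTask extends RecursiveTask<Integer> {
        private int runs;
        @Override protected Integer compute() { return ++runs; }

        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();
            CountingTask task = new CountingTask();
            for (int i = 1; i <= 3; i++) {
                System.out.println(pool.invoke(task));   // prints 1, 2, 3
                task.reinitialize();                     // clear completion state before reuse
            }
            pool.shutdown();
        }
    }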
-
- /**
- * Returns the pool hosting the current task execution, or null
- * if this task is executing outside of any ForkJoinPool.
- *
- * @see #inForkJoinPool
- * @return the pool, or {@code null} if none
- */
- public static ForkJoinPool getPool() {
- Thread t = Thread.currentThread();
- return (t instanceof ForkJoinWorkerThread) ?
- ((ForkJoinWorkerThread) t).pool : null;
- }
-
- /**
- * Returns {@code true} if the current thread is a {@link
- * ForkJoinWorkerThread} executing as a ForkJoinPool computation.
- *
- * @return {@code true} if the current thread is a {@link
- * ForkJoinWorkerThread} executing as a ForkJoinPool computation,
- * or {@code false} otherwise
- */
- public static boolean inForkJoinPool() {
- return Thread.currentThread() instanceof ForkJoinWorkerThread;
- }
-
- /**
- * Tries to unschedule this task for execution. This method will
- * typically (but is not guaranteed to) succeed if this task is
- * the most recently forked task by the current thread, and has
- * not commenced executing in another thread. This method may be
- * useful when arranging alternative local processing of tasks
- * that could have been, but were not, stolen.
- *
- * @return {@code true} if unforked
- */
- public boolean tryUnfork() {
- Thread t;
- return (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
- ((ForkJoinWorkerThread)t).workQueue.tryUnpush(this) :
- ForkJoinPool.tryExternalUnpush(this));
- }
-
- /**
- * Returns an estimate of the number of tasks that have been
- * forked by the current worker thread but not yet executed. This
- * value may be useful for heuristic decisions about whether to
- * fork other tasks.
- *
- * @return the number of tasks
- */
- public static int getQueuedTaskCount() {
- Thread t; ForkJoinPool.WorkQueue q;
- if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
- q = ((ForkJoinWorkerThread)t).workQueue;
- else
- q = ForkJoinPool.commonSubmitterQueue();
- return (q == null) ? 0 : q.queueSize();
- }
-
- /**
- * Returns an estimate of how many more locally queued tasks are
- * held by the current worker thread than there are other worker
- * threads that might steal them, or zero if this thread is not
- * operating in a ForkJoinPool. This value may be useful for
- * heuristic decisions about whether to fork other tasks. In many
- * usages of ForkJoinTasks, at steady state, each worker should
- * aim to maintain a small constant surplus (for example, 3) of
- * tasks, and to process computations locally if this threshold is
- * exceeded.
- *
- * @return the surplus number of tasks, which may be negative
- */
- public static int getSurplusQueuedTaskCount() {
- return ForkJoinPool.getSurplusQueuedTaskCount();
- }
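The surplus heuristic suggested above can be wired directly into a compute() method: stop splitting once the local queue already holds a few more tasks than there are likely thieves. A sketch with a threshold of 3, using the JDK classes:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;
    import java.util.concurrent.RecursiveTask;

    // Splits while the local surplus is small; once it exceeds 3, computes
    // the remaining range sequentially instead of forking further.
    class AdaptiveSum extends RecursiveTask<Long> {
        final long lo, hi;
        AdaptiveSum(long lo, long hi) { this.lo = lo; this.hi = hi; }

        @Override protected Long compute() {
            if (hi - lo <= 1_000 || ForkJoinTask.getSurplusQueuedTaskCount() > 3) {
                long s = 0;
                for (long i = lo; i < hi; i++) s += i;   // sequential leaf
                return s;
            }
            long mid = (lo + hi) >>> 1;
            AdaptiveSum left = new AdaptiveSum(lo, mid);
            left.fork();
            return new AdaptiveSum(mid, hi).compute() + left.join();
        }

        public static void main(String[] args) {
            System.out.println(ForkJoinPool.commonPool().invoke(new AdaptiveSum(0, 10_000_000)));
        }
    }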
-
- // Extension methods
-
- /**
- * Returns the result that would be returned by {@link #join}, even
- * if this task completed abnormally, or {@code null} if this task
- * is not known to have been completed. This method is designed
- * to aid debugging, as well as to support extensions. Its use in
- * any other context is discouraged.
- *
- * @return the result, or {@code null} if not completed
- */
- public abstract V getRawResult();
-
- /**
- * Forces the given value to be returned as a result. This method
- * is designed to support extensions, and should not in general be
- * called otherwise.
- *
- * @param value the value
- */
- protected abstract void setRawResult(V value);
-
- /**
- * Immediately performs the base action of this task and returns
- * true if, upon return from this method, this task is guaranteed
- * to have completed normally. This method may return false
- * otherwise, to indicate that this task is not necessarily
- * complete (or is not known to be complete), for example in
- * asynchronous actions that require explicit invocations of
- * completion methods. This method may also throw an (unchecked)
- * exception to indicate abnormal exit. This method is designed to
- * support extensions, and should not in general be called
- * otherwise.
- *
- * @return {@code true} if this task is known to have completed normally
- */
- protected abstract boolean exec();
-
- /**
- * Returns, but does not unschedule or execute, a task queued by
- * the current thread but not yet executed, if one is immediately
- * available. There is no guarantee that this task will actually
- * be polled or executed next. Conversely, this method may return
- * null even if a task exists but cannot be accessed without
- * contention with other threads. This method is designed
- * primarily to support extensions, and is unlikely to be useful
- * otherwise.
- *
- * @return the next task, or {@code null} if none are available
- */
- protected static ForkJoinTask<?> peekNextLocalTask() {
- Thread t; ForkJoinPool.WorkQueue q;
- if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)
- q = ((ForkJoinWorkerThread)t).workQueue;
- else
- q = ForkJoinPool.commonSubmitterQueue();
- return (q == null) ? null : q.peek();
- }
-
- /**
- * Unschedules and returns, without executing, the next task
- * queued by the current thread but not yet executed, if the
- * current thread is operating in a ForkJoinPool. This method is
- * designed primarily to support extensions, and is unlikely to be
- * useful otherwise.
- *
- * @return the next task, or {@code null} if none are available
- */
- protected static ForkJoinTask<?> pollNextLocalTask() {
- Thread t;
- return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
- ((ForkJoinWorkerThread)t).workQueue.nextLocalTask() :
- null;
- }
-
- /**
- * If the current thread is operating in a ForkJoinPool,
- * unschedules and returns, without executing, the next task
- * queued by the current thread but not yet executed, if one is
- * available, or if not available, a task that was forked by some
- * other thread, if available. Availability may be transient, so a
- * {@code null} result does not necessarily imply quiescence of
- * the pool this task is operating in. This method is designed
- * primarily to support extensions, and is unlikely to be useful
- * otherwise.
- *
- * @return a task, or {@code null} if none are available
- */
- protected static ForkJoinTask<?> pollTask() {
- Thread t; ForkJoinWorkerThread wt;
- return ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) ?
- (wt = (ForkJoinWorkerThread)t).pool.nextTaskFor(wt.workQueue) :
- null;
- }
-
- // tag operations
-
- /**
- * Returns the tag for this task.
- *
- * @return the tag for this task
- * @since 1.8
- */
- public final short getForkJoinTaskTag() {
- return (short)status;
- }
-
- /**
- * Atomically sets the tag value for this task.
- *
- * @param tag the tag value
- * @return the previous value of the tag
- * @since 1.8
- */
- public final short setForkJoinTaskTag(short tag) {
- for (int s;;) {
- if (U.compareAndSwapInt(this, STATUS, s = status,
- (s & ~SMASK) | (tag & SMASK)))
- return (short)s;
- }
- }
-
- /**
- * Atomically conditionally sets the tag value for this task.
- * Among other applications, tags can be used as visit markers
- * in tasks operating on graphs, as in methods that check: {@code
- * if (task.compareAndSetForkJoinTaskTag((short)0, (short)1))}
- * before processing, otherwise exiting because the node has
- * already been visited.
- *
- * @param e the expected tag value
- * @param tag the new tag value
- * @return true if successful; i.e., the current value was
- * equal to e and is now tag.
- * @since 1.8
- */
- public final boolean compareAndSetForkJoinTaskTag(short e, short tag) {
- for (int s;;) {
- if ((short)(s = status) != e)
- return false;
- if (U.compareAndSwapInt(this, STATUS, s,
- (s & ~SMASK) | (tag & SMASK)))
- return true;
- }
- }
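A sketch of the visit-marker use of tags suggested in the javadoc above: a hypothetical GraphNode task processes itself only if it can atomically move its tag from 0 to 1, so nodes shared between parents are handled exactly once (JDK API assumed):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    class GraphNode extends RecursiveAction {
        final String name;
        final List<GraphNode> successors = new ArrayList<>();
        GraphNode(String name) { this.name = name; }

        @Override protected void compute() {
            if (compareAndSetForkJoinTaskTag((short) 0, (short) 1)) {   // first visit only
                System.out.println("visiting " + name);
                for (GraphNode next : successors)
                    next.invoke();          // simple sequential descent for the sketch
            }
        }

        public static void main(String[] args) {
            GraphNode a = new GraphNode("a"), b = new GraphNode("b"),
                      c = new GraphNode("c"), d = new GraphNode("d");
            a.successors.add(b); a.successors.add(c);
            b.successors.add(d); c.successors.add(d);   // d is reachable twice
            ForkJoinPool.commonPool().invoke(a);        // prints each node once
        }
    }

Invoking d a second time is harmless here: an already-completed task simply reports its prior completion.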
-
- /**
- * Adaptor for Runnables. This implements RunnableFuture
- * to be compliant with AbstractExecutorService constraints
- * when used in ForkJoinPool.
- */
- @Deprecated
- static final class AdaptedRunnable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Runnable runnable;
- T result;
- AdaptedRunnable(Runnable runnable, T result) {
- if (runnable == null) throw new NullPointerException();
- this.runnable = runnable;
- this.result = result; // OK to set this even before completion
- }
- public final T getRawResult() { return result; }
- public final void setRawResult(T v) { result = v; }
- public final boolean exec() { runnable.run(); return true; }
- public final void run() { invoke(); }
- private static final long serialVersionUID = 5232453952276885070L;
- }
-
- /**
- * Adaptor for Runnables without results
- */
- @Deprecated
- static final class AdaptedRunnableAction extends ForkJoinTask<Void>
- implements RunnableFuture<Void> {
- final Runnable runnable;
- AdaptedRunnableAction(Runnable runnable) {
- if (runnable == null) throw new NullPointerException();
- this.runnable = runnable;
- }
- public final Void getRawResult() { return null; }
- public final void setRawResult(Void v) { }
- public final boolean exec() { runnable.run(); return true; }
- public final void run() { invoke(); }
- private static final long serialVersionUID = 5232453952276885070L;
- }
-
- /**
- * Adaptor for Callables
- */
- @Deprecated
- static final class AdaptedCallable<T> extends ForkJoinTask<T>
- implements RunnableFuture<T> {
- final Callable<? extends T> callable;
- T result;
- AdaptedCallable(Callable<? extends T> callable) {
- if (callable == null) throw new NullPointerException();
- this.callable = callable;
- }
- public final T getRawResult() { return result; }
- public final void setRawResult(T v) { result = v; }
- public final boolean exec() {
- try {
- result = callable.call();
- return true;
- } catch (Error err) {
- throw err;
- } catch (RuntimeException rex) {
- throw rex;
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- public final void run() { invoke(); }
- private static final long serialVersionUID = 2838392045355241008L;
- }
-
- /**
- * Returns a new {@code ForkJoinTask} that performs the {@code run}
- * method of the given {@code Runnable} as its action, and returns
- * a null result upon {@link #join}.
- *
- * @param runnable the runnable action
- * @return the task
- */
- public static ForkJoinTask<?> adapt(Runnable runnable) {
- return new AdaptedRunnableAction(runnable);
- }
-
- /**
- * Returns a new {@code ForkJoinTask} that performs the {@code run}
- * method of the given {@code Runnable} as its action, and returns
- * the given result upon {@link #join}.
- *
- * @param runnable the runnable action
- * @param result the result upon completion
- * @return the task
- */
- public static <T> ForkJoinTask<T> adapt(Runnable runnable, T result) {
- return new AdaptedRunnable<T>(runnable, result);
- }
-
- /**
- * Returns a new {@code ForkJoinTask} that performs the {@code call}
- * method of the given {@code Callable} as its action, and returns
- * its result upon {@link #join}, translating any checked exceptions
- * encountered into {@code RuntimeException}.
- *
- * @param callable the callable action
- * @return the task
- */
- public static <T> ForkJoinTask<T> adapt(Callable<? extends T> callable) {
- return new AdaptedCallable<T>(callable);
- }
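The adapt() factories above let plain Runnable and Callable work participate in a pool. A short sketch against the JDK API; checked exceptions thrown by the Callable would surface from join() wrapped in a RuntimeException, as documented:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    public class AdaptDemo {
        public static void main(String[] args) {
            ForkJoinPool pool = ForkJoinPool.commonPool();

            ForkJoinTask<?> action = ForkJoinTask.adapt(() -> System.out.println("side effect"));
            pool.invoke(action);                          // null result; runs the Runnable

            Callable<Integer> length = () -> "forkjoin".length();
            ForkJoinTask<Integer> task = ForkJoinTask.adapt(length);
            System.out.println(pool.invoke(task));        // 8
        }
    }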
-
- // Serialization support
-
- private static final long serialVersionUID = -7721805057305804111L;
-
- /**
- * Saves this task to a stream (that is, serializes it).
- *
- * @serialData the current run status and the exception thrown
- * during execution, or {@code null} if none
- */
- private void writeObject(java.io.ObjectOutputStream s)
- throws java.io.IOException {
- s.defaultWriteObject();
- s.writeObject(getException());
- }
-
- /**
- * Reconstitutes this task from a stream (that is, deserializes it).
- */
- private void readObject(java.io.ObjectInputStream s)
- throws java.io.IOException, ClassNotFoundException {
- s.defaultReadObject();
- Object ex = s.readObject();
- if (ex != null)
- setExceptionalCompletion((Throwable)ex);
- }
-
- // Unsafe mechanics
- private static final sun.misc.Unsafe U;
- private static final long STATUS;
-
- static {
- exceptionTableLock = new ReentrantLock();
- exceptionTableRefQueue = new ReferenceQueue<Object>();
- exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
- try {
- U = getUnsafe();
- Class<?> k = ForkJoinTask.class;
- STATUS = U.objectFieldOffset
- (k.getDeclaredField("status"));
- } catch (Exception e) {
- throw new Error(e);
- }
- }
-
- /**
- * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
- * Replace with a simple call to Unsafe.getUnsafe when integrating
- * into a jdk.
- *
- * @return a sun.misc.Unsafe
- */
- private static sun.misc.Unsafe getUnsafe() {
- return scala.concurrent.util.Unsafe.instance;
- }
-}
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java b/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
deleted file mode 100644
index e00fb5cc43..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/ForkJoinWorkerThread.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-/**
- * A thread managed by a {@link ForkJoinPool}, which executes
- * {@link ForkJoinTask}s.
- * This class is subclassable solely for the sake of adding
- * functionality -- there are no overridable methods dealing with
- * scheduling or execution. However, you can override initialization
- * and termination methods surrounding the main task processing loop.
- * If you do create such a subclass, you will also need to supply a
- * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it
- * in a {@code ForkJoinPool}.
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public class ForkJoinWorkerThread extends Thread {
- /*
- * ForkJoinWorkerThreads are managed by ForkJoinPools and perform
- * ForkJoinTasks. For explanation, see the internal documentation
- * of class ForkJoinPool.
- *
- * This class just maintains links to its pool and WorkQueue. The
- * pool field is set immediately upon construction, but the
- * workQueue field is not set until a call to registerWorker
- * completes. This leads to a visibility race that is tolerated
- * by requiring that the workQueue field is only accessed by the
- * owning thread.
- */
-
- final ForkJoinPool pool; // the pool this thread works in
- final ForkJoinPool.WorkQueue workQueue; // work-stealing mechanics
-
- /**
- * Creates a ForkJoinWorkerThread operating in the given pool.
- *
- * @param pool the pool this thread works in
- * @throws NullPointerException if pool is null
- */
- protected ForkJoinWorkerThread(ForkJoinPool pool) {
- // Use a placeholder until a useful name can be set in registerWorker
- super("aForkJoinWorkerThread");
- this.pool = pool;
- this.workQueue = pool.registerWorker(this);
- }
-
- /**
- * Returns the pool hosting this thread.
- *
- * @return the pool
- */
- public ForkJoinPool getPool() {
- return pool;
- }
-
- /**
- * Returns the index number of this thread in its pool. The
- * returned value ranges from zero to the maximum number of
- * threads (minus one) that have ever been created in the pool.
- * This method may be useful for applications that track status or
- * collect results per-worker rather than per-task.
- *
- * @return the index number
- */
- public int getPoolIndex() {
- return workQueue.poolIndex;
- }
-
- /**
- * Initializes internal state after construction but before
- * processing any tasks. If you override this method, you must
- * invoke {@code super.onStart()} at the beginning of the method.
- * Initialization requires care: Most fields must have legal
- * default values, to ensure that attempted accesses from other
- * threads work correctly even before this thread starts
- * processing tasks.
- */
- protected void onStart() {
- }
-
- /**
- * Performs cleanup associated with termination of this worker
- * thread. If you override this method, you must invoke
- * {@code super.onTermination} at the end of the overridden method.
- *
- * @param exception the exception causing this thread to abort due
- * to an unrecoverable error, or {@code null} if completed normally
- */
- protected void onTermination(Throwable exception) {
- }
-
- /**
- * This method is required to be public, but should never be
- * called explicitly. It performs the main run loop to execute
- * {@link ForkJoinTask}s.
- */
- public void run() {
- Throwable exception = null;
- try {
- onStart();
- pool.runWorker(workQueue);
- } catch (Throwable ex) {
- exception = ex;
- } finally {
- try {
- onTermination(exception);
- } catch (Throwable ex) {
- if (exception == null)
- exception = ex;
- } finally {
- pool.deregisterWorker(this, exception);
- }
- }
- }
-}
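To actually use a ForkJoinWorkerThread subclass, the pool must be given a matching ForkJoinWorkerThreadFactory, as the class javadoc notes. A sketch against the equivalent java.util.concurrent classes (names are illustrative):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory;
    import java.util.concurrent.ForkJoinWorkerThread;
    import java.util.concurrent.TimeUnit;

    // A worker subclass that only adds logging around the main loop, paired
    // with the custom thread factory a pool needs in order to use it.
    public class LoggingWorkerDemo {
        static final class LoggingWorker extends ForkJoinWorkerThread {
            LoggingWorker(ForkJoinPool pool) { super(pool); }

            @Override protected void onStart() {
                super.onStart();                              // must come first
                System.out.println(getName() + " starting, index " + getPoolIndex());
            }

            @Override protected void onTermination(Throwable exception) {
                System.out.println(getName() + " terminating: " + exception);
                super.onTermination(exception);               // must come last
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ForkJoinWorkerThreadFactory factory = LoggingWorker::new;
            ForkJoinPool pool = new ForkJoinPool(2, factory, null, false);
            pool.submit(() -> System.out.println("task runs on " + Thread.currentThread().getName()));
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
        }
    }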
diff --git a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
deleted file mode 100644
index 47d52af895..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/LinkedTransferQueue.java
+++ /dev/null
@@ -1,1338 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-import java.util.AbstractQueue;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.Queue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.LockSupport;
-
-/**
- * An unbounded {@link TransferQueue} based on linked nodes.
- * This queue orders elements FIFO (first-in-first-out) with respect
- * to any given producer. The <em>head</em> of the queue is that
- * element that has been on the queue the longest time for some
- * producer. The <em>tail</em> of the queue is that element that has
- * been on the queue the shortest time for some producer.
- *
- * <p>Beware that, unlike in most collections, the {@code size} method
- * is <em>NOT</em> a constant-time operation. Because of the
- * asynchronous nature of these queues, determining the current number
- * of elements requires a traversal of the elements, and so may report
- * inaccurate results if this collection is modified during traversal.
- * Additionally, the bulk operations {@code addAll},
- * {@code removeAll}, {@code retainAll}, {@code containsAll},
- * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
- * to be performed atomically. For example, an iterator operating
- * concurrently with an {@code addAll} operation might view only some
- * of the added elements.
- *
- * <p>This class and its iterator implement all of the
- * <em>optional</em> methods of the {@link Collection} and {@link
- * Iterator} interfaces.
- *
- * <p>Memory consistency effects: As with other concurrent
- * collections, actions in a thread prior to placing an object into a
- * {@code LinkedTransferQueue}
- * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
- * actions subsequent to the access or removal of that element from
- * the {@code LinkedTransferQueue} in another thread.
- *
- * <p>This class is a member of the
- * <a href="{@docRoot}/../technotes/guides/collections/index.html">
- * Java Collections Framework</a>.
- *
- * @since 1.7
- * @author Doug Lea
- * @param <E> the type of elements held in this collection
- */
-@Deprecated
-public class LinkedTransferQueue<E> extends AbstractQueue<E>
- implements TransferQueue<E>, java.io.Serializable {
- private static final long serialVersionUID = -3223113410248163686L;
-
- /*
- * *** Overview of Dual Queues with Slack ***
- *
- * Dual Queues, introduced by Scherer and Scott
- * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
- * (linked) queues in which nodes may represent either data or
- * requests. When a thread tries to enqueue a data node, but
- * encounters a request node, it instead "matches" and removes it;
- * and vice versa for enqueuing requests. Blocking Dual Queues
- * arrange that threads enqueuing unmatched requests block until
- * other threads provide the match. Dual Synchronous Queues (see
- * Scherer, Lea, & Scott
- * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
- * additionally arrange that threads enqueuing unmatched data also
- * block. Dual Transfer Queues support all of these modes, as
- * dictated by callers.
- *
- * A FIFO dual queue may be implemented using a variation of the
- * Michael & Scott (M&S) lock-free queue algorithm
- * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
- * It maintains two pointer fields, "head", pointing to a
- * (matched) node that in turn points to the first actual
- * (unmatched) queue node (or null if empty); and "tail" that
- * points to the last node on the queue (or again null if
- * empty). For example, here is a possible queue with four data
- * elements:
- *
- * head tail
- * | |
- * v v
- * M -> U -> U -> U -> U
- *
- * The M&S queue algorithm is known to be prone to scalability and
- * overhead limitations when maintaining (via CAS) these head and
- * tail pointers. This has led to the development of
- * contention-reducing variants such as elimination arrays (see
- * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
- * optimistic back pointers (see Ladan-Mozes & Shavit
- * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
- * However, the nature of dual queues enables a simpler tactic for
- * improving M&S-style implementations when dual-ness is needed.
- *
- * In a dual queue, each node must atomically maintain its match
- * status. While there are other possible variants, we implement
- * this here as: for a data-mode node, matching entails CASing an
- * "item" field from a non-null data value to null upon match, and
- * vice-versa for request nodes, CASing from null to a data
- * value. (Note that the linearization properties of this style of
- * queue are easy to verify -- elements are made available by
- * linking, and unavailable by matching.) Compared to plain M&S
- * queues, this property of dual queues requires one additional
- * successful atomic operation per enq/deq pair. But it also
- * enables lower cost variants of queue maintenance mechanics. (A
- * variation of this idea applies even for non-dual queues that
- * support deletion of interior elements, such as
- * j.u.c.ConcurrentLinkedQueue.)
- *
- * Once a node is matched, its match status can never again
- * change. We may thus arrange that the linked list of them
- * contain a prefix of zero or more matched nodes, followed by a
- * suffix of zero or more unmatched nodes. (Note that we allow
- * both the prefix and suffix to be zero length, which in turn
- * means that we do not use a dummy header.) If we were not
- * concerned with either time or space efficiency, we could
- * correctly perform enqueue and dequeue operations by traversing
- * from a pointer to the initial node; CASing the item of the
- * first unmatched node on match and CASing the next field of the
- * trailing node on appends. (Plus some special-casing when
- * initially empty). While this would be a terrible idea in
- * itself, it does have the benefit of not requiring ANY atomic
- * updates on head/tail fields.
- *
- * We introduce here an approach that lies between the extremes of
- * never versus always updating queue (head and tail) pointers.
- * This offers a tradeoff between sometimes requiring extra
- * traversal steps to locate the first and/or last unmatched
- * nodes, versus the reduced overhead and contention of fewer
- * updates to queue pointers. For example, a possible snapshot of
- * a queue is:
- *
- * head tail
- * | |
- * v v
- * M -> M -> U -> U -> U -> U
- *
- * The best value for this "slack" (the targeted maximum distance
- * between the value of "head" and the first unmatched node, and
- * similarly for "tail") is an empirical matter. We have found
- * that using very small constants in the range of 1-3 work best
- * over a range of platforms. Larger values introduce increasing
- * costs of cache misses and risks of long traversal chains, while
- * smaller values increase CAS contention and overhead.
- *
- * Dual queues with slack differ from plain M&S dual queues by
- * virtue of only sometimes updating head or tail pointers when
- * matching, appending, or even traversing nodes; in order to
- * maintain a targeted slack. The idea of "sometimes" may be
- * operationalized in several ways. The simplest is to use a
- * per-operation counter incremented on each traversal step, and
- * to try (via CAS) to update the associated queue pointer
- * whenever the count exceeds a threshold. Another, that requires
- * more overhead, is to use random number generators to update
- * with a given probability per traversal step.
- *
- * In any strategy along these lines, because CASes updating
- * fields may fail, the actual slack may exceed targeted
- * slack. However, they may be retried at any time to maintain
- * targets. Even when using very small slack values, this
- * approach works well for dual queues because it allows all
- * operations up to the point of matching or appending an item
- * (hence potentially allowing progress by another thread) to be
- * read-only, thus not introducing any further contention. As
- * described below, we implement this by performing slack
- * maintenance retries only after these points.
- *
- * As an accompaniment to such techniques, traversal overhead can
- * be further reduced without increasing contention of head
- * pointer updates: Threads may sometimes shortcut the "next" link
- * path from the current "head" node to be closer to the currently
- * known first unmatched node, and similarly for tail. Again, this
- * may be triggered with using thresholds or randomization.
- *
- * These ideas must be further extended to avoid unbounded amounts
- * of costly-to-reclaim garbage caused by the sequential "next"
- * links of nodes starting at old forgotten head nodes: As first
- * described in detail by Boehm
- * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
- * delays noticing that any arbitrarily old node has become
- * garbage, all newer dead nodes will also be unreclaimed.
- * (Similar issues arise in non-GC environments.) To cope with
- * this in our implementation, upon CASing to advance the head
- * pointer, we set the "next" link of the previous head to point
- * only to itself; thus limiting the length of connected dead lists.
- * (We also take similar care to wipe out possibly garbage
- * retaining values held in other Node fields.) However, doing so
- * adds some further complexity to traversal: If any "next"
- * pointer links to itself, it indicates that the current thread
- * has lagged behind a head-update, and so the traversal must
- * continue from the "head". Traversals trying to find the
- * current tail starting from "tail" may also encounter
- * self-links, in which case they also continue at "head".
- *
- * It is tempting in a slack-based scheme to not even use CAS for
- * updates (similarly to Ladan-Mozes & Shavit). However, this
- * cannot be done for head updates under the above link-forgetting
- * mechanics because an update may leave head at a detached node.
- * And while direct writes are possible for tail updates, they
- * increase the risk of long retraversals, and hence long garbage
- * chains, which can be much more costly than is worthwhile
- * considering that the cost difference of performing a CAS vs
- * write is smaller when they are not triggered on each operation
- * (especially considering that writes and CASes equally require
- * additional GC bookkeeping ("write barriers") that are sometimes
- * more costly than the writes themselves because of contention).
- *
- * *** Overview of implementation ***
- *
- * We use a threshold-based approach to updates, with a slack
- * threshold of two -- that is, we update head/tail when the
- * current pointer appears to be two or more steps away from the
- * first/last node. The slack value is hard-wired: a path greater
- * than one is naturally implemented by checking equality of
- * traversal pointers except when the list has only one element,
- * in which case we keep slack threshold at one. Avoiding tracking
- * explicit counts across method calls slightly simplifies an
- * already-messy implementation. Using randomization would
- * probably work better if there were a low-quality dirt-cheap
- * per-thread one available, but even ThreadLocalRandom is too
- * heavy for these purposes.
- *
- * With such a small slack threshold value, it is not worthwhile
- * to augment this with path short-circuiting (i.e., unsplicing
- * interior nodes) except in the case of cancellation/removal (see
- * below).
- *
- * We allow both the head and tail fields to be null before any
- * nodes are enqueued; initializing upon first append. This
- * simplifies some other logic, as well as providing more
- * efficient explicit control paths instead of letting JVMs insert
- * implicit NullPointerExceptions when they are null. While not
- * currently fully implemented, we also leave open the possibility
- * of re-nulling these fields when empty (which is complicated to
- * arrange, for little benefit.)
- *
- * All enqueue/dequeue operations are handled by the single method
- * "xfer" with parameters indicating whether to act as some form
- * of offer, put, poll, take, or transfer (each possibly with
- * timeout). The relative complexity of using one monolithic
- * method outweighs the code bulk and maintenance problems of
- * using separate methods for each case.
- *
- * Operation consists of up to three phases. The first is
- * implemented within method xfer, the second in tryAppend, and
- * the third in method awaitMatch.
- *
- * 1. Try to match an existing node
- *
- * Starting at head, skip already-matched nodes until finding
- * an unmatched node of opposite mode, if one exists, in which
- * case matching it and returning, also if necessary updating
- * head to one past the matched node (or the node itself if the
- * list has no other unmatched nodes). If the CAS misses, then
- * a loop retries advancing head by two steps until either
- * success or the slack is at most two. By requiring that each
- * attempt advances head by two (if applicable), we ensure that
- * the slack does not grow without bound. Traversals also check
- * if the initial head is now off-list, in which case they
- * start at the new head.
- *
- * If no candidates are found and the call was untimed
- * poll/offer, (argument "how" is NOW) return.
- *
- * 2. Try to append a new node (method tryAppend)
- *
- * Starting at current tail pointer, find the actual last node
- * and try to append a new node (or if head was null, establish
- * the first node). Nodes can be appended only if their
- * predecessors are either already matched or are of the same
- * mode. If we detect otherwise, then a new node with opposite
- * mode must have been appended during traversal, so we must
- * restart at phase 1. The traversal and update steps are
- * otherwise similar to phase 1: Retrying upon CAS misses and
- * checking for staleness. In particular, if a self-link is
- * encountered, then we can safely jump to a node on the list
- * by continuing the traversal at current head.
- *
- * On successful append, if the call was ASYNC, return.
- *
- * 3. Await match or cancellation (method awaitMatch)
- *
- * Wait for another thread to match node; instead cancelling if
- * the current thread was interrupted or the wait timed out. On
- * multiprocessors, we use front-of-queue spinning: If a node
- * appears to be the first unmatched node in the queue, it
- * spins a bit before blocking. In either case, before blocking
- * it tries to unsplice any nodes between the current "head"
- * and the first unmatched node.
- *
- * Front-of-queue spinning vastly improves performance of
- * heavily contended queues. And so long as it is relatively
- * brief and "quiet", spinning does not much impact performance
- * of less-contended queues. During spins threads check their
- * interrupt status and generate a thread-local random number
- * to decide to occasionally perform a Thread.yield. While
- * yield has underdefined specs, we assume that it might help,
- * and will not hurt, in limiting impact of spinning on busy
- * systems. We also use smaller (1/2) spins for nodes that are
- * not known to be front but whose predecessors have not
- * blocked -- these "chained" spins avoid artifacts of
- * front-of-queue rules which otherwise lead to alternating
- * nodes spinning vs blocking. Further, front threads that
- * represent phase changes (from data to request node or vice
- * versa) compared to their predecessors receive additional
- * chained spins, reflecting longer paths typically required to
- * unblock threads during phase changes.
- *
- *
- * ** Unlinking removed interior nodes **
- *
- * In addition to minimizing garbage retention via self-linking
- * described above, we also unlink removed interior nodes. These
- * may arise due to timed out or interrupted waits, or calls to
- * remove(x) or Iterator.remove. Normally, given a node that was
- * at one time known to be the predecessor of some node s that is
- * to be removed, we can unsplice s by CASing the next field of
- * its predecessor if it still points to s (otherwise s must
- * already have been removed or is now offlist). But there are two
- * situations in which we cannot guarantee to make node s
- * unreachable in this way: (1) If s is the trailing node of list
- * (i.e., with null next), then it is pinned as the target node
- * for appends, so can only be removed later after other nodes are
- * appended. (2) We cannot necessarily unlink s given a
- * predecessor node that is matched (including the case of being
- * cancelled): the predecessor may already be unspliced, in which
- * case some previous reachable node may still point to s.
- * (For further explanation see Herlihy & Shavit "The Art of
- * Multiprocessor Programming" chapter 9). Although, in both
- * cases, we can rule out the need for further action if either s
- * or its predecessor are (or can be made to be) at, or fall off
- * from, the head of list.
- *
- * Without taking these into account, it would be possible for an
- * unbounded number of supposedly removed nodes to remain
- * reachable. Situations leading to such buildup are uncommon but
- * can occur in practice; for example when a series of short timed
- * calls to poll repeatedly time out but never otherwise fall off
- * the list because of an untimed call to take at the front of the
- * queue.
- *
- * When these cases arise, rather than always retraversing the
- * entire list to find an actual predecessor to unlink (which
- * won't help for case (1) anyway), we record a conservative
- * estimate of possible unsplice failures (in "sweepVotes").
- * We trigger a full sweep when the estimate exceeds a threshold
- * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
- * removal failures to tolerate before sweeping through, unlinking
- * cancelled nodes that were not unlinked upon initial removal.
- * We perform sweeps by the thread hitting threshold (rather than
- * background threads or by spreading work to other threads)
- * because in the main contexts in which removal occurs, the
- * caller is already timed-out, cancelled, or performing a
- * potentially O(n) operation (e.g. remove(x)), none of which are
- * time-critical enough to warrant the overhead that alternatives
- * would impose on other threads.
- *
- * Because the sweepVotes estimate is conservative, and because
- * nodes become unlinked "naturally" as they fall off the head of
- * the queue, and because we allow votes to accumulate even while
- * sweeps are in progress, there are typically significantly fewer
- * such nodes than estimated. Choice of a threshold value
- * balances the likelihood of wasted effort and contention, versus
- * providing a worst-case bound on retention of interior nodes in
- * quiescent queues. The value defined below was chosen
- * empirically to balance these under various timeout scenarios.
- *
- * Note that we cannot self-link unlinked interior nodes during
- * sweeps. However, the associated garbage chains terminate when
- * some successor ultimately falls off the head of the list and is
- * self-linked.
- */
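The dual-queue behaviour described above is easiest to see from the outside: put() enqueues a data node and returns, tryTransfer() refuses to enqueue when no consumer is waiting, and transfer() blocks until a request node matches it. A sketch using the equivalent java.util.concurrent.LinkedTransferQueue:

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TimeUnit;

    public class TransferDemo {
        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<String> queue = new LinkedTransferQueue<>();

            queue.put("async");                             // enqueued; nobody waiting yet
            System.out.println(queue.tryTransfer("eager")); // false: no waiting consumer

            Thread consumer = new Thread(() -> {
                try {
                    System.out.println("took: " + queue.take());   // "async"
                    System.out.println("took: " + queue.take());   // "handed off"
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            consumer.start();

            queue.transfer("handed off");                   // blocks until the consumer takes it
            consumer.join(TimeUnit.SECONDS.toMillis(10));
        }
    }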
-
- /** True if on multiprocessor */
- private static final boolean MP =
- Runtime.getRuntime().availableProcessors() > 1;
-
- /**
- * The number of times to spin (with randomly interspersed calls
- * to Thread.yield) on multiprocessor before blocking when a node
- * is apparently the first waiter in the queue. See above for
- * explanation. Must be a power of two. The value is empirically
- * derived -- it works pretty well across a variety of processors,
- * numbers of CPUs, and OSes.
- */
- private static final int FRONT_SPINS = 1 << 7;
-
- /**
- * The number of times to spin before blocking when a node is
- * preceded by another node that is apparently spinning. Also
- * serves as an increment to FRONT_SPINS on phase changes, and as
- * base average frequency for yielding during spins. Must be a
- * power of two.
- */
- private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;
-
- /**
- * The maximum number of estimated removal failures (sweepVotes)
- * to tolerate before sweeping through the queue unlinking
- * cancelled nodes that were not unlinked upon initial
- * removal. See above for explanation. The value must be at least
- * two to avoid useless sweeps when removing trailing nodes.
- */
- static final int SWEEP_THRESHOLD = 32;
-
- /**
- * Queue nodes. Uses Object, not E, for items to allow forgetting
- * them after use. Relies heavily on Unsafe mechanics to minimize
- * unnecessary ordering constraints: Writes that are intrinsically
- * ordered wrt other accesses or CASes use simple relaxed forms.
- */
- @Deprecated
- static final class Node {
- final boolean isData; // false if this is a request node
- volatile Object item; // initially non-null if isData; CASed to match
- volatile Node next;
- volatile Thread waiter; // null until waiting
-
- // CAS methods for fields
- final boolean casNext(Node cmp, Node val) {
- return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
- }
-
- final boolean casItem(Object cmp, Object val) {
- // assert cmp == null || cmp.getClass() != Node.class;
- return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
- }
-
- /**
- * Constructs a new node. Uses relaxed write because item can
- * only be seen after publication via casNext.
- */
- Node(Object item, boolean isData) {
- UNSAFE.putObject(this, itemOffset, item); // relaxed write
- this.isData = isData;
- }
-
- /**
- * Links node to itself to avoid garbage retention. Called
- * only after CASing head field, so uses relaxed write.
- */
- final void forgetNext() {
- UNSAFE.putObject(this, nextOffset, this);
- }
-
- /**
- * Sets item to self and waiter to null, to avoid garbage
- * retention after matching or cancelling. Uses relaxed writes
- * because order is already constrained in the only calling
- * contexts: item is forgotten only after volatile/atomic
- * mechanics that extract items. Similarly, clearing waiter
- * follows either CAS or return from park (if ever parked;
- * else we don't care).
- */
- final void forgetContents() {
- UNSAFE.putObject(this, itemOffset, this);
- UNSAFE.putObject(this, waiterOffset, null);
- }
-
- /**
- * Returns true if this node has been matched, including the
- * case of artificial matches due to cancellation.
- */
- final boolean isMatched() {
- Object x = item;
- return (x == this) || ((x == null) == isData);
- }
-
- /**
- * Returns true if this is an unmatched request node.
- */
- final boolean isUnmatchedRequest() {
- return !isData && item == null;
- }
-
- /**
- * Returns true if a node with the given mode cannot be
- * appended to this node because this node is unmatched and
- * has opposite data mode.
- */
- final boolean cannotPrecede(boolean haveData) {
- boolean d = isData;
- Object x;
- return d != haveData && (x = item) != this && (x != null) == d;
- }
-
- /**
- * Tries to artificially match a data node -- used by remove.
- */
- final boolean tryMatchData() {
- // assert isData;
- Object x = item;
- if (x != null && x != this && casItem(x, null)) {
- LockSupport.unpark(waiter);
- return true;
- }
- return false;
- }
-
- private static final long serialVersionUID = -3375979862319811754L;
-
- // Unsafe mechanics
- private static final sun.misc.Unsafe UNSAFE;
- private static final long itemOffset;
- private static final long nextOffset;
- private static final long waiterOffset;
- static {
- try {
- UNSAFE = getUnsafe();
- Class<?> k = Node.class;
- itemOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("item"));
- nextOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("next"));
- waiterOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("waiter"));
- } catch (Exception e) {
- throw new Error(e);
- }
- }
- }
-
- /** head of the queue; null until first enqueue */
- transient volatile Node head;
-
- /** tail of the queue; null until first append */
- private transient volatile Node tail;
-
- /** The number of apparent failures to unsplice removed nodes */
- private transient volatile int sweepVotes;
-
- // CAS methods for fields
- private boolean casTail(Node cmp, Node val) {
- return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
- }
-
- private boolean casHead(Node cmp, Node val) {
- return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
- }
-
- private boolean casSweepVotes(int cmp, int val) {
- return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
- }
-
- /*
- * Possible values for "how" argument in xfer method.
- */
- private static final int NOW = 0; // for untimed poll, tryTransfer
- private static final int ASYNC = 1; // for offer, put, add
- private static final int SYNC = 2; // for transfer, take
- private static final int TIMED = 3; // for timed poll, tryTransfer
-
- @SuppressWarnings("unchecked")
- static <E> E cast(Object item) {
- // assert item == null || item.getClass() != Node.class;
- return (E) item;
- }
-
- /**
- * Implements all queuing methods. See above for explanation.
- *
- * @param e the item or null for take
- * @param haveData true if this is a put, else a take
- * @param how NOW, ASYNC, SYNC, or TIMED
- * @param nanos timeout in nanosecs, used only if mode is TIMED
- * @return an item if matched, else e
- * @throws NullPointerException if haveData mode but e is null
- */
- private E xfer(E e, boolean haveData, int how, long nanos) {
- if (haveData && (e == null))
- throw new NullPointerException();
- Node s = null; // the node to append, if needed
-
- retry:
- for (;;) { // restart on append race
-
- for (Node h = head, p = h; p != null;) { // find & match first node
- boolean isData = p.isData;
- Object item = p.item;
- if (item != p && (item != null) == isData) { // unmatched
- if (isData == haveData) // can't match
- break;
- if (p.casItem(item, e)) { // match
- for (Node q = p; q != h;) {
- Node n = q.next; // update by 2 unless singleton
- if (head == h && casHead(h, n == null ? q : n)) {
- h.forgetNext();
- break;
- } // advance and retry
- if ((h = head) == null ||
- (q = h.next) == null || !q.isMatched())
- break; // unless slack < 2
- }
- LockSupport.unpark(p.waiter);
- return LinkedTransferQueue.<E>cast(item);
- }
- }
- Node n = p.next;
- p = (p != n) ? n : (h = head); // Use head if p offlist
- }
-
- if (how != NOW) { // No matches available
- if (s == null)
- s = new Node(e, haveData);
- Node pred = tryAppend(s, haveData);
- if (pred == null)
- continue retry; // lost race vs opposite mode
- if (how != ASYNC)
- return awaitMatch(s, pred, e, (how == TIMED), nanos);
- }
- return e; // not waiting
- }
- }
-
- /**
- * Tries to append node s as tail.
- *
- * @param s the node to append
- * @param haveData true if appending in data mode
- * @return null on failure due to losing race with append in
- * different mode, else s's predecessor, or s itself if no
- * predecessor
- */
- private Node tryAppend(Node s, boolean haveData) {
- for (Node t = tail, p = t;;) { // move p to last node and append
- Node n, u; // temps for reads of next & tail
- if (p == null && (p = head) == null) {
- if (casHead(null, s))
- return s; // initialize
- }
- else if (p.cannotPrecede(haveData))
- return null; // lost race vs opposite mode
- else if ((n = p.next) != null) // not last; keep traversing
- p = p != t && t != (u = tail) ? (t = u) : // stale tail
- (p != n) ? n : null; // restart if off list
- else if (!p.casNext(null, s))
- p = p.next; // re-read on CAS failure
- else {
- if (p != t) { // update if slack now >= 2
- while ((tail != t || !casTail(t, s)) &&
- (t = tail) != null &&
- (s = t.next) != null && // advance and retry
- (s = s.next) != null && s != t);
- }
- return p;
- }
- }
- }
-
- /**
- * Spins/yields/blocks until node s is matched or caller gives up.
- *
- * @param s the waiting node
- * @param pred the predecessor of s, or s itself if it has no
- * predecessor, or null if unknown (the null case does not occur
- * in any current calls but may in possible future extensions)
- * @param e the comparison value for checking match
- * @param timed if true, wait only until timeout elapses
- * @param nanos timeout in nanosecs, used only if timed is true
- * @return matched item, or e if unmatched on interrupt or timeout
- */
- private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
- long lastTime = timed ? System.nanoTime() : 0L;
- Thread w = Thread.currentThread();
- int spins = -1; // initialized after first item and cancel checks
- ThreadLocalRandom randomYields = null; // bound if needed
-
- for (;;) {
- Object item = s.item;
- if (item != e) { // matched
- // assert item != s;
- s.forgetContents(); // avoid garbage
- return LinkedTransferQueue.<E>cast(item);
- }
- if ((w.isInterrupted() || (timed && nanos <= 0)) &&
- s.casItem(e, s)) { // cancel
- unsplice(pred, s);
- return e;
- }
-
- if (spins < 0) { // establish spins at/near front
- if ((spins = spinsFor(pred, s.isData)) > 0)
- randomYields = ThreadLocalRandom.current();
- }
- else if (spins > 0) { // spin
- --spins;
- if (randomYields.nextInt(CHAINED_SPINS) == 0)
- Thread.yield(); // occasionally yield
- }
- else if (s.waiter == null) {
- s.waiter = w; // request unpark then recheck
- }
- else if (timed) {
- long now = System.nanoTime();
- if ((nanos -= now - lastTime) > 0)
- LockSupport.parkNanos(this, nanos);
- lastTime = now;
- }
- else {
- LockSupport.park(this);
- }
- }
- }
-
- /**
- * Returns spin/yield value for a node with given predecessor and
- * data mode. See above for explanation.
- */
- private static int spinsFor(Node pred, boolean haveData) {
- if (MP && pred != null) {
- if (pred.isData != haveData) // phase change
- return FRONT_SPINS + CHAINED_SPINS;
- if (pred.isMatched()) // probably at front
- return FRONT_SPINS;
- if (pred.waiter == null) // pred apparently spinning
- return CHAINED_SPINS;
- }
- return 0;
- }
-
- /* -------------- Traversal methods -------------- */
-
- /**
- * Returns the successor of p, or the head node if p.next has been
- * linked to self, which will only be true if traversing with a
- * stale pointer that is now off the list.
- */
- final Node succ(Node p) {
- Node next = p.next;
- return (p == next) ? head : next;
- }
-
- /**
- * Returns the first unmatched node of the given mode, or null if
- * none. Used by methods isEmpty, hasWaitingConsumer.
- */
- private Node firstOfMode(boolean isData) {
- for (Node p = head; p != null; p = succ(p)) {
- if (!p.isMatched())
- return (p.isData == isData) ? p : null;
- }
- return null;
- }
-
- /**
- * Returns the item in the first unmatched node with isData; or
- * null if none. Used by peek.
- */
- private E firstDataItem() {
- for (Node p = head; p != null; p = succ(p)) {
- Object item = p.item;
- if (p.isData) {
- if (item != null && item != p)
- return LinkedTransferQueue.<E>cast(item);
- }
- else if (item == null)
- return null;
- }
- return null;
- }
-
- /**
- * Traverses and counts unmatched nodes of the given mode.
- * Used by methods size and getWaitingConsumerCount.
- */
- private int countOfMode(boolean data) {
- int count = 0;
- for (Node p = head; p != null; ) {
- if (!p.isMatched()) {
- if (p.isData != data)
- return 0;
- if (++count == Integer.MAX_VALUE) // saturated
- break;
- }
- Node n = p.next;
- if (n != p)
- p = n;
- else {
- count = 0;
- p = head;
- }
- }
- return count;
- }
-
- @Deprecated
- final class Itr implements Iterator<E> {
- private Node nextNode; // next node to return item for
- private E nextItem; // the corresponding item
- private Node lastRet; // last returned node, to support remove
- private Node lastPred; // predecessor to unlink lastRet
-
- /**
- * Moves to next node after prev, or first node if prev null.
- */
- private void advance(Node prev) {
- /*
- * To track and avoid buildup of deleted nodes in the face
- * of calls to both Queue.remove and Itr.remove, we must
- * include variants of unsplice and sweep upon each
- * advance: Upon Itr.remove, we may need to catch up links
- * from lastPred, and upon other removes, we might need to
- * skip ahead from stale nodes and unsplice deleted ones
- * found while advancing.
- */
-
- Node r, b; // reset lastPred upon possible deletion of lastRet
- if ((r = lastRet) != null && !r.isMatched())
- lastPred = r; // next lastPred is old lastRet
- else if ((b = lastPred) == null || b.isMatched())
- lastPred = null; // at start of list
- else {
- Node s, n; // help with removal of lastPred.next
- while ((s = b.next) != null &&
- s != b && s.isMatched() &&
- (n = s.next) != null && n != s)
- b.casNext(s, n);
- }
-
- this.lastRet = prev;
-
- for (Node p = prev, s, n;;) {
- s = (p == null) ? head : p.next;
- if (s == null)
- break;
- else if (s == p) {
- p = null;
- continue;
- }
- Object item = s.item;
- if (s.isData) {
- if (item != null && item != s) {
- nextItem = LinkedTransferQueue.<E>cast(item);
- nextNode = s;
- return;
- }
- }
- else if (item == null)
- break;
- // assert s.isMatched();
- if (p == null)
- p = s;
- else if ((n = s.next) == null)
- break;
- else if (s == n)
- p = null;
- else
- p.casNext(s, n);
- }
- nextNode = null;
- nextItem = null;
- }
-
- Itr() {
- advance(null);
- }
-
- public final boolean hasNext() {
- return nextNode != null;
- }
-
- public final E next() {
- Node p = nextNode;
- if (p == null) throw new NoSuchElementException();
- E e = nextItem;
- advance(p);
- return e;
- }
-
- public final void remove() {
- final Node lastRet = this.lastRet;
- if (lastRet == null)
- throw new IllegalStateException();
- this.lastRet = null;
- if (lastRet.tryMatchData())
- unsplice(lastPred, lastRet);
- }
- }
-
- /* -------------- Removal methods -------------- */
-
- /**
- * Unsplices (now or later) the given deleted/cancelled node with
- * the given predecessor.
- *
- * @param pred a node that was at one time known to be the
- * predecessor of s, or null or s itself if s is/was at head
- * @param s the node to be unspliced
- */
- final void unsplice(Node pred, Node s) {
- s.forgetContents(); // forget unneeded fields
- /*
- * See above for rationale. Briefly: if pred still points to
- * s, try to unlink s. If s cannot be unlinked, because it is
- * trailing node or pred might be unlinked, and neither pred
- * nor s are head or offlist, add to sweepVotes, and if enough
- * votes have accumulated, sweep.
- */
- if (pred != null && pred != s && pred.next == s) {
- Node n = s.next;
- if (n == null ||
- (n != s && pred.casNext(s, n) && pred.isMatched())) {
- for (;;) { // check if at, or could be, head
- Node h = head;
- if (h == pred || h == s || h == null)
- return; // at head or list empty
- if (!h.isMatched())
- break;
- Node hn = h.next;
- if (hn == null)
- return; // now empty
- if (hn != h && casHead(h, hn))
- h.forgetNext(); // advance head
- }
- if (pred.next != pred && s.next != s) { // recheck if offlist
- for (;;) { // sweep now if enough votes
- int v = sweepVotes;
- if (v < SWEEP_THRESHOLD) {
- if (casSweepVotes(v, v + 1))
- break;
- }
- else if (casSweepVotes(v, 0)) {
- sweep();
- break;
- }
- }
- }
- }
- }
- }
-
- /**
- * Unlinks matched (typically cancelled) nodes encountered in a
- * traversal from head.
- */
- private void sweep() {
- for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
- if (!s.isMatched())
- // Unmatched nodes are never self-linked
- p = s;
- else if ((n = s.next) == null) // trailing node is pinned
- break;
- else if (s == n) // stale
- // No need to also check for p == s, since that implies s == n
- p = head;
- else
- p.casNext(s, n);
- }
- }
-
- /**
- * Main implementation of remove(Object)
- */
- private boolean findAndRemove(Object e) {
- if (e != null) {
- for (Node pred = null, p = head; p != null; ) {
- Object item = p.item;
- if (p.isData) {
- if (item != null && item != p && e.equals(item) &&
- p.tryMatchData()) {
- unsplice(pred, p);
- return true;
- }
- }
- else if (item == null)
- break;
- pred = p;
- if ((p = p.next) == pred) { // stale
- pred = null;
- p = head;
- }
- }
- }
- return false;
- }
-
-
- /**
- * Creates an initially empty {@code LinkedTransferQueue}.
- */
- public LinkedTransferQueue() {
- }
-
- /**
- * Creates a {@code LinkedTransferQueue}
- * initially containing the elements of the given collection,
- * added in traversal order of the collection's iterator.
- *
- * @param c the collection of elements to initially contain
- * @throws NullPointerException if the specified collection or any
- * of its elements are null
- */
- public LinkedTransferQueue(Collection<? extends E> c) {
- this();
- addAll(c);
- }
-
- /**
- * Inserts the specified element at the tail of this queue.
- * As the queue is unbounded, this method will never block.
- *
- * @throws NullPointerException if the specified element is null
- */
- public void put(E e) {
- xfer(e, true, ASYNC, 0);
- }
-
- /**
- * Inserts the specified element at the tail of this queue.
- * As the queue is unbounded, this method will never block or
- * return {@code false}.
- *
- * @return {@code true} (as specified by
- * {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
- * BlockingQueue.offer})
- * @throws NullPointerException if the specified element is null
- */
- public boolean offer(E e, long timeout, TimeUnit unit) {
- xfer(e, true, ASYNC, 0);
- return true;
- }
-
- /**
- * Inserts the specified element at the tail of this queue.
- * As the queue is unbounded, this method will never return {@code false}.
- *
- * @return {@code true} (as specified by {@link Queue#offer})
- * @throws NullPointerException if the specified element is null
- */
- public boolean offer(E e) {
- xfer(e, true, ASYNC, 0);
- return true;
- }
-
- /**
- * Inserts the specified element at the tail of this queue.
- * As the queue is unbounded, this method will never throw
- * {@link IllegalStateException} or return {@code false}.
- *
- * @return {@code true} (as specified by {@link Collection#add})
- * @throws NullPointerException if the specified element is null
- */
- public boolean add(E e) {
- xfer(e, true, ASYNC, 0);
- return true;
- }
-
- /**
- * Transfers the element to a waiting consumer immediately, if possible.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * otherwise returning {@code false} without enqueuing the element.
- *
- * @throws NullPointerException if the specified element is null
- */
- public boolean tryTransfer(E e) {
- return xfer(e, true, NOW, 0) == null;
- }
-
- /**
- * Transfers the element to a consumer, waiting if necessary to do so.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * else inserts the specified element at the tail of this queue
- * and waits until the element is received by a consumer.
- *
- * @throws NullPointerException if the specified element is null
- */
- public void transfer(E e) throws InterruptedException {
- if (xfer(e, true, SYNC, 0) != null) {
- Thread.interrupted(); // failure possible only due to interrupt
- throw new InterruptedException();
- }
- }
-
- /**
- * Transfers the element to a consumer if it is possible to do so
- * before the timeout elapses.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * else inserts the specified element at the tail of this queue
- * and waits until the element is received by a consumer,
- * returning {@code false} if the specified wait time elapses
- * before the element can be transferred.
- *
- * @throws NullPointerException if the specified element is null
- */
- public boolean tryTransfer(E e, long timeout, TimeUnit unit)
- throws InterruptedException {
- if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
- return true;
- if (!Thread.interrupted())
- return false;
- throw new InterruptedException();
- }
-
- public E take() throws InterruptedException {
- E e = xfer(null, false, SYNC, 0);
- if (e != null)
- return e;
- Thread.interrupted();
- throw new InterruptedException();
- }
-
- public E poll(long timeout, TimeUnit unit) throws InterruptedException {
- E e = xfer(null, false, TIMED, unit.toNanos(timeout));
- if (e != null || !Thread.interrupted())
- return e;
- throw new InterruptedException();
- }
-
- public E poll() {
- return xfer(null, false, NOW, 0);
- }
-
- /**
- * @throws NullPointerException {@inheritDoc}
- * @throws IllegalArgumentException {@inheritDoc}
- */
- public int drainTo(Collection<? super E> c) {
- if (c == null)
- throw new NullPointerException();
- if (c == this)
- throw new IllegalArgumentException();
- int n = 0;
- for (E e; (e = poll()) != null;) {
- c.add(e);
- ++n;
- }
- return n;
- }
-
- /**
- * @throws NullPointerException {@inheritDoc}
- * @throws IllegalArgumentException {@inheritDoc}
- */
- public int drainTo(Collection<? super E> c, int maxElements) {
- if (c == null)
- throw new NullPointerException();
- if (c == this)
- throw new IllegalArgumentException();
- int n = 0;
- for (E e; n < maxElements && (e = poll()) != null;) {
- c.add(e);
- ++n;
- }
- return n;
- }
-
- /**
- * Returns an iterator over the elements in this queue in proper sequence.
- * The elements will be returned in order from first (head) to last (tail).
- *
- * <p>The returned iterator is a "weakly consistent" iterator that
- * will never throw {@link java.util.ConcurrentModificationException
- * ConcurrentModificationException}, and guarantees to traverse
- * elements as they existed upon construction of the iterator, and
- * may (but is not guaranteed to) reflect any modifications
- * subsequent to construction.
- *
- * @return an iterator over the elements in this queue in proper sequence
- */
- public Iterator<E> iterator() {
- return new Itr();
- }
-
- public E peek() {
- return firstDataItem();
- }
-
- /**
- * Returns {@code true} if this queue contains no elements.
- *
- * @return {@code true} if this queue contains no elements
- */
- public boolean isEmpty() {
- for (Node p = head; p != null; p = succ(p)) {
- if (!p.isMatched())
- return !p.isData;
- }
- return true;
- }
-
- public boolean hasWaitingConsumer() {
- return firstOfMode(false) != null;
- }
-
- /**
- * Returns the number of elements in this queue. If this queue
- * contains more than {@code Integer.MAX_VALUE} elements, returns
- * {@code Integer.MAX_VALUE}.
- *
- * <p>Beware that, unlike in most collections, this method is
- * <em>NOT</em> a constant-time operation. Because of the
- * asynchronous nature of these queues, determining the current
- * number of elements requires an O(n) traversal.
- *
- * @return the number of elements in this queue
- */
- public int size() {
- return countOfMode(true);
- }
-
- public int getWaitingConsumerCount() {
- return countOfMode(false);
- }
-
- /**
- * Removes a single instance of the specified element from this queue,
- * if it is present. More formally, removes an element {@code e} such
- * that {@code o.equals(e)}, if this queue contains one or more such
- * elements.
- * Returns {@code true} if this queue contained the specified element
- * (or equivalently, if this queue changed as a result of the call).
- *
- * @param o element to be removed from this queue, if present
- * @return {@code true} if this queue changed as a result of the call
- */
- public boolean remove(Object o) {
- return findAndRemove(o);
- }
-
- /**
- * Returns {@code true} if this queue contains the specified element.
- * More formally, returns {@code true} if and only if this queue contains
- * at least one element {@code e} such that {@code o.equals(e)}.
- *
- * @param o object to be checked for containment in this queue
- * @return {@code true} if this queue contains the specified element
- */
- public boolean contains(Object o) {
- if (o == null) return false;
- for (Node p = head; p != null; p = succ(p)) {
- Object item = p.item;
- if (p.isData) {
- if (item != null && item != p && o.equals(item))
- return true;
- }
- else if (item == null)
- break;
- }
- return false;
- }
-
- /**
- * Always returns {@code Integer.MAX_VALUE} because a
- * {@code LinkedTransferQueue} is not capacity constrained.
- *
- * @return {@code Integer.MAX_VALUE} (as specified by
- * {@link java.util.concurrent.BlockingQueue#remainingCapacity()
- * BlockingQueue.remainingCapacity})
- */
- public int remainingCapacity() {
- return Integer.MAX_VALUE;
- }
-
- /**
- * Saves the state to a stream (that is, serializes it).
- *
- * @serialData All of the elements (each an {@code E}) in
- * the proper order, followed by a null
- * @param s the stream
- */
- private void writeObject(java.io.ObjectOutputStream s)
- throws java.io.IOException {
- s.defaultWriteObject();
- for (E e : this)
- s.writeObject(e);
- // Use trailing null as sentinel
- s.writeObject(null);
- }
-
- /**
- * Reconstitutes the Queue instance from a stream (that is,
- * deserializes it).
- *
- * @param s the stream
- */
- private void readObject(java.io.ObjectInputStream s)
- throws java.io.IOException, ClassNotFoundException {
- s.defaultReadObject();
- for (;;) {
- @SuppressWarnings("unchecked")
- E item = (E) s.readObject();
- if (item == null)
- break;
- else
- offer(item);
- }
- }
-
- // Unsafe mechanics
-
- private static final sun.misc.Unsafe UNSAFE;
- private static final long headOffset;
- private static final long tailOffset;
- private static final long sweepVotesOffset;
- static {
- try {
- UNSAFE = getUnsafe();
- Class<?> k = LinkedTransferQueue.class;
- headOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("head"));
- tailOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("tail"));
- sweepVotesOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("sweepVotes"));
- } catch (Exception e) {
- throw new Error(e);
- }
- }
-
- /**
- * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
- * Replace with a simple call to Unsafe.getUnsafe when integrating
- * into a jdk.
- *
- * @return a sun.misc.Unsafe
- */
- static sun.misc.Unsafe getUnsafe() {
- return scala.concurrent.util.Unsafe.instance;
- }
-
-}
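
For reference, a minimal usage sketch of the LinkedTransferQueue removed above, written against the methods shown in this file (take, transfer, and timed tryTransfer); the TransferDemo class name is illustrative only:

    import java.util.concurrent.TimeUnit;
    import scala.concurrent.forkjoin.LinkedTransferQueue;

    public class TransferDemo {
        public static void main(String[] args) throws InterruptedException {
            final LinkedTransferQueue<String> q = new LinkedTransferQueue<String>();

            Thread consumer = new Thread(new Runnable() {
                public void run() {
                    try {
                        // take() blocks until a producer hands over an element
                        System.out.println("received: " + q.take());
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                    }
                }
            });
            consumer.start();

            q.transfer("hello");       // returns only once the consumer has received the element
            consumer.join();

            // with no consumer waiting any more, a timed tryTransfer gives up and returns false
            boolean handedOff = q.tryTransfer("later", 10, TimeUnit.MILLISECONDS);
            System.out.println("handed off: " + handedOff);
        }
    }
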
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
deleted file mode 100644
index f4a77f0f61..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveAction.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-/**
- * A recursive resultless {@link ForkJoinTask}. This class
- * establishes conventions to parameterize resultless actions as
- * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the
- * only valid value of type {@code Void}, methods such as {@code join}
- * always return {@code null} upon completion.
- *
- * <p><b>Sample Usages.</b> Here is a simple but complete ForkJoin
- * sort that sorts a given {@code long[]} array:
- *
- * <pre> {@code
- * static class SortTask extends RecursiveAction {
- * final long[] array; final int lo, hi;
- * SortTask(long[] array, int lo, int hi) {
- * this.array = array; this.lo = lo; this.hi = hi;
- * }
- * SortTask(long[] array) { this(array, 0, array.length); }
- * protected void compute() {
- * if (hi - lo < THRESHOLD)
- * sortSequentially(lo, hi);
- * else {
- * int mid = (lo + hi) >>> 1;
- * invokeAll(new SortTask(array, lo, mid),
- * new SortTask(array, mid, hi));
- * merge(lo, mid, hi);
- * }
- * }
- * // implementation details follow:
- * final static int THRESHOLD = 1000;
- * void sortSequentially(int lo, int hi) {
- * Arrays.sort(array, lo, hi);
- * }
- * void merge(int lo, int mid, int hi) {
- * long[] buf = Arrays.copyOfRange(array, lo, mid);
- * for (int i = 0, j = lo, k = mid; i < buf.length; j++)
- * array[j] = (k == hi || buf[i] < array[k]) ?
- * buf[i++] : array[k++];
- * }
- * }}</pre>
- *
- * You could then sort {@code anArray} by creating {@code new
- * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more
- * concrete simple example, the following task increments each element
- * of an array:
- * <pre> {@code
- * class IncrementTask extends RecursiveAction {
- * final long[] array; final int lo, hi;
- * IncrementTask(long[] array, int lo, int hi) {
- * this.array = array; this.lo = lo; this.hi = hi;
- * }
- * protected void compute() {
- * if (hi - lo < THRESHOLD) {
- * for (int i = lo; i < hi; ++i)
- * array[i]++;
- * }
- * else {
- * int mid = (lo + hi) >>> 1;
- * invokeAll(new IncrementTask(array, lo, mid),
- * new IncrementTask(array, mid, hi));
- * }
- * }
- * }}</pre>
- *
- * <p>The following example illustrates some refinements and idioms
- * that may lead to better performance: RecursiveActions need not be
- * fully recursive, so long as they maintain the basic
- * divide-and-conquer approach. Here is a class that sums the squares
- * of each element of a double array, by subdividing out only the
- * right-hand-sides of repeated divisions by two, and keeping track of
- * them with a chain of {@code next} references. It uses a dynamic
- * threshold based on method {@code getSurplusQueuedTaskCount}, but
- * counterbalances potential excess partitioning by directly
- * performing leaf actions on unstolen tasks rather than further
- * subdividing.
- *
- * <pre> {@code
- * double sumOfSquares(ForkJoinPool pool, double[] array) {
- * int n = array.length;
- * Applyer a = new Applyer(array, 0, n, null);
- * pool.invoke(a);
- * return a.result;
- * }
- *
- * class Applyer extends RecursiveAction {
- * final double[] array;
- * final int lo, hi;
- * double result;
- * Applyer next; // keeps track of right-hand-side tasks
- * Applyer(double[] array, int lo, int hi, Applyer next) {
- * this.array = array; this.lo = lo; this.hi = hi;
- * this.next = next;
- * }
- *
- * double atLeaf(int l, int h) {
- * double sum = 0;
- * for (int i = l; i < h; ++i) // perform leftmost base step
- * sum += array[i] * array[i];
- * return sum;
- * }
- *
- * protected void compute() {
- * int l = lo;
- * int h = hi;
- * Applyer right = null;
- * while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
- * int mid = (l + h) >>> 1;
- * right = new Applyer(array, mid, h, right);
- * right.fork();
- * h = mid;
- * }
- * double sum = atLeaf(l, h);
- * while (right != null) {
- * if (right.tryUnfork()) // directly calculate if not stolen
- * sum += right.atLeaf(right.lo, right.hi);
- * else {
- * right.join();
- * sum += right.result;
- * }
- * right = right.next;
- * }
- * result = sum;
- * }
- * }}</pre>
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public abstract class RecursiveAction extends ForkJoinTask<Void> {
- private static final long serialVersionUID = 5232453952276485070L;
-
- /**
- * The main computation performed by this task.
- */
- protected abstract void compute();
-
- /**
- * Always returns {@code null}.
- *
- * @return {@code null} always
- */
- public final Void getRawResult() { return null; }
-
- /**
- * Requires null completion value.
- */
- protected final void setRawResult(Void mustBeNull) { }
-
- /**
- * Implements execution conventions for RecursiveActions.
- */
- protected final boolean exec() {
- compute();
- return true;
- }
-
-}
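
The javadoc above says a SortTask is run by "invoking it in a ForkJoinPool"; a minimal sketch of that invocation, assuming the SortTask class from the sample above:

    import scala.concurrent.forkjoin.ForkJoinPool;

    static void parallelSort(long[] anArray) {
        ForkJoinPool pool = new ForkJoinPool(); // by default, roughly one worker per available processor
        pool.invoke(new SortTask(anArray));     // blocks the caller until the whole array is sorted
    }
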
diff --git a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java b/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
deleted file mode 100644
index 097b7cda1f..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/RecursiveTask.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-/**
- * A recursive result-bearing {@link ForkJoinTask}.
- *
- * <p>For a classic example, here is a task computing Fibonacci numbers:
- *
- * <pre> {@code
- * class Fibonacci extends RecursiveTask<Integer> {
- * final int n;
- * Fibonacci(int n) { this.n = n; }
- * Integer compute() {
- * if (n <= 1)
- * return n;
- * Fibonacci f1 = new Fibonacci(n - 1);
- * f1.fork();
- * Fibonacci f2 = new Fibonacci(n - 2);
- * return f2.compute() + f1.join();
- * }
- * }}</pre>
- *
- * However, besides being a dumb way to compute Fibonacci functions
- * (there is a simple fast linear algorithm that you'd use in
- * practice), this is likely to perform poorly because the smallest
- * subtasks are too small to be worthwhile splitting up. Instead, as
- * is the case for nearly all fork/join applications, you'd pick some
- * minimum granularity size (for example 10 here) for which you always
- * sequentially solve rather than subdividing.
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public abstract class RecursiveTask<V> extends ForkJoinTask<V> {
- private static final long serialVersionUID = 5232453952276485270L;
-
- /**
- * The result of the computation.
- */
- V result;
-
- /**
- * The main computation performed by this task.
- */
- protected abstract V compute();
-
- public final V getRawResult() {
- return result;
- }
-
- protected final void setRawResult(V value) {
- result = value;
- }
-
- /**
- * Implements execution conventions for RecursiveTask.
- */
- protected final boolean exec() {
- result = compute();
- return true;
- }
-
-}
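
The javadoc above recommends picking a minimum granularity (for example 10) below which the task solves sequentially rather than subdividing; a sketch of the Fibonacci example rewritten that way (THRESHOLD and the seqFib helper are illustrative):

    class Fibonacci extends RecursiveTask<Integer> {
        static final int THRESHOLD = 10;
        final int n;
        Fibonacci(int n) { this.n = n; }

        protected Integer compute() {
            if (n <= THRESHOLD)
                return seqFib(n);               // below the cutoff: solve sequentially
            Fibonacci f1 = new Fibonacci(n - 1);
            f1.fork();                          // run one subtask asynchronously
            Fibonacci f2 = new Fibonacci(n - 2);
            return f2.compute() + f1.join();    // compute the other directly, then join
        }

        static int seqFib(int n) {              // simple iterative base case
            int a = 0, b = 1;
            for (int i = 0; i < n; i++) { int t = a + b; a = b; b = t; }
            return a;
        }
    }
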
diff --git a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java b/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
deleted file mode 100644
index 3ea1af66bc..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/ThreadLocalRandom.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-
-import java.util.Random;
-
-/**
- * A random number generator isolated to the current thread. Like the
- * global {@link java.util.Random} generator used by the {@link
- * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized
- * with an internally generated seed that may not otherwise be
- * modified. When applicable, use of {@code ThreadLocalRandom} rather
- * than shared {@code Random} objects in concurrent programs will
- * typically encounter much less overhead and contention. Use of
- * {@code ThreadLocalRandom} is particularly appropriate when multiple
- * tasks (for example, each a {@link ForkJoinTask}) use random numbers
- * in parallel in thread pools.
- *
- * <p>Usages of this class should typically be of the form:
- * {@code ThreadLocalRandom.current().nextX(...)} (where
- * {@code X} is {@code Int}, {@code Long}, etc).
- * When all usages are of this form, it is never possible to
- * accidentally share a {@code ThreadLocalRandom} across multiple threads.
- *
- * <p>This class also provides additional commonly used bounded random
- * generation methods.
- *
- * @since 1.7
- * @author Doug Lea
- */
-@Deprecated
-public class ThreadLocalRandom extends Random {
- // same constants as Random, but must be redeclared because private
- private static final long multiplier = 0x5DEECE66DL;
- private static final long addend = 0xBL;
- private static final long mask = (1L << 48) - 1;
-
- /**
- * The random seed. We can't use super.seed.
- */
- private long rnd;
-
- /**
- * Initialization flag to permit calls to setSeed to succeed only
- * while executing the Random constructor. We can't allow others
- * since it would cause setting seed in one part of a program to
- * unintentionally impact other usages by the thread.
- */
- boolean initialized;
-
- // Padding to help avoid memory contention among seed updates in
- // different TLRs in the common case that they are located near
- // each other.
- private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
-
- /**
- * The actual ThreadLocal
- */
- private static final ThreadLocal<ThreadLocalRandom> localRandom =
- new ThreadLocal<ThreadLocalRandom>() {
- protected ThreadLocalRandom initialValue() {
- return new ThreadLocalRandom();
- }
- };
-
-
- /**
- * Constructor called only by localRandom.initialValue.
- */
- ThreadLocalRandom() {
- super();
- initialized = true;
- }
-
- /**
- * Returns the current thread's {@code ThreadLocalRandom}.
- *
- * @return the current thread's {@code ThreadLocalRandom}
- */
- @Deprecated
- public static ThreadLocalRandom current() {
- return localRandom.get();
- }
-
- /**
- * Throws {@code UnsupportedOperationException}. Setting seeds in
- * this generator is not supported.
- *
- * @throws UnsupportedOperationException always
- */
- public void setSeed(long seed) {
- if (initialized)
- throw new UnsupportedOperationException();
- rnd = (seed ^ multiplier) & mask;
- }
-
- protected int next(int bits) {
- rnd = (rnd * multiplier + addend) & mask;
- return (int) (rnd >>> (48-bits));
- }
-
- /**
- * Returns a pseudorandom, uniformly distributed value between the
- * given least value (inclusive) and bound (exclusive).
- *
- * @param least the least value returned
- * @param bound the upper bound (exclusive)
- * @throws IllegalArgumentException if least is greater than or equal
- * to bound
- * @return the next value
- */
- public int nextInt(int least, int bound) {
- if (least >= bound)
- throw new IllegalArgumentException();
- return nextInt(bound - least) + least;
- }
-
- /**
- * Returns a pseudorandom, uniformly distributed value
- * between 0 (inclusive) and the specified value (exclusive).
- *
- * @param n the bound on the random number to be returned. Must be
- * positive.
- * @return the next value
- * @throws IllegalArgumentException if n is not positive
- */
- public long nextLong(long n) {
- if (n <= 0)
- throw new IllegalArgumentException("n must be positive");
- // Divide n by two until small enough for nextInt. On each
- // iteration (at most 31 of them but usually much less),
- // randomly choose both whether to include high bit in result
- // (offset) and whether to continue with the lower vs upper
- // half (which makes a difference only if odd).
- long offset = 0;
- while (n >= Integer.MAX_VALUE) {
- int bits = next(2);
- long half = n >>> 1;
- long nextn = ((bits & 2) == 0) ? half : n - half;
- if ((bits & 1) == 0)
- offset += n - nextn;
- n = nextn;
- }
- return offset + nextInt((int) n);
- }
-
- /**
- * Returns a pseudorandom, uniformly distributed value between the
- * given least value (inclusive) and bound (exclusive).
- *
- * @param least the least value returned
- * @param bound the upper bound (exclusive)
- * @return the next value
- * @throws IllegalArgumentException if least is greater than or equal
- * to bound
- */
- public long nextLong(long least, long bound) {
- if (least >= bound)
- throw new IllegalArgumentException();
- return nextLong(bound - least) + least;
- }
-
- /**
- * Returns a pseudorandom, uniformly distributed {@code double} value
- * between 0 (inclusive) and the specified value (exclusive).
- *
- * @param n the bound on the random number to be returned. Must be
- * positive.
- * @return the next value
- * @throws IllegalArgumentException if n is not positive
- */
- public double nextDouble(double n) {
- if (n <= 0)
- throw new IllegalArgumentException("n must be positive");
- return nextDouble() * n;
- }
-
- /**
- * Returns a pseudorandom, uniformly distributed value between the
- * given least value (inclusive) and bound (exclusive).
- *
- * @param least the least value returned
- * @param bound the upper bound (exclusive)
- * @return the next value
- * @throws IllegalArgumentException if least is greater than or equal
- * to bound
- */
- public double nextDouble(double least, double bound) {
- if (least >= bound)
- throw new IllegalArgumentException();
- return nextDouble() * (bound - least) + least;
- }
-
- private static final long serialVersionUID = -5851777807851030925L;
-}
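
A short usage sketch of the ThreadLocalRandom.current().nextX(...) pattern described above; the bounds are illustrative (least is inclusive, bound exclusive):

    import scala.concurrent.forkjoin.ThreadLocalRandom;

    static void example() {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();  // per-thread instance, never shared
        int die       = rnd.nextInt(1, 7);          // uniform in [1, 7), i.e. a value from 1 to 6
        long ticket   = rnd.nextLong(1000000L);     // uniform in [0, 1000000)
        double weight = rnd.nextDouble(0.5, 2.0);   // uniform in [0.5, 2.0)
        System.out.println(die + " " + ticket + " " + weight);
    }
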
diff --git a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java b/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
deleted file mode 100644
index 4fcd8ea601..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/TransferQueue.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package scala.concurrent.forkjoin;
-import java.util.concurrent.*;
-
-/**
- * A {@link BlockingQueue} in which producers may wait for consumers
- * to receive elements. A {@code TransferQueue} may be useful for
- * example in message passing applications in which producers
- * sometimes (using method {@link #transfer}) await receipt of
- * elements by consumers invoking {@code take} or {@code poll}, while
- * at other times enqueue elements (via method {@code put}) without
- * waiting for receipt.
- * {@linkplain #tryTransfer(Object) Non-blocking} and
- * {@linkplain #tryTransfer(Object,long,TimeUnit) time-out} versions of
- * {@code tryTransfer} are also available.
- * A {@code TransferQueue} may also be queried, via {@link
- * #hasWaitingConsumer}, whether there are any threads waiting for
- * items, which is a converse analogy to a {@code peek} operation.
- *
- * <p>Like other blocking queues, a {@code TransferQueue} may be
- * capacity bounded. If so, an attempted transfer operation may
- * initially block waiting for available space, and/or subsequently
- * block waiting for reception by a consumer. Note that in a queue
- * with zero capacity, such as {@link SynchronousQueue}, {@code put}
- * and {@code transfer} are effectively synonymous.
- *
- * <p>This interface is a member of the
- * <a href="{@docRoot}/../technotes/guides/collections/index.html">
- * Java Collections Framework</a>.
- *
- * @since 1.7
- * @author Doug Lea
- * @param <E> the type of elements held in this collection
- */
-@Deprecated
-public interface TransferQueue<E> extends BlockingQueue<E> {
- /**
- * Transfers the element to a waiting consumer immediately, if possible.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * otherwise returning {@code false} without enqueuing the element.
- *
- * @param e the element to transfer
- * @return {@code true} if the element was transferred, else
- * {@code false}
- * @throws ClassCastException if the class of the specified element
- * prevents it from being added to this queue
- * @throws NullPointerException if the specified element is null
- * @throws IllegalArgumentException if some property of the specified
- * element prevents it from being added to this queue
- */
- boolean tryTransfer(E e);
-
- /**
- * Transfers the element to a consumer, waiting if necessary to do so.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * else waits until the element is received by a consumer.
- *
- * @param e the element to transfer
- * @throws InterruptedException if interrupted while waiting,
- * in which case the element is not left enqueued
- * @throws ClassCastException if the class of the specified element
- * prevents it from being added to this queue
- * @throws NullPointerException if the specified element is null
- * @throws IllegalArgumentException if some property of the specified
- * element prevents it from being added to this queue
- */
- void transfer(E e) throws InterruptedException;
-
- /**
- * Transfers the element to a consumer if it is possible to do so
- * before the timeout elapses.
- *
- * <p>More precisely, transfers the specified element immediately
- * if there exists a consumer already waiting to receive it (in
- * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
- * else waits until the element is received by a consumer,
- * returning {@code false} if the specified wait time elapses
- * before the element can be transferred.
- *
- * @param e the element to transfer
- * @param timeout how long to wait before giving up, in units of
- * {@code unit}
- * @param unit a {@code TimeUnit} determining how to interpret the
- * {@code timeout} parameter
- * @return {@code true} if successful, or {@code false} if
- * the specified waiting time elapses before completion,
- * in which case the element is not left enqueued
- * @throws InterruptedException if interrupted while waiting,
- * in which case the element is not left enqueued
- * @throws ClassCastException if the class of the specified element
- * prevents it from being added to this queue
- * @throws NullPointerException if the specified element is null
- * @throws IllegalArgumentException if some property of the specified
- * element prevents it from being added to this queue
- */
- boolean tryTransfer(E e, long timeout, TimeUnit unit)
- throws InterruptedException;
-
- /**
- * Returns {@code true} if there is at least one consumer waiting
- * to receive an element via {@link #take} or
- * timed {@link #poll(long,TimeUnit) poll}.
- * The return value represents a momentary state of affairs.
- *
- * @return {@code true} if there is at least one waiting consumer
- */
- boolean hasWaitingConsumer();
-
- /**
- * Returns an estimate of the number of consumers waiting to
- * receive elements via {@link #take} or timed
- * {@link #poll(long,TimeUnit) poll}. The return value is an
- * approximation of a momentary state of affairs, that may be
- * inaccurate if consumers have completed or given up waiting.
- * The value may be useful for monitoring and heuristics, but
- * not for synchronization control. Implementations of this
- * method are likely to be noticeably slower than those for
- * {@link #hasWaitingConsumer}.
- *
- * @return the number of consumers waiting to receive elements
- */
- int getWaitingConsumerCount();
-}
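
A sketch of the producer-side choice this interface describes: transfer (or a timed tryTransfer) hands the element to a waiting consumer, while put enqueues without waiting; hasWaitingConsumer can guide the choice, though it is only a momentary hint. The publish helper, element type, and timeout are illustrative:

    import java.util.concurrent.TimeUnit;
    import scala.concurrent.forkjoin.TransferQueue;

    static void publish(TransferQueue<String> q, String msg) throws InterruptedException {
        if (q.hasWaitingConsumer()) {
            // a consumer is (probably) parked in take()/poll(): hand over directly;
            // if it left in the meantime, transfer simply waits for the next one
            q.transfer(msg);
        } else if (!q.tryTransfer(msg, 50, TimeUnit.MILLISECONDS)) {
            q.put(msg);                         // nobody arrived within the timeout: enqueue without waiting
        }
    }
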
diff --git a/src/forkjoin/scala/concurrent/forkjoin/package-info.java b/src/forkjoin/scala/concurrent/forkjoin/package-info.java
deleted file mode 100644
index 3561b9b44a..0000000000
--- a/src/forkjoin/scala/concurrent/forkjoin/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-
-/**
- * Preview versions of classes targeted for Java 7. Includes a
- * fine-grained parallel computation framework: ForkJoinTasks and
- * their related support classes provide a very efficient basis for
- * obtaining platform-independent parallel speed-ups of
- * computation-intensive operations. They are not a full substitute
- * for the kinds of arbitrary processing supported by Executors or
- * Threads. However, when applicable, they typically provide
- * significantly greater performance on multiprocessor platforms.
- *
- * <p>Candidates for fork/join processing mainly include those that
- * can be expressed using parallel divide-and-conquer techniques: To
- * solve a problem, break it in two (or more) parts, and then solve
- * those parts in parallel, continuing on in this way until the
- * problem is too small to be broken up, so is solved directly. The
- * underlying <em>work-stealing</em> framework makes subtasks
- * available to other threads (normally one per CPU), that help
- * complete the tasks. In general, the most efficient ForkJoinTasks
- * are those that directly implement this algorithmic design pattern.
- */
-package scala.concurrent.forkjoin;
diff --git a/src/forkjoin/scala/concurrent/util/Unsafe.java b/src/forkjoin/scala/concurrent/util/Unsafe.java
deleted file mode 100644
index d82e4bbdd5..0000000000
--- a/src/forkjoin/scala/concurrent/util/Unsafe.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/* __ *\
-** ________ ___ / / ___ Scala API **
-** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
-** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
-** /____/\___/_/ |_/____/_/ | | **
-** |/ **
-\* */
-
-package scala.concurrent.util;
-import java.lang.reflect.Field;
-
-
-@Deprecated
-public final class Unsafe {
- @Deprecated
- public final static sun.misc.Unsafe instance;
- static {
- try {
- sun.misc.Unsafe found = null;
- for(Field field : sun.misc.Unsafe.class.getDeclaredFields()) {
- if (field.getType() == sun.misc.Unsafe.class) {
- field.setAccessible(true);
- found = (sun.misc.Unsafe) field.get(null);
- break;
- }
- }
- if (found == null) throw new IllegalStateException("Can't find instance of sun.misc.Unsafe");
- else instance = found;
- } catch(Throwable t) {
- throw new ExceptionInInitializerError(t);
- }
- }
-}
diff --git a/src/intellij/forkjoin.iml.SAMPLE b/src/intellij/forkjoin.iml.SAMPLE
deleted file mode 100644
index 42507b2911..0000000000
--- a/src/intellij/forkjoin.iml.SAMPLE
+++ /dev/null
@@ -1,11 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="JAVA_MODULE" version="4">
- <component name="NewModuleRootManager" inherit-compiler-output="true">
- <exclude-output />
- <content url="file://$MODULE_DIR$/../forkjoin">
- <sourceFolder url="file://$MODULE_DIR$/../forkjoin" isTestSource="false" />
- </content>
- <orderEntry type="inheritedJdk" />
- <orderEntry type="sourceFolder" forTests="false" />
- </component>
-</module> \ No newline at end of file
diff --git a/src/intellij/interactive.iml.SAMPLE b/src/intellij/interactive.iml.SAMPLE
index 047b5c9069..267bd3f12b 100644
--- a/src/intellij/interactive.iml.SAMPLE
+++ b/src/intellij/interactive.iml.SAMPLE
@@ -11,5 +11,6 @@
<orderEntry type="module" module-name="library" />
<orderEntry type="module" module-name="reflect" />
<orderEntry type="library" name="starr" level="project" />
+ <orderEntry type="library" name="asm" level="project" />
</component>
</module> \ No newline at end of file
diff --git a/src/intellij/library.iml.SAMPLE b/src/intellij/library.iml.SAMPLE
index b03fef9414..8ceb9dd3f1 100644
--- a/src/intellij/library.iml.SAMPLE
+++ b/src/intellij/library.iml.SAMPLE
@@ -8,7 +8,6 @@
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
- <orderEntry type="module" module-name="forkjoin" />
<orderEntry type="library" name="starr" level="project" />
</component>
</module> \ No newline at end of file
diff --git a/src/intellij/repl.iml.SAMPLE b/src/intellij/repl.iml.SAMPLE
index 7476f30131..e827a2c6d7 100644
--- a/src/intellij/repl.iml.SAMPLE
+++ b/src/intellij/repl.iml.SAMPLE
@@ -10,6 +10,7 @@
<orderEntry type="module" module-name="compiler" />
<orderEntry type="module" module-name="library" />
<orderEntry type="module" module-name="reflect" />
+ <orderEntry type="module" module-name="interactive" />
<orderEntry type="library" name="starr" level="project" />
<orderEntry type="library" name="repl-deps" level="project" />
<orderEntry type="library" name="asm" level="project" />
diff --git a/src/intellij/scala.ipr.SAMPLE b/src/intellij/scala.ipr.SAMPLE
index 290d53aa5d..3e6e074717 100644
--- a/src/intellij/scala.ipr.SAMPLE
+++ b/src/intellij/scala.ipr.SAMPLE
@@ -36,7 +36,6 @@
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/compiler.iml" filepath="$PROJECT_DIR$/compiler.iml" />
- <module fileurl="file://$PROJECT_DIR$/forkjoin.iml" filepath="$PROJECT_DIR$/forkjoin.iml" />
<module fileurl="file://$PROJECT_DIR$/interactive.iml" filepath="$PROJECT_DIR$/interactive.iml" />
<module fileurl="file://$PROJECT_DIR$/library.iml" filepath="$PROJECT_DIR$/library.iml" />
<module fileurl="file://$PROJECT_DIR$/manual.iml" filepath="$PROJECT_DIR$/manual.iml" />
diff --git a/src/intellij/scaladoc.iml.SAMPLE b/src/intellij/scaladoc.iml.SAMPLE
index 4ba0a848c6..6e6d98b396 100644
--- a/src/intellij/scaladoc.iml.SAMPLE
+++ b/src/intellij/scaladoc.iml.SAMPLE
@@ -14,5 +14,6 @@
<orderEntry type="library" name="scaladoc-deps" level="project" />
<orderEntry type="library" name="ant" level="project" />
<orderEntry type="library" name="partest" level="project" />
+ <orderEntry type="library" name="asm" level="project" />
</component>
</module> \ No newline at end of file
diff --git a/src/intellij/test-junit.iml.SAMPLE b/src/intellij/test-junit.iml.SAMPLE
index 8252ef6d98..326c9813aa 100644
--- a/src/intellij/test-junit.iml.SAMPLE
+++ b/src/intellij/test-junit.iml.SAMPLE
@@ -8,7 +8,6 @@
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module" module-name="compiler" />
- <orderEntry type="module" module-name="forkjoin" />
<orderEntry type="module" module-name="library" />
<orderEntry type="module" module-name="partest-extras" />
<orderEntry type="module" module-name="reflect" />
diff --git a/src/intellij/test.iml.SAMPLE b/src/intellij/test.iml.SAMPLE
index e7eb7576c3..d1f2975fbf 100644
--- a/src/intellij/test.iml.SAMPLE
+++ b/src/intellij/test.iml.SAMPLE
@@ -8,7 +8,6 @@
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module" module-name="compiler" />
- <orderEntry type="module" module-name="forkjoin" />
<orderEntry type="module" module-name="interactive" />
<orderEntry type="module" module-name="library" />
<orderEntry type="module" module-name="partest-extras" />
diff --git a/src/library-aux/scala/Any.scala b/src/library-aux/scala/Any.scala
index 1c25989c30..1be186d114 100644
--- a/src/library-aux/scala/Any.scala
+++ b/src/library-aux/scala/Any.scala
@@ -38,7 +38,7 @@ abstract class Any {
* - It is reflexive: for any instance `x` of type `Any`, `x.equals(x)` should return `true`.
* - It is symmetric: for any instances `x` and `y` of type `Any`, `x.equals(y)` should return `true` if and
* only if `y.equals(x)` returns `true`.
- * - It is transitive: for any instances `x`, `y`, and `z` of type `AnyRef` if `x.equals(y)` returns `true` and
+ * - It is transitive: for any instances `x`, `y`, and `z` of type `Any` if `x.equals(y)` returns `true` and
* `y.equals(z)` returns `true`, then `x.equals(z)` should return `true`.
*
* If you override this method, you should verify that your implementation remains an equivalence relation.
diff --git a/src/library-aux/scala/AnyRef.scala b/src/library-aux/scala/AnyRef.scala
index 8c1862e729..7217499da7 100644
--- a/src/library-aux/scala/AnyRef.scala
+++ b/src/library-aux/scala/AnyRef.scala
@@ -45,7 +45,7 @@ trait AnyRef extends Any {
*/
def synchronized[T](body: => T): T
- /** Tests whether the argument (`arg0`) is a reference to the receiver object (`this`).
+ /** Tests whether the argument (`that`) is a reference to the receiver object (`this`).
*
* The `eq` method implements an [[http://en.wikipedia.org/wiki/Equivalence_relation equivalence relation]] on
* non-null instances of `AnyRef`, and has three additional properties:
@@ -73,7 +73,7 @@ trait AnyRef extends Any {
/** The expression `x == that` is equivalent to `if (x eq null) that eq null else x.equals(that)`.
*
- * @param arg0 the object to compare against this object for equality.
+ * @param that the object to compare against this object for equality.
* @return `true` if the receiver object is equivalent to the argument; `false` otherwise.
*/
final def ==(that: Any): Boolean =
diff --git a/src/library/scala/Predef.scala b/src/library/scala/Predef.scala
index 012e7afd74..334377e838 100644
--- a/src/library/scala/Predef.scala
+++ b/src/library/scala/Predef.scala
@@ -56,7 +56,7 @@ import scala.io.StdIn
* only contain natural numbers (i.e. non-negative), and that the result returned
* will also be natural. `require` is distinct from `assert` in that if the
* condition fails, then the caller of the function is to blame rather than a
- * logical error having been made within `addNaturals` itself. `ensures` is a
+ * logical error having been made within `addNaturals` itself. `ensuring` is a
* form of `assert` that declares the guarantee the function is providing with
* regards to its return value.
*
@@ -82,6 +82,11 @@ object Predef extends LowPriorityImplicits with DeprecatedPredef {
*/
def classOf[T]: Class[T] = null // This is a stub method. The actual implementation is filled in by the compiler.
+ /** The `String` type in Scala has methods that come either from the underlying
+ * Java String (see the documentation corresponding to your Java version, for
+ * example [[http://docs.oracle.com/javase/8/docs/api/java/lang/String.html]]) or
+ * are added implicitly through [[scala.collection.immutable.StringOps]].
+ */
type String = java.lang.String
type Class[T] = java.lang.Class[T]
@@ -264,13 +269,6 @@ object Predef extends LowPriorityImplicits with DeprecatedPredef {
@inline def formatted(fmtstr: String): String = fmtstr format self
}
- // TODO: remove, only needed for binary compatibility of 2.11.0-RC1 with 2.11.0-M8
- // note that `private[scala]` becomes `public` in bytecode
- private[scala] final class StringAdd[A](private val self: A) extends AnyVal {
- def +(other: String): String = String.valueOf(self) + other
- }
- private[scala] def StringAdd(x: Any): Any = new StringAdd(x)
-
// SI-8229 retaining the pre 2.11 name for source compatibility in shadowing this implicit
implicit final class any2stringadd[A](private val self: A) extends AnyVal {
def +(other: String): String = String.valueOf(self) + other
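The documentation added above for `Predef.String` notes that `String` gains collection-like operations implicitly through `scala.collection.immutable.StringOps`. A small illustrative sketch of what that means in practice (ordinary standard-library calls, shown for clarity only):

    // Methods of java.lang.String are available directly:
    "scala".toUpperCase          // "SCALA"
    // Collection-style operations come from the implicit StringOps wrapper:
    "scala".reverse              // "alacs"
    "scala".filter(_ != 'a')     // "scl"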
diff --git a/src/library/scala/StringContext.scala b/src/library/scala/StringContext.scala
index e60fa2f290..69533c12da 100644
--- a/src/library/scala/StringContext.scala
+++ b/src/library/scala/StringContext.scala
@@ -173,7 +173,7 @@ object StringContext {
/** An exception that is thrown if a string contains a backslash (`\`) character
* that does not start a valid escape sequence.
* @param str The offending string
- * @param idx The index of the offending backslash character in `str`.
+ * @param index The index of the offending backslash character in `str`.
*/
class InvalidEscapeException(str: String, @deprecatedName('idx) val index: Int) extends IllegalArgumentException(
s"""invalid escape ${
diff --git a/src/library/scala/collection/GenSeqLike.scala b/src/library/scala/collection/GenSeqLike.scala
index f786293822..be1da1660a 100644
--- a/src/library/scala/collection/GenSeqLike.scala
+++ b/src/library/scala/collection/GenSeqLike.scala
@@ -274,7 +274,7 @@ trait GenSeqLike[+A, +Repr] extends Any with GenIterableLike[A, Repr] with Equal
* @tparam B the element type of the returned $coll.
* @tparam That $thatinfo
* @param bf $bfinfo
- * @return a new $coll` which is a copy of this $coll with the element at position `index` replaced by `elem`.
+ * @return a new $coll which is a copy of this $coll with the element at position `index` replaced by `elem`.
* @throws IndexOutOfBoundsException if `index` does not satisfy `0 <= index < length`.
*
* @usecase def updated(index: Int, elem: A): $Coll[A]
diff --git a/src/library/scala/collection/LinearSeqOptimized.scala b/src/library/scala/collection/LinearSeqOptimized.scala
index 9c336e8e31..571d58a3f3 100755
--- a/src/library/scala/collection/LinearSeqOptimized.scala
+++ b/src/library/scala/collection/LinearSeqOptimized.scala
@@ -117,25 +117,25 @@ trait LinearSeqOptimized[+A, +Repr <: LinearSeqOptimized[A, Repr]] extends Linea
}
override /*TraversableLike*/
- def foldLeft[B](z: B)(f: (B, A) => B): B = {
+ def foldLeft[B](z: B)(@deprecatedName('f) op: (B, A) => B): B = {
var acc = z
var these = this
while (!these.isEmpty) {
- acc = f(acc, these.head)
+ acc = op(acc, these.head)
these = these.tail
}
acc
}
override /*IterableLike*/
- def foldRight[B](z: B)(f: (A, B) => B): B =
+ def foldRight[B](z: B)(@deprecatedName('f) op: (A, B) => B): B =
if (this.isEmpty) z
- else f(head, tail.foldRight(z)(f))
+ else op(head, tail.foldRight(z)(op))
override /*TraversableLike*/
- def reduceLeft[B >: A](f: (B, A) => B): B =
+ def reduceLeft[B >: A](@deprecatedName('f) op: (B, A) => B): B =
if (isEmpty) throw new UnsupportedOperationException("empty.reduceLeft")
- else tail.foldLeft[B](head)(f)
+ else tail.foldLeft[B](head)(op)
override /*IterableLike*/
def reduceRight[B >: A](op: (A, B) => B): B =
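The renames above keep source compatibility for callers that passed the folding function by name: `@deprecatedName('f)` lets `f = ...` named arguments keep compiling, with a deprecation warning, while the parameter is now called `op`. A self-contained sketch of the mechanism (the `fold` method below is purely illustrative, not library code):

    // Illustrative only: how @deprecatedName preserves named-argument call sites.
    def fold(z: Int)(@deprecatedName('f) op: (Int, Int) => Int): Int = op(z, 1)

    fold(0)(op = _ + _)   // preferred parameter name
    fold(0)(f  = _ + _)   // still compiles, emits a deprecation warning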
diff --git a/src/library/scala/collection/concurrent/TrieMap.scala b/src/library/scala/collection/concurrent/TrieMap.scala
index bcfea7a463..74e0e0f7d2 100644
--- a/src/library/scala/collection/concurrent/TrieMap.scala
+++ b/src/library/scala/collection/concurrent/TrieMap.scala
@@ -471,7 +471,7 @@ private[collection] final class CNode[K, V](val bitmap: Int, val array: Array[Ba
val offset =
if (array.length > 0)
//util.Random.nextInt(array.length) /* <-- benchmarks show that this causes observable contention */
- scala.concurrent.forkjoin.ThreadLocalRandom.current.nextInt(0, array.length)
+ java.util.concurrent.ThreadLocalRandom.current.nextInt(0, array.length)
else 0
while (i < array.length) {
val pos = (i + offset) % array.length
diff --git a/src/library/scala/collection/convert/Wrappers.scala b/src/library/scala/collection/convert/Wrappers.scala
index 9f9732c62f..3edc5ba1b4 100644
--- a/src/library/scala/collection/convert/Wrappers.scala
+++ b/src/library/scala/collection/convert/Wrappers.scala
@@ -102,9 +102,9 @@ private[collection] trait Wrappers {
override def clone(): JListWrapper[A] = JListWrapper(new ju.ArrayList[A](underlying))
}
- // Note various overrides to avoid performance gotchas.
- class SetWrapper[A](underlying: Set[A]) extends ju.AbstractSet[A] {
- self =>
+ @SerialVersionUID(1L)
+ class SetWrapper[A](underlying: Set[A]) extends ju.AbstractSet[A] with Serializable { self =>
+ // Note various overrides to avoid performance gotchas.
override def contains(o: Object): Boolean = {
try { underlying.contains(o.asInstanceOf[A]) }
catch { case cce: ClassCastException => false }
@@ -165,7 +165,8 @@ private[collection] trait Wrappers {
new JSetWrapper[A](new ju.LinkedHashSet[A](underlying))
}
- class MapWrapper[A, B](underlying: Map[A, B]) extends ju.AbstractMap[A, B] { self =>
+ @SerialVersionUID(1L)
+ class MapWrapper[A, B](underlying: Map[A, B]) extends ju.AbstractMap[A, B] with Serializable { self =>
override def size = underlying.size
override def get(key: AnyRef): B = try {
@@ -265,17 +266,11 @@ private[collection] trait Wrappers {
def +=(kv: (A, B)): this.type = { underlying.put(kv._1, kv._2); this }
def -=(key: A): this.type = { underlying remove key; this }
- override def put(k: A, v: B): Option[B] = {
- val r = underlying.put(k, v)
- if (r != null) Some(r) else None
- }
+ override def put(k: A, v: B): Option[B] = Option(underlying.put(k, v))
override def update(k: A, v: B) { underlying.put(k, v) }
- override def remove(k: A): Option[B] = {
- val r = underlying remove k
- if (r != null) Some(r) else None
- }
+ override def remove(k: A): Option[B] = Option(underlying remove k)
def iterator: Iterator[(A, B)] = new AbstractIterator[(A, B)] {
val ui = underlying.entrySet.iterator
@@ -326,25 +321,15 @@ private[collection] trait Wrappers {
* are not guaranteed to be atomic.
*/
case class JConcurrentMapWrapper[A, B](underlying: juc.ConcurrentMap[A, B]) extends mutable.AbstractMap[A, B] with JMapWrapperLike[A, B, JConcurrentMapWrapper[A, B]] with concurrent.Map[A, B] {
- override def get(k: A) = {
- val v = underlying get k
- if (v != null) Some(v)
- else None
- }
+ override def get(k: A) = Option(underlying get k)
override def empty = new JConcurrentMapWrapper(new juc.ConcurrentHashMap[A, B])
- def putIfAbsent(k: A, v: B): Option[B] = {
- val r = underlying.putIfAbsent(k, v)
- if (r != null) Some(r) else None
- }
+ def putIfAbsent(k: A, v: B): Option[B] = Option(underlying.putIfAbsent(k, v))
def remove(k: A, v: B): Boolean = underlying.remove(k, v)
- def replace(k: A, v: B): Option[B] = {
- val prev = underlying.replace(k, v)
- if (prev != null) Some(prev) else None
- }
+ def replace(k: A, v: B): Option[B] = Option(underlying.replace(k, v))
def replace(k: A, oldvalue: B, newvalue: B): Boolean =
underlying.replace(k, oldvalue, newvalue)
@@ -380,25 +365,16 @@ private[collection] trait Wrappers {
case class JDictionaryWrapper[A, B](underlying: ju.Dictionary[A, B]) extends mutable.AbstractMap[A, B] with mutable.Map[A, B] {
override def size: Int = underlying.size
- def get(k: A) = {
- val v = underlying get k
- if (v != null) Some(v) else None
- }
+ def get(k: A) = Option(underlying get k)
def +=(kv: (A, B)): this.type = { underlying.put(kv._1, kv._2); this }
def -=(key: A): this.type = { underlying remove key; this }
- override def put(k: A, v: B): Option[B] = {
- val r = underlying.put(k, v)
- if (r != null) Some(r) else None
- }
+ override def put(k: A, v: B): Option[B] = Option(underlying.put(k, v))
override def update(k: A, v: B) { underlying.put(k, v) }
- override def remove(k: A): Option[B] = {
- val r = underlying remove k
- if (r != null) Some(r) else None
- }
+ override def remove(k: A): Option[B] = Option(underlying remove k)
def iterator = enumerationAsScalaIterator(underlying.keys) map (k => (k, underlying get k))
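Several hunks above replace hand-written null checks with `Option(...)`, which yields `None` for a `null` reference and `Some(x)` otherwise, so the wrappers around Java maps keep the same behaviour with less code. A minimal sketch of the equivalence (the map and key below are placeholders):

    val underlying = new java.util.HashMap[String, String]
    val r = underlying.get("missing")                       // null when the key is absent
    val viaIf: Option[String] = if (r != null) Some(r) else None
    val viaOption: Option[String] = Option(r)               // same result, more concise
    assert(viaIf == viaOption)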
diff --git a/src/library/scala/collection/generic/MutableSortedSetFactory.scala b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
index 0339a523e9..9bb12c2317 100644
--- a/src/library/scala/collection/generic/MutableSortedSetFactory.scala
+++ b/src/library/scala/collection/generic/MutableSortedSetFactory.scala
@@ -24,7 +24,7 @@ abstract class MutableSortedSetFactory[CC[A] <: mutable.SortedSet[A] with Sorted
/**
* mutable.SetBuilder uses '+' which is not a primitive for anything extending mutable.SetLike,
- * this causes serious perfomances issues since each time 'elems = elems + x'
+ * this causes serious performance issues since each time 'elems = elems + x'
* is evaluated elems is cloned (which is O(n)).
*
* Fortunately GrowingBuilder comes to rescue.
diff --git a/src/library/scala/collection/immutable/HashSet.scala b/src/library/scala/collection/immutable/HashSet.scala
index 49b4397cf2..6b71c0fa66 100644
--- a/src/library/scala/collection/immutable/HashSet.scala
+++ b/src/library/scala/collection/immutable/HashSet.scala
@@ -162,13 +162,6 @@ class HashSet[A] extends AbstractSet[A]
def - (e: A): HashSet[A] =
nullToEmpty(removed0(e, computeHash(e), 0))
- /** Returns this $coll as an immutable set.
- *
- * A new set will not be built; lazy collections will stay lazy.
- */
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
-
override def filter(p: A => Boolean) = {
val buffer = new Array[HashSet[A]](bufferSize(size))
nullToEmpty(filter0(p, false, 0, buffer, 0))
diff --git a/src/library/scala/collection/immutable/ListSet.scala b/src/library/scala/collection/immutable/ListSet.scala
index a6e6fba0a5..2e17677359 100644
--- a/src/library/scala/collection/immutable/ListSet.scala
+++ b/src/library/scala/collection/immutable/ListSet.scala
@@ -138,13 +138,6 @@ class ListSet[A] extends AbstractSet[A]
override def stringPrefix = "ListSet"
- /** Returns this $coll as an immutable set.
- *
- * A new set will not be built; lazy collections will stay lazy.
- */
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
-
/** Represents an entry in the `ListSet`.
*/
protected class Node(override val head: A) extends ListSet[A] with Serializable {
diff --git a/src/library/scala/collection/immutable/Range.scala b/src/library/scala/collection/immutable/Range.scala
index 0b380517f8..984ea7ba50 100644
--- a/src/library/scala/collection/immutable/Range.scala
+++ b/src/library/scala/collection/immutable/Range.scala
@@ -302,7 +302,7 @@ extends scala.collection.AbstractSeq[Int]
*/
final override def splitAt(n: Int) = (take(n), drop(n))
- /** Creates a new range consisting of the `length - n` last elements of the range.
+ /** Creates a new range consisting of the last `n` elements of the range.
*
* $doesNotUseBuilders
*/
diff --git a/src/library/scala/collection/immutable/Set.scala b/src/library/scala/collection/immutable/Set.scala
index 7725ad9ee3..0fbf7942d4 100644
--- a/src/library/scala/collection/immutable/Set.scala
+++ b/src/library/scala/collection/immutable/Set.scala
@@ -35,7 +35,12 @@ trait Set[A] extends Iterable[A]
override def companion: GenericCompanion[Set] = Set
- override def toSet[B >: A]: Set[B] = to[({type l[a] = immutable.Set[B]})#l] // for bincompat; remove in dev
+ /** Returns this $coll as an immutable set.
+ *
+ * A new set will not be built; lazy collections will stay lazy.
+ */
+ @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
+ override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
override def seq: Set[A] = this
protected override def parCombiner = ParSet.newCombiner[A] // if `immutable.SetLike` gets introduced, please move this there!
@@ -57,7 +62,6 @@ object Set extends ImmutableSetFactory[Set] {
def - (elem: Any): Set[Any] = this
def iterator: Iterator[Any] = Iterator.empty
override def foreach[U](f: Any => U): Unit = {}
- override def toSet[B >: Any]: Set[B] = this.asInstanceOf[Set[B]]
}
private[collection] def emptyInstance: Set[Any] = EmptySet
@@ -88,8 +92,6 @@ object Set extends ImmutableSetFactory[Set] {
if (f(elem1)) Some(elem1)
else None
}
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
}
/** An optimized representation for immutable sets of size 2 */
@@ -121,8 +123,6 @@ object Set extends ImmutableSetFactory[Set] {
else if (f(elem2)) Some(elem2)
else None
}
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
}
/** An optimized representation for immutable sets of size 3 */
@@ -156,8 +156,6 @@ object Set extends ImmutableSetFactory[Set] {
else if (f(elem3)) Some(elem3)
else None
}
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
}
/** An optimized representation for immutable sets of size 4 */
@@ -193,8 +191,6 @@ object Set extends ImmutableSetFactory[Set] {
else if (f(elem4)) Some(elem4)
else None
}
- @deprecatedOverriding("Immutable sets should do nothing on toSet but return themselves cast as a Set.", "2.11.0")
- override def toSet[B >: A]: Set[B] = this.asInstanceOf[Set[B]]
}
}
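The change above consolidates the `toSet` override: instead of repeating it in `HashSet`, `ListSet`, `EmptySet` and `Set1`..`Set4`, it now lives once in `immutable.Set` and simply returns the receiver, cast to `Set[B]`. A short sketch of the observable effect with this change in place:

    val s: Set[Int] = Set(1, 2, 3)
    // No new set is built; the same instance is returned.
    assert(s.toSet eq s)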
diff --git a/src/library/scala/collection/immutable/Stream.scala b/src/library/scala/collection/immutable/Stream.scala
index 6c5b10e73b..5989517532 100644
--- a/src/library/scala/collection/immutable/Stream.scala
+++ b/src/library/scala/collection/immutable/Stream.scala
@@ -509,21 +509,6 @@ self =>
else Stream.Empty
}
- /** Returns all the elements of this `Stream` that satisfy the predicate `p`
- * in a new `Stream` - i.e., it is still a lazy data structure. The order of
- * the elements is preserved
- *
- * @param p the predicate used to filter the stream.
- * @return the elements of this stream satisfying `p`.
- *
- * @example {{{
- * $naturalsEx
- * naturalsFrom(1) 10 } filter { _ % 5 == 0 } take 10 mkString(", ")
- * // produces
- * }}}
- */
- override def filter(p: A => Boolean): Stream[A] = filterImpl(p, isFlipped = false) // This override is only left in 2.11 because of binary compatibility, see PR #3925
-
/** A FilterMonadic which allows GC of the head of stream during processing */
@noinline // Workaround SI-9137, see https://github.com/scala/scala/pull/4284#issuecomment-73180791
override final def withFilter(p: A => Boolean): FilterMonadic[A, Stream[A]] = new Stream.StreamWithFilter(this, p)
@@ -1144,7 +1129,7 @@ object Stream extends SeqFactory[Stream] {
def #:::(prefix: Stream[A]): Stream[A] = prefix append tl
}
- /** A wrapper method that adds `#::` for cons and `#::: for concat as operations
+ /** A wrapper method that adds `#::` for cons and `#:::` for concat as operations
* to streams.
*/
implicit def consWrapper[A](stream: => Stream[A]): ConsWrapper[A] =
diff --git a/src/library/scala/collection/immutable/StringOps.scala b/src/library/scala/collection/immutable/StringOps.scala
index 6737692fb1..77333badf9 100644
--- a/src/library/scala/collection/immutable/StringOps.scala
+++ b/src/library/scala/collection/immutable/StringOps.scala
@@ -12,10 +12,9 @@ package immutable
import mutable.StringBuilder
-/**
- * This class serves as a wrapper providing `String`s with all the operations
- * found in indexed sequences. Where needed, instances of `String` object
- * are implicitly converted into this class.
+/** This class serves as a wrapper providing [[scala.Predef.String]]s with all
+ * the operations found in indexed sequences. Where needed, `String`s are
+ * implicitly converted into instances of this class.
*
* The difference between this class and `WrappedString` is that calling transformer
* methods such as `filter` and `map` will yield a `String` object, whereas a
diff --git a/src/library/scala/collection/mutable/ArrayBuffer.scala b/src/library/scala/collection/mutable/ArrayBuffer.scala
index 011fd415ee..167e04ccbd 100644
--- a/src/library/scala/collection/mutable/ArrayBuffer.scala
+++ b/src/library/scala/collection/mutable/ArrayBuffer.scala
@@ -149,13 +149,16 @@ class ArrayBuffer[A](override protected val initialSize: Int)
/** Removes the element on a given index position. It takes time linear in
* the buffer size.
*
- * @param n the index which refers to the first element to delete.
- * @param count the number of elements to delete
- * @throws IndexOutOfBoundsException if `n` is out of bounds.
+ * @param n the index which refers to the first element to remove.
+ * @param count the number of elements to remove.
+ * @throws IndexOutOfBoundsException if the index `n` is not in the valid range
+ * `0 <= n <= length - count` (with `count > 0`).
+ * @throws IllegalArgumentException if `count < 0`.
*/
override def remove(n: Int, count: Int) {
- require(count >= 0, "removing negative number of elements")
- if (n < 0 || n > size0 - count) throw new IndexOutOfBoundsException(n.toString)
+ if (count < 0) throw new IllegalArgumentException("removing negative number of elements: " + count.toString)
+ else if (count == 0) return // Did nothing
+ if (n < 0 || n > size0 - count) throw new IndexOutOfBoundsException("at " + n.toString + " deleting " + count.toString)
copy(n + count, n, size0 - (n + count))
reduceToSize(size0 - count)
}
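The reworked `remove(n, count)` above rejects a negative `count` with `IllegalArgumentException`, treats `count == 0` as a no-op, and reports an `IndexOutOfBoundsException` that names both the index and the count. A quick sketch of the resulting contract (values are illustrative):

    import scala.collection.mutable.ArrayBuffer
    val buf = ArrayBuffer(1, 2, 3, 4, 5)
    buf.remove(1, 2)          // buf is now ArrayBuffer(1, 4, 5)
    buf.remove(0, 0)          // no-op
    // buf.remove(0, -1)      // would throw IllegalArgumentException
    // buf.remove(4, 2)       // would throw IndexOutOfBoundsException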
diff --git a/src/library/scala/collection/mutable/BufferLike.scala b/src/library/scala/collection/mutable/BufferLike.scala
index 8d24538620..bd9c61ae6a 100644
--- a/src/library/scala/collection/mutable/BufferLike.scala
+++ b/src/library/scala/collection/mutable/BufferLike.scala
@@ -105,15 +105,18 @@ trait BufferLike[A, +This <: BufferLike[A, This] with Buffer[A]]
*/
def remove(n: Int): A
- /** Removes a number of elements from a given index position.
+ /** Removes a number of elements from a given index position. Subclasses of `BufferLike`
+ * will typically override this method to provide better performance than `count`
+ * successive calls to single-element `remove`.
*
* @param n the index which refers to the first element to remove.
* @param count the number of elements to remove.
* @throws IndexOutOfBoundsException if the index `n` is not in the valid range
- * `0 <= n <= length - count`.
+ * `0 <= n <= length - count` (with `count > 0`).
* @throws IllegalArgumentException if `count < 0`.
*/
def remove(n: Int, count: Int) {
+ if (count < 0) throw new IllegalArgumentException("removing negative number of elements: " + count.toString)
for (i <- 0 until count) remove(n)
}
diff --git a/src/library/scala/collection/mutable/ListBuffer.scala b/src/library/scala/collection/mutable/ListBuffer.scala
index 2bc6738e53..eece557fe8 100644
--- a/src/library/scala/collection/mutable/ListBuffer.scala
+++ b/src/library/scala/collection/mutable/ListBuffer.scala
@@ -262,13 +262,14 @@ final class ListBuffer[A]
*
* @param n the index which refers to the first element to remove.
* @param count the number of elements to remove.
+ * @throws IndexOutOfBoundsException if the index `n` is not in the valid range
+ * `0 <= n <= length - count` (with `count > 0`).
+ * @throws IllegalArgumentException if `count < 0`.
*/
- @migration("Invalid input values will be rejected in future releases.", "2.11")
override def remove(n: Int, count: Int) {
- if (n >= len)
- return
- if (count < 0)
- throw new IllegalArgumentException(s"removing negative number ($count) of elements")
+ if (count < 0) throw new IllegalArgumentException("removing negative number of elements: " + count.toString)
+ else if (count == 0) return // Nothing to do
+ if (n < 0 || n > len - count) throw new IndexOutOfBoundsException("at " + n.toString + " deleting " + count.toString)
if (exported) copy()
val n1 = n max 0
val count1 = count min (len - n1)
diff --git a/src/library/scala/collection/mutable/RedBlackTree.scala b/src/library/scala/collection/mutable/RedBlackTree.scala
index 7f12264d45..e4793242bf 100644
--- a/src/library/scala/collection/mutable/RedBlackTree.scala
+++ b/src/library/scala/collection/mutable/RedBlackTree.scala
@@ -55,7 +55,9 @@ private[collection] object RedBlackTree {
// ---- size ----
def size(node: Node[_, _]): Int = if (node eq null) 0 else 1 + size(node.left) + size(node.right)
+ def size(tree: Tree[_, _]): Int = tree.size
def isEmpty(tree: Tree[_, _]) = tree.root eq null
+ def clear(tree: Tree[_, _]): Unit = { tree.root = null; tree.size = 0 }
// ---- search ----
@@ -80,6 +82,11 @@ private[collection] object RedBlackTree {
case node => Some((node.key, node.value))
}
+ def minKey[A](tree: Tree[A, _]): Option[A] = minNode(tree.root) match {
+ case null => None
+ case node => Some(node.key)
+ }
+
private def minNode[A, B](node: Node[A, B]): Node[A, B] =
if (node eq null) null else minNodeNonNull(node)
@@ -91,6 +98,11 @@ private[collection] object RedBlackTree {
case node => Some((node.key, node.value))
}
+ def maxKey[A](tree: Tree[A, _]): Option[A] = maxNode(tree.root) match {
+ case null => None
+ case node => Some(node.key)
+ }
+
private def maxNode[A, B](node: Node[A, B]): Node[A, B] =
if (node eq null) null else maxNodeNonNull(node)
@@ -107,6 +119,12 @@ private[collection] object RedBlackTree {
case node => Some((node.key, node.value))
}
+ def minKeyAfter[A](tree: Tree[A, _], key: A)(implicit ord: Ordering[A]): Option[A] =
+ minNodeAfter(tree.root, key) match {
+ case null => None
+ case node => Some(node.key)
+ }
+
private[this] def minNodeAfter[A, B](node: Node[A, B], key: A)(implicit ord: Ordering[A]): Node[A, B] = {
if (node eq null) null
else {
@@ -131,6 +149,12 @@ private[collection] object RedBlackTree {
case node => Some((node.key, node.value))
}
+ def maxKeyBefore[A](tree: Tree[A, _], key: A)(implicit ord: Ordering[A]): Option[A] =
+ maxNodeBefore(tree.root, key) match {
+ case null => None
+ case node => Some(node.key)
+ }
+
private[this] def maxNodeBefore[A, B](node: Node[A, B], key: A)(implicit ord: Ordering[A]): Node[A, B] = {
if (node eq null) null
else {
@@ -409,6 +433,17 @@ private[collection] object RedBlackTree {
if (node.right ne null) foreachNodeNonNull(node.right, f)
}
+ def foreachKey[A, U](tree: Tree[A, _], f: A => U): Unit = foreachNodeKey(tree.root, f)
+
+ private[this] def foreachNodeKey[A, U](node: Node[A, _], f: A => U): Unit =
+ if (node ne null) foreachNodeKeyNonNull(node, f)
+
+ private[this] def foreachNodeKeyNonNull[A, U](node: Node[A, _], f: A => U): Unit = {
+ if (node.left ne null) foreachNodeKeyNonNull(node.left, f)
+ f(node.key)
+ if (node.right ne null) foreachNodeKeyNonNull(node.right, f)
+ }
+
def transform[A, B](tree: Tree[A, B], f: (A, B) => B): Unit = transformNode(tree.root, f)
private[this] def transformNode[A, B, U](node: Node[A, B], f: (A, B) => B): Unit =
diff --git a/src/library/scala/collection/mutable/SortedSet.scala b/src/library/scala/collection/mutable/SortedSet.scala
index 0f2fa75abd..3dee57eb6d 100644
--- a/src/library/scala/collection/mutable/SortedSet.scala
+++ b/src/library/scala/collection/mutable/SortedSet.scala
@@ -48,3 +48,6 @@ object SortedSet extends MutableSortedSetFactory[SortedSet] {
def empty[A](implicit ord: Ordering[A]): SortedSet[A] = TreeSet.empty[A]
}
+
+/** Explicit instantiation of the `SortedSet` trait to reduce class file size in subclasses. */
+abstract class AbstractSortedSet[A] extends scala.collection.mutable.AbstractSet[A] with SortedSet[A]
diff --git a/src/library/scala/collection/mutable/TreeMap.scala b/src/library/scala/collection/mutable/TreeMap.scala
index 244cc18735..dc7d5d750e 100644
--- a/src/library/scala/collection/mutable/TreeMap.scala
+++ b/src/library/scala/collection/mutable/TreeMap.scala
@@ -52,6 +52,20 @@ sealed class TreeMap[A, B] private (tree: RB.Tree[A, B])(implicit val ordering:
override def empty = TreeMap.empty
override protected[this] def newBuilder = TreeMap.newBuilder[A, B]
+ /**
+ * Creates a ranged projection of this map. Any mutations in the ranged projection will update the original map and
+ * vice versa.
+ *
+ * Only entries with keys within this projection's key range will ever appear as elements of this map, independently
+ * of whether the entries are added through the original map or through this view. That means that if one inserts a
+ * key-value in a view whose key is outside the view's bounds, calls to `get` or `contains` will _not_ consider the
+ * newly added entry. Mutations are always reflected in the original map, though.
+ *
+ * @param from the lower bound (inclusive) of this projection wrapped in a `Some`, or `None` if there is no lower
+ * bound.
+ * @param until the upper bound (exclusive) of this projection wrapped in a `Some`, or `None` if there is no upper
+ * bound.
+ */
def rangeImpl(from: Option[A], until: Option[A]): TreeMap[A, B] = new TreeMapView(from, until)
def -=(key: A): this.type = { RB.delete(tree, key); this }
@@ -64,7 +78,7 @@ sealed class TreeMap[A, B] private (tree: RB.Tree[A, B])(implicit val ordering:
def keysIteratorFrom(start: A) = RB.keysIterator(tree, Some(start))
def valuesIteratorFrom(start: A) = RB.valuesIterator(tree, Some(start))
- override def size = tree.size
+ override def size = RB.size(tree)
override def isEmpty = RB.isEmpty(tree)
override def contains(key: A) = RB.contains(tree, key)
@@ -78,7 +92,7 @@ sealed class TreeMap[A, B] private (tree: RB.Tree[A, B])(implicit val ordering:
override def foreach[U](f: ((A, B)) => U): Unit = RB.foreach(tree, f)
override def transform(f: (A, B) => B) = { RB.transform(tree, f); this }
- override def clear(): Unit = tree.root = null
+ override def clear(): Unit = RB.clear(tree)
override def stringPrefix = "TreeMap"
@@ -157,10 +171,15 @@ sealed class TreeMap[A, B] private (tree: RB.Tree[A, B])(implicit val ordering:
}
}
+ // Using the iterator should be efficient enough; if performance is deemed a problem later, specialized
+ // `foreach(f, from, until)` and `transform(f, from, until)` methods can be created in `RedBlackTree`. See
+ // https://github.com/scala/scala/pull/4608#discussion_r34307985 for a discussion about this.
override def foreach[U](f: ((A, B)) => U): Unit = iterator.foreach(f)
override def transform(f: (A, B) => B) = {
iterator.foreach { case (key, value) => update(key, f(key, value)) }
this
}
+
+ override def clone() = super.clone().rangeImpl(from, until)
}
}
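The new `rangeImpl` documentation above describes views that share mutations with the original map but only expose keys inside their bounds. A hedged usage sketch of that behaviour (key values are illustrative):

    import scala.collection.mutable.TreeMap
    val m = TreeMap(1 -> "a", 5 -> "e", 9 -> "i")
    val view = m.rangeImpl(Some(3), Some(8))   // keys k with 3 <= k < 8
    view.get(5)                                // Some("e")
    view += (2 -> "b")                         // key outside the view's bounds...
    view.get(2)                                // ...so None here,
    m.get(2)                                   // but Some("b") in the original map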
diff --git a/src/library/scala/collection/mutable/TreeSet.scala b/src/library/scala/collection/mutable/TreeSet.scala
index f849eea569..ada6f145ad 100644
--- a/src/library/scala/collection/mutable/TreeSet.scala
+++ b/src/library/scala/collection/mutable/TreeSet.scala
@@ -11,8 +11,7 @@ package collection
package mutable
import generic._
-import scala.collection.immutable.{RedBlackTree => RB}
-import scala.runtime.ObjectRef
+import scala.collection.mutable.{RedBlackTree => RB}
/**
* @define Coll `mutable.TreeSet`
@@ -29,88 +28,162 @@ object TreeSet extends MutableSortedSetFactory[TreeSet] {
*/
def empty[A](implicit ordering: Ordering[A]) = new TreeSet[A]()
+ /** $sortedMapCanBuildFromInfo */
+ implicit def canBuildFrom[A](implicit ord: Ordering[A]): CanBuildFrom[Coll, A, TreeSet[A]] =
+ new SortedSetCanBuildFrom[A]
}
/**
- * A mutable SortedSet using an immutable RedBlack Tree as underlying data structure.
+ * A mutable sorted set implemented using a mutable red-black tree as underlying data structure.
*
- * @author Lucien Pereira
+ * @param ordering the implicit ordering used to compare objects of type `A`.
+ * @tparam A the type of the keys contained in this tree set.
+ *
+ * @author Rui Gonçalves
+ * @version 2.12
+ * @since 2.10
*
+ * @define Coll mutable.TreeSet
+ * @define coll mutable tree set
*/
-@deprecatedInheritance("TreeSet is not designed to enable meaningful subclassing.", "2.11.0")
-class TreeSet[A] private (treeRef: ObjectRef[RB.Tree[A, Null]], from: Option[A], until: Option[A])(implicit val ordering: Ordering[A])
- extends SortedSet[A] with SetLike[A, TreeSet[A]]
- with SortedSetLike[A, TreeSet[A]] with Set[A] with Serializable {
+// Original API designed in part by Lucien Pereira
+@SerialVersionUID(-3642111301929493640L)
+sealed class TreeSet[A] private (tree: RB.Tree[A, Null])(implicit val ordering: Ordering[A])
+ extends AbstractSortedSet[A]
+ with SortedSet[A]
+ with SetLike[A, TreeSet[A]]
+ with SortedSetLike[A, TreeSet[A]]
+ with Serializable {
if (ordering eq null)
throw new NullPointerException("ordering must not be null")
- def this()(implicit ordering: Ordering[A]) = this(new ObjectRef(null), None, None)
+ /**
+ * Creates an empty `TreeSet`.
+ * @param ord the implicit ordering used to compare objects of type `A`.
+ * @return an empty `TreeSet`.
+ */
+ def this()(implicit ord: Ordering[A]) = this(RB.Tree.empty)(ord)
- override def size: Int = RB.countInRange(treeRef.elem, from, until)
+ override def empty = TreeSet.empty
+ override protected[this] def newBuilder = TreeSet.newBuilder[A]
- override def stringPrefix = "TreeSet"
+ /**
+ * Creates a ranged projection of this set. Any mutations in the ranged projection will update the original set
+ * and vice versa.
+ *
+ * Only keys within this projection's key range will ever appear as elements of this set, independently of whether
+ * the elements are added through the original set or through this view. That means that if one inserts an element in
+ * a view whose key is outside the view's bounds, calls to `contains` will _not_ consider the newly added element.
+ * Mutations are always reflected in the original set, though.
+ *
+ * @param from the lower bound (inclusive) of this projection wrapped in a `Some`, or `None` if there is no lower
+ * bound.
+ * @param until the upper bound (exclusive) of this projection wrapped in a `Some`, or `None` if there is no upper
+ * bound.
+ */
+ def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] = new TreeSetView(from, until)
- override def empty: TreeSet[A] = TreeSet.empty
+ def -=(key: A): this.type = { RB.delete(tree, key); this }
+ def +=(elem: A): this.type = { RB.insert(tree, elem, null); this }
- private def pickBound(comparison: (A, A) => A, oldBound: Option[A], newBound: Option[A]) = (newBound, oldBound) match {
- case (Some(newB), Some(oldB)) => Some(comparison(newB, oldB))
- case (None, _) => oldBound
- case _ => newBound
- }
+ def contains(elem: A) = RB.contains(tree, elem)
- override def rangeImpl(fromArg: Option[A], untilArg: Option[A]): TreeSet[A] = {
- val newFrom = pickBound(ordering.max, fromArg, from)
- val newUntil = pickBound(ordering.min, untilArg, until)
+ def iterator = RB.keysIterator(tree)
+ def keysIteratorFrom(start: A) = RB.keysIterator(tree, Some(start))
+ override def iteratorFrom(start: A) = RB.keysIterator(tree, Some(start))
- new TreeSet(treeRef, newFrom, newUntil)
- }
+ override def size = RB.size(tree)
+ override def isEmpty = RB.isEmpty(tree)
- override def -=(elem: A): this.type = {
- treeRef.elem = RB.delete(treeRef.elem, elem)
- this
- }
+ override def head = RB.minKey(tree).get
+ override def headOption = RB.minKey(tree)
+ override def last = RB.maxKey(tree).get
+ override def lastOption = RB.maxKey(tree)
- override def +=(elem: A): this.type = {
- treeRef.elem = RB.update(treeRef.elem, elem, null, overwrite = false)
- this
- }
+ override def foreach[U](f: A => U): Unit = RB.foreachKey(tree, f)
+ override def clear(): Unit = RB.clear(tree)
+
+ override def stringPrefix = "TreeSet"
/**
- * Thanks to the immutable nature of the
- * underlying Tree, we can share it with
- * the clone. So clone complexity in time is O(1).
+ * A ranged projection of a [[TreeSet]]. Mutations on this set affect the original set and vice versa.
*
+ * Only keys within this projection's key range will ever appear as elements of this set, independently of whether
+ * the elements are added through the original set or through this view. That means that if one inserts an element in
+ * a view whose key is outside the view's bounds, calls to `contains` will _not_ consider the newly added element.
+ * Mutations are always reflected in the original set, though.
+ *
+ * @param from the lower bound (inclusive) of this projection wrapped in a `Some`, or `None` if there is no lower
+ * bound.
+ * @param until the upper bound (exclusive) of this projection wrapped in a `Some`, or `None` if there is no upper
+ * bound.
*/
- override def clone(): TreeSet[A] =
- new TreeSet[A](new ObjectRef(treeRef.elem), from, until)
-
- private val notProjection = !(from.isDefined || until.isDefined)
+ @SerialVersionUID(7087824939194006086L)
+ private[this] final class TreeSetView(from: Option[A], until: Option[A]) extends TreeSet[A](tree) {
+
+ /**
+ * Given a possible new lower bound, chooses and returns the most constraining one (the maximum).
+ */
+ private[this] def pickLowerBound(newFrom: Option[A]): Option[A] = (from, newFrom) match {
+ case (Some(fr), Some(newFr)) => Some(ordering.max(fr, newFr))
+ case (None, _) => newFrom
+ case _ => from
+ }
- override def contains(elem: A): Boolean = {
- def leftAcceptable: Boolean = from match {
- case Some(lb) => ordering.gteq(elem, lb)
- case _ => true
+ /**
+ * Given a possible new upper bound, chooses and returns the most constraining one (the minimum).
+ */
+ private[this] def pickUpperBound(newUntil: Option[A]): Option[A] = (until, newUntil) match {
+ case (Some(unt), Some(newUnt)) => Some(ordering.min(unt, newUnt))
+ case (None, _) => newUntil
+ case _ => until
}
- def rightAcceptable: Boolean = until match {
- case Some(ub) => ordering.lt(elem, ub)
- case _ => true
+ /**
+ * Returns true if the argument is inside the view bounds (between `from` and `until`).
+ */
+ private[this] def isInsideViewBounds(key: A): Boolean = {
+ val afterFrom = from.isEmpty || ordering.compare(from.get, key) <= 0
+ val beforeUntil = until.isEmpty || ordering.compare(key, until.get) < 0
+ afterFrom && beforeUntil
}
- (notProjection || (leftAcceptable && rightAcceptable)) &&
- RB.contains(treeRef.elem, elem)
- }
+ override def rangeImpl(from: Option[A], until: Option[A]): TreeSet[A] =
+ new TreeSetView(pickLowerBound(from), pickUpperBound(until))
+
+ override def contains(key: A) = isInsideViewBounds(key) && RB.contains(tree, key)
+
+ override def iterator = RB.keysIterator(tree, from, until)
+ override def keysIteratorFrom(start: A) = RB.keysIterator(tree, pickLowerBound(Some(start)), until)
+ override def iteratorFrom(start: A) = RB.keysIterator(tree, pickLowerBound(Some(start)), until)
- override def iterator: Iterator[A] = iteratorFrom(None)
+ override def size = iterator.length
+ override def isEmpty = !iterator.hasNext
- override def keysIteratorFrom(start: A) = iteratorFrom(Some(start))
+ override def head = headOption.get
+ override def headOption = {
+ val elem = if (from.isDefined) RB.minKeyAfter(tree, from.get) else RB.minKey(tree)
+ (elem, until) match {
+ case (Some(e), Some(unt)) if ordering.compare(e, unt) >= 0 => None
+ case _ => elem
+ }
+ }
- private def iteratorFrom(start: Option[A]) = {
- val it = RB.keysIterator(treeRef.elem, pickBound(ordering.max, from, start))
- until match {
- case None => it
- case Some(ub) => it takeWhile (k => ordering.lt(k, ub))
+ override def last = lastOption.get
+ override def lastOption = {
+ val elem = if (until.isDefined) RB.maxKeyBefore(tree, until.get) else RB.maxKey(tree)
+ (elem, from) match {
+ case (Some(e), Some(fr)) if ordering.compare(e, fr) < 0 => None
+ case _ => elem
+ }
}
+
+ // Using the iterator should be efficient enough; if performance is deemed a problem later, a specialized
+ // `foreachKey(f, from, until)` method can be created in `RedBlackTree`. See
+ // https://github.com/scala/scala/pull/4608#discussion_r34307985 for a discussion about this.
+ override def foreach[U](f: A => U): Unit = iterator.foreach(f)
+
+ override def clone() = super.clone().rangeImpl(from, until)
}
}
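The rewritten `TreeSet` above is backed by the mutable `RedBlackTree` extended earlier in this patch, so `+=`, `-=` and `clear()` update the tree in place, and `rangeImpl` yields a view with the same sharing semantics as `TreeMap`'s. A short usage sketch (element values are illustrative):

    import scala.collection.mutable.TreeSet
    val s = TreeSet(5, 1, 3)
    s += 4                                     // in-place insertion
    s.head                                     // 1, the smallest element
    s.last                                     // 5, the largest element
    val view = s.rangeImpl(Some(2), Some(5))   // elements e with 2 <= e < 5
    view.toList                                // List(3, 4)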
diff --git a/src/library/scala/collection/parallel/ParIterableLike.scala b/src/library/scala/collection/parallel/ParIterableLike.scala
index 016255dca4..53f9a7b87a 100644
--- a/src/library/scala/collection/parallel/ParIterableLike.scala
+++ b/src/library/scala/collection/parallel/ParIterableLike.scala
@@ -195,7 +195,7 @@ self: ParIterableLike[T, Repr, Sequential] =>
* import scala.collection.parallel._
* val pc = mutable.ParArray(1, 2, 3)
* pc.tasksupport = new ForkJoinTaskSupport(
- * new scala.concurrent.forkjoin.ForkJoinPool(2))
+ * new java.util.concurrent.ForkJoinPool(2))
* }}}
*
* @see [[scala.collection.parallel.TaskSupport]]
diff --git a/src/library/scala/collection/parallel/TaskSupport.scala b/src/library/scala/collection/parallel/TaskSupport.scala
index 9064018d46..6ab694de04 100644
--- a/src/library/scala/collection/parallel/TaskSupport.scala
+++ b/src/library/scala/collection/parallel/TaskSupport.scala
@@ -10,7 +10,7 @@ package scala
package collection.parallel
import java.util.concurrent.ThreadPoolExecutor
-import scala.concurrent.forkjoin.ForkJoinPool
+import java.util.concurrent.ForkJoinPool
import scala.concurrent.ExecutionContext
/** A trait implementing the scheduling of a parallel collection operation.
@@ -41,7 +41,7 @@ import scala.concurrent.ExecutionContext
* import scala.collection.parallel._
* val pc = mutable.ParArray(1, 2, 3)
* pc.tasksupport = new ForkJoinTaskSupport(
- * new scala.concurrent.forkjoin.ForkJoinPool(2))
+ * new java.util.concurrent.ForkJoinPool(2))
* }}}
*
* @see [[http://docs.scala-lang.org/overviews/parallel-collections/configuration.html Configuring Parallel Collections]] section
diff --git a/src/library/scala/collection/parallel/Tasks.scala b/src/library/scala/collection/parallel/Tasks.scala
index fcf0dff846..c9a75752df 100644
--- a/src/library/scala/collection/parallel/Tasks.scala
+++ b/src/library/scala/collection/parallel/Tasks.scala
@@ -10,7 +10,7 @@ package scala
package collection.parallel
import java.util.concurrent.ThreadPoolExecutor
-import scala.concurrent.forkjoin._
+import java.util.concurrent.{ForkJoinPool, RecursiveAction, ForkJoinWorkerThread}
import scala.concurrent.ExecutionContext
import scala.util.control.Breaks._
import scala.annotation.unchecked.uncheckedVariance
diff --git a/src/library/scala/compat/Platform.scala b/src/library/scala/compat/Platform.scala
index 4c82d6e15b..42dfcbfdde 100644
--- a/src/library/scala/compat/Platform.scala
+++ b/src/library/scala/compat/Platform.scala
@@ -41,7 +41,7 @@ object Platform {
* @throws java.lang.ArrayStoreException If either `src` or `dest` are not of type
* [java.lang.Array]; or if the element type of `src` is not
* compatible with that of `dest`.
- * @throws java.lang.IndexOutOfBoundsException If either srcPos` or `destPos` are
+ * @throws java.lang.IndexOutOfBoundsException If either `srcPos` or `destPos` are
* outside of the bounds of their respective arrays; or if `length`
* is negative; or if there are less than `length` elements available
* after `srcPos` or `destPos` in `src` and `dest` respectively.
diff --git a/src/library/scala/concurrent/forkjoin/package.scala b/src/library/scala/concurrent/forkjoin/package.scala
new file mode 100644
index 0000000000..7f4524fccf
--- /dev/null
+++ b/src/library/scala/concurrent/forkjoin/package.scala
@@ -0,0 +1,60 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2015, LAMP/EPFL and Typesafe, Inc. **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent
+import java.util.{concurrent => juc}
+import java.util.Collection
+
+package object forkjoin {
+ @deprecated("Use java.util.concurrent.ForkJoinPool directly, instead of this alias.", "2.12.0")
+ type ForkJoinPool = juc.ForkJoinPool
+ @deprecated("Use java.util.concurrent.ForkJoinPool directly, instead of this alias.", "2.12.0")
+ object ForkJoinPool {
+ type ForkJoinWorkerThreadFactory = juc.ForkJoinPool.ForkJoinWorkerThreadFactory
+ type ManagedBlocker = juc.ForkJoinPool.ManagedBlocker
+
+ val defaultForkJoinWorkerThreadFactory: ForkJoinWorkerThreadFactory = juc.ForkJoinPool.defaultForkJoinWorkerThreadFactory
+ def managedBlock(blocker: ManagedBlocker): Unit = juc.ForkJoinPool.managedBlock(blocker)
+ }
+
+ @deprecated("Use java.util.concurrent.ForkJoinTask directly, instead of this alias.", "2.12.0")
+ type ForkJoinTask[T] = juc.ForkJoinTask[T]
+ @deprecated("Use java.util.concurrent.ForkJoinTask directly, instead of this alias.", "2.12.0")
+ object ForkJoinTask {
+ def adapt(runnable: Runnable): ForkJoinTask[_] = juc.ForkJoinTask.adapt(runnable)
+ def adapt[T](callable: juc.Callable[_ <: T]): ForkJoinTask[T] = juc.ForkJoinTask.adapt(callable)
+ def adapt[T](runnable: Runnable, result: T): ForkJoinTask[T] = juc.ForkJoinTask.adapt(runnable, result)
+ def getPool(): ForkJoinPool = juc.ForkJoinTask.getPool
+ def getQueuedTaskCount(): Int = juc.ForkJoinTask.getQueuedTaskCount
+ def getSurplusQueuedTaskCount(): Int = juc.ForkJoinTask.getSurplusQueuedTaskCount
+ def helpQuiesce(): Unit = juc.ForkJoinTask.helpQuiesce
+ def inForkJoinPool(): Boolean = juc.ForkJoinTask.inForkJoinPool
+ def invokeAll[T <: ForkJoinTask[_]](tasks: Collection[T]): Collection[T] = juc.ForkJoinTask.invokeAll(tasks)
+ def invokeAll[T](t1: ForkJoinTask[T]): Unit = juc.ForkJoinTask.invokeAll(t1)
+ def invokeAll[T](tasks: ForkJoinTask[T]*): Unit = juc.ForkJoinTask.invokeAll(tasks: _*)
+ }
+
+ @deprecated("Use java.util.concurrent.ForkJoinWorkerThread directly, instead of this alias.", "2.12.0")
+ type ForkJoinWorkerThread = juc.ForkJoinWorkerThread
+ @deprecated("Use java.util.concurrent.LinkedTransferQueue directly, instead of this alias.", "2.12.0")
+ type LinkedTransferQueue[T] = juc.LinkedTransferQueue[T]
+ @deprecated("Use java.util.concurrent.RecursiveAction directly, instead of this alias.", "2.12.0")
+ type RecursiveAction = juc.RecursiveAction
+ @deprecated("Use java.util.concurrent.RecursiveTask directly, instead of this alias.", "2.12.0")
+ type RecursiveTask[T] = juc.RecursiveTask[T]
+
+ @deprecated("Use java.util.concurrent.ThreadLocalRandom directly, instead of this alias.", "2.12.0")
+ type ThreadLocalRandom = juc.ThreadLocalRandom
+ @deprecated("Use java.util.concurrent.ThreadLocalRandom directly, instead of this alias.", "2.12.0")
+ object ThreadLocalRandom {
+ // For source compatibility, `current` must declare an empty argument list.
+ // Having no argument list would make more sense since the method has no side effects,
+ // but existing callers that invoke it as `current()` would then break.
+ def current() = juc.ThreadLocalRandom.current
+ }
+}
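The new package object above keeps `scala.concurrent.forkjoin` compiling as deprecated aliases for the corresponding JDK classes, so existing sources only gain warnings rather than errors. A sketch of what a legacy call site now resolves to (both lines refer to the same JDK class; the first is deprecated):

    val oldStyle = new scala.concurrent.forkjoin.ForkJoinPool(2)   // deprecation warning
    val newStyle = new java.util.concurrent.ForkJoinPool(2)
    scala.concurrent.forkjoin.ThreadLocalRandom.current().nextInt(0, 10)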
diff --git a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
index 0c7f98ce5a..c98746a98d 100644
--- a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
+++ b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
@@ -10,10 +10,9 @@ package scala.concurrent.impl
-import java.util.concurrent.{ LinkedBlockingQueue, Callable, Executor, ExecutorService, Executors, ThreadFactory, TimeUnit, ThreadPoolExecutor }
+import java.util.concurrent.{ ForkJoinPool, ForkJoinWorkerThread, ForkJoinTask, LinkedBlockingQueue, Callable, Executor, ExecutorService, Executors, ThreadFactory, TimeUnit, ThreadPoolExecutor }
import java.util.concurrent.atomic.AtomicInteger
import java.util.Collection
-import scala.concurrent.forkjoin._
import scala.concurrent.{ BlockContext, ExecutionContext, Awaitable, CanAwait, ExecutionContextExecutor, ExecutionContextExecutorService }
import scala.util.control.NonFatal
import scala.annotation.tailrec
diff --git a/src/library/scala/concurrent/impl/Promise.scala b/src/library/scala/concurrent/impl/Promise.scala
index 078ad45be9..3538ac6b94 100644
--- a/src/library/scala/concurrent/impl/Promise.scala
+++ b/src/library/scala/concurrent/impl/Promise.scala
@@ -178,7 +178,9 @@ private[concurrent] object Promise {
* DefaultPromises, and `linkedRootOf` is currently only designed to be called
* by Future.flatMap.
*/
- final class DefaultPromise[T] extends AtomicReference[AnyRef](Nil) with Promise[T] {
+ // Left non-final to enable addition of extra fields by Java/Scala converters
+ // in scala-java8-compat.
+ class DefaultPromise[T] extends AtomicReference[AnyRef](Nil) with Promise[T] {
/** Get the root promise for this promise, compressing the link chain to that
* promise if necessary.
@@ -248,12 +250,12 @@ private[concurrent] object Promise {
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
- def ready(atMost: Duration)(implicit permit: CanAwait): this.type =
+ final def ready(atMost: Duration)(implicit permit: CanAwait): this.type =
if (tryAwait(atMost)) this
else throw new TimeoutException("Futures timed out after [" + atMost + "]")
@throws(classOf[Exception])
- def result(atMost: Duration)(implicit permit: CanAwait): T =
+ final def result(atMost: Duration)(implicit permit: CanAwait): T =
ready(atMost).value.get.get // ready throws TimeoutException if timeout so value.get is safe here
def value: Option[Try[T]] = value0
@@ -265,7 +267,7 @@ private[concurrent] object Promise {
case _ => None
}
- override def isCompleted: Boolean = isCompleted0
+ override final def isCompleted: Boolean = isCompleted0
@tailrec
private def isCompleted0: Boolean = get() match {
@@ -274,7 +276,7 @@ private[concurrent] object Promise {
case _ => false
}
- def tryComplete(value: Try[T]): Boolean = {
+ final def tryComplete(value: Try[T]): Boolean = {
val resolved = resolveTry(value)
tryCompleteAndGetListeners(resolved) match {
case null => false
@@ -297,7 +299,7 @@ private[concurrent] object Promise {
}
}
- def onComplete[U](func: Try[T] => U)(implicit executor: ExecutionContext): Unit =
+ final def onComplete[U](func: Try[T] => U)(implicit executor: ExecutionContext): Unit =
dispatchOrAddCallback(new CallbackRunnable[T](executor.prepare(), func))
/** Tries to add the callback, if already completed, it dispatches the callback to be executed.
diff --git a/src/library/scala/concurrent/util/Unsafe.java b/src/library/scala/concurrent/util/Unsafe.java
new file mode 100644
index 0000000000..73739e377d
--- /dev/null
+++ b/src/library/scala/concurrent/util/Unsafe.java
@@ -0,0 +1,38 @@
+/* __ *\
+** ________ ___ / / ___ Scala API **
+** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
+** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
+** /____/\___/_/ |_/____/_/ | | **
+** |/ **
+\* */
+
+package scala.concurrent.util;
+import java.lang.reflect.Field;
+
+// TODO: remove once akka no longer needs it, hopefully by 2.12.0-M3!
+@Deprecated
+public final class Unsafe {
+ @Deprecated
+ public final static sun.misc.Unsafe instance;
+ static {
+ try {
+ sun.misc.Unsafe found = null;
+ for(Field field : sun.misc.Unsafe.class.getDeclaredFields()) {
+ if (field.getType() == sun.misc.Unsafe.class) {
+ field.setAccessible(true);
+ found = (sun.misc.Unsafe) field.get(null);
+ break;
+ }
+ }
+ if (found == null) throw new IllegalStateException("Can't find instance of sun.misc.Unsafe");
+ else instance = found;
+ } catch(Throwable t) {
+ throw new ExceptionInInitializerError(t);
+ }
+ }
+}
+
+// Scala version:
+// classOf[sun.misc.Unsafe].getDeclaredFields.filter(_.getType == classOf[sun.misc.Unsafe]).headOption.map { field =>
+// field.setAccessible(true); field.get(null).asInstanceOf[sun.misc.Unsafe]
+// } getOrElse (throw new IllegalStateException("Can't find instance of sun.misc.Unsafe"))
diff --git a/src/library/scala/io/Source.scala b/src/library/scala/io/Source.scala
index e38c197196..c0ee5f6a75 100644
--- a/src/library/scala/io/Source.scala
+++ b/src/library/scala/io/Source.scala
@@ -167,6 +167,16 @@ object Source {
def fromInputStream(is: InputStream)(implicit codec: Codec): BufferedSource =
createBufferedSource(is, reset = () => fromInputStream(is)(codec), close = () => is.close())(codec)
+
+ /** Reads data from a classpath resource, using either a context classloader (default) or a passed one.
+ *
+ * @param resource name of the resource to load from the classpath
+ * @param classLoader classloader to be used, or context classloader if not specified
+ * @return the buffered source
+ */
+ def fromResource(resource: String, classLoader: ClassLoader = Thread.currentThread().getContextClassLoader())(implicit codec: Codec): BufferedSource =
+ fromInputStream(classLoader.getResourceAsStream(resource))
+
}
/** An iterable representation of source data.
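The `fromResource` method added above reads from the classpath rather than the filesystem, using the context classloader unless one is passed explicitly. A brief usage sketch; the resource name is only an illustration of the call shape:

    import scala.io.Source
    // Resolved via the context classloader by default; an explicit
    // classloader can be supplied as the second argument.
    val settings = Source.fromResource("app/settings.conf").getLines().toList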
diff --git a/src/library/scala/math/BigDecimal.scala b/src/library/scala/math/BigDecimal.scala
index 6bb35606a6..bb337e7a1d 100644
--- a/src/library/scala/math/BigDecimal.scala
+++ b/src/library/scala/math/BigDecimal.scala
@@ -124,7 +124,7 @@ object BigDecimal {
*/
def exact(s: String): BigDecimal = exact(new BigDec(s))
- /** Constructs a 'BigDecimal` that exactly represents the number
+ /** Constructs a `BigDecimal` that exactly represents the number
* specified in base 10 in a character array.
*/
def exact(cs: Array[Char]): BigDecimal = exact(new BigDec(cs))
diff --git a/src/library/scala/ref/WeakReference.scala b/src/library/scala/ref/WeakReference.scala
index 6ee40aed5c..9dcc0bbe5f 100644
--- a/src/library/scala/ref/WeakReference.scala
+++ b/src/library/scala/ref/WeakReference.scala
@@ -28,10 +28,7 @@ object WeakReference {
def apply[T <: AnyRef](value: T) = new WeakReference(value)
/** Optionally returns the referenced value, or `None` if that value no longer exists */
- def unapply[T <: AnyRef](wr: WeakReference[T]): Option[T] = {
- val x = wr.underlying.get
- if (x != null) Some(x) else None
- }
+ def unapply[T <: AnyRef](wr: WeakReference[T]): Option[T] = Option(wr.underlying.get)
}
/**
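Besides the simplification to `Option(...)`, the `unapply` above is what makes `WeakReference` usable as an extractor; a small sketch of the intended pattern match, where the referent may already have been cleared by the garbage collector:

    import scala.ref.WeakReference
    val ref = WeakReference(new AnyRef)
    ref match {
      case WeakReference(value) => println(s"still reachable: $value")
      case _                    => println("already collected")
    }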
diff --git a/src/library/scala/reflect/Manifest.scala b/src/library/scala/reflect/Manifest.scala
index 2f7643bccf..4ff49c44d0 100644
--- a/src/library/scala/reflect/Manifest.scala
+++ b/src/library/scala/reflect/Manifest.scala
@@ -248,7 +248,7 @@ object ManifestFactory {
def arrayType[T](arg: Manifest[_]): Manifest[Array[T]] =
arg.asInstanceOf[Manifest[T]].arrayManifest
- /** Manifest for the abstract type `prefix # name'. `upperBound` is not
+ /** Manifest for the abstract type `prefix # name`. `upperBound` is not
* strictly necessary as it could be obtained by reflection. It was
* added so that erasure can be calculated without reflection. */
def abstractType[T](prefix: Manifest[_], name: String, upperBound: Predef.Class[_], args: Manifest[_]*): Manifest[T] =
@@ -269,7 +269,7 @@ object ManifestFactory {
(if (upperBound eq Nothing) "" else " <: "+upperBound)
}
- /** Manifest for the intersection type `parents_0 with ... with parents_n'. */
+ /** Manifest for the intersection type `parents_0 with ... with parents_n`. */
def intersectionType[T](parents: Manifest[_]*): Manifest[T] =
new Manifest[T] {
def runtimeClass = parents.head.runtimeClass
diff --git a/src/library/scala/runtime/LambdaDeserializer.scala b/src/library/scala/runtime/LambdaDeserializer.scala
new file mode 100644
index 0000000000..ad7d12ba5d
--- /dev/null
+++ b/src/library/scala/runtime/LambdaDeserializer.scala
@@ -0,0 +1,132 @@
+package scala.runtime
+
+import java.lang.invoke._
+
+/**
+ * This class is only intended to be called by the synthetic `$deserializeLambda$` method that the Scala 2.12
+ * compiler will add to classes hosting lambdas.
+ *
+ * It is not intended to be consumed directly.
+ */
+object LambdaDeserializer {
+ /**
+ * Deserialize a lambda by calling `LambdaMetafactory.altMetafactory` to spin up a lambda class
+ * and instantiating this class with the captured arguments.
+ *
+ * A cache may be provided to ensure that subsequent deserialization of the same lambda expression
+ * is cheap; it amounts to a reflective call to the constructor of the previously created class.
+ * However, deserialization of the same lambda expression is not guaranteed to use the same class;
+ * concurrent deserialization of the same lambda expression may spin up more than one class.
+ *
+ * Assumptions:
+ * - No additional marker interfaces are required beyond `{java.io,scala.}Serializable`. These are
+ * not stored in `SerializedLambda`, so we can't reconstitute them.
+ * - No additional bridge methods are passed to `altMetafactory`. Again, these are not stored.
+ *
+ * @param lookup The factory for method handles. Must have access to the implementation method, the
+ * functional interface class, and `java.io.Serializable` or `scala.Serializable` as
+ * required.
+ * @param cache A cache used to avoid spinning up a class for each deserialization of a given lambda. May be `null`
+ * @param serialized The lambda to deserialize. Note that this is typically created by the `readResolve`
+ * member of the anonymous class created by `LambdaMetaFactory`.
+ * @return An instance of the functional interface
+ */
+ def deserializeLambda(lookup: MethodHandles.Lookup, cache: java.util.Map[String, MethodHandle], serialized: SerializedLambda): AnyRef = {
+ def slashDot(name: String) = name.replaceAll("/", ".")
+ val loader = lookup.lookupClass().getClassLoader
+ val implClass = loader.loadClass(slashDot(serialized.getImplClass))
+
+ def makeCallSite: CallSite = {
+ import serialized._
+ def parseDescriptor(s: String) =
+ MethodType.fromMethodDescriptorString(s, loader)
+
+ val funcInterfaceSignature = parseDescriptor(getFunctionalInterfaceMethodSignature)
+ val instantiated = parseDescriptor(getInstantiatedMethodType)
+ val functionalInterfaceClass = loader.loadClass(slashDot(getFunctionalInterfaceClass))
+
+ val implMethodSig = parseDescriptor(getImplMethodSignature)
+ // Construct the invoked type from the impl method type. This is the type of a factory
+ // that will be generated by the meta-factory. It is a method type, with param types
+ // coming from the types of the captures, and return type being the functional interface.
+ val invokedType: MethodType = {
+ // 1. Add receiver for non-static impl methods
+ val withReceiver = getImplMethodKind match {
+ case MethodHandleInfo.REF_invokeStatic | MethodHandleInfo.REF_newInvokeSpecial =>
+ implMethodSig
+ case _ =>
+ implMethodSig.insertParameterTypes(0, implClass)
+ }
+ // 2. Remove lambda parameters, leaving only captures. Note: the receiver may be a lambda parameter,
+ // such as in `Function<Object, String> s = Object::toString`
+ val lambdaArity = funcInterfaceSignature.parameterCount()
+ val from = withReceiver.parameterCount() - lambdaArity
+ val to = withReceiver.parameterCount()
+
+ // 3. Drop the lambda return type and replace with the functional interface.
+ withReceiver.dropParameterTypes(from, to).changeReturnType(functionalInterfaceClass)
+ }
+
+ // Lookup the implementation method
+ val implMethod: MethodHandle = try {
+ findMember(lookup, getImplMethodKind, implClass, getImplMethodName, implMethodSig)
+ } catch {
+ case e: ReflectiveOperationException => throw new IllegalArgumentException("Illegal lambda deserialization", e)
+ }
+
+ val flags: Int = LambdaMetafactory.FLAG_SERIALIZABLE | LambdaMetafactory.FLAG_MARKERS
+ val isScalaFunction = functionalInterfaceClass.getName.startsWith("scala.Function")
+ val markerInterface: Class[_] = loader.loadClass(if (isScalaFunction) ScalaSerializable else JavaIOSerializable)
+
+ LambdaMetafactory.altMetafactory(
+ lookup, getFunctionalInterfaceMethodName, invokedType,
+
+ /* samMethodType = */ funcInterfaceSignature,
+ /* implMethod = */ implMethod,
+ /* instantiatedMethodType = */ instantiated,
+ /* flags = */ flags.asInstanceOf[AnyRef],
+ /* markerInterfaceCount = */ 1.asInstanceOf[AnyRef],
+ /* markerInterfaces[0] = */ markerInterface,
+ /* bridgeCount = */ 0.asInstanceOf[AnyRef]
+ )
+ }
+
+ val key = serialized.getImplMethodName + " : " + serialized.getImplMethodSignature
+ val factory: MethodHandle = if (cache == null) {
+ makeCallSite.getTarget
+ } else cache.get(key) match {
+ case null =>
+ val callSite = makeCallSite
+ val temp = callSite.getTarget
+ cache.put(key, temp)
+ temp
+ case target => target
+ }
+
+ val captures = Array.tabulate(serialized.getCapturedArgCount)(n => serialized.getCapturedArg(n))
+ factory.invokeWithArguments(captures: _*)
+ }
+
+ private val ScalaSerializable = "scala.Serializable"
+
+ private val JavaIOSerializable = {
+ // We could actually omit this marker interface as LambdaMetaFactory will add it if
+ // the FLAG_SERIALIZABLE is set and none of the provided markers extends it. But the code
+ // is cleaner if we uniformly add a single marker, so I'm leaving it in place.
+ "java.io.Serializable"
+ }
+
+ private def findMember(lookup: MethodHandles.Lookup, kind: Int, owner: Class[_],
+ name: String, signature: MethodType): MethodHandle = {
+ kind match {
+ case MethodHandleInfo.REF_invokeStatic =>
+ lookup.findStatic(owner, name, signature)
+ case MethodHandleInfo.REF_newInvokeSpecial =>
+ lookup.findConstructor(owner, signature)
+ case MethodHandleInfo.REF_invokeVirtual | MethodHandleInfo.REF_invokeInterface =>
+ lookup.findVirtual(owner, name, signature)
+ case MethodHandleInfo.REF_invokeSpecial =>
+ lookup.findSpecial(owner, name, signature, owner)
+ }
+ }
+}
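For readers unfamiliar with the java.lang.invoke bootstrap protocol used above, the following is a minimal, self-contained Java sketch of the same altMetafactory call shape (not part of the patch; the class name, the "greet" impl method, and the use of java.util.function.Supplier are purely illustrative). It builds a serializable instance of a functional interface from a static implementation method with no captures, mirroring the samMethodType / implMethod / instantiatedMethodType / flags / markers / bridges argument layout used in the deserializer:

    import java.lang.invoke.CallSite;
    import java.lang.invoke.LambdaMetafactory;
    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.util.function.Supplier;

    public class AltMetafactoryDemo {
        // Plays the role of the lambda's implementation method (static, no captures).
        public static String greet() { return "hello"; }

        public static void main(String[] args) throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            MethodHandle impl = lookup.findStatic(AltMetafactoryDemo.class, "greet",
                                                  MethodType.methodType(String.class));
            // No captured values, so the factory type simply returns the functional interface.
            MethodType invokedType = MethodType.methodType(Supplier.class);
            CallSite factory = LambdaMetafactory.altMetafactory(
                lookup, "get", invokedType,
                /* samMethodType          */ MethodType.methodType(Object.class),
                /* implMethod             */ impl,
                /* instantiatedMethodType */ MethodType.methodType(String.class),
                /* flags                  */ LambdaMetafactory.FLAG_SERIALIZABLE | LambdaMetafactory.FLAG_MARKERS,
                /* markerInterfaceCount   */ 1,
                /* markerInterfaces[0]    */ java.io.Serializable.class,
                /* bridgeCount            */ 0);
            Supplier<String> s = (Supplier<String>) factory.getTarget().invoke();
            System.out.println(s.get()); // prints "hello"
        }
    }

In the deserializer above the same call is made per cached key, except that the invoked type also carries the captured-argument types and the marker interface is chosen between scala.Serializable and java.io.Serializable.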
diff --git a/src/library/scala/runtime/java8/JFunction.java b/src/library/scala/runtime/java8/JFunction.java
new file mode 100644
index 0000000000..326aad3fec
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction.java
@@ -0,0 +1,146 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+public final class JFunction {
+ private JFunction() {}
+ public static <R> scala.Function0<R> func(JFunction0<R> f) { return f; }
+ public static scala.Function0<BoxedUnit> proc(JProcedure0 p) { return p; }
+ public static scala.Function0<BoxedUnit> procSpecialized(JFunction0$mcV$sp f) { return f; }
+ public static scala.Function0<Byte> funcSpecialized(JFunction0$mcB$sp f) { return f; }
+ public static scala.Function0<Short> funcSpecialized(JFunction0$mcS$sp f) { return f; }
+ public static scala.Function0<Integer> funcSpecialized(JFunction0$mcI$sp f) { return f; }
+ public static scala.Function0<Long> funcSpecialized(JFunction0$mcJ$sp f) { return f; }
+ public static scala.Function0<Character> funcSpecialized(JFunction0$mcC$sp f) { return f; }
+ public static scala.Function0<Float> funcSpecialized(JFunction0$mcF$sp f) { return f; }
+ public static scala.Function0<Double> funcSpecialized(JFunction0$mcD$sp f) { return f; }
+ public static scala.Function0<Boolean> funcSpecialized(JFunction0$mcZ$sp f) { return f; }
+ public static <T1, R> scala.Function1<T1, R> func(JFunction1<T1, R> f) { return f; }
+ public static <T1> scala.Function1<T1, BoxedUnit> proc(JProcedure1<T1> p) { return p; }
+ public static scala.Function1<Integer, BoxedUnit> procSpecialized(JFunction1$mcVI$sp f) { return f; }
+ public static scala.Function1<Integer, Boolean> funcSpecialized(JFunction1$mcZI$sp f) { return f; }
+ public static scala.Function1<Integer, Integer> funcSpecialized(JFunction1$mcII$sp f) { return f; }
+ public static scala.Function1<Integer, Float> funcSpecialized(JFunction1$mcFI$sp f) { return f; }
+ public static scala.Function1<Integer, Long> funcSpecialized(JFunction1$mcJI$sp f) { return f; }
+ public static scala.Function1<Integer, Double> funcSpecialized(JFunction1$mcDI$sp f) { return f; }
+ public static scala.Function1<Long, BoxedUnit> procSpecialized(JFunction1$mcVJ$sp f) { return f; }
+ public static scala.Function1<Long, Boolean> funcSpecialized(JFunction1$mcZJ$sp f) { return f; }
+ public static scala.Function1<Long, Integer> funcSpecialized(JFunction1$mcIJ$sp f) { return f; }
+ public static scala.Function1<Long, Float> funcSpecialized(JFunction1$mcFJ$sp f) { return f; }
+ public static scala.Function1<Long, Long> funcSpecialized(JFunction1$mcJJ$sp f) { return f; }
+ public static scala.Function1<Long, Double> funcSpecialized(JFunction1$mcDJ$sp f) { return f; }
+ public static scala.Function1<Float, BoxedUnit> procSpecialized(JFunction1$mcVF$sp f) { return f; }
+ public static scala.Function1<Float, Boolean> funcSpecialized(JFunction1$mcZF$sp f) { return f; }
+ public static scala.Function1<Float, Integer> funcSpecialized(JFunction1$mcIF$sp f) { return f; }
+ public static scala.Function1<Float, Float> funcSpecialized(JFunction1$mcFF$sp f) { return f; }
+ public static scala.Function1<Float, Long> funcSpecialized(JFunction1$mcJF$sp f) { return f; }
+ public static scala.Function1<Float, Double> funcSpecialized(JFunction1$mcDF$sp f) { return f; }
+ public static scala.Function1<Double, BoxedUnit> procSpecialized(JFunction1$mcVD$sp f) { return f; }
+ public static scala.Function1<Double, Boolean> funcSpecialized(JFunction1$mcZD$sp f) { return f; }
+ public static scala.Function1<Double, Integer> funcSpecialized(JFunction1$mcID$sp f) { return f; }
+ public static scala.Function1<Double, Float> funcSpecialized(JFunction1$mcFD$sp f) { return f; }
+ public static scala.Function1<Double, Long> funcSpecialized(JFunction1$mcJD$sp f) { return f; }
+ public static scala.Function1<Double, Double> funcSpecialized(JFunction1$mcDD$sp f) { return f; }
+ public static <T1, T2, R> scala.Function2<T1, T2, R> func(JFunction2<T1, T2, R> f) { return f; }
+ public static <T1, T2> scala.Function2<T1, T2, BoxedUnit> proc(JProcedure2<T1, T2> p) { return p; }
+ public static scala.Function2<Integer, Integer, BoxedUnit> procSpecialized(JFunction2$mcVII$sp f) { return f; }
+ public static scala.Function2<Integer, Integer, Boolean> funcSpecialized(JFunction2$mcZII$sp f) { return f; }
+ public static scala.Function2<Integer, Integer, Integer> funcSpecialized(JFunction2$mcIII$sp f) { return f; }
+ public static scala.Function2<Integer, Integer, Float> funcSpecialized(JFunction2$mcFII$sp f) { return f; }
+ public static scala.Function2<Integer, Integer, Long> funcSpecialized(JFunction2$mcJII$sp f) { return f; }
+ public static scala.Function2<Integer, Integer, Double> funcSpecialized(JFunction2$mcDII$sp f) { return f; }
+ public static scala.Function2<Integer, Long, BoxedUnit> procSpecialized(JFunction2$mcVIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Long, Boolean> funcSpecialized(JFunction2$mcZIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Long, Integer> funcSpecialized(JFunction2$mcIIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Long, Float> funcSpecialized(JFunction2$mcFIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Long, Long> funcSpecialized(JFunction2$mcJIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Long, Double> funcSpecialized(JFunction2$mcDIJ$sp f) { return f; }
+ public static scala.Function2<Integer, Double, BoxedUnit> procSpecialized(JFunction2$mcVID$sp f) { return f; }
+ public static scala.Function2<Integer, Double, Boolean> funcSpecialized(JFunction2$mcZID$sp f) { return f; }
+ public static scala.Function2<Integer, Double, Integer> funcSpecialized(JFunction2$mcIID$sp f) { return f; }
+ public static scala.Function2<Integer, Double, Float> funcSpecialized(JFunction2$mcFID$sp f) { return f; }
+ public static scala.Function2<Integer, Double, Long> funcSpecialized(JFunction2$mcJID$sp f) { return f; }
+ public static scala.Function2<Integer, Double, Double> funcSpecialized(JFunction2$mcDID$sp f) { return f; }
+ public static scala.Function2<Long, Integer, BoxedUnit> procSpecialized(JFunction2$mcVJI$sp f) { return f; }
+ public static scala.Function2<Long, Integer, Boolean> funcSpecialized(JFunction2$mcZJI$sp f) { return f; }
+ public static scala.Function2<Long, Integer, Integer> funcSpecialized(JFunction2$mcIJI$sp f) { return f; }
+ public static scala.Function2<Long, Integer, Float> funcSpecialized(JFunction2$mcFJI$sp f) { return f; }
+ public static scala.Function2<Long, Integer, Long> funcSpecialized(JFunction2$mcJJI$sp f) { return f; }
+ public static scala.Function2<Long, Integer, Double> funcSpecialized(JFunction2$mcDJI$sp f) { return f; }
+ public static scala.Function2<Long, Long, BoxedUnit> procSpecialized(JFunction2$mcVJJ$sp f) { return f; }
+ public static scala.Function2<Long, Long, Boolean> funcSpecialized(JFunction2$mcZJJ$sp f) { return f; }
+ public static scala.Function2<Long, Long, Integer> funcSpecialized(JFunction2$mcIJJ$sp f) { return f; }
+ public static scala.Function2<Long, Long, Float> funcSpecialized(JFunction2$mcFJJ$sp f) { return f; }
+ public static scala.Function2<Long, Long, Long> funcSpecialized(JFunction2$mcJJJ$sp f) { return f; }
+ public static scala.Function2<Long, Long, Double> funcSpecialized(JFunction2$mcDJJ$sp f) { return f; }
+ public static scala.Function2<Long, Double, BoxedUnit> procSpecialized(JFunction2$mcVJD$sp f) { return f; }
+ public static scala.Function2<Long, Double, Boolean> funcSpecialized(JFunction2$mcZJD$sp f) { return f; }
+ public static scala.Function2<Long, Double, Integer> funcSpecialized(JFunction2$mcIJD$sp f) { return f; }
+ public static scala.Function2<Long, Double, Float> funcSpecialized(JFunction2$mcFJD$sp f) { return f; }
+ public static scala.Function2<Long, Double, Long> funcSpecialized(JFunction2$mcJJD$sp f) { return f; }
+ public static scala.Function2<Long, Double, Double> funcSpecialized(JFunction2$mcDJD$sp f) { return f; }
+ public static scala.Function2<Double, Integer, BoxedUnit> procSpecialized(JFunction2$mcVDI$sp f) { return f; }
+ public static scala.Function2<Double, Integer, Boolean> funcSpecialized(JFunction2$mcZDI$sp f) { return f; }
+ public static scala.Function2<Double, Integer, Integer> funcSpecialized(JFunction2$mcIDI$sp f) { return f; }
+ public static scala.Function2<Double, Integer, Float> funcSpecialized(JFunction2$mcFDI$sp f) { return f; }
+ public static scala.Function2<Double, Integer, Long> funcSpecialized(JFunction2$mcJDI$sp f) { return f; }
+ public static scala.Function2<Double, Integer, Double> funcSpecialized(JFunction2$mcDDI$sp f) { return f; }
+ public static scala.Function2<Double, Long, BoxedUnit> procSpecialized(JFunction2$mcVDJ$sp f) { return f; }
+ public static scala.Function2<Double, Long, Boolean> funcSpecialized(JFunction2$mcZDJ$sp f) { return f; }
+ public static scala.Function2<Double, Long, Integer> funcSpecialized(JFunction2$mcIDJ$sp f) { return f; }
+ public static scala.Function2<Double, Long, Float> funcSpecialized(JFunction2$mcFDJ$sp f) { return f; }
+ public static scala.Function2<Double, Long, Long> funcSpecialized(JFunction2$mcJDJ$sp f) { return f; }
+ public static scala.Function2<Double, Long, Double> funcSpecialized(JFunction2$mcDDJ$sp f) { return f; }
+ public static scala.Function2<Double, Double, BoxedUnit> procSpecialized(JFunction2$mcVDD$sp f) { return f; }
+ public static scala.Function2<Double, Double, Boolean> funcSpecialized(JFunction2$mcZDD$sp f) { return f; }
+ public static scala.Function2<Double, Double, Integer> funcSpecialized(JFunction2$mcIDD$sp f) { return f; }
+ public static scala.Function2<Double, Double, Float> funcSpecialized(JFunction2$mcFDD$sp f) { return f; }
+ public static scala.Function2<Double, Double, Long> funcSpecialized(JFunction2$mcJDD$sp f) { return f; }
+ public static scala.Function2<Double, Double, Double> funcSpecialized(JFunction2$mcDDD$sp f) { return f; }
+ public static <T1, T2, T3, R> scala.Function3<T1, T2, T3, R> func(JFunction3<T1, T2, T3, R> f) { return f; }
+ public static <T1, T2, T3> scala.Function3<T1, T2, T3, BoxedUnit> proc(JProcedure3<T1, T2, T3> p) { return p; }
+ public static <T1, T2, T3, T4, R> scala.Function4<T1, T2, T3, T4, R> func(JFunction4<T1, T2, T3, T4, R> f) { return f; }
+ public static <T1, T2, T3, T4> scala.Function4<T1, T2, T3, T4, BoxedUnit> proc(JProcedure4<T1, T2, T3, T4> p) { return p; }
+ public static <T1, T2, T3, T4, T5, R> scala.Function5<T1, T2, T3, T4, T5, R> func(JFunction5<T1, T2, T3, T4, T5, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5> scala.Function5<T1, T2, T3, T4, T5, BoxedUnit> proc(JProcedure5<T1, T2, T3, T4, T5> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, R> scala.Function6<T1, T2, T3, T4, T5, T6, R> func(JFunction6<T1, T2, T3, T4, T5, T6, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6> scala.Function6<T1, T2, T3, T4, T5, T6, BoxedUnit> proc(JProcedure6<T1, T2, T3, T4, T5, T6> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, R> scala.Function7<T1, T2, T3, T4, T5, T6, T7, R> func(JFunction7<T1, T2, T3, T4, T5, T6, T7, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7> scala.Function7<T1, T2, T3, T4, T5, T6, T7, BoxedUnit> proc(JProcedure7<T1, T2, T3, T4, T5, T6, T7> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, R> scala.Function8<T1, T2, T3, T4, T5, T6, T7, T8, R> func(JFunction8<T1, T2, T3, T4, T5, T6, T7, T8, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8> scala.Function8<T1, T2, T3, T4, T5, T6, T7, T8, BoxedUnit> proc(JProcedure8<T1, T2, T3, T4, T5, T6, T7, T8> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, R> scala.Function9<T1, T2, T3, T4, T5, T6, T7, T8, T9, R> func(JFunction9<T1, T2, T3, T4, T5, T6, T7, T8, T9, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9> scala.Function9<T1, T2, T3, T4, T5, T6, T7, T8, T9, BoxedUnit> proc(JProcedure9<T1, T2, T3, T4, T5, T6, T7, T8, T9> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> scala.Function10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> func(JFunction10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> scala.Function10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, BoxedUnit> proc(JProcedure10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> scala.Function11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> func(JFunction11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> scala.Function11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, BoxedUnit> proc(JProcedure11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> scala.Function12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> func(JFunction12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> scala.Function12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, BoxedUnit> proc(JProcedure12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> scala.Function13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> func(JFunction13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> scala.Function13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, BoxedUnit> proc(JProcedure13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> scala.Function14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> func(JFunction14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> scala.Function14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, BoxedUnit> proc(JProcedure14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> scala.Function15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> func(JFunction15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> scala.Function15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, BoxedUnit> proc(JProcedure15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> scala.Function16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> func(JFunction16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> scala.Function16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, BoxedUnit> proc(JProcedure16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> scala.Function17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> func(JFunction17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> scala.Function17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, BoxedUnit> proc(JProcedure17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> scala.Function18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> func(JFunction18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> scala.Function18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, BoxedUnit> proc(JProcedure18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> scala.Function19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> func(JFunction19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> scala.Function19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, BoxedUnit> proc(JProcedure19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> scala.Function20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> func(JFunction20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> scala.Function20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, BoxedUnit> proc(JProcedure20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> scala.Function21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> func(JFunction21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> scala.Function21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, BoxedUnit> proc(JProcedure21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> p) { return p; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> scala.Function22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> func(JFunction22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> f) { return f; }
+ public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> scala.Function22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, BoxedUnit> proc(JProcedure22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> p) { return p; }
+}
+
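The factory methods in JFunction exist so that Java 8 code can hand a lambda to Scala APIs expecting scala.FunctionN. A small usage sketch (illustrative class name; it assumes the JProcedure0/JProcedure1 companions added elsewhere in this patch expose a void-returning SAM, as in scala-java8-compat):

    import scala.Function0;
    import scala.Function1;
    import scala.runtime.BoxedUnit;
    import scala.runtime.java8.JFunction;

    public class JFunctionDemo {
        public static void main(String[] args) {
            // A Java lambda adapted to scala.Function1 via the generic factory above.
            Function1<String, Integer> length = JFunction.func((String s) -> s.length());
            // A side-effecting Java lambda adapted to scala.Function0<BoxedUnit>.
            Function0<BoxedUnit> hello = JFunction.proc(() -> System.out.println("hello"));

            System.out.println(length.apply("scala")); // 5
            hello.apply();                             // prints "hello", returns BoxedUnit.UNIT
        }
    }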
diff --git a/src/library/scala/runtime/java8/JFunction0$mcB$sp.java b/src/library/scala/runtime/java8/JFunction0$mcB$sp.java
new file mode 100644
index 0000000000..c882757630
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcB$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcB$sp extends JFunction0 {
+ byte apply$mcB$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToByte(apply$mcB$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcC$sp.java b/src/library/scala/runtime/java8/JFunction0$mcC$sp.java
new file mode 100644
index 0000000000..c804529f71
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcC$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcC$sp extends JFunction0 {
+ char apply$mcC$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToCharacter(apply$mcC$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcD$sp.java b/src/library/scala/runtime/java8/JFunction0$mcD$sp.java
new file mode 100644
index 0000000000..dacf50237c
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcD$sp extends JFunction0 {
+ double apply$mcD$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcD$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcF$sp.java b/src/library/scala/runtime/java8/JFunction0$mcF$sp.java
new file mode 100644
index 0000000000..2a9f824924
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcF$sp extends JFunction0 {
+ float apply$mcF$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcF$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcI$sp.java b/src/library/scala/runtime/java8/JFunction0$mcI$sp.java
new file mode 100644
index 0000000000..75c612f916
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcI$sp extends JFunction0 {
+ int apply$mcI$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcI$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcJ$sp.java b/src/library/scala/runtime/java8/JFunction0$mcJ$sp.java
new file mode 100644
index 0000000000..d08984c794
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcJ$sp extends JFunction0 {
+ long apply$mcJ$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJ$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcS$sp.java b/src/library/scala/runtime/java8/JFunction0$mcS$sp.java
new file mode 100644
index 0000000000..d9e36a39f0
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcS$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcS$sp extends JFunction0 {
+ short apply$mcS$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToShort(apply$mcS$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcV$sp.java b/src/library/scala/runtime/java8/JFunction0$mcV$sp.java
new file mode 100644
index 0000000000..abd5e6ebbe
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcV$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcV$sp extends JFunction0 {
+ void apply$mcV$sp();
+
+ default Object apply() { apply$mcV$sp(); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0$mcZ$sp.java b/src/library/scala/runtime/java8/JFunction0$mcZ$sp.java
new file mode 100644
index 0000000000..e1cd62a913
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0$mcZ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0$mcZ$sp extends JFunction0 {
+ boolean apply$mcZ$sp();
+
+ default Object apply() { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZ$sp()); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction0.java b/src/library/scala/runtime/java8/JFunction0.java
new file mode 100644
index 0000000000..bdeb7d5f8e
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction0.java
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction0<R> extends scala.Function0<R> {
+ default void $init$() {
+ };
+ default void apply$mcV$sp() {
+ apply();
+ }
+ default byte apply$mcB$sp() {
+ return scala.runtime.BoxesRunTime.unboxToByte(apply());
+ }
+ default short apply$mcS$sp() {
+ return scala.runtime.BoxesRunTime.unboxToShort(apply());
+ }
+ default int apply$mcI$sp() {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply());
+ }
+ default long apply$mcJ$sp() {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply());
+ }
+ default char apply$mcC$sp() {
+ return scala.runtime.BoxesRunTime.unboxToChar(apply());
+ }
+ default float apply$mcF$sp() {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply());
+ }
+ default double apply$mcD$sp() {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply());
+ }
+ default boolean apply$mcZ$sp() {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply());
+ }
+}
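The specialized JFunction0 variants above let a lambda implement the unboxed apply$mc*$sp method directly, with the default apply() boxing only when the generic path is taken. A hedged sketch of what that buys a Java caller (illustrative class name; JFunction0$mcI$sp is the actual interface added above):

    import scala.runtime.java8.JFunction0$mcI$sp;

    public class SpecializedDemo {
        public static void main(String[] args) {
            // The lambda implements the unboxed SAM directly.
            JFunction0$mcI$sp answer = () -> 42;
            int unboxed = answer.apply$mcI$sp(); // specialized path, no boxing
            Object boxed = answer.apply();       // default method above boxes via BoxesRunTime
            System.out.println(unboxed + " / " + boxed);
        }
    }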
diff --git a/src/library/scala/runtime/java8/JFunction1$mcDD$sp.java b/src/library/scala/runtime/java8/JFunction1$mcDD$sp.java
new file mode 100644
index 0000000000..4fbb370b8b
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcDD$sp extends JFunction1 {
+ double apply$mcDD$sp(double v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcDF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcDF$sp.java
new file mode 100644
index 0000000000..ce45666dd1
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcDF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcDF$sp extends JFunction1 {
+ double apply$mcDF$sp(float v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcDI$sp.java b/src/library/scala/runtime/java8/JFunction1$mcDI$sp.java
new file mode 100644
index 0000000000..09cac947c9
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcDI$sp extends JFunction1 {
+ double apply$mcDI$sp(int v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDI$sp(scala.runtime.BoxesRunTime.unboxToInt(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcDJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcDJ$sp.java
new file mode 100644
index 0000000000..f5154c3854
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcDJ$sp extends JFunction1 {
+ double apply$mcDJ$sp(long v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcFD$sp.java b/src/library/scala/runtime/java8/JFunction1$mcFD$sp.java
new file mode 100644
index 0000000000..758b432d99
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcFD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcFD$sp extends JFunction1 {
+ float apply$mcFD$sp(double v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFD$sp(scala.runtime.BoxesRunTime.unboxToDouble(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcFF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcFF$sp.java
new file mode 100644
index 0000000000..7e13e287a5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcFF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcFF$sp extends JFunction1 {
+ float apply$mcFF$sp(float v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcFI$sp.java b/src/library/scala/runtime/java8/JFunction1$mcFI$sp.java
new file mode 100644
index 0000000000..e3c4a203c7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcFI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcFI$sp extends JFunction1 {
+ float apply$mcFI$sp(int v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFI$sp(scala.runtime.BoxesRunTime.unboxToInt(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcFJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcFJ$sp.java
new file mode 100644
index 0000000000..d989fa1ea8
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcFJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcFJ$sp extends JFunction1 {
+ float apply$mcFJ$sp(long v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcID$sp.java b/src/library/scala/runtime/java8/JFunction1$mcID$sp.java
new file mode 100644
index 0000000000..bde5d88d46
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcID$sp extends JFunction1 {
+ int apply$mcID$sp(double v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcID$sp(scala.runtime.BoxesRunTime.unboxToDouble(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcIF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcIF$sp.java
new file mode 100644
index 0000000000..d1d235aef1
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcIF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcIF$sp extends JFunction1 {
+ int apply$mcIF$sp(float v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcII$sp.java b/src/library/scala/runtime/java8/JFunction1$mcII$sp.java
new file mode 100644
index 0000000000..ef44b3830c
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcII$sp extends JFunction1 {
+ int apply$mcII$sp(int v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcII$sp(scala.runtime.BoxesRunTime.unboxToInt(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcIJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcIJ$sp.java
new file mode 100644
index 0000000000..373d13cd46
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcIJ$sp extends JFunction1 {
+ int apply$mcIJ$sp(long v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcJD$sp.java b/src/library/scala/runtime/java8/JFunction1$mcJD$sp.java
new file mode 100644
index 0000000000..86fd7b7779
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcJD$sp extends JFunction1 {
+ long apply$mcJD$sp(double v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJD$sp(scala.runtime.BoxesRunTime.unboxToDouble(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcJF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcJF$sp.java
new file mode 100644
index 0000000000..3bcf264034
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcJF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcJF$sp extends JFunction1 {
+ long apply$mcJF$sp(float v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcJI$sp.java b/src/library/scala/runtime/java8/JFunction1$mcJI$sp.java
new file mode 100644
index 0000000000..11bc15ef6e
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcJI$sp extends JFunction1 {
+ long apply$mcJI$sp(int v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJI$sp(scala.runtime.BoxesRunTime.unboxToInt(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcJJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcJJ$sp.java
new file mode 100644
index 0000000000..2e1ad7878f
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcJJ$sp extends JFunction1 {
+ long apply$mcJJ$sp(long v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcVD$sp.java b/src/library/scala/runtime/java8/JFunction1$mcVD$sp.java
new file mode 100644
index 0000000000..c8077e1268
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcVD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcVD$sp extends JFunction1 {
+ void apply$mcVD$sp(double v1);
+
+ default Object apply(Object t) { apply$mcVD$sp(scala.runtime.BoxesRunTime.unboxToDouble(t)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcVF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcVF$sp.java
new file mode 100644
index 0000000000..e7be77f8e3
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcVF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcVF$sp extends JFunction1 {
+ void apply$mcVF$sp(float v1);
+
+ default Object apply(Object t) { apply$mcVF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcVI$sp.java b/src/library/scala/runtime/java8/JFunction1$mcVI$sp.java
new file mode 100644
index 0000000000..7597ca5294
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcVI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcVI$sp extends JFunction1 {
+ void apply$mcVI$sp(int v1);
+
+ default Object apply(Object t) { apply$mcVI$sp(scala.runtime.BoxesRunTime.unboxToInt(t)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcVJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcVJ$sp.java
new file mode 100644
index 0000000000..55c6c3997f
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcVJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcVJ$sp extends JFunction1 {
+ void apply$mcVJ$sp(long v1);
+
+ default Object apply(Object t) { apply$mcVJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcZD$sp.java b/src/library/scala/runtime/java8/JFunction1$mcZD$sp.java
new file mode 100644
index 0000000000..883a0e84fa
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcZD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcZD$sp extends JFunction1 {
+ boolean apply$mcZD$sp(double v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZD$sp(scala.runtime.BoxesRunTime.unboxToDouble(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcZF$sp.java b/src/library/scala/runtime/java8/JFunction1$mcZF$sp.java
new file mode 100644
index 0000000000..884832ca37
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcZF$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcZF$sp extends JFunction1 {
+ boolean apply$mcZF$sp(float v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZF$sp(scala.runtime.BoxesRunTime.unboxToFloat(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcZI$sp.java b/src/library/scala/runtime/java8/JFunction1$mcZI$sp.java
new file mode 100644
index 0000000000..8a51aa99a2
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcZI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcZI$sp extends JFunction1 {
+ boolean apply$mcZI$sp(int v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZI$sp(scala.runtime.BoxesRunTime.unboxToInt(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1$mcZJ$sp.java b/src/library/scala/runtime/java8/JFunction1$mcZJ$sp.java
new file mode 100644
index 0000000000..dc619666dc
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1$mcZJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1$mcZJ$sp extends JFunction1 {
+ boolean apply$mcZJ$sp(long v1);
+
+ default Object apply(Object t) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZJ$sp(scala.runtime.BoxesRunTime.unboxToLong(t))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction1.java b/src/library/scala/runtime/java8/JFunction1.java
new file mode 100644
index 0000000000..7c3974e94a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction1.java
@@ -0,0 +1,240 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction1<T1, R> extends scala.Function1<T1, R> {
+ default void $init$() {
+ };
+
+ @Override
+ default <A> scala.Function1<T1, A> andThen(scala.Function1<R, A> g) {
+ return scala.Function1$class.andThen(this, g);
+ }
+
+ @Override
+ default <A> scala.Function1<A, R> compose(scala.Function1<A, T1> g) {
+ return scala.Function1$class.compose(this, g);
+ }
+ default void apply$mcVI$sp(int v1) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1));
+ }
+ default boolean apply$mcZI$sp(int v1) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1)));
+ }
+ default int apply$mcII$sp(int v1) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1)));
+ }
+ default float apply$mcFI$sp(int v1) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1)));
+ }
+ default long apply$mcJI$sp(int v1) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1)));
+ }
+ default double apply$mcDI$sp(int v1) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1)));
+ }
+ default void apply$mcVJ$sp(long v1) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1));
+ }
+ default boolean apply$mcZJ$sp(long v1) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1)));
+ }
+ default int apply$mcIJ$sp(long v1) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1)));
+ }
+ default float apply$mcFJ$sp(long v1) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1)));
+ }
+ default long apply$mcJJ$sp(long v1) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1)));
+ }
+ default double apply$mcDJ$sp(long v1) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1)));
+ }
+ default void apply$mcVF$sp(float v1) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1));
+ }
+ default boolean apply$mcZF$sp(float v1) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1)));
+ }
+ default int apply$mcIF$sp(float v1) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1)));
+ }
+ default float apply$mcFF$sp(float v1) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1)));
+ }
+ default long apply$mcJF$sp(float v1) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1)));
+ }
+ default double apply$mcDF$sp(float v1) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToFloat(v1)));
+ }
+ default void apply$mcVD$sp(double v1) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1));
+ }
+ default boolean apply$mcZD$sp(double v1) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1)));
+ }
+ default int apply$mcID$sp(double v1) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1)));
+ }
+ default float apply$mcFD$sp(double v1) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1)));
+ }
+ default long apply$mcJD$sp(double v1) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1)));
+ }
+ default double apply$mcDD$sp(double v1) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1)));
+ }
+
+ default scala.Function1 compose$mcVI$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcZI$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcII$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcFI$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcJI$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcDI$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcVJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcZJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcIJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcFJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcJJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcDJ$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcVF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcZF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcIF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcFF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcJF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcDF$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcVD$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcZD$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcID$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcFD$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcJD$sp(scala.Function1 g) {
+ return compose(g);
+ }
+ default scala.Function1 compose$mcDD$sp(scala.Function1 g) {
+ return compose(g);
+ }
+
+ default scala.Function1 andThen$mcVI$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcZI$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcII$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcFI$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcJI$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcDI$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcVJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcZJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcIJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcFJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcJJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcDJ$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcVF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcZF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcIF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcFF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcJF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcDF$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcVD$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcZD$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcID$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcFD$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcJD$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+ default scala.Function1 andThen$mcDD$sp(scala.Function1 g) {
+ return andThen(g);
+ }
+}
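Because JFunction1 supplies default implementations of andThen and compose (delegating to scala.Function1$class), a Java lambda typed as JFunction1 composes like any other scala.Function1. An illustrative sketch (class name is made up for the example):

    import scala.Function1;
    import scala.runtime.java8.JFunction1;

    public class ComposeDemo {
        public static void main(String[] args) {
            JFunction1<Integer, Integer> twice = x -> x * 2;
            JFunction1<Integer, String>  show  = x -> "result: " + x;
            // andThen comes from the default method above, delegating to Function1$class.
            Function1<Integer, String> pipeline = twice.andThen(show);
            System.out.println(pipeline.apply(21)); // result: 42
        }
    }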
diff --git a/src/library/scala/runtime/java8/JFunction10.java b/src/library/scala/runtime/java8/JFunction10.java
new file mode 100644
index 0000000000..f9af616641
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction10.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> extends scala.Function10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, R>>>>>>>>>> curried() {
+ return scala.Function10$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>, R> tupled() {
+ return scala.Function10$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction11.java b/src/library/scala/runtime/java8/JFunction11.java
new file mode 100644
index 0000000000..ba1235332b
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction11.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> extends scala.Function11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, R>>>>>>>>>>> curried() {
+ return scala.Function11$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>, R> tupled() {
+ return scala.Function11$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction12.java b/src/library/scala/runtime/java8/JFunction12.java
new file mode 100644
index 0000000000..141388e768
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction12.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> extends scala.Function12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, R>>>>>>>>>>>> curried() {
+ return scala.Function12$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>, R> tupled() {
+ return scala.Function12$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction13.java b/src/library/scala/runtime/java8/JFunction13.java
new file mode 100644
index 0000000000..8d0be96a74
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction13.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> extends scala.Function13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, R>>>>>>>>>>>>> curried() {
+ return scala.Function13$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>, R> tupled() {
+ return scala.Function13$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction14.java b/src/library/scala/runtime/java8/JFunction14.java
new file mode 100644
index 0000000000..58ab028716
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction14.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> extends scala.Function14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, R>>>>>>>>>>>>>> curried() {
+ return scala.Function14$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>, R> tupled() {
+ return scala.Function14$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction15.java b/src/library/scala/runtime/java8/JFunction15.java
new file mode 100644
index 0000000000..89a4a6cf61
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction15.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> extends scala.Function15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, R>>>>>>>>>>>>>>> curried() {
+ return scala.Function15$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>, R> tupled() {
+ return scala.Function15$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction16.java b/src/library/scala/runtime/java8/JFunction16.java
new file mode 100644
index 0000000000..e3287b42ac
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction16.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> extends scala.Function16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, R>>>>>>>>>>>>>>>> curried() {
+ return scala.Function16$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>, R> tupled() {
+ return scala.Function16$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction17.java b/src/library/scala/runtime/java8/JFunction17.java
new file mode 100644
index 0000000000..508614e8b4
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction17.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> extends scala.Function17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, R>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function17$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>, R> tupled() {
+ return scala.Function17$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction18.java b/src/library/scala/runtime/java8/JFunction18.java
new file mode 100644
index 0000000000..8aa9c5e2c3
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction18.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> extends scala.Function18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, scala.Function1<T18, R>>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function18$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>, R> tupled() {
+ return scala.Function18$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction19.java b/src/library/scala/runtime/java8/JFunction19.java
new file mode 100644
index 0000000000..89d739366e
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction19.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> extends scala.Function19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, scala.Function1<T18, scala.Function1<T19, R>>>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function19$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>, R> tupled() {
+ return scala.Function19$class.tupled(this);
+ }
+
+
+}
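
Note on the files above: JFunction13 through JFunction19 (like the other arities added in this commit) all follow the same template — a @FunctionalInterface that extends the matching scala.FunctionN and supplies default curried() and tupled() methods delegating to the FunctionN$class helpers, leaving apply as the single abstract method. That is what lets Java 8 code implement a Scala function type with a plain lambda. A minimal usage sketch, not part of the commit itself, assuming the Scala 2.11 library is on the classpath and using the lower-arity JFunction2 added further down in this diff (the class name Demo is ours):

    import scala.runtime.java8.JFunction2;

    public class Demo {
        public static void main(String[] args) {
            // The lambda implements scala.Function2's abstract apply method.
            JFunction2<Integer, Integer, Integer> add = (a, b) -> a + b;
            System.out.println(add.apply(1, 2));                   // 3
            // curried() and tupled() come from the interface's default methods,
            // which delegate to scala.Function2$class.
            System.out.println(add.curried().apply(1).apply(2));   // 3
        }
    }
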
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDDD$sp.java
new file mode 100644
index 0000000000..1c11fb5252
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDDD$sp extends JFunction2 {
+ double apply$mcDDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDDI$sp.java
new file mode 100644
index 0000000000..e080bc87fa
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDDI$sp extends JFunction2 {
+ double apply$mcDDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDDJ$sp.java
new file mode 100644
index 0000000000..f96b19dff7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDDJ$sp extends JFunction2 {
+ double apply$mcDDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDID$sp.java
new file mode 100644
index 0000000000..944f469a6d
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDID$sp extends JFunction2 {
+ double apply$mcDID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDII$sp.java
new file mode 100644
index 0000000000..a04f616b5a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDII$sp extends JFunction2 {
+ double apply$mcDII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDIJ$sp.java
new file mode 100644
index 0000000000..3a7d33d4a5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDIJ$sp extends JFunction2 {
+ double apply$mcDIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDJD$sp.java
new file mode 100644
index 0000000000..86b48486e6
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDJD$sp extends JFunction2 {
+ double apply$mcDJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDJI$sp.java
new file mode 100644
index 0000000000..b9375c7870
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDJI$sp extends JFunction2 {
+ double apply$mcDJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcDJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcDJJ$sp.java
new file mode 100644
index 0000000000..4adbd17e14
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcDJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcDJJ$sp extends JFunction2 {
+ double apply$mcDJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToDouble(apply$mcDJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFDD$sp.java
new file mode 100644
index 0000000000..7e53d117c7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFDD$sp extends JFunction2 {
+ float apply$mcFDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFDI$sp.java
new file mode 100644
index 0000000000..64c4b2f133
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFDI$sp extends JFunction2 {
+ float apply$mcFDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFDJ$sp.java
new file mode 100644
index 0000000000..c7ffcbc66a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFDJ$sp extends JFunction2 {
+ float apply$mcFDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFID$sp.java
new file mode 100644
index 0000000000..43944751e6
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFID$sp extends JFunction2 {
+ float apply$mcFID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFII$sp.java
new file mode 100644
index 0000000000..a9a4540ca3
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFII$sp extends JFunction2 {
+ float apply$mcFII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFIJ$sp.java
new file mode 100644
index 0000000000..217615c7a3
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFIJ$sp extends JFunction2 {
+ float apply$mcFIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFJD$sp.java
new file mode 100644
index 0000000000..8400e47876
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFJD$sp extends JFunction2 {
+ float apply$mcFJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFJI$sp.java
new file mode 100644
index 0000000000..e6b6259f96
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFJI$sp extends JFunction2 {
+ float apply$mcFJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcFJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcFJJ$sp.java
new file mode 100644
index 0000000000..68a4c8ecc0
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcFJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcFJJ$sp extends JFunction2 {
+ float apply$mcFJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToFloat(apply$mcFJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIDD$sp.java
new file mode 100644
index 0000000000..76fe0b6ead
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIDD$sp extends JFunction2 {
+ int apply$mcIDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIDI$sp.java
new file mode 100644
index 0000000000..908078f735
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIDI$sp extends JFunction2 {
+ int apply$mcIDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIDJ$sp.java
new file mode 100644
index 0000000000..35c943e324
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIDJ$sp extends JFunction2 {
+ int apply$mcIDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIID$sp.java
new file mode 100644
index 0000000000..f245ec8788
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIID$sp extends JFunction2 {
+ int apply$mcIID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIII$sp.java
new file mode 100644
index 0000000000..f3a7a56dff
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIII$sp extends JFunction2 {
+ int apply$mcIII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIIJ$sp.java
new file mode 100644
index 0000000000..9736196b9e
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIIJ$sp extends JFunction2 {
+ int apply$mcIIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIJD$sp.java
new file mode 100644
index 0000000000..3211432ccb
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIJD$sp extends JFunction2 {
+ int apply$mcIJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIJI$sp.java
new file mode 100644
index 0000000000..74f76404e0
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIJI$sp extends JFunction2 {
+ int apply$mcIJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcIJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcIJJ$sp.java
new file mode 100644
index 0000000000..7b9060bcb8
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcIJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcIJJ$sp extends JFunction2 {
+ int apply$mcIJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToInteger(apply$mcIJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJDD$sp.java
new file mode 100644
index 0000000000..b4595cdf6a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJDD$sp extends JFunction2 {
+ long apply$mcJDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJDI$sp.java
new file mode 100644
index 0000000000..59aad669e7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJDI$sp extends JFunction2 {
+ long apply$mcJDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJDJ$sp.java
new file mode 100644
index 0000000000..8111e03617
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJDJ$sp extends JFunction2 {
+ long apply$mcJDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJID$sp.java
new file mode 100644
index 0000000000..8a06a40a4a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJID$sp extends JFunction2 {
+ long apply$mcJID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJII$sp.java
new file mode 100644
index 0000000000..3d2e03ddbc
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJII$sp extends JFunction2 {
+ long apply$mcJII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJIJ$sp.java
new file mode 100644
index 0000000000..32408269c8
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJIJ$sp extends JFunction2 {
+ long apply$mcJIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJJD$sp.java
new file mode 100644
index 0000000000..cf75bc5c19
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJJD$sp extends JFunction2 {
+ long apply$mcJJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJJI$sp.java
new file mode 100644
index 0000000000..eddcea671d
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJJI$sp extends JFunction2 {
+ long apply$mcJJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcJJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcJJJ$sp.java
new file mode 100644
index 0000000000..4f5626a3e6
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcJJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcJJJ$sp extends JFunction2 {
+ long apply$mcJJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToLong(apply$mcJJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVDD$sp.java
new file mode 100644
index 0000000000..45b9739c91
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVDD$sp extends JFunction2 {
+ void apply$mcVDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVDI$sp.java
new file mode 100644
index 0000000000..c344ea5017
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVDI$sp extends JFunction2 {
+ void apply$mcVDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVDJ$sp.java
new file mode 100644
index 0000000000..94b01d59d5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVDJ$sp extends JFunction2 {
+ void apply$mcVDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVID$sp.java
new file mode 100644
index 0000000000..47c29525a7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVID$sp extends JFunction2 {
+ void apply$mcVID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVII$sp.java
new file mode 100644
index 0000000000..546a994cb9
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVII$sp extends JFunction2 {
+ void apply$mcVII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVIJ$sp.java
new file mode 100644
index 0000000000..d9871efee3
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVIJ$sp extends JFunction2 {
+ void apply$mcVIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVJD$sp.java
new file mode 100644
index 0000000000..525c8ee059
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVJD$sp extends JFunction2 {
+ void apply$mcVJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVJI$sp.java
new file mode 100644
index 0000000000..98f33bf942
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVJI$sp extends JFunction2 {
+ void apply$mcVJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcVJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcVJJ$sp.java
new file mode 100644
index 0000000000..adb8934b57
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcVJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcVJJ$sp extends JFunction2 {
+ void apply$mcVJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { apply$mcVJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2)); return scala.runtime.BoxedUnit.UNIT; }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZDD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZDD$sp.java
new file mode 100644
index 0000000000..9272e025a6
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZDD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZDD$sp extends JFunction2 {
+ boolean apply$mcZDD$sp(double v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZDD$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZDI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZDI$sp.java
new file mode 100644
index 0000000000..4406e00abd
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZDI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZDI$sp extends JFunction2 {
+ boolean apply$mcZDI$sp(double v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZDI$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZDJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZDJ$sp.java
new file mode 100644
index 0000000000..1f92dddfaf
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZDJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZDJ$sp extends JFunction2 {
+ boolean apply$mcZDJ$sp(double v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZDJ$sp(scala.runtime.BoxesRunTime.unboxToDouble(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZID$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZID$sp.java
new file mode 100644
index 0000000000..06b73f9897
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZID$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZID$sp extends JFunction2 {
+ boolean apply$mcZID$sp(int v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZII$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZII$sp.java
new file mode 100644
index 0000000000..729f86063f
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZII$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZII$sp extends JFunction2 {
+ boolean apply$mcZII$sp(int v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZIJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZIJ$sp.java
new file mode 100644
index 0000000000..38da681cd1
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZIJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZIJ$sp extends JFunction2 {
+ boolean apply$mcZIJ$sp(int v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZIJ$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZJD$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZJD$sp.java
new file mode 100644
index 0000000000..6dc9534811
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZJD$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZJD$sp extends JFunction2 {
+ boolean apply$mcZJD$sp(long v1, double v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZJD$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZJI$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZJI$sp.java
new file mode 100644
index 0000000000..a86f63be36
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZJI$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZJI$sp extends JFunction2 {
+ boolean apply$mcZJI$sp(long v1, int v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZJI$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToInt(v2))); }
+}
diff --git a/src/library/scala/runtime/java8/JFunction2$mcZJJ$sp.java b/src/library/scala/runtime/java8/JFunction2$mcZJJ$sp.java
new file mode 100644
index 0000000000..728a781e8e
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2$mcZJJ$sp.java
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2$mcZJJ$sp extends JFunction2 {
+ boolean apply$mcZJJ$sp(long v1, long v2);
+
+ default Object apply(Object v1, Object v2) { return scala.runtime.BoxesRunTime.boxToBoolean(apply$mcZJJ$sp(scala.runtime.BoxesRunTime.unboxToLong(v1), scala.runtime.BoxesRunTime.unboxToLong(v2))); }
+}
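
Note on the JFunction2$mc...$sp files above: the letters after $mc encode the specialized signature in JVM descriptor style, return type first, then the two argument types, with Z=boolean, I=int, J=long, F=float, D=double and V=void — the same names the Scala compiler uses for @specialized Function2 variants. Each interface declares the primitive apply$mc...$sp as its single abstract method and adds a default apply(Object, Object) that boxes and unboxes through BoxesRunTime, so generic Function2 callers still work. A hedged sketch of what this buys a Java caller, assuming the demo class name (the interfaces themselves are exactly as in the diff):

    import scala.runtime.java8.JFunction2$mcIII$sp;

    public class SpecializedDemo {
        public static void main(String[] args) {
            // Single abstract method: int apply$mcIII$sp(int, int) -- no boxing on this path.
            JFunction2$mcIII$sp plus = (v1, v2) -> v1 + v2;
            int sum = plus.apply$mcIII$sp(20, 22);      // primitive call path
            Object boxed = plus.apply(20, 22);          // default apply() boxes via BoxesRunTime
            System.out.println(sum + " / " + boxed);    // 42 / 42
        }
    }
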
diff --git a/src/library/scala/runtime/java8/JFunction2.java b/src/library/scala/runtime/java8/JFunction2.java
new file mode 100644
index 0000000000..41f2adeae9
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction2.java
@@ -0,0 +1,509 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction2<T1, T2, R> extends scala.Function2<T1, T2, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, R>> curried() {
+ return scala.Function2$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple2<T1, T2>, R> tupled() {
+ return scala.Function2$class.tupled(this);
+ }
+
+ default void apply$mcVII$sp(int v1, int v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2));
+ }
+ default boolean apply$mcZII$sp(int v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default int apply$mcIII$sp(int v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default float apply$mcFII$sp(int v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default long apply$mcJII$sp(int v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default double apply$mcDII$sp(int v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default void apply$mcVIJ$sp(int v1, long v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2));
+ }
+ default boolean apply$mcZIJ$sp(int v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default int apply$mcIIJ$sp(int v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default float apply$mcFIJ$sp(int v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default long apply$mcJIJ$sp(int v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default double apply$mcDIJ$sp(int v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default void apply$mcVID$sp(int v1, double v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2));
+ }
+ default boolean apply$mcZID$sp(int v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default int apply$mcIID$sp(int v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default float apply$mcFID$sp(int v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default long apply$mcJID$sp(int v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default double apply$mcDID$sp(int v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToInteger(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default void apply$mcVJI$sp(long v1, int v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2));
+ }
+ default boolean apply$mcZJI$sp(long v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default int apply$mcIJI$sp(long v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default float apply$mcFJI$sp(long v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default long apply$mcJJI$sp(long v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default double apply$mcDJI$sp(long v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default void apply$mcVJJ$sp(long v1, long v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2));
+ }
+ default boolean apply$mcZJJ$sp(long v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default int apply$mcIJJ$sp(long v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default float apply$mcFJJ$sp(long v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default long apply$mcJJJ$sp(long v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default double apply$mcDJJ$sp(long v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default void apply$mcVJD$sp(long v1, double v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2));
+ }
+ default boolean apply$mcZJD$sp(long v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default int apply$mcIJD$sp(long v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default float apply$mcFJD$sp(long v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default long apply$mcJJD$sp(long v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default double apply$mcDJD$sp(long v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToLong(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default void apply$mcVDI$sp(double v1, int v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2));
+ }
+ default boolean apply$mcZDI$sp(double v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default int apply$mcIDI$sp(double v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default float apply$mcFDI$sp(double v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default long apply$mcJDI$sp(double v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default double apply$mcDDI$sp(double v1, int v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToInteger(v2)));
+ }
+ default void apply$mcVDJ$sp(double v1, long v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2));
+ }
+ default boolean apply$mcZDJ$sp(double v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default int apply$mcIDJ$sp(double v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default float apply$mcFDJ$sp(double v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default long apply$mcJDJ$sp(double v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default double apply$mcDDJ$sp(double v1, long v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToLong(v2)));
+ }
+ default void apply$mcVDD$sp(double v1, double v2) {
+ apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2));
+ }
+ default boolean apply$mcZDD$sp(double v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToBoolean(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default int apply$mcIDD$sp(double v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToInt(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default float apply$mcFDD$sp(double v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToFloat(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default long apply$mcJDD$sp(double v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToLong(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+ default double apply$mcDDD$sp(double v1, double v2) {
+ return scala.runtime.BoxesRunTime.unboxToDouble(apply((T1) scala.runtime.BoxesRunTime.boxToDouble(v1), (T2) scala.runtime.BoxesRunTime.boxToDouble(v2)));
+ }
+
+ default scala.Function1 curried$mcVII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDII$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDIJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDID$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDJI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDJJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDJD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDDI$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDDJ$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcVDD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcZDD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcIDD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcFDD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcJDD$sp() {
+ return curried();
+ }
+ default scala.Function1 curried$mcDDD$sp() {
+ return curried();
+ }
+
+ default scala.Function1 tupled$mcVII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDII$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDIJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDID$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDJI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDJJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDJD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDDI$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDDJ$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcVDD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcZDD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcIDD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcFDD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcJDD$sp() {
+ return tupled();
+ }
+ default scala.Function1 tupled$mcDDD$sp() {
+ return tupled();
+ }
+}
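
Note: every specialized default above funnels into the single abstract apply via scala.runtime.BoxesRunTime, so a plain Java lambda can stand in for a specialized scala.Function2. A minimal usage sketch, assuming scala-library 2.11 and the JFunction2 interface from this commit are on the classpath; the class name SumDemo is illustrative only and not part of the commit:

    import scala.runtime.java8.JFunction2;

    public class SumDemo {
        public static void main(String[] args) {
            // The lambda supplies the erased apply(T1, T2); the generated
            // apply$mcIII$sp default boxes both ints, calls apply, and unboxes the result.
            JFunction2<Integer, Integer, Integer> sum = (a, b) -> a + b;
            System.out.println(sum.apply$mcIII$sp(1, 2));        // prints 3
            // curried$mcIII$sp (and every other curried/tupled variant) just delegates
            // to the generic curried()/tupled() defaults shown above.
            System.out.println(sum.curried().apply(1).apply(2)); // prints 3
        }
    }
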
diff --git a/src/library/scala/runtime/java8/JFunction20.java b/src/library/scala/runtime/java8/JFunction20.java
new file mode 100644
index 0000000000..b8b4c11af7
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction20.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> extends scala.Function20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, scala.Function1<T18, scala.Function1<T19, scala.Function1<T20, R>>>>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function20$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>, R> tupled() {
+ return scala.Function20$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction21.java b/src/library/scala/runtime/java8/JFunction21.java
new file mode 100644
index 0000000000..dbae0a0479
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction21.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> extends scala.Function21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, scala.Function1<T18, scala.Function1<T19, scala.Function1<T20, scala.Function1<T21, R>>>>>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function21$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>, R> tupled() {
+ return scala.Function21$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction22.java b/src/library/scala/runtime/java8/JFunction22.java
new file mode 100644
index 0000000000..2926ae336d
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction22.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> extends scala.Function22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, scala.Function1<T10, scala.Function1<T11, scala.Function1<T12, scala.Function1<T13, scala.Function1<T14, scala.Function1<T15, scala.Function1<T16, scala.Function1<T17, scala.Function1<T18, scala.Function1<T19, scala.Function1<T20, scala.Function1<T21, scala.Function1<T22, R>>>>>>>>>>>>>>>>>>>>>> curried() {
+ return scala.Function22$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>, R> tupled() {
+ return scala.Function22$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction3.java b/src/library/scala/runtime/java8/JFunction3.java
new file mode 100644
index 0000000000..b75da0669b
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction3.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction3<T1, T2, T3, R> extends scala.Function3<T1, T2, T3, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, R>>> curried() {
+ return scala.Function3$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple3<T1, T2, T3>, R> tupled() {
+ return scala.Function3$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction4.java b/src/library/scala/runtime/java8/JFunction4.java
new file mode 100644
index 0000000000..20f89141bd
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction4.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction4<T1, T2, T3, T4, R> extends scala.Function4<T1, T2, T3, T4, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, R>>>> curried() {
+ return scala.Function4$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple4<T1, T2, T3, T4>, R> tupled() {
+ return scala.Function4$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction5.java b/src/library/scala/runtime/java8/JFunction5.java
new file mode 100644
index 0000000000..ce15f14e22
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction5.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction5<T1, T2, T3, T4, T5, R> extends scala.Function5<T1, T2, T3, T4, T5, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, R>>>>> curried() {
+ return scala.Function5$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple5<T1, T2, T3, T4, T5>, R> tupled() {
+ return scala.Function5$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction6.java b/src/library/scala/runtime/java8/JFunction6.java
new file mode 100644
index 0000000000..07c0ca9665
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction6.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction6<T1, T2, T3, T4, T5, T6, R> extends scala.Function6<T1, T2, T3, T4, T5, T6, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, R>>>>>> curried() {
+ return scala.Function6$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple6<T1, T2, T3, T4, T5, T6>, R> tupled() {
+ return scala.Function6$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction7.java b/src/library/scala/runtime/java8/JFunction7.java
new file mode 100644
index 0000000000..f765ade092
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction7.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction7<T1, T2, T3, T4, T5, T6, T7, R> extends scala.Function7<T1, T2, T3, T4, T5, T6, T7, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, R>>>>>>> curried() {
+ return scala.Function7$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple7<T1, T2, T3, T4, T5, T6, T7>, R> tupled() {
+ return scala.Function7$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction8.java b/src/library/scala/runtime/java8/JFunction8.java
new file mode 100644
index 0000000000..ffd362b0af
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction8.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction8<T1, T2, T3, T4, T5, T6, T7, T8, R> extends scala.Function8<T1, T2, T3, T4, T5, T6, T7, T8, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, R>>>>>>>> curried() {
+ return scala.Function8$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple8<T1, T2, T3, T4, T5, T6, T7, T8>, R> tupled() {
+ return scala.Function8$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JFunction9.java b/src/library/scala/runtime/java8/JFunction9.java
new file mode 100644
index 0000000000..e3fca09be0
--- /dev/null
+++ b/src/library/scala/runtime/java8/JFunction9.java
@@ -0,0 +1,22 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+@FunctionalInterface
+public interface JFunction9<T1, T2, T3, T4, T5, T6, T7, T8, T9, R> extends scala.Function9<T1, T2, T3, T4, T5, T6, T7, T8, T9, R> {
+ default void $init$() {
+ };
+
+ default scala.Function1<T1, scala.Function1<T2, scala.Function1<T3, scala.Function1<T4, scala.Function1<T5, scala.Function1<T6, scala.Function1<T7, scala.Function1<T8, scala.Function1<T9, R>>>>>>>>> curried() {
+ return scala.Function9$class.curried(this);
+ }
+
+ default scala.Function1<scala.Tuple9<T1, T2, T3, T4, T5, T6, T7, T8, T9>, R> tupled() {
+ return scala.Function9$class.tupled(this);
+ }
+
+
+}
diff --git a/src/library/scala/runtime/java8/JProcedure0.java b/src/library/scala/runtime/java8/JProcedure0.java
new file mode 100644
index 0000000000..6004364d03
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure0.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure0 extends JFunction0<BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid();
+
+ default BoxedUnit apply() {
+ applyVoid();
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure1.java b/src/library/scala/runtime/java8/JProcedure1.java
new file mode 100644
index 0000000000..184d943042
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure1.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure1<T1> extends JFunction1<T1, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1);
+
+ default BoxedUnit apply(T1 t1) {
+ applyVoid(t1);
+ return BoxedUnit.UNIT;
+ }
+}
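
Note: JProcedure1 adapts a void-returning Java lambda to scala.Function1[T1, BoxedUnit]; applyVoid is the single abstract method, and the default apply forwards to it and returns BoxedUnit.UNIT. A hedged sketch of that adaptation (class and variable names below are illustrative, not part of the commit):

    import scala.runtime.BoxedUnit;
    import scala.runtime.java8.JProcedure1;

    public class PrintDemo {
        public static void main(String[] args) {
            // The lambda targets applyVoid(T1); the default apply(T1) delegates
            // to it and yields BoxedUnit.UNIT, satisfying scala.Function1.
            JProcedure1<String> print = s -> System.out.println(s);
            BoxedUnit unit = print.apply("hello");
            System.out.println(unit == BoxedUnit.UNIT); // prints true
        }
    }
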
diff --git a/src/library/scala/runtime/java8/JProcedure10.java b/src/library/scala/runtime/java8/JProcedure10.java
new file mode 100644
index 0000000000..2aadd7d215
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure10.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> extends JFunction10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure11.java b/src/library/scala/runtime/java8/JProcedure11.java
new file mode 100644
index 0000000000..c29853be1f
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure11.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> extends JFunction11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure12.java b/src/library/scala/runtime/java8/JProcedure12.java
new file mode 100644
index 0000000000..0607600c33
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure12.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> extends JFunction12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure13.java b/src/library/scala/runtime/java8/JProcedure13.java
new file mode 100644
index 0000000000..c390fed2a5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure13.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> extends JFunction13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure14.java b/src/library/scala/runtime/java8/JProcedure14.java
new file mode 100644
index 0000000000..d67cff1b5a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure14.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> extends JFunction14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure15.java b/src/library/scala/runtime/java8/JProcedure15.java
new file mode 100644
index 0000000000..81e0f524f5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure15.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> extends JFunction15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure16.java b/src/library/scala/runtime/java8/JProcedure16.java
new file mode 100644
index 0000000000..3d29ae25c5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure16.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> extends JFunction16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure17.java b/src/library/scala/runtime/java8/JProcedure17.java
new file mode 100644
index 0000000000..85f40b2cd5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure17.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> extends JFunction17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure18.java b/src/library/scala/runtime/java8/JProcedure18.java
new file mode 100644
index 0000000000..fe2ab6f22c
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure18.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18> extends JFunction18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure19.java b/src/library/scala/runtime/java8/JProcedure19.java
new file mode 100644
index 0000000000..9289d639a5
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure19.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19> extends JFunction19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure2.java b/src/library/scala/runtime/java8/JProcedure2.java
new file mode 100644
index 0000000000..273357a3b0
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure2.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure2<T1, T2> extends JFunction2<T1, T2, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2);
+
+ default BoxedUnit apply(T1 t1, T2 t2) {
+ applyVoid(t1, t2);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure20.java b/src/library/scala/runtime/java8/JProcedure20.java
new file mode 100644
index 0000000000..8701e9d422
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure20.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20> extends JFunction20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure21.java b/src/library/scala/runtime/java8/JProcedure21.java
new file mode 100644
index 0000000000..f8e38f6c70
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure21.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> extends JFunction21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure22.java b/src/library/scala/runtime/java8/JProcedure22.java
new file mode 100644
index 0000000000..8bae4d7e0d
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure22.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> extends JFunction22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21, T22 t22);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9, T10 t10, T11 t11, T12 t12, T13 t13, T14 t14, T15 t15, T16 t16, T17 t17, T18 t18, T19 t19, T20 t20, T21 t21, T22 t22) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure3.java b/src/library/scala/runtime/java8/JProcedure3.java
new file mode 100644
index 0000000000..7c53187f31
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure3.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure3<T1, T2, T3> extends JFunction3<T1, T2, T3, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3) {
+ applyVoid(t1, t2, t3);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure4.java b/src/library/scala/runtime/java8/JProcedure4.java
new file mode 100644
index 0000000000..33161bc151
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure4.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure4<T1, T2, T3, T4> extends JFunction4<T1, T2, T3, T4, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4) {
+ applyVoid(t1, t2, t3, t4);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure5.java b/src/library/scala/runtime/java8/JProcedure5.java
new file mode 100644
index 0000000000..c834c48bf6
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure5.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure5<T1, T2, T3, T4, T5> extends JFunction5<T1, T2, T3, T4, T5, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) {
+ applyVoid(t1, t2, t3, t4, t5);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure6.java b/src/library/scala/runtime/java8/JProcedure6.java
new file mode 100644
index 0000000000..995bdd6734
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure6.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure6<T1, T2, T3, T4, T5, T6> extends JFunction6<T1, T2, T3, T4, T5, T6, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6) {
+ applyVoid(t1, t2, t3, t4, t5, t6);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure7.java b/src/library/scala/runtime/java8/JProcedure7.java
new file mode 100644
index 0000000000..1821d8d406
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure7.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure7<T1, T2, T3, T4, T5, T6, T7> extends JFunction7<T1, T2, T3, T4, T5, T6, T7, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure8.java b/src/library/scala/runtime/java8/JProcedure8.java
new file mode 100644
index 0000000000..4b9dd0929a
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure8.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure8<T1, T2, T3, T4, T5, T6, T7, T8> extends JFunction8<T1, T2, T3, T4, T5, T6, T7, T8, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/runtime/java8/JProcedure9.java b/src/library/scala/runtime/java8/JProcedure9.java
new file mode 100644
index 0000000000..c4cbc65b6c
--- /dev/null
+++ b/src/library/scala/runtime/java8/JProcedure9.java
@@ -0,0 +1,21 @@
+
+/*
+ * Copyright (C) 2012-2015 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package scala.runtime.java8;
+
+import scala.runtime.BoxedUnit;
+
+@FunctionalInterface
+public interface JProcedure9<T1, T2, T3, T4, T5, T6, T7, T8, T9> extends JFunction9<T1, T2, T3, T4, T5, T6, T7, T8, T9, BoxedUnit> {
+ default void $init$() {
+ }
+
+ void applyVoid(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9);
+
+ default BoxedUnit apply(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7, T8 t8, T9 t9) {
+ applyVoid(t1, t2, t3, t4, t5, t6, t7, t8, t9);
+ return BoxedUnit.UNIT;
+ }
+}
diff --git a/src/library/scala/sys/BooleanProp.scala b/src/library/scala/sys/BooleanProp.scala
index e5e4668edb..b0008b41fd 100644
--- a/src/library/scala/sys/BooleanProp.scala
+++ b/src/library/scala/sys/BooleanProp.scala
@@ -50,6 +50,7 @@ object BooleanProp {
def get: String = "" + value
val clear, enable, disable, toggle = ()
def option = if (isSet) Some(value) else None
+ //def or[T1 >: Boolean](alt: => T1): T1 = if (value) true else alt
protected def zero = false
}
diff --git a/src/library/scala/sys/Prop.scala b/src/library/scala/sys/Prop.scala
index 17ae8cb69c..52a3d89ecb 100644
--- a/src/library/scala/sys/Prop.scala
+++ b/src/library/scala/sys/Prop.scala
@@ -58,6 +58,10 @@ trait Prop[+T] {
*/
def option: Option[T]
+ // Do not open until 2.12.
+ //** This value if the property is set, an alternative value otherwise. */
+ //def or[T1 >: T](alt: => T1): T1
+
/** Removes the property from the underlying map.
*/
def clear(): Unit
diff --git a/src/library/scala/sys/SystemProperties.scala b/src/library/scala/sys/SystemProperties.scala
index 6f8b13a89b..3ee4f6c708 100644
--- a/src/library/scala/sys/SystemProperties.scala
+++ b/src/library/scala/sys/SystemProperties.scala
@@ -86,6 +86,8 @@ object SystemProperties {
lazy val headless = bool("java.awt.headless", "system should not utilize a display device")
lazy val preferIPv4Stack = bool("java.net.preferIPv4Stack", "system should prefer IPv4 sockets")
lazy val preferIPv6Addresses = bool("java.net.preferIPv6Addresses", "system should prefer IPv6 addresses")
- lazy val noTraceSupression = bool("scala.control.noTraceSuppression", "scala should not suppress any stack trace creation")
+ lazy val noTraceSuppression = bool("scala.control.noTraceSuppression", "scala should not suppress any stack trace creation")
+ @deprecated("Use noTraceSuppression", "2.12.0")
+ def noTraceSupression = noTraceSuppression
}
diff --git a/src/library/scala/sys/process/package.scala b/src/library/scala/sys/process/package.scala
index 5ec2e73cb9..0e9a1bfc56 100644
--- a/src/library/scala/sys/process/package.scala
+++ b/src/library/scala/sys/process/package.scala
@@ -157,7 +157,8 @@ package scala.sys {
* while(input.read() != -1) count += 1
* input.close()
* }
- * cat ! new ProcessIO(_.close(), byteCounter, _.close())
+ * val p = cat run new ProcessIO(_.close(), byteCounter, _.close())
+ * p.exitValue()
* count
* }
*
diff --git a/src/library/scala/util/control/Exception.scala b/src/library/scala/util/control/Exception.scala
index aa30887ba0..24c297a2fc 100644
--- a/src/library/scala/util/control/Exception.scala
+++ b/src/library/scala/util/control/Exception.scala
@@ -105,7 +105,7 @@ object Exception {
case x if rethrow(x) => throw x
case x if pf isDefinedAt x => pf(x)
}
- finally fin map (_.invoke())
+ finally fin foreach (_.invoke())
/* Create an empty Try container with this Catch and the supplied `Finally`. */
def andFinally(body: => Unit): Catch[T] = fin match {
diff --git a/src/library/scala/util/control/NoStackTrace.scala b/src/library/scala/util/control/NoStackTrace.scala
index b33b6a18dd..3c42944af1 100644
--- a/src/library/scala/util/control/NoStackTrace.scala
+++ b/src/library/scala/util/control/NoStackTrace.scala
@@ -26,7 +26,7 @@ trait NoStackTrace extends Throwable {
object NoStackTrace {
final def noSuppression = _noSuppression
- // two-stage init to make checkinit happy, since sys.SystemProperties.noTraceSupression.value calls back into NoStackTrace.noSuppression
+ // two-stage init to make checkinit happy, since sys.SystemProperties.noTraceSuppression.value calls back into NoStackTrace.noSuppression
final private var _noSuppression = false
- _noSuppression = sys.SystemProperties.noTraceSupression.value
+ _noSuppression = sys.SystemProperties.noTraceSuppression.value
}
diff --git a/src/manual/scala/tools/docutil/ManMaker.scala b/src/manual/scala/tools/docutil/ManMaker.scala
index 47b861a80f..802b357f5f 100644
--- a/src/manual/scala/tools/docutil/ManMaker.scala
+++ b/src/manual/scala/tools/docutil/ManMaker.scala
@@ -18,7 +18,7 @@ class ManMaker extends Task {
/** The directory to put html pages in */
private var htmlout: Option[File] = None
- /** The directory to put man pags in */
+ /** The directory to put man pages in */
private var manout: Option[File] = None
diff --git a/src/partest-extras/scala/tools/partest/ASMConverters.scala b/src/partest-extras/scala/tools/partest/ASMConverters.scala
index f6e2d2a9ec..b4c686473b 100644
--- a/src/partest-extras/scala/tools/partest/ASMConverters.scala
+++ b/src/partest-extras/scala/tools/partest/ASMConverters.scala
@@ -58,21 +58,24 @@ object ASMConverters {
case class Method(instructions: List[Instruction], handlers: List[ExceptionHandler], localVars: List[LocalVariable])
- case class Field (opcode: Int, owner: String, name: String, desc: String) extends Instruction
- case class Incr (opcode: Int, `var`: Int, incr: Int) extends Instruction
- case class Op (opcode: Int) extends Instruction
- case class IntOp (opcode: Int, operand: Int) extends Instruction
- case class Jump (opcode: Int, label: Label) extends Instruction
- case class Ldc (opcode: Int, cst: Any) extends Instruction
- case class LookupSwitch(opcode: Int, dflt: Label, keys: List[Int], labels: List[Label]) extends Instruction
- case class TableSwitch (opcode: Int, min: Int, max: Int, dflt: Label, labels: List[Label]) extends Instruction
- case class Invoke (opcode: Int, owner: String, name: String, desc: String, itf: Boolean) extends Instruction
- case class NewArray (opcode: Int, desc: String, dims: Int) extends Instruction
- case class TypeOp (opcode: Int, desc: String) extends Instruction
- case class VarOp (opcode: Int, `var`: Int) extends Instruction
- case class Label (offset: Int) extends Instruction { def opcode: Int = -1 }
- case class FrameEntry (`type`: Int, local: List[Any], stack: List[Any]) extends Instruction { def opcode: Int = -1 }
- case class LineNumber (line: Int, start: Label) extends Instruction { def opcode: Int = -1 }
+ case class Field (opcode: Int, owner: String, name: String, desc: String) extends Instruction
+ case class Incr (opcode: Int, `var`: Int, incr: Int) extends Instruction
+ case class Op (opcode: Int) extends Instruction
+ case class IntOp (opcode: Int, operand: Int) extends Instruction
+ case class Jump (opcode: Int, label: Label) extends Instruction
+ case class Ldc (opcode: Int, cst: Any) extends Instruction
+ case class LookupSwitch (opcode: Int, dflt: Label, keys: List[Int], labels: List[Label]) extends Instruction
+ case class TableSwitch (opcode: Int, min: Int, max: Int, dflt: Label, labels: List[Label]) extends Instruction
+ case class Invoke (opcode: Int, owner: String, name: String, desc: String, itf: Boolean) extends Instruction
+ case class InvokeDynamic(opcode: Int, name: String, desc: String, bsm: MethodHandle, bsmArgs: List[AnyRef]) extends Instruction
+ case class NewArray (opcode: Int, desc: String, dims: Int) extends Instruction
+ case class TypeOp (opcode: Int, desc: String) extends Instruction
+ case class VarOp (opcode: Int, `var`: Int) extends Instruction
+ case class Label (offset: Int) extends Instruction { def opcode: Int = -1 }
+ case class FrameEntry (`type`: Int, local: List[Any], stack: List[Any]) extends Instruction { def opcode: Int = -1 }
+ case class LineNumber (line: Int, start: Label) extends Instruction { def opcode: Int = -1 }
+
+ case class MethodHandle(tag: Int, owner: String, name: String, desc: String)
case class ExceptionHandler(start: Label, end: Label, handler: Label, desc: Option[String])
case class LocalVariable(name: String, desc: String, signature: Option[String], start: Label, end: Label, index: Int)
@@ -111,6 +114,7 @@ object ASMConverters {
case i: t.LookupSwitchInsnNode => LookupSwitch (op(i), applyLabel(i.dflt), lst(i.keys) map (x => x: Int), lst(i.labels) map applyLabel)
case i: t.TableSwitchInsnNode => TableSwitch (op(i), i.min, i.max, applyLabel(i.dflt), lst(i.labels) map applyLabel)
case i: t.MethodInsnNode => Invoke (op(i), i.owner, i.name, i.desc, i.itf)
+ case i: t.InvokeDynamicInsnNode => InvokeDynamic(op(i), i.name, i.desc, convertMethodHandle(i.bsm), convertBsmArgs(i.bsmArgs))
case i: t.MultiANewArrayInsnNode => NewArray (op(i), i.desc, i.dims)
case i: t.TypeInsnNode => TypeOp (op(i), i.desc)
case i: t.VarInsnNode => VarOp (op(i), i.`var`)
@@ -119,6 +123,13 @@ object ASMConverters {
case i: t.LineNumberNode => LineNumber (i.line, applyLabel(i.start))
}
+ private def convertBsmArgs(a: Array[Object]): List[Object] = a.map({
+ case h: asm.Handle => convertMethodHandle(h)
+ case _ => a // can be: Class, method Type, primitive constant
+ })(collection.breakOut)
+
+ private def convertMethodHandle(h: asm.Handle): MethodHandle = MethodHandle(h.getTag, h.getOwner, h.getName, h.getDesc)
+
private def convertHandlers(method: t.MethodNode): List[ExceptionHandler] = {
method.tryCatchBlocks.asScala.map(h => ExceptionHandler(applyLabel(h.start), applyLabel(h.end), applyLabel(h.handler), Option(h.`type`)))(collection.breakOut)
}
@@ -131,7 +142,7 @@ object ASMConverters {
import collection.mutable.{Map => MMap}
/**
- * Bytecode is equal modula local variable numbering and label numbering.
+ * Bytecode is equal modulo local variable numbering and label numbering.
*/
def equivalentBytecode(as: List[Instruction], bs: List[Instruction], varMap: MMap[Int, Int] = MMap(), labelMap: MMap[Int, Int] = MMap()): Boolean = {
def same(v1: Int, v2: Int, m: MMap[Int, Int]) = {
@@ -197,21 +208,28 @@ object ASMConverters {
case x => x.asInstanceOf[Object]
}
+ def unconvertMethodHandle(h: MethodHandle): asm.Handle = new asm.Handle(h.tag, h.owner, h.name, h.desc)
+ def unconvertBsmArgs(a: List[Object]): Array[Object] = a.map({
+ case h: MethodHandle => unconvertMethodHandle(h)
+ case o => o
+ })(collection.breakOut)
+
private def visitMethod(method: t.MethodNode, instruction: Instruction, asmLabel: Map[Label, asm.Label]): Unit = instruction match {
- case Field(op, owner, name, desc) => method.visitFieldInsn(op, owner, name, desc)
- case Incr(op, vr, incr) => method.visitIincInsn(vr, incr)
- case Op(op) => method.visitInsn(op)
- case IntOp(op, operand) => method.visitIntInsn(op, operand)
- case Jump(op, label) => method.visitJumpInsn(op, asmLabel(label))
- case Ldc(op, cst) => method.visitLdcInsn(cst)
- case LookupSwitch(op, dflt, keys, labels) => method.visitLookupSwitchInsn(asmLabel(dflt), keys.toArray, (labels map asmLabel).toArray)
- case TableSwitch(op, min, max, dflt, labels) => method.visitTableSwitchInsn(min, max, asmLabel(dflt), (labels map asmLabel).toArray: _*)
- case Invoke(op, owner, name, desc, itf) => method.visitMethodInsn(op, owner, name, desc, itf)
- case NewArray(op, desc, dims) => method.visitMultiANewArrayInsn(desc, dims)
- case TypeOp(op, desc) => method.visitTypeInsn(op, desc)
- case VarOp(op, vr) => method.visitVarInsn(op, vr)
- case l: Label => method.visitLabel(asmLabel(l))
- case FrameEntry(tp, local, stack) => method.visitFrame(tp, local.length, frameTypesToAsm(local, asmLabel).toArray, stack.length, frameTypesToAsm(stack, asmLabel).toArray)
- case LineNumber(line, start) => method.visitLineNumber(line, asmLabel(start))
+ case Field(op, owner, name, desc) => method.visitFieldInsn(op, owner, name, desc)
+ case Incr(op, vr, incr) => method.visitIincInsn(vr, incr)
+ case Op(op) => method.visitInsn(op)
+ case IntOp(op, operand) => method.visitIntInsn(op, operand)
+ case Jump(op, label) => method.visitJumpInsn(op, asmLabel(label))
+ case Ldc(op, cst) => method.visitLdcInsn(cst)
+ case LookupSwitch(op, dflt, keys, labels) => method.visitLookupSwitchInsn(asmLabel(dflt), keys.toArray, (labels map asmLabel).toArray)
+ case TableSwitch(op, min, max, dflt, labels) => method.visitTableSwitchInsn(min, max, asmLabel(dflt), (labels map asmLabel).toArray: _*)
+ case Invoke(op, owner, name, desc, itf) => method.visitMethodInsn(op, owner, name, desc, itf)
+ case InvokeDynamic(op, name, desc, bsm, bsmArgs) => method.visitInvokeDynamicInsn(name, desc, unconvertMethodHandle(bsm), unconvertBsmArgs(bsmArgs))
+ case NewArray(op, desc, dims) => method.visitMultiANewArrayInsn(desc, dims)
+ case TypeOp(op, desc) => method.visitTypeInsn(op, desc)
+ case VarOp(op, vr) => method.visitVarInsn(op, vr)
+ case l: Label => method.visitLabel(asmLabel(l))
+ case FrameEntry(tp, local, stack) => method.visitFrame(tp, local.length, frameTypesToAsm(local, asmLabel).toArray, stack.length, frameTypesToAsm(stack, asmLabel).toArray)
+ case LineNumber(line, start) => method.visitLineNumber(line, asmLabel(start))
}
}
diff --git a/src/partest-extras/scala/tools/partest/ReplTest.scala b/src/partest-extras/scala/tools/partest/ReplTest.scala
index 1fde2370d3..20dfe0eb16 100644
--- a/src/partest-extras/scala/tools/partest/ReplTest.scala
+++ b/src/partest-extras/scala/tools/partest/ReplTest.scala
@@ -6,8 +6,9 @@
package scala.tools.partest
import scala.tools.nsc.Settings
-import scala.tools.nsc.interpreter.ILoop
+import scala.tools.nsc.interpreter.{ ILoop, replProps }
import java.lang.reflect.{ Method => JMethod, Field => JField }
+import scala.util.matching.Regex
import scala.util.matching.Regex.Match
/** A class for testing repl code.
@@ -19,30 +20,33 @@ abstract class ReplTest extends DirectTest {
// final because we need to enforce the existence of a couple settings.
final override def settings: Settings = {
val s = super.settings
- // s.Yreplsync.value = true
s.Xnojline.value = true
transformSettings(s)
}
+ def normalize(s: String) = s
/** True for SessionTest to preserve session text. */
def inSession: Boolean = false
- /** True to preserve welcome text. */
+ /** True to preserve welcome header, eliding version number. */
def welcoming: Boolean = false
- lazy val welcome = "(Welcome to Scala) version .*".r
- def normalize(s: String) = s match {
- case welcome(w) => w
- case s => s
- }
- def unwelcoming(s: String) = s match {
- case welcome(w) => false
- case _ => true
- }
+ lazy val header = replProps.welcome
def eval() = {
val s = settings
log("eval(): settings = " + s)
- //ILoop.runForTranscript(code, s).lines drop 1 // not always first line
val lines = ILoop.runForTranscript(code, s, inSession = inSession).lines
- if (welcoming) lines map normalize
- else lines filter unwelcoming
+ (if (welcoming) {
+ val welcome = "(Welcome to Scala).*".r
+ //val welcome = Regex.quote(header.lines.next).r
+ //val version = "(.*version).*".r // version on separate line?
+ //var inHead = false
+ lines map {
+ //case s @ welcome() => inHead = true ; s
+ //case version(s) if inHead => inHead = false ; s
+ case welcome(s) => s
+ case s => s
+ }
+ } else {
+ lines drop header.lines.size
+ }) map normalize
}
def show() = eval() foreach println
}
diff --git a/src/reflect/scala/reflect/api/FlagSets.scala b/src/reflect/scala/reflect/api/FlagSets.scala
index d3294dad9b..2d5d1d5d6b 100644
--- a/src/reflect/scala/reflect/api/FlagSets.scala
+++ b/src/reflect/scala/reflect/api/FlagSets.scala
@@ -173,6 +173,7 @@ trait FlagSets { self: Universe =>
* - the enum's class
* - enum constants
**/
+ @deprecated("Use `isJavaEnum` on the corresponding symbol instead.", since = "2.11.8")
val ENUM: FlagSet
/** Flag indicating that tree represents a parameter of the primary constructor of some class
diff --git a/src/reflect/scala/reflect/api/Internals.scala b/src/reflect/scala/reflect/api/Internals.scala
index 577cd09295..1457fdc133 100644
--- a/src/reflect/scala/reflect/api/Internals.scala
+++ b/src/reflect/scala/reflect/api/Internals.scala
@@ -114,7 +114,7 @@ trait Internals { self: Universe =>
def substituteTypes(tree: Tree, from: List[Symbol], to: List[Type]): Tree
/** Substitute given tree `to` for occurrences of nodes that represent
- * `C.this`, where `C` referes to the given class `clazz`.
+ * `C.this`, where `C` refers to the given class `clazz`.
*/
def substituteThis(tree: Tree, clazz: Symbol, to: Tree): Tree
diff --git a/src/reflect/scala/reflect/api/Symbols.scala b/src/reflect/scala/reflect/api/Symbols.scala
index b7234ba47a..9e9fe5d67b 100644
--- a/src/reflect/scala/reflect/api/Symbols.scala
+++ b/src/reflect/scala/reflect/api/Symbols.scala
@@ -504,6 +504,18 @@ trait Symbols { self: Universe =>
*/
def isImplicit: Boolean
+ /** Does this symbol represent a java enum class or a java enum value?
+ *
+ * @group Tests
+ */
+ def isJavaEnum: Boolean
+
+ /** Does this symbol represent a java annotation interface?
+ *
+ * @group Tests
+ */
+ def isJavaAnnotation: Boolean
+
/******************* helpers *******************/
/** Provides an alternate if symbol is a NoSymbol.
diff --git a/src/reflect/scala/reflect/api/Trees.scala b/src/reflect/scala/reflect/api/Trees.scala
index 2bf407ee19..a43195d9b6 100644
--- a/src/reflect/scala/reflect/api/Trees.scala
+++ b/src/reflect/scala/reflect/api/Trees.scala
@@ -143,7 +143,7 @@ trait Trees { self: Universe =>
/** Find all subtrees matching predicate `p`. Same as `withFilter` */
def filter(f: Tree => Boolean): List[Tree]
- /** Apply `pf' to each subtree on which the function is defined and collect the results.
+ /** Apply `pf` to each subtree on which the function is defined and collect the results.
*/
def collect[T](pf: PartialFunction[Tree, T]): List[T]
diff --git a/src/reflect/scala/reflect/api/TypeTags.scala b/src/reflect/scala/reflect/api/TypeTags.scala
index 7db375ca61..bc239ca870 100644
--- a/src/reflect/scala/reflect/api/TypeTags.scala
+++ b/src/reflect/scala/reflect/api/TypeTags.scala
@@ -53,7 +53,7 @@ import java.io.ObjectStreamException
* Each of these methods constructs a `TypeTag[T]` or `ClassTag[T]` for the given
* type argument `T`.
*
- * === #2 Using an implicit parameter of type `TypeTag[T]`, `ClassTag[T]`, or `WeakTypeTag[T]
+ * === #2 Using an implicit parameter of type `TypeTag[T]`, `ClassTag[T]`, or `WeakTypeTag[T]`
*
* For example:
* {{{
diff --git a/src/reflect/scala/reflect/internal/ClassfileConstants.scala b/src/reflect/scala/reflect/internal/ClassfileConstants.scala
index 53241fb15b..e5d97e8959 100644
--- a/src/reflect/scala/reflect/internal/ClassfileConstants.scala
+++ b/src/reflect/scala/reflect/internal/ClassfileConstants.scala
@@ -344,7 +344,8 @@ object ClassfileConstants {
case JAVA_ACC_STATIC => STATIC
case JAVA_ACC_ABSTRACT => if (isAnnotation) 0L else if (isClass) ABSTRACT else DEFERRED
case JAVA_ACC_INTERFACE => if (isAnnotation) 0L else TRAIT | INTERFACE | ABSTRACT
- case JAVA_ACC_ENUM => ENUM
+ case JAVA_ACC_ENUM => JAVA_ENUM
+ case JAVA_ACC_ANNOTATION => JAVA_ANNOTATION
case _ => 0L
}
private def translateFlags(jflags: Int, baseFlags: Long, isClass: Boolean): Long = {
@@ -360,6 +361,7 @@ object ClassfileConstants {
res |= translateFlag0(jflags & JAVA_ACC_ABSTRACT)
res |= translateFlag0(jflags & JAVA_ACC_INTERFACE)
res |= translateFlag0(jflags & JAVA_ACC_ENUM)
+ res |= translateFlag0(jflags & JAVA_ACC_ANNOTATION)
res
}
diff --git a/src/reflect/scala/reflect/internal/Definitions.scala b/src/reflect/scala/reflect/internal/Definitions.scala
index f3dd6a3280..02fa3c882b 100644
--- a/src/reflect/scala/reflect/internal/Definitions.scala
+++ b/src/reflect/scala/reflect/internal/Definitions.scala
@@ -103,7 +103,7 @@ trait Definitions extends api.StandardDefinitions {
def isNumericValueClass(sym: Symbol) = ScalaNumericValueClasses contains sym
def isGetClass(sym: Symbol) = (
- sym.name == nme.getClass_ // this condition is for performance only, this is called from `Typer#stabliize`.
+ sym.name == nme.getClass_ // this condition is for performance only, this is called from `Typer#stabilize`.
&& getClassMethods(sym)
)
@@ -815,7 +815,7 @@ trait Definitions extends api.StandardDefinitions {
// must filter out "universal" members (getClass is deferred for some reason)
val deferredMembers = (
tp membersBasedOnFlags (excludedFlags = BridgeAndPrivateFlags, requiredFlags = METHOD)
- filter (mem => mem.isDeferredNotDefault && !isUniversalMember(mem)) // TODO: test
+ filter (mem => mem.isDeferredNotJavaDefault && !isUniversalMember(mem)) // TODO: test
)
// if there is only one, it's monomorphic and has a single argument list
@@ -1517,7 +1517,7 @@ trait Definitions extends api.StandardDefinitions {
def isPolymorphicSignature(sym: Symbol) = PolySigMethods(sym)
private lazy val PolySigMethods: Set[Symbol] = Set[Symbol](MethodHandle.info.decl(sn.Invoke), MethodHandle.info.decl(sn.InvokeExact)).filter(_.exists)
- lazy val Scala_Java8_CompatPackage = rootMirror.getPackageIfDefined("scala.compat.java8")
+ lazy val Scala_Java8_CompatPackage = rootMirror.getPackageIfDefined("scala.runtime.java8")
lazy val Scala_Java8_CompatPackage_JFunction = (0 to MaxFunctionArity).toArray map (i => getMemberIfDefined(Scala_Java8_CompatPackage.moduleClass, TypeName("JFunction" + i)))
}
}
diff --git a/src/reflect/scala/reflect/internal/ExistentialsAndSkolems.scala b/src/reflect/scala/reflect/internal/ExistentialsAndSkolems.scala
index 0eeca4aace..3e18f88f80 100644
--- a/src/reflect/scala/reflect/internal/ExistentialsAndSkolems.scala
+++ b/src/reflect/scala/reflect/internal/ExistentialsAndSkolems.scala
@@ -110,7 +110,7 @@ trait ExistentialsAndSkolems {
/**
* Compute an existential type from hidden symbols `hidden` and type `tp`.
* @param hidden The symbols that will be existentially abstracted
- * @param hidden The original type
+ * @param tp The original type
* @param rawOwner The owner for Java raw types.
*/
final def packSymbols(hidden: List[Symbol], tp: Type, rawOwner: Symbol = NoSymbol): Type =
diff --git a/src/reflect/scala/reflect/internal/FlagSets.scala b/src/reflect/scala/reflect/internal/FlagSets.scala
index ef9c77878f..b6521634fb 100644
--- a/src/reflect/scala/reflect/internal/FlagSets.scala
+++ b/src/reflect/scala/reflect/internal/FlagSets.scala
@@ -42,7 +42,7 @@ trait FlagSets extends api.FlagSets { self: SymbolTable =>
val DEFAULTPARAM : FlagSet = Flags.DEFAULTPARAM
val PRESUPER : FlagSet = Flags.PRESUPER
val DEFAULTINIT : FlagSet = Flags.DEFAULTINIT
- val ENUM : FlagSet = Flags.ENUM
+ val ENUM : FlagSet = Flags.JAVA_ENUM
val PARAMACCESSOR : FlagSet = Flags.PARAMACCESSOR
val CASEACCESSOR : FlagSet = Flags.CASEACCESSOR
val SYNTHETIC : FlagSet = Flags.SYNTHETIC
diff --git a/src/reflect/scala/reflect/internal/Flags.scala b/src/reflect/scala/reflect/internal/Flags.scala
index 1707061817..754b96a9dd 100644
--- a/src/reflect/scala/reflect/internal/Flags.scala
+++ b/src/reflect/scala/reflect/internal/Flags.scala
@@ -15,65 +15,65 @@ import scala.collection.{ mutable, immutable }
//
// Generated by mkFlagsTable() at Thu Feb 02 20:31:52 PST 2012
//
-// 0: PROTECTED/M
-// 1: OVERRIDE/M
-// 2: PRIVATE/M
-// 3: ABSTRACT/M
-// 4: DEFERRED/M
-// 5: FINAL/M
-// 6: METHOD
-// 7: INTERFACE/M
-// 8: MODULE
-// 9: IMPLICIT/M
-// 10: SEALED/M
-// 11: CASE/M
-// 12: MUTABLE/M
-// 13: PARAM/M
-// 14: PACKAGE
-// 15: MACRO/M
-// 16: BYNAMEPARAM/M CAPTURED COVARIANT/M
-// 17: CONTRAVARIANT/M INCONSTRUCTOR LABEL
-// 18: ABSOVERRIDE/M
-// 19: LOCAL/M
-// 20: JAVA/M
-// 21: SYNTHETIC
-// 22: STABLE
-// 23: STATIC/M
-// 24: CASEACCESSOR/M
-// 25: DEFAULTPARAM/M TRAIT/M
-// 26: BRIDGE
-// 27: ACCESSOR
-// 28: SUPERACCESSOR
-// 29: PARAMACCESSOR/M
-// 30: MODULEVAR
-// 31: LAZY/M
-// 32: IS_ERROR
-// 33: OVERLOADED
-// 34: LIFTED
-// 35: EXISTENTIAL MIXEDIN
-// 36: EXPANDEDNAME
-// 37: IMPLCLASS PRESUPER/M
-// 38: TRANS_FLAG
-// 39: LOCKED
-// 40: SPECIALIZED
-// 41: DEFAULTINIT/M
-// 42: VBRIDGE
-// 43: VARARGS
-// 44: TRIEDCOOKING
-// 45: SYNCHRONIZED/M
-// 46: ARTIFACT
-// 47: DEFAULTMETHOD/M
-// 48: ENUM
-// 49:
+// 0: PROTECTED/M
+// 1: OVERRIDE/M
+// 2: PRIVATE/M
+// 3: ABSTRACT/M
+// 4: DEFERRED/M
+// 5: FINAL/M
+// 6: METHOD
+// 7: INTERFACE/M
+// 8: MODULE
+// 9: IMPLICIT/M
+// 10: SEALED/M
+// 11: CASE/M
+// 12: MUTABLE/M
+// 13: PARAM/M
+// 14: PACKAGE
+// 15: MACRO/M
+// 16: BYNAMEPARAM/M CAPTURED COVARIANT/M
+// 17: CONTRAVARIANT/M INCONSTRUCTOR LABEL
+// 18: ABSOVERRIDE/M
+// 19: LOCAL/M
+// 20: JAVA/M
+// 21: SYNTHETIC
+// 22: STABLE
+// 23: STATIC/M
+// 24: CASEACCESSOR/M
+// 25: DEFAULTPARAM/M TRAIT/M
+// 26: BRIDGE
+// 27: ACCESSOR
+// 28: SUPERACCESSOR
+// 29: PARAMACCESSOR/M
+// 30: MODULEVAR
+// 31: LAZY/M
+// 32: IS_ERROR
+// 33: OVERLOADED
+// 34: LIFTED
+// 35: EXISTENTIAL MIXEDIN
+// 36: EXPANDEDNAME
+// 37: IMPLCLASS PRESUPER/M
+// 38: TRANS_FLAG
+// 39: LOCKED
+// 40: SPECIALIZED
+// 41: DEFAULTINIT/M
+// 42: VBRIDGE
+// 43: VARARGS
+// 44: TRIEDCOOKING
+// 45: SYNCHRONIZED/M
+// 46: ARTIFACT
+// 47: JAVA_DEFAULTMETHOD/M
+// 48: JAVA_ENUM
+// 49: JAVA_ANNOTATION
// 50:
-// 51: lateDEFERRED
-// 52: lateFINAL
-// 53: lateMETHOD
-// 54: lateINTERFACE
-// 55: lateMODULE
-// 56: notPROTECTED
-// 57: notOVERRIDE
-// 58: notPRIVATE
+// 51: lateDEFERRED
+// 52: lateFINAL
+// 53: lateMETHOD
+// 54: lateINTERFACE
+// 55: lateMODULE
+// 56: notPROTECTED
+// 57: notOVERRIDE
+// 58: notPRIVATE
// 59:
// 60:
// 61:
@@ -119,8 +119,9 @@ class ModifierFlags {
final val DEFAULTINIT = 1L << 41 // symbol is initialized to the default value: used by -Xcheckinit
final val ARTIFACT = 1L << 46 // symbol should be ignored when typechecking; will be marked ACC_SYNTHETIC in bytecode
// to see which symbols are marked as ARTIFACT, see scaladocs for FlagValues.ARTIFACT
- final val DEFAULTMETHOD = 1L << 47 // symbol is a java default method
- final val ENUM = 1L << 48 // symbol is an enum
+ final val JAVA_DEFAULTMETHOD = 1L << 47 // symbol is a java default method
+ final val JAVA_ENUM = 1L << 48 // symbol is a java enum
+ final val JAVA_ANNOTATION = 1L << 49 // symbol is a java annotation
// Overridden.
def flagToString(flag: Long): String = ""
@@ -172,12 +173,28 @@ class Flags extends ModifierFlags {
final val SYNCHRONIZED = 1L << 45 // symbol is a method which should be marked ACC_SYNCHRONIZED
// ------- shift definitions -------------------------------------------------------
+ //
+ // Flags from 1L to (1L << 50) are normal flags.
+ //
+ // The flags DEFERRED (1L << 4) to MODULE (1L << 8) have a `late` counterpart. Late flags change
+ // their counterpart from 0 to 1 after a specific phase (see below). The first late flag
+ // (lateDEFERRED) is at (1L << 51), i.e., late flags are shifted by 47. The last one is (1L << 55).
+ //
+ // The flags PROTECTED (1L) to PRIVATE (1L << 2) have a `not` counterpart. Negated flags change
+ // their counterpart from 1 to 0 after a specific phase (see below). They are shifted by 56, i.e.,
+ // the first negated flag (notPROTECTED) is at (1L << 56), the last at (1L << 58).
+ //
+ // Late and negative flags are only enabled after certain phases, implemented by the phaseNewFlags
+ // method of the SubComponent, so they implement a bit of a flag history.
+ //
+ // The flags (1L << 59) to (1L << 63) are currently unused. If added to the InitialFlags mask,
+ // they could be used as normal flags.
- final val InitialFlags = 0x0001FFFFFFFFFFFFL // flags that are enabled from phase 1.
- final val LateFlags = 0x00FE000000000000L // flags that override flags in 0x1FC.
- final val AntiFlags = 0x7F00000000000000L // flags that cancel flags in 0x07F
- final val LateShift = 47L
- final val AntiShift = 56L
+ final val InitialFlags = 0x0007FFFFFFFFFFFFL // normal flags, enabled from the first phase: 1L to (1L << 50)
+ final val LateFlags = 0x00F8000000000000L // flags that override flags in (1L << 4) to (1L << 8): DEFERRED, FINAL, INTERFACE, METHOD, MODULE
+ final val AntiFlags = 0x0700000000000000L // flags that cancel flags in 1L to (1L << 2): PROTECTED, OVERRIDE, PRIVATE
+ final val LateShift = 47
+ final val AntiShift = 56
// Flags which sketchily share the same slot
// 16: BYNAMEPARAM/M CAPTURED COVARIANT/M
@@ -243,7 +260,7 @@ class Flags extends ModifierFlags {
*/
final val ExplicitFlags =
PRIVATE | PROTECTED | ABSTRACT | FINAL | SEALED |
- OVERRIDE | CASE | IMPLICIT | ABSOVERRIDE | LAZY | DEFAULTMETHOD
+ OVERRIDE | CASE | IMPLICIT | ABSOVERRIDE | LAZY | JAVA_DEFAULTMETHOD
/** The two bridge flags */
final val BridgeFlags = BRIDGE | VBRIDGE
@@ -434,9 +451,9 @@ class Flags extends ModifierFlags {
case TRIEDCOOKING => "<triedcooking>" // (1L << 44)
case SYNCHRONIZED => "<synchronized>" // (1L << 45)
case ARTIFACT => "<artifact>" // (1L << 46)
- case DEFAULTMETHOD => "<defaultmethod>" // (1L << 47)
- case ENUM => "<enum>" // (1L << 48)
- case 0x2000000000000L => "" // (1L << 49)
+ case JAVA_DEFAULTMETHOD => "<defaultmethod>" // (1L << 47)
+ case JAVA_ENUM => "<enum>" // (1L << 48)
+ case JAVA_ANNOTATION => "<annotation>" // (1L << 49)
case 0x4000000000000L => "" // (1L << 50)
case `lateDEFERRED` => "<latedeferred>" // (1L << 51)
case `lateFINAL` => "<latefinal>" // (1L << 52)
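
Before moving on to HasFlags: the comment block added above describes how late and negated flags are folded into the effective flag set by shifting. The arithmetic is easy to check in isolation. The sketch below uses the masks and shift amounts from this hunk; the helper name `effective` and the assertions are illustrative, and the phase masking done by the real Symbols#flags (further down in this patch) is deliberately left out.

    object FlagShiftSketch {
      val PRIVATE      = 1L << 2
      val DEFERRED     = 1L << 4
      val lateDEFERRED = 1L << 51
      val notPRIVATE   = 1L << 58

      val LateFlags = 0x00F8000000000000L
      val AntiFlags = 0x0700000000000000L
      val LateShift = 47
      val AntiShift = 56

      // core of the computation performed by Symbols#flags, minus phase masking
      def effective(raw: Long): Long =
        (raw | ((raw & LateFlags) >>> LateShift)) & ~((raw & AntiFlags) >>> AntiShift)

      def main(args: Array[String]): Unit = {
        assert((effective(lateDEFERRED) & DEFERRED) != 0)         // lateDEFERRED turns DEFERRED on
        assert((effective(PRIVATE | notPRIVATE) & PRIVATE) == 0)  // notPRIVATE turns PRIVATE off
        println("flag shifting behaves as described")
      }
    }
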
diff --git a/src/reflect/scala/reflect/internal/HasFlags.scala b/src/reflect/scala/reflect/internal/HasFlags.scala
index aa8f4c532e..5162b15206 100644
--- a/src/reflect/scala/reflect/internal/HasFlags.scala
+++ b/src/reflect/scala/reflect/internal/HasFlags.scala
@@ -79,49 +79,50 @@ trait HasFlags {
// Tests which come through cleanly: both Symbol and Modifiers use these
// identically, testing for a single flag.
- def hasAbstractFlag = hasFlag(ABSTRACT)
- def hasAccessorFlag = hasFlag(ACCESSOR)
- def hasDefault = hasFlag(DEFAULTPARAM) && hasFlag(METHOD | PARAM) // Second condition disambiguates with TRAIT
- def hasEnumFlag = hasFlag(ENUM)
+ def hasAbstractFlag = hasFlag(ABSTRACT)
+ def hasAccessorFlag = hasFlag(ACCESSOR)
+ def hasDefault = hasFlag(DEFAULTPARAM) && hasFlag(METHOD | PARAM) // Second condition disambiguates with TRAIT
+ def hasJavaEnumFlag = hasFlag(JAVA_ENUM)
+ def hasJavaAnnotationFlag = hasFlag(JAVA_ANNOTATION)
@deprecated("Use isLocalToThis instead", "2.11.0")
- def hasLocalFlag = hasFlag(LOCAL)
- def isLocalToThis = hasFlag(LOCAL)
- def hasModuleFlag = hasFlag(MODULE)
- def hasPackageFlag = hasFlag(PACKAGE)
- def hasStableFlag = hasFlag(STABLE)
- def hasStaticFlag = hasFlag(STATIC)
- def isAbstractOverride = hasFlag(ABSOVERRIDE)
- def isAnyOverride = hasFlag(OVERRIDE | ABSOVERRIDE)
- def isCase = hasFlag(CASE)
- def isCaseAccessor = hasFlag(CASEACCESSOR)
- def isDeferred = hasFlag(DEFERRED)
- def isFinal = hasFlag(FINAL)
- def isArtifact = hasFlag(ARTIFACT)
- def isImplicit = hasFlag(IMPLICIT)
- def isInterface = hasFlag(INTERFACE)
- def isJavaDefined = hasFlag(JAVA)
- def isLabel = hasAllFlags(LABEL | METHOD) && !hasAccessorFlag
- def isLazy = hasFlag(LAZY)
- def isLifted = hasFlag(LIFTED)
- def isMacro = hasFlag(MACRO)
- def isMutable = hasFlag(MUTABLE)
- def isOverride = hasFlag(OVERRIDE)
- def isParamAccessor = hasFlag(PARAMACCESSOR)
- def isPrivate = hasFlag(PRIVATE)
+ def hasLocalFlag = hasFlag(LOCAL)
+ def isLocalToThis = hasFlag(LOCAL)
+ def hasModuleFlag = hasFlag(MODULE)
+ def hasPackageFlag = hasFlag(PACKAGE)
+ def hasStableFlag = hasFlag(STABLE)
+ def hasStaticFlag = hasFlag(STATIC)
+ def isAbstractOverride = hasFlag(ABSOVERRIDE)
+ def isAnyOverride = hasFlag(OVERRIDE | ABSOVERRIDE)
+ def isCase = hasFlag(CASE)
+ def isCaseAccessor = hasFlag(CASEACCESSOR)
+ def isDeferred = hasFlag(DEFERRED)
+ def isFinal = hasFlag(FINAL)
+ def isArtifact = hasFlag(ARTIFACT)
+ def isImplicit = hasFlag(IMPLICIT)
+ def isInterface = hasFlag(INTERFACE)
+ def isJavaDefined = hasFlag(JAVA)
+ def isLabel = hasAllFlags(LABEL | METHOD) && !hasAccessorFlag
+ def isLazy = hasFlag(LAZY)
+ def isLifted = hasFlag(LIFTED)
+ def isMacro = hasFlag(MACRO)
+ def isMutable = hasFlag(MUTABLE)
+ def isOverride = hasFlag(OVERRIDE)
+ def isParamAccessor = hasFlag(PARAMACCESSOR)
+ def isPrivate = hasFlag(PRIVATE)
@deprecated ("Use `hasPackageFlag` instead", "2.11.0")
- def isPackage = hasFlag(PACKAGE)
- def isPrivateLocal = hasAllFlags(PrivateLocal)
- def isProtected = hasFlag(PROTECTED)
- def isProtectedLocal = hasAllFlags(ProtectedLocal)
- def isPublic = hasNoFlags(PRIVATE | PROTECTED) && !hasAccessBoundary
- def isSealed = hasFlag(SEALED)
- def isSpecialized = hasFlag(SPECIALIZED)
- def isSuperAccessor = hasFlag(SUPERACCESSOR)
- def isSynthetic = hasFlag(SYNTHETIC)
- def isTrait = hasFlag(TRAIT) && !hasFlag(PARAM)
-
- def isDeferredOrDefault = hasFlag(DEFERRED | DEFAULTMETHOD)
- def isDeferredNotDefault = isDeferred && !hasFlag(DEFAULTMETHOD)
+ def isPackage = hasFlag(PACKAGE)
+ def isPrivateLocal = hasAllFlags(PrivateLocal)
+ def isProtected = hasFlag(PROTECTED)
+ def isProtectedLocal = hasAllFlags(ProtectedLocal)
+ def isPublic = hasNoFlags(PRIVATE | PROTECTED) && !hasAccessBoundary
+ def isSealed = hasFlag(SEALED)
+ def isSpecialized = hasFlag(SPECIALIZED)
+ def isSuperAccessor = hasFlag(SUPERACCESSOR)
+ def isSynthetic = hasFlag(SYNTHETIC)
+ def isTrait = hasFlag(TRAIT) && !hasFlag(PARAM)
+
+ def isDeferredOrJavaDefault = hasFlag(DEFERRED | JAVA_DEFAULTMETHOD)
+ def isDeferredNotJavaDefault = isDeferred && !hasFlag(JAVA_DEFAULTMETHOD)
def flagBitsToString(bits: Long): String = {
// Fast path for common case
diff --git a/src/reflect/scala/reflect/internal/Reporting.scala b/src/reflect/scala/reflect/internal/Reporting.scala
index f2de83bc5d..2534f59c97 100644
--- a/src/reflect/scala/reflect/internal/Reporting.scala
+++ b/src/reflect/scala/reflect/internal/Reporting.scala
@@ -8,11 +8,11 @@ package reflect
package internal
/** Provides delegates to the reporter doing the actual work.
- * All forwarding methods should be marked final,
- * but some subclasses out of our reach stil override them.
+ * All forwarding methods should be marked final,
+ * but some subclasses out of our reach still override them.
*
- * Eventually, this interface should be reduced to one method: `reporter`,
- * and clients should indirect themselves (reduce duplication of forwarders).
+ * Eventually, this interface should be reduced to one method: `reporter`,
+ * and clients should indirect themselves (reduce duplication of forwarders).
*/
trait Reporting { self : Positions =>
def reporter: Reporter
@@ -71,8 +71,8 @@ import util.Position
/** Report information, warnings and errors.
*
- * This describes the (future) external interface for issuing information, warnings and errors.
- * Currently, scala.tools.nsc.Reporter is used by sbt/ide/partest.
+ * This describes the (future) external interface for issuing information, warnings and errors.
+ * Currently, scala.tools.nsc.Reporter is used by sbt/ide/partest.
*/
abstract class Reporter {
protected def info0(pos: Position, msg: String, severity: Severity, force: Boolean): Unit
@@ -101,7 +101,10 @@ abstract class Reporter {
resetCount(ERROR)
}
- def flush(): Unit = { }
+ def flush(): Unit = ()
+
+ /** Finish reporting: print summaries, release resources. */
+ def finish(): Unit = ()
}
// TODO: move into superclass once partest cuts tie on Severity
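
The hunk above adds a finish() hook next to flush(). As a rough illustration of how a concrete reporter might use such a hook, here is a toy class (not the compiler's Reporter or ConsoleReporter) that counts messages and prints a summary when finished.

    class SummarizingReporter {
      private var errors, warnings = 0
      def error(msg: String): Unit   = { errors += 1;   Console.err.println("error: " + msg) }
      def warning(msg: String): Unit = { warnings += 1; Console.err.println("warning: " + msg) }
      def flush(): Unit = ()   // nothing buffered in this toy
      def finish(): Unit =     // print summaries, release resources
        if (errors + warnings > 0) Console.err.println(s"$errors error(s), $warnings warning(s) found")
    }
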
diff --git a/src/reflect/scala/reflect/internal/StdNames.scala b/src/reflect/scala/reflect/internal/StdNames.scala
index 26e3cf6d0b..f1016e1b76 100644
--- a/src/reflect/scala/reflect/internal/StdNames.scala
+++ b/src/reflect/scala/reflect/internal/StdNames.scala
@@ -871,7 +871,7 @@ trait StdNames {
val toFloat: NameType = "toFloat"
val toDouble: NameType = "toDouble"
- // primitive operation methods for structual types mostly
+ // primitive operation methods for structural types mostly
// overlap with the above, but not for these two.
val toCharacter: NameType = "toCharacter"
val toInteger: NameType = "toInteger"
diff --git a/src/reflect/scala/reflect/internal/SymbolPairs.scala b/src/reflect/scala/reflect/internal/SymbolPairs.scala
index 4763e77a34..a52d2d8510 100644
--- a/src/reflect/scala/reflect/internal/SymbolPairs.scala
+++ b/src/reflect/scala/reflect/internal/SymbolPairs.scala
@@ -217,7 +217,7 @@ abstract class SymbolPairs {
bs(nshifted) |= nmask
}
- /** Implements `bs1 * bs2 * {0..n} != 0.
+ /** Implements `bs1 * bs2 * {0..n} != 0`.
* Used in hasCommonParentAsSubclass */
private def intersectionContainsElementLeq(bs1: BitSet, bs2: BitSet, n: Int): Boolean = {
val nshifted = n >> 5
diff --git a/src/reflect/scala/reflect/internal/Symbols.scala b/src/reflect/scala/reflect/internal/Symbols.scala
index 478b1b9732..8a52f0b9d8 100644
--- a/src/reflect/scala/reflect/internal/Symbols.scala
+++ b/src/reflect/scala/reflect/internal/Symbols.scala
@@ -102,6 +102,9 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def isPrivateThis = (this hasFlag PRIVATE) && (this hasFlag LOCAL)
def isProtectedThis = (this hasFlag PROTECTED) && (this hasFlag LOCAL)
+ def isJavaEnum: Boolean = hasJavaEnumFlag
+ def isJavaAnnotation: Boolean = hasJavaAnnotationFlag
+
def newNestedSymbol(name: Name, pos: Position, newFlags: Long, isClass: Boolean): Symbol = name match {
case n: TermName => newTermSymbol(n, pos, newFlags)
case n: TypeName => if (isClass) newClassSymbol(n, pos, newFlags) else newNonClassSymbol(n, pos, newFlags)
@@ -495,8 +498,8 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
* failure to the point when that name is used for something, which is
* often to the point of never.
*/
- def newStubSymbol(name: Name, missingMessage: String): Symbol = name match {
- case n: TypeName => new StubClassSymbol(this, n, missingMessage)
+ def newStubSymbol(name: Name, missingMessage: String, isPackage: Boolean = false): Symbol = name match {
+ case n: TypeName => if (isPackage) new StubPackageClassSymbol(this, n, missingMessage) else new StubClassSymbol(this, n, missingMessage)
case _ => new StubTermSymbol(this, name.toTermName, missingMessage)
}
@@ -732,7 +735,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
final def flags: Long = {
if (Statistics.hotEnabled) Statistics.incCounter(flagsCount)
val fs = _rawflags & phase.flagMask
- (fs | ((fs & LateFlags) >>> LateShift)) & ~(fs >>> AntiShift)
+ (fs | ((fs & LateFlags) >>> LateShift)) & ~((fs & AntiFlags) >>> AntiShift)
}
def flags_=(fs: Long) = _rawflags = fs
def rawflags_=(x: Long) { _rawflags = x }
@@ -980,7 +983,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
private def isNotOverridden = (
owner.isClass && (
owner.isEffectivelyFinal
- || owner.isSealed && owner.children.forall(c => c.isEffectivelyFinal && (overridingSymbol(c) == NoSymbol))
+ || (owner.isSealed && owner.sealedChildren.forall(c => c.isEffectivelyFinal && (overridingSymbol(c) == NoSymbol)))
)
)
@@ -992,6 +995,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
isPrivate
|| isLocalToBlock
)
+ || isClass && originalOwner.isTerm && children.isEmpty // we track known subclasses of term-owned classes, use that to infer finality
)
/** Is this symbol effectively final or a concrete term member of sealed class whose children do not override it */
final def isEffectivelyFinalOrNotOverridden: Boolean = isEffectivelyFinal || (isTerm && !isDeferred && isNotOverridden)
@@ -1159,7 +1163,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
* phase check (if after flatten) in the (overridden) method "def owner" in
* ModuleSymbol / ClassSymbol. The `rawowner` field is not modified.
* - Owners are also changed in other situations, for example when moving trees into a new
- * lexical context, e.g. in the named/default arguments tranformation, or when translating
+ * lexical context, e.g. in the named/default arguments transformation, or when translating
* extension method definitions.
*
* In general when seeking the owner of a symbol, one should call `owner`.
@@ -2495,14 +2499,15 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
def associatedFile: AbstractFile = enclosingTopLevelClass.associatedFile
def associatedFile_=(f: AbstractFile) { abort("associatedFile_= inapplicable for " + this) }
- /** If this is a sealed class, its known direct subclasses.
+ /** If this is a sealed or local class, its known direct subclasses.
* Otherwise, the empty set.
*/
def children: Set[Symbol] = Set()
+ final def sealedChildren: Set[Symbol] = if (!isSealed) Set.empty else children
/** Recursively assemble all children of this symbol.
*/
- def sealedDescendants: Set[Symbol] = children.flatMap(_.sealedDescendants) + this
+ final def sealedDescendants: Set[Symbol] = if (!isSealed) Set(this) else children.flatMap(_.sealedDescendants) + this
@inline final def orElse(alt: => Symbol): Symbol = if (this ne NoSymbol) this else alt
@inline final def andAlso(f: Symbol => Unit): Symbol = { if (this ne NoSymbol) f(this) ; this }
@@ -3471,6 +3476,7 @@ trait Symbols extends api.Symbols { self: SymbolTable =>
override def companionSymbol = fail(NoSymbol)
}
class StubClassSymbol(owner0: Symbol, name0: TypeName, val missingMessage: String) extends ClassSymbol(owner0, owner0.pos, name0) with StubSymbol
+ class StubPackageClassSymbol(owner0: Symbol, name0: TypeName, val missingMessage: String) extends PackageClassSymbol(owner0, owner0.pos, name0) with StubSymbol
class StubTermSymbol(owner0: Symbol, name0: TermName, val missingMessage: String) extends TermSymbol(owner0, owner0.pos, name0) with StubSymbol
trait FreeSymbol extends Symbol {
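
To make the sealedDescendants change above concrete: only a sealed symbol contributes its children transitively, and a non-sealed symbol is its own only descendant. A toy model (a plain case class, not the compiler's Symbol) behaves like this:

    object SealedDescendantsSketch {
      final case class Sym(name: String, isSealed: Boolean, children: Set[Sym] = Set.empty) {
        def sealedDescendants: Set[Sym] =
          if (!isSealed) Set(this) else children.flatMap(_.sealedDescendants) + this
      }

      def main(args: Array[String]): Unit = {
        val leaf1 = Sym("Leaf1", isSealed = false)
        val leaf2 = Sym("Leaf2", isSealed = false)
        val root  = Sym("Root", isSealed = true, Set(leaf1, leaf2))
        println(root.sealedDescendants.map(_.name))   // Set(Leaf1, Leaf2, Root)
        println(leaf1.sealedDescendants.map(_.name))  // Set(Leaf1)
      }
    }
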
diff --git a/src/reflect/scala/reflect/internal/TreeGen.scala b/src/reflect/scala/reflect/internal/TreeGen.scala
index 894038dd0a..5c8ae17f5b 100644
--- a/src/reflect/scala/reflect/internal/TreeGen.scala
+++ b/src/reflect/scala/reflect/internal/TreeGen.scala
@@ -594,13 +594,12 @@ abstract class TreeGen {
* TupleN(x_1, ..., x_N)
* } ...)
*
- * If any of the P_i are variable patterns, the corresponding `x_i @ P_i' is not generated
+ * If any of the P_i are variable patterns, the corresponding `x_i @ P_i` is not generated
* and the variable constituting P_i is used instead of x_i
*
- * @param mapName The name to be used for maps (either map or foreach)
- * @param flatMapName The name to be used for flatMaps (either flatMap or foreach)
* @param enums The enumerators in the for expression
- * @param body The body of the for expression
+ * @param sugarBody The body of the for expression
+ * @param fresh A source of new names
*/
def mkFor(enums: List[Tree], sugarBody: Tree)(implicit fresh: FreshNameCreator): Tree = {
val (mapName, flatMapName, body) = sugarBody match {
diff --git a/src/reflect/scala/reflect/internal/Types.scala b/src/reflect/scala/reflect/internal/Types.scala
index 6ded3bcbe3..375218248e 100644
--- a/src/reflect/scala/reflect/internal/Types.scala
+++ b/src/reflect/scala/reflect/internal/Types.scala
@@ -739,7 +739,7 @@ trait Types
*
* `SubstThisAndSymMap` performs a breadth-first map over this type, which meant that
* symbol substitution occurred before `ThisType` substitution. Consequently, in substitution
- * of a `SingleType(ThisType(`from`), sym), symbols were rebound to `from` rather than `to`.
+ * of a `SingleType(ThisType(from), sym)`, symbols were rebound to `from` rather than `to`.
*/
def substThisAndSym(from: Symbol, to: Type, symsFrom: List[Symbol], symsTo: List[Symbol]): Type =
if (symsFrom eq symsTo) substThis(from, to)
@@ -763,7 +763,7 @@ trait Types
/** Apply `f` to each part of this type */
def foreach(f: Type => Unit) { new ForEachTypeTraverser(f).traverse(this) }
- /** Apply `pf' to each part of this type on which the function is defined */
+ /** Apply `pf` to each part of this type on which the function is defined */
def collect[T](pf: PartialFunction[Type, T]): List[T] = new CollectTypeCollector(pf).collect(this)
/** Apply `f` to each part of this type; children get mapped before their parents */
@@ -925,7 +925,7 @@ trait Types
def prefixString = trimPrefix(toString) + "#"
/** Convert toString avoiding infinite recursions by cutting off
- * after `maxTostringRecursions` recursion levels. Uses `safeToString`
+ * after `maxToStringRecursions` recursion levels. Uses `safeToString`
* to produce a string on each level.
*/
override final def toString: String = {
@@ -1496,7 +1496,7 @@ trait Types
} finally {
if (Statistics.canEnable) Statistics.popTimer(typeOpsStack, start)
}
- // [Martin] suppressing memo-ization solves the problem with "same type after erasure" errors
+ // [Martin] suppressing memoization solves the problem with "same type after erasure" errors
// when compiling with
// scalac scala.collection.IterableViewLike.scala scala.collection.IterableLike.scala
// I have not yet figured out precisely why this is the case.
@@ -2059,7 +2059,7 @@ trait Types
/** SI-3731, SI-8177: when prefix is changed to `newPre`, maintain consistency of prefix and sym
* (where the symbol refers to a declaration "embedded" in the prefix).
*
- * @returns newSym so that `newPre` binds `sym.name` to `newSym`,
+ * @return newSym so that `newPre` binds `sym.name` to `newSym`,
* to remain consistent with `pre` previously binding `sym.name` to `sym`.
*
* `newSym` and `sym` are conceptually the same symbols, but some change to our `prefix`
@@ -2602,7 +2602,7 @@ trait Types
* based on the bounds of the type parameters of the quantified type
* In Scala syntax, given a java-defined class C[T <: String], the existential type C[_]
* is improved to C[_ <: String] before skolemization, which captures (get it?) what Java does:
- * enter the type paramers' bounds into the context when checking subtyping/type equality of existential types
+ * enter the type parameters' bounds into the context when checking subtyping/type equality of existential types
*
* Also tried doing this once during class file parsing or when creating the existential type,
* but that causes cyclic errors because it happens too early.
@@ -4277,7 +4277,7 @@ trait Types
matchesType(res1, res2.substSym(tparams2, tparams1), alwaysMatchSimple)
(tp1, tp2) match {
case (MethodType(params1, res1), MethodType(params2, res2)) =>
- params1.length == params2.length && // useful pre-secreening optimization
+ params1.length == params2.length && // useful pre-screening optimization
matchingParams(params1, params2, tp1.isInstanceOf[JavaMethodType], tp2.isInstanceOf[JavaMethodType]) &&
matchesType(res1, res2, alwaysMatchSimple) &&
tp1.isImplicit == tp2.isImplicit
diff --git a/src/reflect/scala/reflect/internal/pickling/UnPickler.scala b/src/reflect/scala/reflect/internal/pickling/UnPickler.scala
index 464bbad2cd..15e0f9cc14 100644
--- a/src/reflect/scala/reflect/internal/pickling/UnPickler.scala
+++ b/src/reflect/scala/reflect/internal/pickling/UnPickler.scala
@@ -168,7 +168,7 @@ abstract class UnPickler {
}
/** If entry at `i` is undefined, define it by performing
- * operation `op` with `readIndex at start of i'th
+ * operation `op` with `readIndex` at start of i'th
* entry. Restore `readIndex` afterwards.
*/
protected def at[T <: AnyRef](i: Int, op: () => T): T = {
@@ -397,7 +397,7 @@ abstract class UnPickler {
val sym = readSymbolRef() match {
case stub: StubSymbol if !stub.isClass =>
// SI-8502 This allows us to create a stub for a unpickled reference to `missingPackage.Foo`.
- stub.owner.newStubSymbol(stub.name.toTypeName, stub.missingMessage)
+ stub.owner.newStubSymbol(stub.name.toTypeName, stub.missingMessage, isPackage = true)
case sym => sym
}
ThisType(sym)
diff --git a/src/reflect/scala/reflect/internal/tpe/TypeConstraints.scala b/src/reflect/scala/reflect/internal/tpe/TypeConstraints.scala
index f79099213a..e321a07f51 100644
--- a/src/reflect/scala/reflect/internal/tpe/TypeConstraints.scala
+++ b/src/reflect/scala/reflect/internal/tpe/TypeConstraints.scala
@@ -99,7 +99,7 @@ private[internal] trait TypeConstraints {
// a lower bound despite the fact that Nothing is always a lower bound. My current
// supposition is that the side-effecting type constraint accumulation mechanism
// depends on these subtype tests being performed to make forward progress when
- // there are mutally recursive type vars.
+ // there are mutually recursive type vars.
// See pos/t6367 and pos/t6499 for the competing test cases.
val mustConsider = tp.typeSymbol match {
case NothingClass => true
diff --git a/src/reflect/scala/reflect/internal/tpe/TypeMaps.scala b/src/reflect/scala/reflect/internal/tpe/TypeMaps.scala
index 891fccb3e1..d6d216863b 100644
--- a/src/reflect/scala/reflect/internal/tpe/TypeMaps.scala
+++ b/src/reflect/scala/reflect/internal/tpe/TypeMaps.scala
@@ -739,7 +739,7 @@ private[internal] trait TypeMaps {
substFor(sym)
case ClassInfoType(parents, decls, sym) =>
val parents1 = parents mapConserve this
- // We don't touch decls here; they will be touched when an enclosing TreeSubstitutor
+ // We don't touch decls here; they will be touched when an enclosing TreeSubstituter
// transforms the tree that defines them.
if (parents1 eq parents) tp
else ClassInfoType(parents1, decls, sym)
diff --git a/src/reflect/scala/reflect/internal/transform/Erasure.scala b/src/reflect/scala/reflect/internal/transform/Erasure.scala
index 5a03c1eeaa..9853a0fa0c 100644
--- a/src/reflect/scala/reflect/internal/transform/Erasure.scala
+++ b/src/reflect/scala/reflect/internal/transform/Erasure.scala
@@ -90,7 +90,7 @@ trait Erasure {
}
}
- /** Does this vakue class have an underlying type that's a type parameter of
+ /** Does this value class have an underlying type that's a type parameter of
* the class itself?
* This method needs to be called at a phase no later than erasurephase
*/
diff --git a/src/reflect/scala/reflect/internal/util/AbstractFileClassLoader.scala b/src/reflect/scala/reflect/internal/util/AbstractFileClassLoader.scala
index 30dcbc21ca..5cbdb92664 100644
--- a/src/reflect/scala/reflect/internal/util/AbstractFileClassLoader.scala
+++ b/src/reflect/scala/reflect/internal/util/AbstractFileClassLoader.scala
@@ -12,7 +12,7 @@ import java.security.cert.Certificate
import java.security.{ ProtectionDomain, CodeSource }
import java.util.{ Collections => JCollections, Enumeration => JEnumeration }
-/** A class loader that loads files from a {@link scala.tools.nsc.io.AbstractFile}.
+/** A class loader that loads files from a [[scala.reflect.io.AbstractFile]].
*
* @author Lex Spoon
*/
diff --git a/src/reflect/scala/reflect/internal/util/ScalaClassLoader.scala b/src/reflect/scala/reflect/internal/util/ScalaClassLoader.scala
index 41011f6c6b..296934e253 100644
--- a/src/reflect/scala/reflect/internal/util/ScalaClassLoader.scala
+++ b/src/reflect/scala/reflect/internal/util/ScalaClassLoader.scala
@@ -11,7 +11,8 @@ import java.lang.reflect.{ Constructor, Modifier, Method }
import java.io.{ File => JFile }
import java.net.{ URLClassLoader => JURLClassLoader }
import java.net.URL
-import scala.reflect.runtime.ReflectionUtils.unwrapHandler
+import scala.reflect.internal.FatalError
+import scala.reflect.runtime.ReflectionUtils.{ show, unwrapHandler }
import ScalaClassLoader._
import scala.util.control.Exception.{ catching }
import scala.language.implicitConversions
@@ -46,6 +47,33 @@ trait ScalaClassLoader extends JClassLoader {
def create(path: String): AnyRef =
tryToInitializeClass[AnyRef](path).map(_.newInstance()).orNull
+ /** Create an instance with ctor args, or invoke errorFn before throwing. */
+ def create[T <: AnyRef : ClassTag](path: String, errorFn: String => Unit)(args: AnyRef*): T = {
+ def fail(msg: String) = error(msg, new IllegalArgumentException(msg))
+ def error(msg: String, e: Throwable) = { errorFn(msg) ; throw e }
+ try {
+ val clazz = Class.forName(path, /*initialize =*/ true, /*loader =*/ this)
+ if (classTag[T].runtimeClass isAssignableFrom clazz) {
+ val ctor = {
+ val maybes = clazz.getConstructors filter (c => c.getParameterCount == args.size &&
+ (c.getParameterTypes zip args).forall { case (k, a) => k isAssignableFrom a.getClass })
+ if (maybes.size == 1) maybes.head
+ else fail(s"Constructor must accept arg list (${args map (_.getClass.getName) mkString ", "}): ${path}")
+ }
+ (ctor.newInstance(args: _*)).asInstanceOf[T]
+ } else {
+ errorFn(s"""Loader for ${classTag[T]}: [${show(classTag[T].runtimeClass.getClassLoader)}]
+ |Loader for ${clazz.getName}: [${show(clazz.getClassLoader)}]""".stripMargin)
+ fail(s"Not a ${classTag[T]}: ${path}")
+ }
+ } catch {
+ case e: ClassNotFoundException =>
+ error(s"Class not found: ${path}", e)
+ case e @ (_: LinkageError | _: ReflectiveOperationException) =>
+ error(s"Unable to create instance: ${path}: ${e.toString}", e)
+ }
+ }
+
/** The actual bytes for a class file, or an empty array if it can't be found. */
def classBytes(className: String): Array[Byte] = classAsStream(className) match {
case null => Array()
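
The new create overload above selects a constructor reflectively by arity and argument assignability. The same selection logic can be exercised outside the class-loader machinery; the sketch below is standalone (java.io.File is just a convenient target with an unambiguous String constructor) and is not the repl's code path.

    import scala.reflect.{ classTag, ClassTag }

    object CtorMatchSketch {
      def create[T <: AnyRef : ClassTag](clazz: Class[_], args: AnyRef*): T = {
        require(classTag[T].runtimeClass.isAssignableFrom(clazz), s"Not a ${classTag[T]}: ${clazz.getName}")
        // keep constructors whose arity and parameter types fit the supplied arguments
        val candidates = clazz.getConstructors.filter { c =>
          c.getParameterCount == args.size &&
          (c.getParameterTypes zip args).forall { case (k, a) => k.isAssignableFrom(a.getClass) }
        }
        require(candidates.size == 1, s"need exactly one matching constructor, found ${candidates.size}")
        candidates.head.newInstance(args: _*).asInstanceOf[T]
      }

      def main(args: Array[String]): Unit = {
        val f = create[java.io.File](classOf[java.io.File], "/tmp/example.txt")
        println(f.getAbsolutePath)
      }
    }
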
diff --git a/src/reflect/scala/reflect/macros/FrontEnds.scala b/src/reflect/scala/reflect/macros/FrontEnds.scala
index a770f325b2..8ad41382a8 100644
--- a/src/reflect/scala/reflect/macros/FrontEnds.scala
+++ b/src/reflect/scala/reflect/macros/FrontEnds.scala
@@ -12,7 +12,7 @@ package macros
trait FrontEnds {
self: blackbox.Context =>
- /** For sending a message which should not be labeled as a warning/error,
+ /** For sending a message which should not be labelled as a warning/error,
* but also shouldn't require -verbose to be visible.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
diff --git a/src/reflect/scala/reflect/runtime/JavaMirrors.scala b/src/reflect/scala/reflect/runtime/JavaMirrors.scala
index 8c32a92ecd..d0670f337a 100644
--- a/src/reflect/scala/reflect/runtime/JavaMirrors.scala
+++ b/src/reflect/scala/reflect/runtime/JavaMirrors.scala
@@ -755,6 +755,7 @@ private[scala] trait JavaMirrors extends internal.SymbolTable with api.JavaUnive
val ifaces = jclazz.getGenericInterfaces.toList map typeToScala
val isAnnotation = JavaAccFlags(jclazz).isAnnotation
if (isAnnotation) AnnotationClass.tpe :: ClassfileAnnotationClass.tpe :: ifaces
+ else if (jclazz.isInterface) ObjectTpe :: ifaces // interfaces have Object as superclass in the classfile (see jvm spec), but getGenericSuperclass seems to return null
else (if (jsuperclazz == null) AnyTpe else typeToScala(jsuperclazz)) :: ifaces
} finally {
parentsLevel -= 1
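
The new branch above relies on a quirk of Java reflection that is easy to verify: for an interface, getGenericSuperclass returns null even though the classfile records Object as the superclass (JVMS 4.1). A quick standalone check:

    object InterfaceSuperclassSketch {
      def main(args: Array[String]): Unit = {
        println(classOf[Runnable].getGenericSuperclass)         // null, although the classfile says Object
        println(classOf[String].getGenericSuperclass)           // class java.lang.Object
        println(classOf[Runnable].getGenericInterfaces.toList)  // List() -- Runnable extends nothing
      }
    }
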
diff --git a/src/reflect/scala/reflect/runtime/SymbolTable.scala b/src/reflect/scala/reflect/runtime/SymbolTable.scala
index 092bbd711f..eee2118898 100644
--- a/src/reflect/scala/reflect/runtime/SymbolTable.scala
+++ b/src/reflect/scala/reflect/runtime/SymbolTable.scala
@@ -3,7 +3,7 @@ package reflect
package runtime
/**
- * This symbol table trait fills in the definitions so that class information is obtained by refection.
+ * This symbol table trait fills in the definitions so that class information is obtained by reflection.
* It can be used either from a reflexive universe (class scala.reflect.runtime.JavaUniverse), or else from
* a runtime compiler that uses reflection to get a class information (class scala.tools.reflect.ReflectGlobal)
*/
diff --git a/src/reflect/scala/reflect/runtime/SynchronizedTypes.scala b/src/reflect/scala/reflect/runtime/SynchronizedTypes.scala
index 9bcf85dd6f..1d02cc7e89 100644
--- a/src/reflect/scala/reflect/runtime/SynchronizedTypes.scala
+++ b/src/reflect/scala/reflect/runtime/SynchronizedTypes.scala
@@ -82,9 +82,9 @@ private[reflect] trait SynchronizedTypes extends internal.Types { self: SymbolTa
override def toStringSubjects = _toStringSubjects.get
/* The idea of caches is as follows.
- * When in reflexive mode, a cache is either null, or one sentinal
+ * When in reflexive mode, a cache is either null, or one sentinel
* value representing undefined or the final defined
- * value. Hence, we can ask in non-synchronized ode whether the cache field
+ * value. Hence, we can ask in non-synchronized mode whether the cache field
* is non null and different from the sentinel (if a sentinel exists).
* If that's true, the cache value is current.
* Otherwise we arrive in one of the defined... methods listed below
diff --git a/src/reflect/scala/reflect/runtime/TwoWayCache.scala b/src/reflect/scala/reflect/runtime/TwoWayCache.scala
index d0fc3dac74..6c1ca5b571 100644
--- a/src/reflect/scala/reflect/runtime/TwoWayCache.scala
+++ b/src/reflect/scala/reflect/runtime/TwoWayCache.scala
@@ -26,8 +26,7 @@ private[runtime] class TwoWayCache[J, S] {
private object SomeRef {
def unapply[T](optRef: Option[WeakReference[T]]): Option[T] =
if (optRef.nonEmpty) {
- val result = optRef.get.get
- if (result != null) Some(result) else None
+ Option(optRef.get.get)
} else None
}
diff --git a/src/reflect/scala/reflect/runtime/TwoWayCaches.scala b/src/reflect/scala/reflect/runtime/TwoWayCaches.scala
index 6e2890e536..6ce0c0a728 100644
--- a/src/reflect/scala/reflect/runtime/TwoWayCaches.scala
+++ b/src/reflect/scala/reflect/runtime/TwoWayCaches.scala
@@ -26,8 +26,7 @@ private[runtime] trait TwoWayCaches { self: SymbolTable =>
private object SomeRef {
def unapply[T](optRef: Option[WeakReference[T]]): Option[T] =
if (optRef.nonEmpty) {
- val result = optRef.get.get
- if (result != null) Some(result) else None
+ Option(optRef.get.get)
} else None
}
diff --git a/src/repl-jline/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala b/src/repl-jline/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala
index b6c9792ec0..53a06ca972 100644
--- a/src/repl-jline/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala
+++ b/src/repl-jline/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala
@@ -7,9 +7,9 @@ package scala.tools.nsc.interpreter.jline
import _root_.jline.console.history.PersistentHistory
-
import scala.tools.nsc.interpreter
-import scala.tools.nsc.io.{File, Path}
+import scala.reflect.io.{ File, Path }
+import scala.tools.nsc.Properties.{ propOrNone, userHome }
/** TODO: file locking.
*/
@@ -85,9 +85,9 @@ object FileBackedHistory {
// val ContinuationChar = '\003'
// val ContinuationNL: String = Array('\003', '\n').mkString
- import scala.tools.nsc.Properties.userHome
-
- def defaultFileName = ".scala_history"
+ final val defaultFileName = ".scala_history"
- def defaultFile: File = File(Path(userHome) / defaultFileName)
+ def defaultFile: File = File(
+ propOrNone("scala.shell.histfile") map (Path.apply) getOrElse (Path(userHome) / defaultFileName)
+ )
}
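
The change above lets a scala.shell.histfile system property override the history file location. The lookup order is easy to reproduce outside the repl; the sketch below uses plain java.nio paths instead of scala.reflect.io, with the property name taken from the hunk.

    object HistFileSketch {
      def defaultHistoryPath: java.nio.file.Path = {
        val fallback = java.nio.file.Paths.get(sys.props("user.home"), ".scala_history")
        sys.props.get("scala.shell.histfile").map(java.nio.file.Paths.get(_)).getOrElse(fallback)
      }

      def main(args: Array[String]): Unit =
        println(defaultHistoryPath)  // e.g. run with -Dscala.shell.histfile=/tmp/hist to override
    }
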
diff --git a/src/repl-jline/scala/tools/nsc/interpreter/jline/JLineHistory.scala b/src/repl-jline/scala/tools/nsc/interpreter/jline/JLineHistory.scala
index 1f6a1f7022..3bc259252a 100644
--- a/src/repl-jline/scala/tools/nsc/interpreter/jline/JLineHistory.scala
+++ b/src/repl-jline/scala/tools/nsc/interpreter/jline/JLineHistory.scala
@@ -15,7 +15,7 @@ import scala.tools.nsc.interpreter
import scala.tools.nsc.interpreter.session.{History, SimpleHistory}
-/** A straight scalification of the jline interface which mixes
+/** A straight scalafication of the jline interface which mixes
* in the sparse jline-independent one too.
*/
trait JLineHistory extends JHistory with History {
diff --git a/src/repl/scala/tools/nsc/interpreter/Formatting.scala b/src/repl/scala/tools/nsc/interpreter/Formatting.scala
index 844997429c..4a9548730a 100644
--- a/src/repl/scala/tools/nsc/interpreter/Formatting.scala
+++ b/src/repl/scala/tools/nsc/interpreter/Formatting.scala
@@ -30,3 +30,6 @@ class Formatting(indent: Int) {
}
)
}
+object Formatting {
+ def forPrompt(prompt: String) = new Formatting(prompt.lines.toList.last.length)
+}
diff --git a/src/repl/scala/tools/nsc/interpreter/ILoop.scala b/src/repl/scala/tools/nsc/interpreter/ILoop.scala
index e8c6d02d1e..612bdd98c9 100644
--- a/src/repl/scala/tools/nsc/interpreter/ILoop.scala
+++ b/src/repl/scala/tools/nsc/interpreter/ILoop.scala
@@ -56,13 +56,9 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
private var globalFuture: Future[Boolean] = _
- /** Print a welcome message */
- def printWelcome() {
- echo(s"""
- |Welcome to Scala $versionString ($javaVmName, Java $javaVersion).
- |Type in expressions to have them evaluated.
- |Type :help for more information.""".trim.stripMargin
- )
+ /** Print a welcome message! */
+ def printWelcome(): Unit = {
+ Option(replProps.welcome) filter (!_.isEmpty) foreach echo
replinfo("[info] started at " + new java.util.Date)
}
@@ -111,10 +107,6 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
}
class ILoopInterpreter extends IMain(settings, out) {
- // the expanded prompt but without color escapes and without leading newline, for purposes of indenting
- override lazy val formatting: Formatting = new Formatting(
- (replProps.promptString format Properties.versionNumberString).lines.toList.last.length
- )
override protected def parentClassLoader =
settings.explicitParentLoader.getOrElse( classOf[ILoop].getClassLoader )
}
@@ -679,9 +671,8 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
}
def verbosity() = {
- val old = intp.printResults
- intp.printResults = !old
- echo("Switched " + (if (old) "off" else "on") + " result printing.")
+ intp.printResults = !intp.printResults
+ replinfo(s"Result printing is ${ if (intp.printResults) "on" else "off" }.")
}
/** Run one command submitted by the user. Two values are returned:
@@ -761,7 +752,8 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
private object paste extends Pasted {
import scala.util.matching.Regex.quote
- val ContinueString = " | "
+ val ContinuePrompt = replProps.continuePrompt
+ val ContinueString = replProps.continueText // " | "
val PromptString = prompt.lines.toList.last
val anyPrompt = s"""\\s*(?:${quote(PromptString.trim)}|${quote(AltPromptString.trim)})\\s*""".r
@@ -805,7 +797,7 @@ class ILoop(in0: Option[BufferedReader], protected val out: JPrintWriter)
echo("You typed two blank lines. Starting a new command.")
None
case IR.Incomplete =>
- in.readLine(paste.ContinueString) match {
+ in.readLine(paste.ContinuePrompt) match {
case null =>
// we know compilation is going to fail since we're at EOF and the
// parser thinks the input is still incomplete, but since this is
@@ -948,8 +940,9 @@ object ILoop {
Console.withOut(ostream) {
val output = new JPrintWriter(new OutputStreamWriter(ostream), true) {
// skip margin prefix for continuation lines, unless preserving session text for test
+ // should test for repl.paste.ContinueString or replProps.continueText.contains(ch)
override def write(str: String) =
- if (!inSession && (str forall (ch => ch.isWhitespace || ch == '|'))) () // repl.paste.ContinueString
+ if (!inSession && (str forall (ch => ch.isWhitespace || ch == '|'))) ()
else super.write(str)
}
val input = new BufferedReader(new StringReader(code.trim + "\n")) {
diff --git a/src/repl/scala/tools/nsc/interpreter/IMain.scala b/src/repl/scala/tools/nsc/interpreter/IMain.scala
index 2550a5dc57..06ae179da9 100644
--- a/src/repl/scala/tools/nsc/interpreter/IMain.scala
+++ b/src/repl/scala/tools/nsc/interpreter/IMain.scala
@@ -113,9 +113,7 @@ class IMain(@BeanProperty val factory: ScriptEngineFactory, initialSettings: Set
def this() = this(new Settings())
// the expanded prompt but without color escapes and without leading newline, for purposes of indenting
- lazy val formatting: Formatting = new Formatting(
- (replProps.promptString format Properties.versionNumberString).lines.toList.last.length
- )
+ lazy val formatting = Formatting.forPrompt(replProps.promptText)
lazy val reporter: ReplReporter = new ReplReporter(this)
import formatting.indentCode
@@ -890,7 +888,7 @@ class IMain(@BeanProperty val factory: ScriptEngineFactory, initialSettings: Set
/** Code to import bound names from previous lines - accessPath is code to
* append to objectName to access anything bound by request.
*/
- lazy val ComputedImports(importsPreamble, importsTrailer, accessPath) =
+ lazy val ComputedImports(headerPreamble, importsPreamble, importsTrailer, accessPath) =
exitingTyper(importsCode(referencedNames.toSet, ObjectSourceCode, definesClass))
/** the line of code to compute */
@@ -910,6 +908,7 @@ class IMain(@BeanProperty val factory: ScriptEngineFactory, initialSettings: Set
else List("def %s = %s".format("$line", tquoted(originalLine)), "def %s = Nil".format("$trees"))
}
def preamble = s"""
+ |$headerPreamble
|${preambleHeader format lineRep.readName}
|${envLines mkString (" ", ";\n ", ";\n")}
|$importsPreamble
diff --git a/src/repl/scala/tools/nsc/interpreter/Imports.scala b/src/repl/scala/tools/nsc/interpreter/Imports.scala
index 3ec77e46f1..5b231d94b6 100644
--- a/src/repl/scala/tools/nsc/interpreter/Imports.scala
+++ b/src/repl/scala/tools/nsc/interpreter/Imports.scala
@@ -70,7 +70,10 @@ trait Imports {
/** Compute imports that allow definitions from previous
* requests to be visible in a new request. Returns
- * three pieces of related code:
+ * three or four pieces of related code:
+ *
+ * 0. Header code fragment that should go at the beginning
+ * of the compilation unit, specifically, import Predef.
*
* 1. An initial code fragment that should go before
* the code of the new request.
@@ -91,30 +94,34 @@ trait Imports {
* (3) It imports multiple same-named implicits, but only the
* last one imported is actually usable.
*/
- case class ComputedImports(prepend: String, append: String, access: String)
+ case class ComputedImports(header: String, prepend: String, append: String, access: String)
protected def importsCode(wanted: Set[Name], wrapper: Request#Wrapper, definesClass: Boolean): ComputedImports = {
+ val header, code, trailingBraces, accessPath = new StringBuilder
+ val currentImps = mutable.HashSet[Name]()
+ var predefEscapes = false // only emit predef import header if name not resolved in history, loosely
+
/** Narrow down the list of requests from which imports
* should be taken. Removes requests which cannot contribute
* useful imports for the specified set of wanted names.
*/
- case class ReqAndHandler(req: Request, handler: MemberHandler) { }
+ case class ReqAndHandler(req: Request, handler: MemberHandler)
def reqsToUse: List[ReqAndHandler] = {
/** Loop through a list of MemberHandlers and select which ones to keep.
- * 'wanted' is the set of names that need to be imported.
+ * 'wanted' is the set of names that need to be imported.
*/
def select(reqs: List[ReqAndHandler], wanted: Set[Name]): List[ReqAndHandler] = {
// Single symbol imports might be implicits! See bug #1752. Rather than
// try to finesse this, we will mimic all imports for now.
def keepHandler(handler: MemberHandler) = handler match {
- /* While defining classes in class based mode - implicits are not needed. */
+ // While defining classes in class based mode - implicits are not needed.
case h: ImportHandler if isClassBased && definesClass => h.importedNames.exists(x => wanted.contains(x))
case _: ImportHandler => true
case x => x.definesImplicit || (x.definedNames exists wanted)
}
reqs match {
- case Nil => Nil
+ case Nil => predefEscapes = wanted contains PredefModule.name ; Nil
case rh :: rest if !keepHandler(rh.handler) => select(rest, wanted)
case rh :: rest =>
import rh.handler._
@@ -127,9 +134,6 @@ trait Imports {
select(allReqAndHandlers reverseMap { case (r, h) => ReqAndHandler(r, h) }, wanted).reverse
}
- val code, trailingBraces, accessPath = new StringBuilder
- val currentImps = mutable.HashSet[Name]()
-
// add code for a new object to hold some imports
def addWrapper() {
import nme.{ INTERPRETER_IMPORT_WRAPPER => iw }
@@ -146,6 +150,9 @@ trait Imports {
try op finally addWrapper()
}
+ // imports from Predef are relocated to the template header to allow hiding.
+ def checkHeader(h: ImportHandler) = h.referencedNames contains PredefModule.name
+
// loop through previous requests, adding imports for each one
wrapBeforeAndAfter {
// Reusing a single temporary value when import from a line with multiple definitions.
@@ -153,6 +160,9 @@ trait Imports {
for (ReqAndHandler(req, handler) <- reqsToUse) {
val objName = req.lineRep.readPathInstance
handler match {
+ case h: ImportHandler if checkHeader(h) =>
+ header.clear()
+ header append f"${h.member}%n"
// If the user entered an import, then just use it; add an import wrapping
// level if the import might conflict with some other import
case x: ImportHandler if x.importsWildcard =>
@@ -194,7 +204,8 @@ trait Imports {
}
}
- ComputedImports(code.toString, trailingBraces.toString, accessPath.toString)
+ val computedHeader = if (predefEscapes) header.toString else ""
+ ComputedImports(computedHeader, code.toString, trailingBraces.toString, accessPath.toString)
}
private def allReqAndHandlers =
diff --git a/src/repl/scala/tools/nsc/interpreter/Pasted.scala b/src/repl/scala/tools/nsc/interpreter/Pasted.scala
index 5f388eb15b..f8d8c2ddb1 100644
--- a/src/repl/scala/tools/nsc/interpreter/Pasted.scala
+++ b/src/repl/scala/tools/nsc/interpreter/Pasted.scala
@@ -21,7 +21,13 @@ abstract class Pasted {
def PromptString: String
def AltPromptString: String = "scala> "
- private val testBoth = PromptString != AltPromptString
+ /* `testBoth` cannot be a val, as `Pasted` is inherited by `object paste` in ILoop,
+ which would cause `val testBoth` to be initialized before `val PromptString` was.
+
+ object paste extends Pasted {
+ val PromptString = prompt.lines.toList.last
+ */
+ private def testBoth = PromptString != AltPromptString
private val spacey = " \t".toSet
def matchesPrompt(line: String) = matchesString(line, PromptString) || testBoth && matchesString(line, AltPromptString)
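
The comment added above explains why testBoth must be a def: the trait's initializer runs before the subclass has assigned PromptString. The pitfall is easy to reproduce with a toy trait (the names here are illustrative, not the repl classes):

    object InitOrderSketch {
      trait Pastedish {
        def PromptString: String          // implemented as a val below
        val asVal = PromptString != null  // evaluated during trait init, before the subclass val is set
        def asDef = PromptString != null
      }
      object paste extends Pastedish {
        val PromptString = "scala> "
      }

      def main(args: Array[String]): Unit = {
        println(paste.asVal)  // false: PromptString was still null when asVal was computed
        println(paste.asDef)  // true
      }
    }
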
diff --git a/src/repl/scala/tools/nsc/interpreter/ReplProps.scala b/src/repl/scala/tools/nsc/interpreter/ReplProps.scala
index df65e9974d..588d92f81b 100644
--- a/src/repl/scala/tools/nsc/interpreter/ReplProps.scala
+++ b/src/repl/scala/tools/nsc/interpreter/ReplProps.scala
@@ -6,9 +6,11 @@
package scala.tools.nsc
package interpreter
-import Properties.shellPromptString
+import Properties.{ javaVersion, javaVmName, shellPromptString, shellWelcomeString,
+ versionString, versionNumberString }
import scala.sys._
import Prop._
+import java.util.{ Formattable, FormattableFlags, Formatter }
class ReplProps {
private def bool(name: String) = BooleanProp.keyExists(name)
@@ -22,12 +24,44 @@ class ReplProps {
val trace = bool("scala.repl.trace")
val power = bool("scala.repl.power")
- // Handy system prop for shell prompt, or else pick it up from compiler.properties
- val promptString = Prop[String]("scala.repl.prompt").option getOrElse (if (info) "%nscala %s> " else shellPromptString)
- val prompt = {
+ def enversion(s: String) = {
+ import FormattableFlags._
+ val v = new Formattable {
+ override def formatTo(formatter: Formatter, flags: Int, width: Int, precision: Int) = {
+ val version = if ((flags & ALTERNATE) != 0) versionNumberString else versionString
+ val left = if ((flags & LEFT_JUSTIFY) != 0) "-" else ""
+ val w = if (width >= 0) s"$width" else ""
+ val p = if (precision >= 0) s".$precision" else ""
+ val fmt = s"%${left}${w}${p}s"
+ formatter.format(fmt, version)
+ }
+ }
+ s.format(v, javaVersion, javaVmName)
+ }
+ def encolor(s: String) = {
import scala.io.AnsiColor.{ MAGENTA, RESET }
- val p = promptString format Properties.versionNumberString
- if (colorOk) s"$MAGENTA$p$RESET" else p
+ if (colorOk) s"$MAGENTA$s$RESET" else s
+ }
+
+ // Handy system prop for shell prompt, or else pick it up from compiler.properties
+ val promptString = Prop[String]("scala.repl.prompt").option getOrElse (if (info) "%nscala %#s> " else shellPromptString)
+ val promptText = enversion(promptString)
+ val prompt = encolor(promptText)
+
+ // Prompt for continued input, will be right-adjusted to width of the primary prompt
+ val continueString = Prop[String]("scala.repl.continue").option getOrElse "| "
+ val continueText = {
+ val text = enversion(continueString)
+ val margin = promptText.lines.toList.last.length - text.length
+ if (margin > 0) " " * margin + text else text
+ }
+ val continuePrompt = encolor(continueText)
+
+ // Next time.
+ //def welcome = enversion(Prop[String]("scala.repl.welcome") or shellWelcomeString)
+ def welcome = enversion {
+ val p = Prop[String]("scala.repl.welcome")
+ if (p.isSet) p.get else shellWelcomeString
}
/** CSV of paged,across to enable pagination or `-x` style
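
The enversion helper above leans on java.util.Formattable so a single argument can render either the full version string (%s) or just the version number (%#s). A trimmed-down, standalone sketch follows; the version strings are samples and width/precision handling is omitted.

    import java.util.{ Formattable, FormattableFlags, Formatter }

    object VersionFormatSketch {
      val versionString       = "Scala compiler version 2.11.8"  // sample values, not read from properties
      val versionNumberString = "2.11.8"

      val v = new Formattable {
        override def formatTo(formatter: Formatter, flags: Int, width: Int, precision: Int): Unit = {
          // '#' (ALTERNATE) selects the bare version number, plain %s the full string
          val s = if ((flags & FormattableFlags.ALTERNATE) != 0) versionNumberString else versionString
          formatter.format("%s", s)
        }
      }

      def main(args: Array[String]): Unit = {
        println("Welcome to %s".format(v))       // full version string
        println("%nscala %#s> ".format(v).trim)  // scala 2.11.8>
      }
    }
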
diff --git a/src/scaladoc/scala/tools/nsc/doc/base/MemberLookupBase.scala b/src/scaladoc/scala/tools/nsc/doc/base/MemberLookupBase.scala
index f853df0484..9de6ec4ab9 100755
--- a/src/scaladoc/scala/tools/nsc/doc/base/MemberLookupBase.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/base/MemberLookupBase.scala
@@ -114,7 +114,7 @@ trait MemberLookupBase {
// Maintaining compatibility with previous links is a bit tricky here:
// we have a preference for term names for all terms except for the last, where we prefer a class:
// How to do this:
- // - at each step we do a DFS search with the prefered strategy
+ // - at each step we do a DFS search with the preferred strategy
// - if the search doesn't return any members, we backtrack on the last decision
// * we look for terms with the last member's name
// * we look for types with the same name, all the way up
diff --git a/src/scaladoc/scala/tools/nsc/doc/base/comment/Comment.scala b/src/scaladoc/scala/tools/nsc/doc/base/comment/Comment.scala
index a3d05ae50b..e5eb68d65a 100644
--- a/src/scaladoc/scala/tools/nsc/doc/base/comment/Comment.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/base/comment/Comment.scala
@@ -93,7 +93,7 @@ abstract class Comment {
def todo: List[Body]
/** Whether the entity is deprecated. Using the `@deprecated` Scala attribute
- * is prefereable to using this Scaladoc tag. */
+ * is preferable to using this Scaladoc tag. */
def deprecated: Option[Body]
/** An additional note concerning the contract of the entity. */
diff --git a/src/scaladoc/scala/tools/nsc/doc/html/page/Template.scala b/src/scaladoc/scala/tools/nsc/doc/html/page/Template.scala
index 99f989aadf..743c2a401c 100644
--- a/src/scaladoc/scala/tools/nsc/doc/html/page/Template.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/html/page/Template.scala
@@ -280,7 +280,7 @@ class Template(universe: doc.Universe, generator: DiagramGenerator, tpl: DocTemp
{
if (Set("epfl", "EPFL").contains(tpl.universe.settings.docfooter.value))
- <div id="footer">Scala programming documentation. Copyright (c) 2003-2013 <a href="http://www.epfl.ch" target="_top">EPFL</a>, with contributions from <a href="http://typesafe.com" target="_top">Typesafe</a>.</div>
+ <div id="footer">Scala programming documentation. Copyright (c) 2003-2015 <a href="http://www.epfl.ch" target="_top">EPFL</a>, with contributions from <a href="http://typesafe.com" target="_top">Typesafe</a>.</div>
else
<div id="footer"> { tpl.universe.settings.docfooter.value } </div>
}
@@ -682,7 +682,6 @@ class Template(universe: doc.Universe, generator: DiagramGenerator, tpl: DocTemp
if (diagramSvg != NodeSeq.Empty) {
<div class="toggleContainer block diagram-container" id={ id + "-container"}>
<span class="toggle diagram-link">{ description }</span>
- <a href="http://docs.scala-lang.org/overviews/scaladoc/usage.html#diagrams" target="_blank" class="diagram-help">Learn more about scaladoc diagrams</a>
<div class="diagram" id={ id }>{
diagramSvg
}</div>
diff --git a/src/scaladoc/scala/tools/nsc/doc/html/page/diagram/DiagramStats.scala b/src/scaladoc/scala/tools/nsc/doc/html/page/diagram/DiagramStats.scala
index ab8e9e2756..12c609af49 100644
--- a/src/scaladoc/scala/tools/nsc/doc/html/page/diagram/DiagramStats.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/html/page/diagram/DiagramStats.scala
@@ -36,7 +36,7 @@ object DiagramStats {
private[this] val filterTrack = new TimeTracker("diagrams model filtering")
private[this] val modelTrack = new TimeTracker("diagrams model generation")
private[this] val dotGenTrack = new TimeTracker("dot diagram generation")
- private[this] val dotRunTrack = new TimeTracker("dot process runnning")
+ private[this] val dotRunTrack = new TimeTracker("dot process running")
private[this] val svgTrack = new TimeTracker("svg processing")
private[this] var brokenImages = 0
private[this] var fixedImages = 0
diff --git a/src/scaladoc/scala/tools/nsc/doc/model/CommentFactory.scala b/src/scaladoc/scala/tools/nsc/doc/model/CommentFactory.scala
index fe157c1cc9..66ce2137f2 100644
--- a/src/scaladoc/scala/tools/nsc/doc/model/CommentFactory.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/model/CommentFactory.scala
@@ -31,7 +31,7 @@ trait CommentFactory extends base.CommentFactoryBase {
defineComment(sym, linkTarget, inTpl)
})
- /** A comment is usualy created by the parser, however for some special
+ /** A comment is usually created by the parser, however for some special
* cases we have to give some `inTpl` comments (parent class for example)
* to the comment of the symbol.
* This function manages some of those cases : Param accessor and Primary constructor */
diff --git a/src/scaladoc/scala/tools/nsc/doc/model/Entity.scala b/src/scaladoc/scala/tools/nsc/doc/model/Entity.scala
index 90de51d763..d55c51b19c 100644
--- a/src/scaladoc/scala/tools/nsc/doc/model/Entity.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/model/Entity.scala
@@ -183,7 +183,7 @@ trait MemberEntity extends Entity {
/** Indicates whether the member is inherited by implicit conversion */
def isImplicitlyInherited: Boolean
- /** Indicates whether there is another member with the same name in the template that will take precendence */
+ /** Indicates whether there is another member with the same name in the template that will take precedence */
def isShadowedImplicit: Boolean
/** Indicates whether there are other implicitly inherited members that have similar signatures (and thus they all
diff --git a/src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala b/src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala
index 178af3d4ba..3432e5e150 100644
--- a/src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/model/ModelFactory.scala
@@ -478,7 +478,7 @@ class ModelFactory(val global: Global, val settings: doc.Settings) {
override lazy val comment = {
def nonRootTemplate(sym: Symbol): Option[DocTemplateImpl] =
if (sym eq RootPackage) None else findTemplateMaybe(sym)
- /* Variable precendence order for implicitly added members: Take the variable definitions from ...
+ /* Variable precedence order for implicitly added members: Take the variable definitions from ...
* 1. the target of the implicit conversion
* 2. the definition template (owner)
* 3. the current template
@@ -994,7 +994,7 @@ class ModelFactory(val global: Global, val settings: doc.Settings) {
// pruning modules that shouldn't be documented
// Why Symbol.isInitialized? Well, because we need to avoid exploring all the space available to scaladoc
// from the classpath -- scaladoc is a hog, it will explore everything starting from the root package unless we
- // somehow prune the tree. And isInitialized is a good heuristic for prunning -- if the package was not explored
+ // somehow prune the tree. And isInitialized is a good heuristic for pruning -- if the package was not explored
// during typer and refchecks, it's not necessary for the current application and there's no need to explore it.
(!sym.isModule || sym.moduleClass.isInitialized) &&
// documenting only public and protected members
diff --git a/src/scaladoc/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala b/src/scaladoc/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala
index ea72fa6095..45745b5f55 100644
--- a/src/scaladoc/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala
@@ -112,7 +112,7 @@ trait ModelFactoryTypeSupport {
def needsPrefix: Boolean = {
if ((owner != bSym.owner || preSym.isRefinementClass) && (normalizeTemplate(owner) != inTpl.sym))
return true
- // don't get tricked into prefixng method type params and existentials:
+ // don't get tricked into prefixing method type params and existentials:
// I tried several tricks BUT adding the method for which I'm creating the type => that simply won't scale,
// as ValueParams are independent of their parent member, and I really don't want to add this information to
// all terms, as we're already over the allowed memory footprint
diff --git a/src/scaladoc/scala/tools/nsc/doc/model/diagram/Diagram.scala b/src/scaladoc/scala/tools/nsc/doc/model/diagram/Diagram.scala
index 1846f375cd..e15963bda9 100644
--- a/src/scaladoc/scala/tools/nsc/doc/model/diagram/Diagram.scala
+++ b/src/scaladoc/scala/tools/nsc/doc/model/diagram/Diagram.scala
@@ -99,7 +99,7 @@ case class NormalNode(tpe: TypeEntity, tpl: Option[TemplateEntity])(val tooltip:
/** A class or trait the thisnode can be converted to by an implicit conversion
* TODO: I think it makes more sense to use the tpe links to templates instead of the TemplateEntity for implicit nodes
- * since some implicit conversions convert the class to complex types that cannot be represented as a single tmeplate
+ * since some implicit conversions convert the class to complex types that cannot be represented as a single template
*/
case class ImplicitNode(tpe: TypeEntity, tpl: Option[TemplateEntity])(val tooltip: Option[String] = None) extends Node { override def isImplicitNode = true }
diff --git a/src/scalap/decoder.properties b/src/scalap/decoder.properties
index 961c60f48c..333f6ce715 100644
--- a/src/scalap/decoder.properties
+++ b/src/scalap/decoder.properties
@@ -1,2 +1,2 @@
version.number=2.0.1
-copyright.string=(c) 2002-2013 LAMP/EPFL
+copyright.string=(c) 2002-2015 LAMP/EPFL
diff --git a/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala b/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
index cfd750055b..eed76c3774 100644
--- a/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
+++ b/src/scalap/scala/tools/scalap/scalax/rules/scalasig/ClassFileParser.scala
@@ -114,6 +114,9 @@ object ClassFileParser extends ByteCodeReader {
val methodRef = memberRef("Method")
val interfaceMethodRef = memberRef("InterfaceMethod")
val nameAndType = u2 ~ u2 ^^ add1 { case name ~ descriptor => pool => "NameAndType: " + pool(name) + ", " + pool(descriptor) }
+ val methodHandle = u1 ~ u2 ^^ add1 { case referenceKind ~ referenceIndex => pool => "MethodHandle: " + referenceKind + ", " + pool(referenceIndex) }
+ val methodType = u2 ^^ add1 { case descriptorIndex => pool => "MethodType: " + pool(descriptorIndex) }
+ val invokeDynamic = u2 ~ u2 ^^ add1 { case bootstrapMethodAttrIndex ~ nameAndTypeIndex => pool => "InvokeDynamic: " + "bootstrapMethodAttrIndex = " + bootstrapMethodAttrIndex + ", " + pool(nameAndTypeIndex) }
val constantPoolEntry = u1 >> {
case 1 => utf8String
@@ -127,6 +130,9 @@ object ClassFileParser extends ByteCodeReader {
case 10 => methodRef
case 11 => interfaceMethodRef
case 12 => nameAndType
+ case 15 => methodHandle
+ case 16 => methodType
+ case 18 => invokeDynamic
}
val interfaces = u2 >> u2.times
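
For reference, the three constant-pool parsers added in the hunk above correspond to the CONSTANT_MethodHandle (tag 15), CONSTANT_MethodType (tag 16) and CONSTANT_InvokeDynamic (tag 18) structures introduced with Java 7 class files. A minimal standalone sketch of their byte layouts, using plain java.io reads rather than scalap's combinator DSL; the PoolEntry types and readEntry helper are illustrative names, not part of the patch:

import java.io.DataInputStream

sealed trait PoolEntry
// tag 15: u1 reference_kind, u2 reference_index
case class MethodHandle(referenceKind: Int, referenceIndex: Int) extends PoolEntry
// tag 16: u2 descriptor_index
case class MethodType(descriptorIndex: Int) extends PoolEntry
// tag 18: u2 bootstrap_method_attr_index, u2 name_and_type_index
case class InvokeDynamic(bootstrapMethodAttrIndex: Int, nameAndTypeIndex: Int) extends PoolEntry

// Reads the body of one of the three new entry kinds, assuming the tag byte
// has already been consumed.
def readEntry(tag: Int, in: DataInputStream): PoolEntry = tag match {
  case 15 => MethodHandle(in.readUnsignedByte(), in.readUnsignedShort())
  case 16 => MethodType(in.readUnsignedShort())
  case 18 => InvokeDynamic(in.readUnsignedShort(), in.readUnsignedShort())
}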