author    Adriaan Moors <adriaan.moors@typesafe.com>  2013-02-11 13:02:40 -0800
committer Adriaan Moors <adriaan.moors@typesafe.com>  2013-02-11 13:02:40 -0800
commit    b87e9b39d9ee606aab5d86f84ca94df416451e65 (patch)
tree      34dbc0c8054303eb92a6a6fea2d48b885f8c5ac4 /src
parent    db5919a7d3b18be94e79899c2f7e33c535e15a27 (diff)
parent    85b63b81d5951a78547641e3feab0886f6013ea1 (diff)
Merge pull request #2103 from adriaanm/bin-compat (tag: v2.10.1-RC1)
Forward and backward binary compatibility between 2.10.0 and 2.10.1-RC1
Diffstat (limited to 'src')
-rw-r--r--  src/library/scala/collection/immutable/List.scala            |   4
-rw-r--r--  src/library/scala/collection/immutable/Range.scala           |   1
-rw-r--r--  src/library/scala/collection/mutable/MutableList.scala       |   3
-rw-r--r--  src/library/scala/collection/mutable/Queue.scala             |   7
-rw-r--r--  src/library/scala/concurrent/BatchingExecutor.scala          | 117
-rw-r--r--  src/library/scala/concurrent/Future.scala                    | 106
-rw-r--r--  src/library/scala/concurrent/impl/ExecutionContextImpl.scala |  33
-rw-r--r--  src/library/scala/util/Random.scala                          |   2
-rw-r--r--  src/reflect/scala/reflect/internal/PrivateWithin.scala       |  23
-rw-r--r--  src/reflect/scala/reflect/internal/SymbolTable.scala         |  16
-rw-r--r--  src/reflect/scala/reflect/runtime/JavaMirrors.scala          |  18
-rw-r--r--  src/reflect/scala/reflect/runtime/SymbolLoaders.scala        |   8
12 files changed, 160 insertions, 178 deletions
diff --git a/src/library/scala/collection/immutable/List.scala b/src/library/scala/collection/immutable/List.scala
index 9765e7c52f..2d6952ff92 100644
--- a/src/library/scala/collection/immutable/List.scala
+++ b/src/library/scala/collection/immutable/List.scala
@@ -643,6 +643,10 @@ object List extends SeqFactory[List] {
}
/** Only used for list serialization */
+@SerialVersionUID(0L - 8287891243975527522L)
+private[scala] case object ListSerializeStart
+
+/** Only used for list serialization */
@SerialVersionUID(0L - 8476791151975527571L)
private[scala] case object ListSerializeEnd
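
ListSerializeStart is presumably restored here, with its original serialVersionUID, so that lists serialized by one of 2.10.0/2.10.1 remain readable by the other. A minimal sketch of the constraint, using a hypothetical Marker object in place of ListSerializeStart: Java deserialization only succeeds while the marker class is still on the classpath with a matching serialVersionUID.

    import java.io._

    // Hypothetical stand-in for ListSerializeStart: a marker whose serialized
    // form must stay resolvable across library versions.
    @SerialVersionUID(42L)
    case object Marker

    object SerializationRoundTrip {
      def main(args: Array[String]): Unit = {
        val bos = new ByteArrayOutputStream()
        val oos = new ObjectOutputStream(bos)
        oos.writeObject(Marker)   // what an "old" writer emits
        oos.close()

        // A "new" reader succeeds only if the marker class still exists with
        // the same serialVersionUID; dropping the class breaks old streams.
        val ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray))
        println(ois.readObject() == Marker)   // true: the case object readResolves to the singleton
      }
    }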
diff --git a/src/library/scala/collection/immutable/Range.scala b/src/library/scala/collection/immutable/Range.scala
index 02c10700b1..802e16605d 100644
--- a/src/library/scala/collection/immutable/Range.scala
+++ b/src/library/scala/collection/immutable/Range.scala
@@ -77,7 +77,6 @@ extends scala.collection.AbstractSeq[Int]
final val terminalElement = start + numRangeElements * step
override def last = if (isEmpty) Nil.last else lastElement
- override def head = if (isEmpty) Nil.head else start
override def min[A1 >: Int](implicit ord: Ordering[A1]): Int =
if (ord eq Ordering.Int) {
diff --git a/src/library/scala/collection/mutable/MutableList.scala b/src/library/scala/collection/mutable/MutableList.scala
index fd92d2e555..bc6272bfdb 100644
--- a/src/library/scala/collection/mutable/MutableList.scala
+++ b/src/library/scala/collection/mutable/MutableList.scala
@@ -61,7 +61,8 @@ extends AbstractSeq[A]
tl
}
- protected final def tailImpl(tl: MutableList[A]) {
+ // this method must be private for binary compatibility
+ private final def tailImpl(tl: MutableList[A]) {
require(nonEmpty, "tail of empty list")
tl.first0 = first0.tail
tl.len = len - 1
diff --git a/src/library/scala/collection/mutable/Queue.scala b/src/library/scala/collection/mutable/Queue.scala
index b947fa3cca..8ef5f6aeb7 100644
--- a/src/library/scala/collection/mutable/Queue.scala
+++ b/src/library/scala/collection/mutable/Queue.scala
@@ -167,6 +167,13 @@ extends MutableList[A]
*/
def front: A = head
+ // this method (duplicated from MutableList) must be private for binary compatibility
+ private final def tailImpl(tl: Queue[A]) {
+ require(nonEmpty, "tail of empty list")
+ tl.first0 = first0.tail
+ tl.len = len - 1
+ tl.last0 = if (tl.len == 0) tl.first0 else last0
+ }
// TODO - Don't override this just for new to create appropriate type....
override def tail: Queue[A] = {
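
Keeping tailImpl private (and duplicating it in Queue) means the helper is not part of the inheritable binary interface, so code compiled against 2.10.1 cannot reference a member that 2.10.0 lacks; the cost is that a private member is not visible to subclasses, hence the copy above. A small sketch with hypothetical Base/Derived classes:

    // Hypothetical classes illustrating why a private helper must be duplicated.
    class Base {
      // A private helper adds no inheritable API surface, so introducing it after
      // a release does not change the binary interface clients compiled against.
      private def helper(n: Int): Int = n - 1
      def shrink(n: Int): Int = helper(n)
    }

    class Derived extends Base {
      // Base.helper is invisible here; if Derived needs the same logic it has to
      // carry its own copy, just as Queue duplicates MutableList.tailImpl.
      private def helper(n: Int): Int = n - 1
      def shrinkTwice(n: Int): Int = helper(helper(n))
    }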
diff --git a/src/library/scala/concurrent/BatchingExecutor.scala b/src/library/scala/concurrent/BatchingExecutor.scala
deleted file mode 100644
index a0d7aaea47..0000000000
--- a/src/library/scala/concurrent/BatchingExecutor.scala
+++ /dev/null
@@ -1,117 +0,0 @@
-/* __ *\
-** ________ ___ / / ___ Scala API **
-** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
-** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
-** /____/\___/_/ |_/____/_/ | | **
-** |/ **
-\* */
-
-package scala.concurrent
-
-import java.util.concurrent.Executor
-import scala.annotation.tailrec
-
-/**
- * Mixin trait for an Executor
- * which groups multiple nested `Runnable.run()` calls
- * into a single Runnable passed to the original
- * Executor. This can be a useful optimization
- * because it bypasses the original context's task
- * queue and keeps related (nested) code on a single
- * thread which may improve CPU affinity. However,
- * if tasks passed to the Executor are blocking
- * or expensive, this optimization can prevent work-stealing
- * and make performance worse. Also, some ExecutionContext
- * may be fast enough natively that this optimization just
- * adds overhead.
- * The default ExecutionContext.global is already batching
- * or fast enough not to benefit from it; while
- * `fromExecutor` and `fromExecutorService` do NOT add
- * this optimization since they don't know whether the underlying
- * executor will benefit from it.
- * A batching executor can create deadlocks if code does
- * not use `scala.concurrent.blocking` when it should,
- * because tasks created within other tasks will block
- * on the outer task completing.
- * This executor may run tasks in any order, including LIFO order.
- * There are no ordering guarantees.
- *
- * WARNING: The underlying Executor's execute-method must not execute the submitted Runnable
- * in the calling thread synchronously. It must enqueue/handoff the Runnable.
- */
-private[concurrent] trait BatchingExecutor extends Executor {
-
- // invariant: if "_tasksLocal.get ne null" then we are inside BatchingRunnable.run; if it is null, we are outside
- private val _tasksLocal = new ThreadLocal[List[Runnable]]()
-
- private class Batch(val initial: List[Runnable]) extends Runnable with BlockContext {
- private var parentBlockContext: BlockContext = _
- // this method runs in the delegate ExecutionContext's thread
- override def run(): Unit = {
- require(_tasksLocal.get eq null)
-
- val prevBlockContext = BlockContext.current
- BlockContext.withBlockContext(this) {
- try {
- parentBlockContext = prevBlockContext
-
- @tailrec def processBatch(batch: List[Runnable]): Unit = batch match {
- case Nil => ()
- case head :: tail =>
- _tasksLocal set tail
- try {
- head.run()
- } catch {
- case t: Throwable =>
- // if one task throws, move the
- // remaining tasks to another thread
- // so we can throw the exception
- // up to the invoking executor
- val remaining = _tasksLocal.get
- _tasksLocal set Nil
- unbatchedExecute(new Batch(remaining)) //TODO what if this submission fails?
- throw t // rethrow
- }
- processBatch(_tasksLocal.get) // since head.run() can add entries, always do _tasksLocal.get here
- }
-
- processBatch(initial)
- } finally {
- _tasksLocal.remove()
- parentBlockContext = null
- }
- }
- }
-
- override def blockOn[T](thunk: => T)(implicit permission: CanAwait): T = {
- // if we know there will be blocking, we don't want to keep tasks queued up because it could deadlock.
- {
- val tasks = _tasksLocal.get
- _tasksLocal set Nil
- if ((tasks ne null) && tasks.nonEmpty)
- unbatchedExecute(new Batch(tasks))
- }
-
- // now delegate the blocking to the previous BC
- require(parentBlockContext ne null)
- parentBlockContext.blockOn(thunk)
- }
- }
-
- protected def unbatchedExecute(r: Runnable): Unit
-
- override def execute(runnable: Runnable): Unit = {
- if (batchable(runnable)) { // If we can batch the runnable
- _tasksLocal.get match {
- case null => unbatchedExecute(new Batch(List(runnable))) // If we aren't in batching mode yet, enqueue batch
- case some => _tasksLocal.set(runnable :: some) // If we are already in batching mode, add to batch
- }
- } else unbatchedExecute(runnable) // If not batchable, just delegate to underlying
- }
-
- /** Override this to define which runnables will be batched. */
- def batchable(runnable: Runnable): Boolean = runnable match {
- case _: OnCompleteRunnable => true
- case _ => false
- }
-}
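
The deadlock warning in the scaladoc of the deleted BatchingExecutor.scala (re-inlined into Future.scala below) concerns a batched task that blocks on work queued behind it on the same thread. A minimal sketch, using only the standard scala.concurrent API, of the recommended defence: wrap the wait in scala.concurrent.blocking so the current BlockContext can hand off queued work or compensate before parking.

    import scala.concurrent._
    import scala.concurrent.duration._
    import ExecutionContext.Implicits.global

    object BlockingHint {
      def main(args: Array[String]): Unit = {
        val outer = Future {
          val inner = Future(42)   // may end up queued behind the waiting task
          // Without `blocking`, an executor that batches nested tasks would park
          // this thread while `inner` sits unstarted in its batch; `blocking`
          // lets the BlockContext flush the batch before waiting.
          blocking(Await.result(inner, 5.seconds))
        }
        println(Await.result(outer, 10.seconds))
      }
    }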
diff --git a/src/library/scala/concurrent/Future.scala b/src/library/scala/concurrent/Future.scala
index 36f3be341f..5a51e97072 100644
--- a/src/library/scala/concurrent/Future.scala
+++ b/src/library/scala/concurrent/Future.scala
@@ -675,11 +675,111 @@ object Future {
// by just not ever using it itself. scala.concurrent
// doesn't need to create defaultExecutionContext as
// a side effect.
- private[concurrent] object InternalCallbackExecutor extends ExecutionContext with BatchingExecutor {
- override protected def unbatchedExecute(r: Runnable): Unit =
- r.run()
+ private[concurrent] object InternalCallbackExecutor extends ExecutionContext with java.util.concurrent.Executor {
override def reportFailure(t: Throwable): Unit =
throw new IllegalStateException("problem in scala.concurrent internal callback", t)
+
+ /**
+ * The BatchingExecutor trait had to be inlined into InternalCallbackExecutor for binary compatibility.
+ *
+ * BatchingExecutor is a trait for an Executor
+ * which groups multiple nested `Runnable.run()` calls
+ * into a single Runnable passed to the original
+ * Executor. This can be a useful optimization
+ * because it bypasses the original context's task
+ * queue and keeps related (nested) code on a single
+ * thread which may improve CPU affinity. However,
+ * if tasks passed to the Executor are blocking
+ * or expensive, this optimization can prevent work-stealing
+ * and make performance worse. Also, some ExecutionContext
+ * may be fast enough natively that this optimization just
+ * adds overhead.
+ * The default ExecutionContext.global is already batching
+ * or fast enough not to benefit from it; while
+ * `fromExecutor` and `fromExecutorService` do NOT add
+ * this optimization since they don't know whether the underlying
+ * executor will benefit from it.
+ * A batching executor can create deadlocks if code does
+ * not use `scala.concurrent.blocking` when it should,
+ * because tasks created within other tasks will block
+ * on the outer task completing.
+ * This executor may run tasks in any order, including LIFO order.
+ * There are no ordering guarantees.
+ *
+ * WARNING: The underlying Executor's execute-method must not execute the submitted Runnable
+ * in the calling thread synchronously. It must enqueue/handoff the Runnable.
+ */
+ // invariant: if "_tasksLocal.get ne null" then we are inside BatchingRunnable.run; if it is null, we are outside
+ private val _tasksLocal = new ThreadLocal[List[Runnable]]()
+
+ private class Batch(val initial: List[Runnable]) extends Runnable with BlockContext {
+ private[this] var parentBlockContext: BlockContext = _
+ // this method runs in the delegate ExecutionContext's thread
+ override def run(): Unit = {
+ require(_tasksLocal.get eq null)
+
+ val prevBlockContext = BlockContext.current
+ BlockContext.withBlockContext(this) {
+ try {
+ parentBlockContext = prevBlockContext
+
+ @tailrec def processBatch(batch: List[Runnable]): Unit = batch match {
+ case Nil => ()
+ case head :: tail =>
+ _tasksLocal set tail
+ try {
+ head.run()
+ } catch {
+ case t: Throwable =>
+ // if one task throws, move the
+ // remaining tasks to another thread
+ // so we can throw the exception
+ // up to the invoking executor
+ val remaining = _tasksLocal.get
+ _tasksLocal set Nil
+ unbatchedExecute(new Batch(remaining)) //TODO what if this submission fails?
+ throw t // rethrow
+ }
+ processBatch(_tasksLocal.get) // since head.run() can add entries, always do _tasksLocal.get here
+ }
+
+ processBatch(initial)
+ } finally {
+ _tasksLocal.remove()
+ parentBlockContext = null
+ }
+ }
+ }
+
+ override def blockOn[T](thunk: => T)(implicit permission: CanAwait): T = {
+ // if we know there will be blocking, we don't want to keep tasks queued up because it could deadlock.
+ {
+ val tasks = _tasksLocal.get
+ _tasksLocal set Nil
+ if ((tasks ne null) && tasks.nonEmpty)
+ unbatchedExecute(new Batch(tasks))
+ }
+
+ // now delegate the blocking to the previous BC
+ require(parentBlockContext ne null)
+ parentBlockContext.blockOn(thunk)
+ }
+ }
+
+ override def execute(runnable: Runnable): Unit = runnable match {
+ // If we can batch the runnable
+ case _: OnCompleteRunnable =>
+ _tasksLocal.get match {
+ case null => unbatchedExecute(new Batch(List(runnable))) // If we aren't in batching mode yet, enqueue batch
+ case some => _tasksLocal.set(runnable :: some) // If we are already in batching mode, add to batch
+ }
+
+ // If not batchable, just delegate to underlying
+ case _ =>
+ unbatchedExecute(runnable)
+ }
+
+ private def unbatchedExecute(r: Runnable): Unit = r.run()
}
}
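
The execute/Batch machinery inlined above is essentially a per-thread trampoline: the first batchable Runnable is handed to the underlying executor, and anything scheduled while it runs is pushed onto a ThreadLocal list and drained in place. A stripped-down sketch of that pattern (not the real InternalCallbackExecutor; error handling and blockOn omitted):

    import java.util.concurrent.Executor

    // A stripped-down trampoline, illustrating the ThreadLocal batching pattern.
    class TrampolineExecutor(underlying: Executor) extends Executor {
      private val tasks = new ThreadLocal[List[Runnable]]()

      def execute(runnable: Runnable): Unit = tasks.get match {
        case null =>
          // Not batching on this thread yet: submit a single batch downstream.
          underlying.execute(new Runnable {
            def run(): Unit = {
              tasks.set(runnable :: Nil)
              try {
                while (tasks.get.nonEmpty) {
                  val current = tasks.get
                  tasks.set(current.tail)
                  current.head.run()   // may re-enter execute() and grow the list
                }
              } finally tasks.remove()
            }
          })
        case batch =>
          // Already inside a batch on this thread: prepend (LIFO order).
          tasks.set(runnable :: batch)
      }
    }

As the warning in the scaladoc notes, the underlying executor in such a design must hand the batch off rather than run it synchronously in the calling thread.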
diff --git a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
index 77625e381c..43b437dbc6 100644
--- a/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
+++ b/src/library/scala/concurrent/impl/ExecutionContextImpl.scala
@@ -25,7 +25,7 @@ private[scala] class ExecutionContextImpl private[impl] (es: Executor, reporter:
case some => some
}
- private val uncaughtExceptionHandler: Thread.UncaughtExceptionHandler = new Thread.UncaughtExceptionHandler {
+ private[this] val uncaughtExceptionHandler: Thread.UncaughtExceptionHandler = new Thread.UncaughtExceptionHandler {
def uncaughtException(thread: Thread, cause: Throwable): Unit = reporter(cause)
}
@@ -96,11 +96,24 @@ private[scala] class ExecutionContextImpl private[impl] (es: Executor, reporter:
}
}
+
def execute(runnable: Runnable): Unit = executor match {
case fj: ForkJoinPool =>
val fjt = runnable match {
case t: ForkJoinTask[_] => t
- case r => new ExecutionContextImpl.AdaptedForkJoinTask(r)
+ case runnable => new ForkJoinTask[Unit] {
+ final override def setRawResult(u: Unit): Unit = ()
+ final override def getRawResult(): Unit = ()
+ final override def exec(): Boolean = try { runnable.run(); true } catch {
+ case anything: Throwable ⇒
+ val t = Thread.currentThread
+ t.getUncaughtExceptionHandler match {
+ case null ⇒
+ case some ⇒ some.uncaughtException(t, anything)
+ }
+ throw anything
+ }
+ }
}
Thread.currentThread match {
case fjw: ForkJoinWorkerThread if fjw.getPool eq fj => fjt.fork()
@@ -112,23 +125,7 @@ private[scala] class ExecutionContextImpl private[impl] (es: Executor, reporter:
def reportFailure(t: Throwable) = reporter(t)
}
-
private[concurrent] object ExecutionContextImpl {
-
- final class AdaptedForkJoinTask(runnable: Runnable) extends ForkJoinTask[Unit] {
- final override def setRawResult(u: Unit): Unit = ()
- final override def getRawResult(): Unit = ()
- final override def exec(): Boolean = try { runnable.run(); true } catch {
- case anything: Throwable ⇒
- val t = Thread.currentThread
- t.getUncaughtExceptionHandler match {
- case null ⇒
- case some ⇒ some.uncaughtException(t, anything)
- }
- throw anything
- }
- }
-
def fromExecutor(e: Executor, reporter: Throwable => Unit = ExecutionContext.defaultReporter): ExecutionContextImpl = new ExecutionContextImpl(e, reporter)
def fromExecutorService(es: ExecutorService, reporter: Throwable => Unit = ExecutionContext.defaultReporter): ExecutionContextImpl with ExecutionContextExecutorService =
new ExecutionContextImpl(es, reporter) with ExecutionContextExecutorService {
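
The anonymous ForkJoinTask inlined above (replacing AdaptedForkJoinTask) rethrows after handing the failure to the worker thread's uncaught-exception handler, which the uncaughtExceptionHandler shown earlier routes to the context's reporter. A minimal sketch, with hypothetical names, of that reporting path on a plain thread:

    object UncaughtReporting {
      def main(args: Array[String]): Unit = {
        val worker = new Thread(new Runnable {
          def run(): Unit = throw new RuntimeException("boom")
        })
        // Plays the role of ExecutionContextImpl's `reporter`.
        worker.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler {
          def uncaughtException(t: Thread, cause: Throwable): Unit =
            println(s"reported from ${t.getName}: ${cause.getMessage}")
        })
        worker.start()
        worker.join()
      }
    }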
diff --git a/src/library/scala/util/Random.scala b/src/library/scala/util/Random.scala
index 2b11594f66..24c4cd7a32 100644
--- a/src/library/scala/util/Random.scala
+++ b/src/library/scala/util/Random.scala
@@ -17,7 +17,7 @@ import scala.language.{implicitConversions, higherKinds}
* @author Stephane Micheloud
*
*/
-class Random(val self: java.util.Random) extends AnyRef with Serializable {
+class Random(val self: java.util.Random) {
/** Creates a new random number generator using a single long seed. */
def this(seed: Long) = this(new java.util.Random(seed))
diff --git a/src/reflect/scala/reflect/internal/PrivateWithin.scala b/src/reflect/scala/reflect/internal/PrivateWithin.scala
deleted file mode 100644
index 9b99b94b41..0000000000
--- a/src/reflect/scala/reflect/internal/PrivateWithin.scala
+++ /dev/null
@@ -1,23 +0,0 @@
-package scala.reflect
-package internal
-
-import ClassfileConstants._
-
-trait PrivateWithin {
- self: SymbolTable =>
-
- def importPrivateWithinFromJavaFlags(sym: Symbol, jflags: Int): Symbol = {
- if ((jflags & (JAVA_ACC_PRIVATE | JAVA_ACC_PROTECTED | JAVA_ACC_PUBLIC)) == 0)
- // See ticket #1687 for an example of when topLevelClass is NoSymbol: it
- // apparently occurs when processing v45.3 bytecode.
- if (sym.enclosingTopLevelClass != NoSymbol)
- sym.privateWithin = sym.enclosingTopLevelClass.owner
-
- // protected in java means package protected. #3946
- if ((jflags & JAVA_ACC_PROTECTED) != 0)
- if (sym.enclosingTopLevelClass != NoSymbol)
- sym.privateWithin = sym.enclosingTopLevelClass.owner
-
- sym
- }
-}
\ No newline at end of file
diff --git a/src/reflect/scala/reflect/internal/SymbolTable.scala b/src/reflect/scala/reflect/internal/SymbolTable.scala
index f75855f1ec..5ccf81b4b5 100644
--- a/src/reflect/scala/reflect/internal/SymbolTable.scala
+++ b/src/reflect/scala/reflect/internal/SymbolTable.scala
@@ -38,7 +38,6 @@ abstract class SymbolTable extends macros.Universe
with StdAttachments
with StdCreators
with BuildUtils
- with PrivateWithin
{
val gen = new TreeGen { val global: SymbolTable.this.type = SymbolTable.this }
@@ -352,6 +351,21 @@ abstract class SymbolTable extends macros.Universe
*/
implicit val StringContextStripMarginOps: StringContext => StringContextStripMarginOps = util.StringContextStripMarginOps
+ def importPrivateWithinFromJavaFlags(sym: Symbol, jflags: Int): Symbol = {
+ import ClassfileConstants._
+ if ((jflags & (JAVA_ACC_PRIVATE | JAVA_ACC_PROTECTED | JAVA_ACC_PUBLIC)) == 0)
+ // See ticket #1687 for an example of when topLevelClass is NoSymbol: it
+ // apparently occurs when processing v45.3 bytecode.
+ if (sym.enclosingTopLevelClass != NoSymbol)
+ sym.privateWithin = sym.enclosingTopLevelClass.owner
+
+ // protected in java means package protected. #3946
+ if ((jflags & JAVA_ACC_PROTECTED) != 0)
+ if (sym.enclosingTopLevelClass != NoSymbol)
+ sym.privateWithin = sym.enclosingTopLevelClass.owner
+
+ sym
+ }
}
object SymbolTableStats {
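
importPrivateWithinFromJavaFlags, moved from the deleted PrivateWithin trait into SymbolTable, keys off standard class-file access bits: no PUBLIC, PRIVATE or PROTECTED bit means Java package-private, and PROTECTED is likewise mapped to package-level access (#3946). A self-contained sketch of that check, with the flag values taken from the JVM specification:

    object JavaAccessFlags {
      // Standard JVM class-file access flags (JVMS tables 4.1-A / 4.6-A).
      final val JAVA_ACC_PUBLIC    = 0x0001
      final val JAVA_ACC_PRIVATE   = 0x0002
      final val JAVA_ACC_PROTECTED = 0x0004

      // No access bit set: Java default (package-private) visibility.
      def isPackagePrivate(jflags: Int): Boolean =
        (jflags & (JAVA_ACC_PRIVATE | JAVA_ACC_PROTECTED | JAVA_ACC_PUBLIC)) == 0

      def main(args: Array[String]): Unit = {
        println(isPackagePrivate(0))                  // true: privateWithin becomes the enclosing package
        println(isPackagePrivate(JAVA_ACC_PUBLIC))    // false
        println(isPackagePrivate(JAVA_ACC_PROTECTED)) // false, but #3946 maps it to package access too
      }
    }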
diff --git a/src/reflect/scala/reflect/runtime/JavaMirrors.scala b/src/reflect/scala/reflect/runtime/JavaMirrors.scala
index 778c826dc0..3442e3d22e 100644
--- a/src/reflect/scala/reflect/runtime/JavaMirrors.scala
+++ b/src/reflect/scala/reflect/runtime/JavaMirrors.scala
@@ -574,7 +574,7 @@ private[reflect] trait JavaMirrors extends internal.SymbolTable with api.JavaUni
case None =>
// class does not have a Scala signature; it's a Java class
info("translating reflection info for Java " + jclazz) //debug
- initClassAndModule(clazz, module, new FromJavaClassCompleter(clazz, module, jclazz))
+ initClassModule(clazz, module, new FromJavaClassCompleter(clazz, module, jclazz))
}
}
} catch {
@@ -686,9 +686,9 @@ private[reflect] trait JavaMirrors extends internal.SymbolTable with api.JavaUni
def enter(sym: Symbol, mods: Int) =
(if (jModifier.isStatic(mods)) module.moduleClass else clazz).info.decls enter sym
- for (jinner <- jclazz.getDeclaredClasses)
- jclassAsScala(jinner) // inner class is entered as a side-effect
- // no need to call enter explicitly
+ for (jinner <- jclazz.getDeclaredClasses) {
+ enter(jclassAsScala(jinner, clazz), jinner.getModifiers)
+ }
pendingLoadActions = { () =>
@@ -1046,14 +1046,14 @@ private[reflect] trait JavaMirrors extends internal.SymbolTable with api.JavaUni
* @param jclazz The Java class
* @return A Scala class symbol that wraps all reflection info of `jclazz`
*/
- private def jclassAsScala(jclazz: jClass[_]): ClassSymbol =
- toScala(classCache, jclazz)(_ jclassAsScala1 _)
+ private def jclassAsScala(jclazz: jClass[_]): Symbol = jclassAsScala(jclazz, sOwner(jclazz))
- private def jclassAsScala1(jclazz: jClass[_]): ClassSymbol = {
- val owner = sOwner(jclazz)
+ private def jclassAsScala(jclazz: jClass[_], owner: Symbol): ClassSymbol = {
val name = scalaSimpleName(jclazz)
val completer = (clazz: Symbol, module: Symbol) => new FromJavaClassCompleter(clazz, module, jclazz)
- initAndEnterClassAndModule(owner, name, completer)._1
+ val (clazz, module) = createClassModule(owner, name, completer)
+ classCache enter (jclazz, clazz)
+ clazz
}
/**
diff --git a/src/reflect/scala/reflect/runtime/SymbolLoaders.scala b/src/reflect/scala/reflect/runtime/SymbolLoaders.scala
index 311db64b91..61663f6181 100644
--- a/src/reflect/scala/reflect/runtime/SymbolLoaders.scala
+++ b/src/reflect/scala/reflect/runtime/SymbolLoaders.scala
@@ -57,7 +57,7 @@ private[reflect] trait SymbolLoaders { self: SymbolTable =>
* @param name The simple name of the newly created class
* @param completer The completer to be used to set the info of the class and the module
*/
- protected def initAndEnterClassAndModule(owner: Symbol, name: TypeName, completer: (Symbol, Symbol) => LazyType) = {
+ protected def createClassModule(owner: Symbol, name: TypeName, completer: (Symbol, Symbol) => LazyType) = {
assert(!(name.toString endsWith "[]"), name)
val clazz = owner.newClass(name)
val module = owner.newModule(name.toTermName)
@@ -67,7 +67,7 @@ private[reflect] trait SymbolLoaders { self: SymbolTable =>
owner.info.decls enter clazz
owner.info.decls enter module
}
- initClassAndModule(clazz, module, completer(clazz, module))
+ initClassModule(clazz, module, completer(clazz, module))
(clazz, module)
}
@@ -75,7 +75,7 @@ private[reflect] trait SymbolLoaders { self: SymbolTable =>
List(clazz, module, module.moduleClass) foreach (_ setInfo info)
}
- protected def initClassAndModule(clazz: Symbol, module: Symbol, completer: LazyType) =
+ protected def initClassModule(clazz: Symbol, module: Symbol, completer: LazyType) =
setAllInfos(clazz, module, completer)
/** The type completer for packages.
@@ -118,7 +118,7 @@ private[reflect] trait SymbolLoaders { self: SymbolTable =>
val loadingMirror = currentMirror.mirrorDefining(cls)
val (clazz, module) =
if (loadingMirror eq currentMirror) {
- initAndEnterClassAndModule(pkgClass, name.toTypeName, new TopClassCompleter(_, _))
+ createClassModule(pkgClass, name.toTypeName, new TopClassCompleter(_, _))
} else {
val origOwner = loadingMirror.packageNameToScala(pkgClass.fullName)
val clazz = origOwner.info decl name.toTypeName