path: root/src/library/scala/collection/mutable/Ctrie.scala
Diffstat (limited to 'src/library/scala/collection/mutable/Ctrie.scala')
-rw-r--r--  src/library/scala/collection/mutable/Ctrie.scala | 248
1 file changed, 124 insertions, 124 deletions
diff --git a/src/library/scala/collection/mutable/Ctrie.scala b/src/library/scala/collection/mutable/Ctrie.scala
index 699b96b87c..cbec118aa9 100644
--- a/src/library/scala/collection/mutable/Ctrie.scala
+++ b/src/library/scala/collection/mutable/Ctrie.scala
@@ -22,29 +22,29 @@ import annotation.switch
private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends INodeBase[K, V](g) {
import INodeBase._
-
+
WRITE(bn)
-
+
def this(g: Gen) = this(null, g)
-
+
@inline final def WRITE(nval: MainNode[K, V]) = INodeBase.updater.set(this, nval)
-
+
@inline final def CAS(old: MainNode[K, V], n: MainNode[K, V]) = INodeBase.updater.compareAndSet(this, old, n)
-
+
final def gcasRead(ct: Ctrie[K, V]): MainNode[K, V] = GCAS_READ(ct)
-
+
@inline final def GCAS_READ(ct: Ctrie[K, V]): MainNode[K, V] = {
val m = /*READ*/mainnode
val prevval = /*READ*/m.prev
if (prevval eq null) m
else GCAS_Complete(m, ct)
}
-
+
@tailrec private def GCAS_Complete(m: MainNode[K, V], ct: Ctrie[K, V]): MainNode[K, V] = if (m eq null) null else {
// complete the GCAS
val prev = /*READ*/m.prev
val ctr = ct.readRoot(true)
-
+
prev match {
case null =>
m
@@ -71,7 +71,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
-
+
@inline final def GCAS(old: MainNode[K, V], n: MainNode[K, V], ct: Ctrie[K, V]): Boolean = {
n.WRITE_PREV(old)
if (CAS(old, n)) {
@@ -79,27 +79,27 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
/*READ*/n.prev eq null
} else false
}
-
+
@inline private def inode(cn: MainNode[K, V]) = {
val nin = new INode[K, V](gen)
nin.WRITE(cn)
nin
}
-
+
final def copyToGen(ngen: Gen, ct: Ctrie[K, V]) = {
val nin = new INode[K, V](ngen)
val main = GCAS_READ(ct)
nin.WRITE(main)
nin
}
-
+
/** Inserts a key-value pair, overwriting the old pair if the keys match.
- *
+ *
* @return true if successful, false otherwise
*/
@tailrec final def rec_insert(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Boolean = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multiway node
val idx = (hc >>> lev) & 0x1f
@@ -137,7 +137,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
GCAS(ln, nn, ct)
}
}
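An illustrative sketch (not part of this patch) of how the hash is consumed five bits per level to index the 32-way CNode array, mirroring the `(hc >>> lev) & 0x1f` and `lev + 5` steps above:

  // hypothetical hash value, for illustration only
  val hc = 0x2f5ca3d1
  for (lev <- 0 to 30 by 5)
    println("level %2d -> index %2d".format(lev, (hc >>> lev) & 0x1f))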
-
+
/** Inserts a new key-value pair, provided that a specific condition is met.
*
* @param cond null - don't care whether the key was there; KEY_ABSENT - the key must not be present; KEY_PRESENT - the key must be present; any other value `v` - the key must be bound to `v`
@@ -145,7 +145,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
*/
@tailrec final def rec_insertif(k: K, v: V, hc: Int, cond: AnyRef, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multiway node
val idx = (hc >>> lev) & 0x1f
@@ -228,14 +228,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
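A hedged usage sketch (not part of this patch) of how the `cond` variants surface through the public putIfAbsent/replace/remove(k, v) wrappers defined further below:

  val ct = new Ctrie[String, Int]
  println(ct.putIfAbsent("a", 1))   // None: "a" was absent (KEY_ABSENT holds), so 1 is inserted
  println(ct.putIfAbsent("a", 2))   // Some(1): the key is already present, the value stays 1
  println(ct.replace("a", 1, 3))    // true: "a" was bound to exactly 1, now rebound to 3
  println(ct.replace("b", 5))       // None: KEY_PRESENT does not hold, nothing is inserted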
-
+
/** Looks up the value associated with the key.
- *
+ *
* @return null if no value has been found, RESTART if the operation wasn't successful, or any other value otherwise
*/
@tailrec final def rec_lookup(k: K, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): AnyRef = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] => // 1) a multinode
val idx = (hc >>> lev) & 0x1f
@@ -270,15 +270,15 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
ln.get(k).asInstanceOf[Option[AnyRef]].orNull
}
}
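For illustration only, the public face of rec_lookup (see `get` and `apply` further below):

  val ct = new Ctrie[String, Int]
  ct.update("x", 42)
  println(ct.get("x"))   // Some(42)
  println(ct.get("y"))   // None -- rec_lookup returned null
  // ct("y") would throw a NoSuchElementException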
-
+
/** Removes the key, optionally only if it is currently bound to the given value.
- *
+ *
* @param v if null, removes the key regardless of the value; otherwise removes the key only if it is bound to that exact value
* @return null if not successful, an Option[V] indicating the previous value otherwise
*/
final def rec_remove(k: K, v: V, hc: Int, lev: Int, parent: INode[K, V], startgen: Gen, ct: Ctrie[K, V]): Option[V] = {
val m = GCAS_READ(ct) // use -Yinline!
-
+
m match {
case cn: CNode[K, V] =>
val idx = (hc >>> lev) & 0x1f
@@ -289,7 +289,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
val pos = Integer.bitCount(bmp & (flag - 1))
val sub = cn.array(pos)
val res = sub match {
- case in: INode[K, V] =>
+ case in: INode[K, V] =>
if (startgen eq in.gen) in.rec_remove(k, v, hc, lev + 5, this, startgen, ct)
else {
if (GCAS(cn, cn.renewed(startgen, ct), ct)) rec_remove(k, v, hc, lev, parent, startgen, ct)
@@ -301,7 +301,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
if (GCAS(cn, ncn, ct)) Some(sn.v) else null
} else None
}
-
+
if (res == None || (res eq null)) res
else {
@tailrec def cleanParent(nonlive: AnyRef) {
@@ -325,13 +325,13 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case _ => // parent is no longer a cnode, we're done
}
}
-
+
if (parent ne null) { // never tomb at root
val n = GCAS_READ(ct)
if (n.isInstanceOf[TNode[_, _]])
cleanParent(n)
}
-
+
res
}
}
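For illustration only, value-conditional removal as seen through the public API defined further below:

  val ct = new Ctrie[String, Int]
  ct.update("k", 1)
  println(ct.remove("k", 2))   // false: "k" is bound to 1, not 2, so nothing is removed
  println(ct.remove("k", 1))   // true: the exact (key, value) pair is removed
  println(ct.remove("k"))      // None: unconditional remove, the key is already gone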
@@ -351,7 +351,7 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
}
}
}
-
+
private def clean(nd: INode[K, V], ct: Ctrie[K, V], lev: Int) {
val m = nd.GCAS_READ(ct)
m match {
@@ -359,14 +359,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case _ =>
}
}
-
+
final def isNullInode(ct: Ctrie[K, V]) = GCAS_READ(ct) eq null
-
+
final def cachedSize(ct: Ctrie[K, V]): Int = {
val m = GCAS_READ(ct)
m.cachedSize(ct)
}
-
+
/* this is a quiescent method! */
def string(lev: Int) = "%sINode -> %s".format(" " * lev, mainnode match {
case null => "<null>"
@@ -375,14 +375,14 @@ private[collection] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends
case ln: LNode[_, _] => ln.string(lev)
case x => "<elem: %s>".format(x)
})
-
+
}
private[mutable] object INode {
val KEY_PRESENT = new AnyRef
val KEY_ABSENT = new AnyRef
-
+
def newRootNode[K, V] = {
val gen = new Gen
val cn = new CNode[K, V](0, new Array(0), gen)
@@ -393,11 +393,11 @@ private[mutable] object INode {
private[mutable] final class FailedNode[K, V](p: MainNode[K, V]) extends MainNode[K, V] {
WRITE_PREV(p)
-
+
def string(lev: Int) = throw new UnsupportedOperationException
-
+
def cachedSize(ct: AnyRef): Int = throw new UnsupportedOperationException
-
+
override def toString = "FailedNode(%s)".format(p)
}
@@ -449,7 +449,7 @@ extends MainNode[K, V] {
private[collection] final class CNode[K, V](final val bitmap: Int, final val array: Array[BasicNode], final val gen: Gen)
extends CNodeBase[K, V] {
-
+
// this should only be called from within read-only snapshots
final def cachedSize(ct: AnyRef) = {
val currsz = READ_SIZE()
@@ -460,7 +460,7 @@ extends CNodeBase[K, V] {
READ_SIZE()
}
}
-
+
// lends itself to being parallelized by choosing
// a random starting offset in the array
// => if there are concurrent size computations, they start
@@ -480,7 +480,7 @@ extends CNodeBase[K, V] {
}
sz
}
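A simplified sketch (hypothetical helper, not from this file) of the idea in the comment above: summing from a random offset so that concurrent size computations traverse different parts of the array first.

  def sketchSize(childSizes: Array[Int]): Int = {
    val offset =
      if (childSizes.length > 0) scala.util.Random.nextInt(childSizes.length) else 0
    var sz = 0
    var i = offset
    while (i < childSizes.length) { sz += childSizes(i); i += 1 }  // from the offset to the end
    i = 0
    while (i < offset) { sz += childSizes(i); i += 1 }             // then wrap around
    sz
  }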
-
+
final def updatedAt(pos: Int, nn: BasicNode, gen: Gen) = {
val len = array.length
val narr = new Array[BasicNode](len)
@@ -488,7 +488,7 @@ extends CNodeBase[K, V] {
narr(pos) = nn
new CNode[K, V](bitmap, narr, gen)
}
-
+
final def removedAt(pos: Int, flag: Int, gen: Gen) = {
val arr = array
val len = arr.length
@@ -497,7 +497,7 @@ extends CNodeBase[K, V] {
Array.copy(arr, pos + 1, narr, pos, len - pos - 1)
new CNode[K, V](bitmap ^ flag, narr, gen)
}
-
+
final def insertedAt(pos: Int, flag: Int, nn: BasicNode, gen: Gen) = {
val len = array.length
val bmp = bitmap
@@ -507,7 +507,7 @@ extends CNodeBase[K, V] {
Array.copy(array, pos, narr, pos + 1, len - pos)
new CNode[K, V](bmp | flag, narr, gen)
}
-
+
/** Returns a copy of this cnode such that all the i-nodes below it are copied
* to the specified generation `ngen`.
*/
@@ -525,17 +525,17 @@ extends CNodeBase[K, V] {
}
new CNode[K, V](bitmap, narr, ngen)
}
-
+
private def resurrect(inode: INode[K, V], inodemain: AnyRef): BasicNode = inodemain match {
case tn: TNode[_, _] => tn.copyUntombed
case _ => inode
}
-
+
final def toContracted(lev: Int): MainNode[K, V] = if (array.length == 1 && lev > 0) array(0) match {
case sn: SNode[K, V] => sn.copyTombed
case _ => this
} else this
-
+
// - if the branching factor is 1 for this CNode, and the child
// is a tombed SNode, returns its tombed version
// - otherwise, if there is at least one non-null node below,
@@ -559,12 +559,12 @@ extends CNodeBase[K, V] {
}
i += 1
}
-
+
new CNode[K, V](bmp, tmparray, gen).toContracted(lev)
}
-
+
private[mutable] def string(lev: Int): String = "CNode %x\n%s".format(bitmap, array.map(_.string(lev + 1)).mkString("\n"))
-
+
/* quiescently consistent - don't call concurrently with anything involving a GCAS!! */
protected def collectElems: Seq[(K, V)] = array flatMap {
case sn: SNode[K, V] => Some(sn.kvPair)
@@ -574,12 +574,12 @@ extends CNodeBase[K, V] {
case cn: CNode[K, V] => cn.collectElems
}
}
-
+
protected def collectLocalElems: Seq[String] = array flatMap {
case sn: SNode[K, V] => Some(sn.kvPair._2.toString)
case in: INode[K, V] => Some(in.toString.drop(14) + "(" + in.gen + ")")
}
-
+
override def toString = {
val elems = collectLocalElems
"CNode(sz: %d; %s)".format(elems.size, elems.sorted.mkString(", "))
@@ -588,7 +588,7 @@ extends CNodeBase[K, V] {
private[mutable] object CNode {
-
+
def dual[K, V](x: SNode[K, V], xhc: Int, y: SNode[K, V], yhc: Int, lev: Int, gen: Gen): MainNode[K, V] = if (lev < 35) {
val xidx = (xhc >>> lev) & 0x1f
val yidx = (yhc >>> lev) & 0x1f
@@ -604,7 +604,7 @@ private[mutable] object CNode {
} else {
new LNode(x.k, x.v, y.k, y.v)
}
-
+
}
@@ -620,9 +620,9 @@ private[mutable] case class RDCSS_Descriptor[K, V](old: INode[K, V], expectedmai
* lock-free snapshots which are used to implement linearizable lock-free size,
* iterator and clear operations. The cost of evaluating the (lazy) snapshot is
* distributed across subsequent updates, thus making snapshot evaluation horizontally scalable.
- *
+ *
* For details, see: http://lampwww.epfl.ch/~prokopec/ctries-snapshot.pdf
- *
+ *
* @author Aleksandar Prokopec
* @since 2.10
*/
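A minimal, hypothetical usage sketch of the class documented above:

  object CtrieSketch extends App {
    val ct = new Ctrie[Int, String]
    (1 to 1000).par.foreach(i => ct.update(i, i.toString))  // lock-free concurrent updates
    println(ct.size)       // linearizable: computed over a read-only snapshot
    println(ct.get(500))   // Some("500")
  }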
@@ -634,17 +634,17 @@ extends ConcurrentMap[K, V]
with Serializable
{
import Ctrie.computeHash
-
+
private var rootupdater = rtupd
@volatile var root = r
-
+
def this() = this(
INode.newRootNode,
AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
)
-
+
/* internal methods */
-
+
private def writeObject(out: java.io.ObjectOutputStream) {
val it = iterator
while (it.hasNext) {
@@ -654,11 +654,11 @@ extends ConcurrentMap[K, V]
}
out.writeObject(CtrieSerializationEnd)
}
-
+
private def readObject(in: java.io.ObjectInputStream) {
root = INode.newRootNode
rootupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[Ctrie[K, V]], classOf[AnyRef], "root")
-
+
var obj: AnyRef = null
do {
obj = in.readObject()
@@ -669,11 +669,11 @@ extends ConcurrentMap[K, V]
}
} while (obj != CtrieSerializationEnd)
}
-
+
@inline final def CAS_ROOT(ov: AnyRef, nv: AnyRef) = rootupdater.compareAndSet(this, ov, nv)
-
+
final def readRoot(abort: Boolean = false): INode[K, V] = RDCSS_READ_ROOT(abort)
-
+
@inline final def RDCSS_READ_ROOT(abort: Boolean = false): INode[K, V] = {
val r = /*READ*/root
r match {
@@ -681,7 +681,7 @@ extends ConcurrentMap[K, V]
case desc: RDCSS_Descriptor[K, V] => RDCSS_Complete(abort)
}
}
-
+
@tailrec private def RDCSS_Complete(abort: Boolean): INode[K, V] = {
val v = /*READ*/root
v match {
@@ -705,7 +705,7 @@ extends ConcurrentMap[K, V]
}
}
}
-
+
private def RDCSS_ROOT(ov: INode[K, V], expectedmain: MainNode[K, V], nv: INode[K, V]): Boolean = {
val desc = RDCSS_Descriptor(ov, expectedmain, nv)
if (CAS_ROOT(ov, desc)) {
@@ -713,27 +713,27 @@ extends ConcurrentMap[K, V]
/*READ*/desc.committed
} else false
}
-
+
@tailrec private def inserthc(k: K, hc: Int, v: V) {
val r = RDCSS_READ_ROOT()
if (!r.rec_insert(k, v, hc, 0, null, r.gen, this)) inserthc(k, hc, v)
}
-
+
@tailrec private def insertifhc(k: K, hc: Int, v: V, cond: AnyRef): Option[V] = {
val r = RDCSS_READ_ROOT()
-
+
val ret = r.rec_insertif(k, v, hc, cond, 0, null, r.gen, this)
if (ret eq null) insertifhc(k, hc, v, cond)
else ret
}
-
+
@tailrec private def lookuphc(k: K, hc: Int): AnyRef = {
val r = RDCSS_READ_ROOT()
val res = r.rec_lookup(k, hc, 0, null, r.gen, this)
if (res eq INodeBase.RESTART) lookuphc(k, hc)
else res
}
-
+
/* slower:
//@tailrec
private def lookuphc(k: K, hc: Int): AnyRef = {
@@ -746,31 +746,31 @@ extends ConcurrentMap[K, V]
}
}
*/
-
+
@tailrec private def removehc(k: K, v: V, hc: Int): Option[V] = {
val r = RDCSS_READ_ROOT()
val res = r.rec_remove(k, v, hc, 0, null, r.gen, this)
if (res ne null) res
else removehc(k, v, hc)
}
-
+
def string = RDCSS_READ_ROOT().string(0)
-
+
/* public methods */
-
+
override def seq = this
-
+
override def par = new ParCtrie(this)
-
+
override def empty: Ctrie[K, V] = new Ctrie[K, V]
-
+
final def isReadOnly = rootupdater eq null
-
+
final def nonReadOnly = rootupdater ne null
-
+
/** Returns a snapshot of this Ctrie.
* This operation is lock-free and linearizable.
- *
+ *
* The snapshot is lazily updated - the first time some branch
* in the snapshot or in this Ctrie is accessed, it is rewritten.
* This means that the work of rebuilding both the snapshot and this
@@ -783,17 +783,17 @@ extends ConcurrentMap[K, V]
if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r.copyToGen(new Gen, this), rootupdater)
else snapshot()
}
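For illustration only: a snapshot is an independent, still-mutable Ctrie, so later updates to the original are not visible in it (and vice versa):

  val ct = new Ctrie[String, Int]
  ct.update("a", 1)
  val snap = ct.snapshot()
  ct.update("a", 2)
  println(ct.get("a"))     // Some(2)
  println(snap.get("a"))   // Some(1): unaffected by the later update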
-
+
/** Returns a read-only snapshot of this Ctrie.
* This operation is lock-free and linearizable.
- *
+ *
* The snapshot is lazily updated - the first time some branch
* of this Ctrie is accessed, it is rewritten. The work of creating
* the snapshot is thus distributed across subsequent updates
* and accesses on this Ctrie by all threads.
* Note that, unlike with the `snapshot` method, the snapshot itself is never
* rewritten, but the obtained snapshot cannot be modified.
- *
+ *
* This method is used by other methods such as `size` and `iterator`.
*/
@tailrec final def readOnlySnapshot(): collection.Map[K, V] = {
@@ -802,106 +802,106 @@ extends ConcurrentMap[K, V]
if (RDCSS_ROOT(r, expmain, r.copyToGen(new Gen, this))) new Ctrie(r, null)
else readOnlySnapshot()
}
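For illustration only: `size` and `iterator` go through a read-only snapshot, so a traversal observes a consistent view even while other threads keep writing:

  val ct = new Ctrie[Int, Int]
  (1 to 100).foreach(i => ct.update(i, i))
  val ro = ct.readOnlySnapshot()
  ct.update(101, 101)      // not visible through `ro`
  println(ro.size)         // 100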
-
+
@tailrec final override def clear() {
val r = RDCSS_READ_ROOT()
if (!RDCSS_ROOT(r, r.gcasRead(this), INode.newRootNode[K, V])) clear()
}
-
+
final def lookup(k: K): V = {
val hc = computeHash(k)
lookuphc(k, hc).asInstanceOf[V]
}
-
+
final override def apply(k: K): V = {
val hc = computeHash(k)
val res = lookuphc(k, hc)
if (res eq null) throw new NoSuchElementException
else res.asInstanceOf[V]
}
-
+
final def get(k: K): Option[V] = {
val hc = computeHash(k)
Option(lookuphc(k, hc)).asInstanceOf[Option[V]]
}
-
+
override def put(key: K, value: V): Option[V] = {
val hc = computeHash(key)
insertifhc(key, hc, value, null)
}
-
+
final override def update(k: K, v: V) {
val hc = computeHash(k)
inserthc(k, hc, v)
}
-
+
final def +=(kv: (K, V)) = {
update(kv._1, kv._2)
this
}
-
+
final override def remove(k: K): Option[V] = {
val hc = computeHash(k)
removehc(k, null.asInstanceOf[V], hc)
}
-
+
final def -=(k: K) = {
remove(k)
this
}
-
+
def putIfAbsent(k: K, v: V): Option[V] = {
val hc = computeHash(k)
insertifhc(k, hc, v, INode.KEY_ABSENT)
}
-
+
def remove(k: K, v: V): Boolean = {
val hc = computeHash(k)
removehc(k, v, hc).nonEmpty
}
-
+
def replace(k: K, oldvalue: V, newvalue: V): Boolean = {
val hc = computeHash(k)
insertifhc(k, hc, newvalue, oldvalue.asInstanceOf[AnyRef]).nonEmpty
}
-
+
def replace(k: K, v: V): Option[V] = {
val hc = computeHash(k)
insertifhc(k, hc, v, INode.KEY_PRESENT)
}
-
+
def iterator: Iterator[(K, V)] =
if (nonReadOnly) readOnlySnapshot().iterator
else new CtrieIterator(0, this)
-
+
private def cachedSize() = {
val r = RDCSS_READ_ROOT()
r.cachedSize(this)
}
-
+
override def size: Int =
if (nonReadOnly) readOnlySnapshot().size
else cachedSize()
-
+
override def stringPrefix = "Ctrie"
-
+
}
object Ctrie extends MutableMapFactory[Ctrie] {
val inodeupdater = AtomicReferenceFieldUpdater.newUpdater(classOf[INodeBase[_, _]], classOf[MainNode[_, _]], "mainnode")
-
+
implicit def canBuildFrom[K, V]: CanBuildFrom[Coll, (K, V), Ctrie[K, V]] = new MapCanBuildFrom[K, V]
-
+
def empty[K, V]: Ctrie[K, V] = new Ctrie[K, V]
-
+
@inline final def computeHash[K](k: K): Int = {
var hcode = k.hashCode
hcode = hcode * 0x9e3775cd
hcode = java.lang.Integer.reverseBytes(hcode)
hcode * 0x9e3775cd
}
-
+
}
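A hedged illustration of the effect of the mixing in `computeHash`: keys with nearly identical hashCodes are typically spread across different top-level slices.

  for (k <- 1 to 4)
    println("%d -> %08x -> level-0 index %d".format(
      k, Ctrie.computeHash(k), Ctrie.computeHash(k) & 0x1f))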
@@ -911,11 +911,11 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
var depth = -1
var subiter: Iterator[(K, V)] = null
var current: KVNode[K, V] = null
-
+
if (mustInit) initialize()
-
+
def hasNext = (current ne null) || (subiter ne null)
-
+
def next() = if (hasNext) {
var r: (K, V) = null
if (subiter ne null) {
@@ -927,7 +927,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
}
r
} else Iterator.empty.next()
-
+
private def readin(in: INode[K, V]) = in.gcasRead(ct) match {
case cn: CNode[K, V] =>
depth += 1
@@ -942,19 +942,19 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
case null =>
current = null
}
-
+
@inline private def checkSubiter() = if (!subiter.hasNext) {
subiter = null
advance()
}
-
+
@inline private def initialize() {
assert(ct.isReadOnly)
-
+
val r = ct.RDCSS_READ_ROOT()
readin(r)
}
-
+
def advance(): Unit = if (depth >= 0) {
val npos = stackpos(depth) + 1
if (npos < stack(depth).length) {
@@ -970,19 +970,19 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
advance()
}
} else current = null
-
+
protected def newIterator(_lev: Int, _ct: Ctrie[K, V], _mustInit: Boolean) = new CtrieIterator[K, V](_lev, _ct, _mustInit)
-
+
protected def dupTo(it: CtrieIterator[K, V]) = {
it.level = this.level
it.ct = this.ct
it.depth = this.depth
it.current = this.current
-
+
// these need a deep copy
Array.copy(this.stack, 0, it.stack, 0, 7)
Array.copy(this.stackpos, 0, it.stackpos, 0, 7)
-
+
// this one needs to be evaluated
if (this.subiter == null) it.subiter = null
else {
@@ -991,7 +991,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
it.subiter = lst.iterator
}
}
-
+
/** Returns a sequence of iterators over subsets of this iterator.
* It's used to ease the implementation of splitters for a parallel version of the Ctrie.
*/
@@ -1026,7 +1026,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
this.level += 1
Seq(this)
}
-
+
def printDebug {
println("ctrie iterator")
println(stackpos.mkString(","))
@@ -1034,7 +1034,7 @@ private[collection] class CtrieIterator[K, V](var level: Int, private var ct: Ct
println("curr.: " + current)
println(stack.mkString("\n"))
}
-
+
}
@@ -1048,20 +1048,20 @@ private[mutable] case object CtrieSerializationEnd
private[mutable] object Debug {
import collection._
-
+
lazy val logbuffer = new java.util.concurrent.ConcurrentLinkedQueue[AnyRef]
-
+
def log(s: AnyRef) = logbuffer.add(s)
-
+
def flush() {
for (s <- JavaConversions.asScalaIterator(logbuffer.iterator())) Console.out.println(s.toString)
logbuffer.clear()
}
-
+
def clear() {
logbuffer.clear()
}
-
+
}