From ada6771679aa63e8aa57a294dfb268b2a7a32df4 Mon Sep 17 00:00:00 2001
From: Aleksandar Prokopec
Date: Wed, 15 Feb 2012 13:08:43 +0100
Subject: Add lazy size evaluation to Ctries.

The size of the Ctrie is now cached, and it is only recomputed for those
parts of the Ctrie that changed since the last snapshot.
---
 .../scala/collection/mutable/CNodeBase.java         | 35 ++++++++++++++++++
 src/library/scala/collection/mutable/Ctrie.scala    | 44 +++++++++++++++++++++-
 src/library/scala/collection/mutable/MainNode.java  |  4 ++
 test/benchmarking/ParCtrie-size.scala               | 34 +++++++++++++++++
 4 files changed, 116 insertions(+), 1 deletion(-)
 create mode 100644 src/library/scala/collection/mutable/CNodeBase.java
 create mode 100644 test/benchmarking/ParCtrie-size.scala

diff --git a/src/library/scala/collection/mutable/CNodeBase.java b/src/library/scala/collection/mutable/CNodeBase.java
new file mode 100644
index 0000000000..4374943b8d
--- /dev/null
+++ b/src/library/scala/collection/mutable/CNodeBase.java
@@ -0,0 +1,35 @@
+/*                     __                                               *\
+**     ________ ___   / /  ___     Scala API                            **
+**    / __/ __// _ | / /  / _ |    (c) 2003-2012, LAMP/EPFL             **
+**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
+** /____/\___/_/ |_/____/_/ | |                                         **
+**                          |/                                          **
+\*                                                                      */
+
+package scala.collection.mutable;
+
+
+
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+
+
+
+abstract class CNodeBase<K, V> extends MainNode<K, V> {
+
+    public static final AtomicIntegerFieldUpdater<CNodeBase> updater = AtomicIntegerFieldUpdater.newUpdater(CNodeBase.class, "csize");
+
+    public volatile int csize = -1;
+
+    public boolean CAS_SIZE(int oldval, int nval) {
+        return updater.compareAndSet(this, oldval, nval);
+    }
+
+    public void WRITE_SIZE(int nval) {
+        updater.set(this, nval);
+    }
+
+    public int READ_SIZE() {
+        return updater.get(this);
+    }
+
+}
\ No newline at end of file
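The CNodeBase class above gives every C-node a csize field that starts out as -1 (meaning "size not yet computed") and is published through a compare-and-set, so threads that race to compute the size of the same node all end up agreeing on a single cached value. As a stand-alone illustration of that idiom (a hypothetical class, not part of the patch, using a plain AtomicInteger instead of a field updater):

    import java.util.concurrent.atomic.AtomicInteger

    // -1 means "size unknown"; the first successful CAS publishes a computed
    // size, and every later call simply reads the published value.
    class CachedSize(compute: () => Int) {
      private val csize = new AtomicInteger(-1)

      def cachedSize(): Int = {
        val curr = csize.get
        if (curr != -1) curr
        else {
          val sz = compute()            // several threads may compute this concurrently
          csize.compareAndSet(-1, sz)   // but only one result is actually published
          csize.get                     // everyone returns the published value
        }
      }
    }

The field-updater variant used by CNodeBase behaves the same way, but avoids allocating a separate AtomicInteger per node.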
diff --git a/src/library/scala/collection/mutable/Ctrie.scala b/src/library/scala/collection/mutable/Ctrie.scala
index dbd2129f0c..f208d6555e 100644
--- a/src/library/scala/collection/mutable/Ctrie.scala
+++ b/src/library/scala/collection/mutable/Ctrie.scala
@@ -360,6 +360,11 @@ private[mutable] final class INode[K, V](bn: MainNode[K, V], g: Gen) extends INo
 
   final def isNullInode(ct: Ctrie[K, V]) = GCAS_READ(ct) eq null
 
+  final def cachedSize(ct: Ctrie[K, V]): Int = {
+    val m = GCAS_READ(ct)
+    m.cachedSize(ct)
+  }
+
   /* this is a quiescent method! */
   def string(lev: Int) = "%sINode -> %s".format("  " * lev, mainnode match {
     case null => "<null>"
@@ -389,6 +394,8 @@ private[mutable] final class FailedNode[K, V](p: MainNode[K, V]) extends MainNod
 
   def string(lev: Int) = throw new UnsupportedOperationException
 
+  def cachedSize(ct: AnyRef): Int = throw new UnsupportedOperationException
+
   override def toString = "FailedNode(%s)".format(p)
 
 }
@@ -414,6 +421,7 @@ extends MainNode[K, V] with KVNode[K, V] {
   final def copyTombed = new TNode(k, v, hc)
   final def copyUntombed = new SNode(k, v, hc)
   final def kvPair = (k, v)
+  final def cachedSize(ct: AnyRef): Int = 1
   final def string(lev: Int) = ("  " * lev) + "TNode(%s, %s, %x, !)".format(k, v, hc)
 }
 
@@ -432,12 +440,37 @@ extends MainNode[K, V] {
     }
   }
   def get(k: K) = listmap.get(k)
+  def cachedSize(ct: AnyRef): Int = listmap.size
   def string(lev: Int) = ("  " * lev) + "LNode(%s)".format(listmap.mkString(", "))
 }
 
 
 private[mutable] final class CNode[K, V](final val bitmap: Int, final val array: Array[BasicNode], final val gen: Gen)
-extends MainNode[K, V] {
+extends CNodeBase[K, V] {
+
+  // this should only be called from within read-only snapshots
+  final def cachedSize(ct: AnyRef) = {
+    val currsz = READ_SIZE()
+    if (currsz != -1) currsz
+    else {
+      val sz = computeSize(ct.asInstanceOf[Ctrie[K, V]])
+      while (READ_SIZE() == -1) CAS_SIZE(-1, sz)
+      READ_SIZE()
+    }
+  }
+
+  private def computeSize(ct: Ctrie[K, V]): Int = {
+    var i = 0
+    var sz = 0
+    while (i < array.length) {
+      array(i) match {
+        case sn: SNode[_, _] => sz += 1
+        case in: INode[K, V] => sz += in.cachedSize(ct)
+      }
+      i += 1
+    }
+    sz
+  }
 
   final def updatedAt(pos: Int, nn: BasicNode, gen: Gen) = {
     val len = array.length
@@ -830,6 +863,15 @@ extends ConcurrentMap[K, V]
     if (nonReadOnly) readOnlySnapshot().iterator
     else new CtrieIterator(0, this)
 
+  private def cachedSize() = {
+    val r = RDCSS_READ_ROOT()
+    r.cachedSize(this)
+  }
+
+  override def size: Int =
+    if (nonReadOnly) readOnlySnapshot().size
+    else cachedSize()
+
   override def stringPrefix = "Ctrie"
 
 }
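Taken together, the Ctrie.scala changes make size a two-step operation: on a live (read-write) Ctrie, size first takes a read-only snapshot and asks the snapshot for its size; on a read-only Ctrie, the size is assembled from the per-C-node csize caches, so only C-nodes that were copied by updates since the last snapshot (and therefore still hold csize == -1) need to be recomputed. A rough usage sketch, assuming only the Ctrie API as it appears in this patch (the class was later renamed TrieMap):

    import scala.collection.mutable.Ctrie

    object CachedSizeExample extends App {
      val ct = new Ctrie[Int, Int]
      for (i <- 0 until 100000) ct(i) = i

      println(ct.size)      // takes a read-only snapshot, walks it once, fills the csize caches

      ct(100000) = 100000   // copies only the C-nodes on the path to the new key

      println(ct.size)      // recomputes csize only for the copied nodes; untouched subtries reuse their cache
    }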
diff --git a/src/library/scala/collection/mutable/MainNode.java b/src/library/scala/collection/mutable/MainNode.java
index 09bc858edc..0578de676d 100644
--- a/src/library/scala/collection/mutable/MainNode.java
+++ b/src/library/scala/collection/mutable/MainNode.java
@@ -20,6 +20,8 @@ abstract class MainNode<K, V> extends BasicNode {
 
     public volatile MainNode<K, V> prev = null;
 
+    public abstract int cachedSize(Object ct);
+
     public boolean CAS_PREV(MainNode<K, V> oldval, MainNode<K, V> nval) {
         return updater.compareAndSet(this, oldval, nval);
     }
@@ -29,6 +31,8 @@ abstract class MainNode<K, V> extends BasicNode {
     }
 
     // do we need this? unclear in the javadocs...
+    // apparently not - volatile reads are supposed to be safe
+    // regardless of whether there are concurrent ARFU updates
     public MainNode<K, V> READ_PREV() {
         return updater.get(this);
     }
diff --git a/test/benchmarking/ParCtrie-size.scala b/test/benchmarking/ParCtrie-size.scala
new file mode 100644
index 0000000000..5a6191fb62
--- /dev/null
+++ b/test/benchmarking/ParCtrie-size.scala
@@ -0,0 +1,34 @@
+
+
+
+
+import collection.parallel.mutable.ParCtrie
+
+
+
+object Size extends testing.Benchmark {
+  val length = sys.props("length").toInt
+  val par = sys.props("par").toInt
+  var parctrie = ParCtrie((0 until length) zip (0 until length): _*)
+
+  collection.parallel.ForkJoinTasks.defaultForkJoinPool.setParallelism(par)
+
+  def run = {
+    parctrie.size
+  }
+
+  var iteration = 0
+
+  override def tearDown() {
+    iteration += 1
+    if (iteration % 4 == 0) parctrie = ParCtrie((0 until length) zip (0 until length): _*)
+  }
+
+}
+
+
+
+
+
+
+
-- 
cgit v1.2.3
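The new benchmark builds a ParCtrie of length bindings, reads its size on every run, and rebuilds the trie every fourth iteration, so roughly one measurement in four starts from freshly allocated nodes. With the scala.testing.Benchmark harness it would be launched with the element count and parallelism passed as system properties and the number of runs as the argument, along the lines of scala -Dlength=500000 -Dpar=4 Size 10 (the exact invocation depends on how the benchmark is compiled and put on the classpath). A minimal sequential check of the same effect, again assuming only the Ctrie API shown in this patch:

    import scala.collection.mutable.Ctrie

    // Ad-hoc timing sketch (hypothetical, not part of the patch): the second
    // size call on the same snapshot should be far cheaper than the first,
    // because it only reads the already-cached csize of the root C-node.
    object CtrieSizeCheck {
      def micros[T](body: => T): Long = {
        val start = System.nanoTime()
        body
        (System.nanoTime() - start) / 1000
      }

      def main(args: Array[String]) {
        val ct = new Ctrie[Int, Int]
        for (i <- 0 until 500000) ct(i) = i
        val snap = ct.readOnlySnapshot()
        println("first size():  " + micros(snap.size) + " us")
        println("second size(): " + micros(snap.size) + " us")
      }
    }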