author     Kato Kazuyoshi <kato.kazuyoshi@gmail.com>   2011-06-18 14:32:44 +0000
committer  Kato Kazuyoshi <kato.kazuyoshi@gmail.com>   2011-06-18 14:32:44 +0000
commit     1db8243e72c0a4e780841e6b6de8bcff9ea4f768 (patch)
tree       180467a0cedbbdb8790345ea9004ab48f16b04d9
parent     3a63a796c84a8230d457b47fccfc54372950e485 (diff)
Formatting fixes for scala.util.
-rw-r--r--  src/library/scala/util/DynamicVariable.scala | 10
-rw-r--r--  src/library/scala/util/Properties.scala | 8
-rw-r--r--  src/library/scala/util/Random.scala | 6
-rw-r--r--  src/library/scala/util/Sorting.scala | 22
-rw-r--r--  src/library/scala/util/automata/BaseBerrySethi.scala | 17
-rw-r--r--  src/library/scala/util/automata/Inclusion.scala | 4
-rw-r--r--  src/library/scala/util/automata/NondetWordAutom.scala | 14
-rw-r--r--  src/library/scala/util/automata/WordBerrySethi.scala | 25
-rw-r--r--  src/library/scala/util/control/Breaks.scala | 11
-rw-r--r--  src/library/scala/util/control/ControlThrowable.scala | 17
-rw-r--r--  src/library/scala/util/grammar/HedgeRHS.scala | 6
-rw-r--r--  src/library/scala/util/grammar/TreeRHS.scala | 4
-rw-r--r--  src/library/scala/util/logging/ConsoleLogger.scala | 6
-rw-r--r--  src/library/scala/util/logging/Logged.scala | 24
-rw-r--r--  src/library/scala/util/matching/Regex.scala | 58
-rw-r--r--  src/library/scala/util/parsing/ast/AbstractSyntax.scala | 2
-rw-r--r--  src/library/scala/util/parsing/ast/Binders.scala | 167
-rw-r--r--  src/library/scala/util/parsing/combinator/ImplicitConversions.scala | 23
-rw-r--r--  src/library/scala/util/parsing/combinator/PackratParsers.scala | 75
-rw-r--r--  src/library/scala/util/parsing/combinator/Parsers.scala | 153
-rw-r--r--  src/library/scala/util/parsing/combinator/lexical/Lexical.scala | 21
-rw-r--r--  src/library/scala/util/parsing/combinator/lexical/Scanners.scala | 30
-rw-r--r--  src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala | 7
-rw-r--r--  src/library/scala/util/parsing/combinator/testing/Tester.scala | 33
-rw-r--r--  src/library/scala/util/parsing/combinator/token/StdTokens.scala | 3
-rw-r--r--  src/library/scala/util/parsing/combinator/token/Tokens.scala | 10
-rw-r--r--  src/library/scala/util/parsing/input/CharArrayReader.scala | 11
-rw-r--r--  src/library/scala/util/parsing/input/CharSequenceReader.scala | 18
-rw-r--r--  src/library/scala/util/parsing/input/NoPosition.scala | 5
-rw-r--r--  src/library/scala/util/parsing/input/OffsetPosition.scala | 18
-rw-r--r--  src/library/scala/util/parsing/input/PagedSeqReader.scala | 20
-rw-r--r--  src/library/scala/util/parsing/input/Position.scala | 43
-rw-r--r--  src/library/scala/util/parsing/input/Reader.scala | 21
-rw-r--r--  src/library/scala/util/parsing/syntax/package.scala | 5
-rw-r--r--  src/library/scala/util/regexp/Base.scala | 4
-rw-r--r--  src/library/scala/util/regexp/PointedHedgeExp.scala | 3
-rw-r--r--  src/library/scala/util/regexp/WordExp.scala | 14
37 files changed, 437 insertions, 481 deletions
diff --git a/src/library/scala/util/DynamicVariable.scala b/src/library/scala/util/DynamicVariable.scala
index fa9d17a4e9..740e2a3b3a 100644
--- a/src/library/scala/util/DynamicVariable.scala
+++ b/src/library/scala/util/DynamicVariable.scala
@@ -10,13 +10,13 @@ package scala.util
import java.lang.InheritableThreadLocal
-/** DynamicVariables provide a binding mechanism where the current
+/** `DynamicVariables` provide a binding mechanism where the current
* value is found through dynamic scope, but where access to the
* variable itself is resolved through static scope.
*
- * The current value can be retrieved with the value method. New values
- * should be pushed using the withValue method. Values pushed via
- * withValue only stay valid while the withValue's second argument, a
+ * The current value can be retrieved with the value method. New values
+ * should be pushed using the `withValue` method. Values pushed via
+ * `withValue` only stay valid while the `withValue`'s second argument, a
* parameterless closure, executes. When the second argument finishes,
* the variable reverts to the previous value.
*
@@ -28,7 +28,7 @@ import java.lang.InheritableThreadLocal
* }}}
*
* Each thread gets its own stack of bindings. When a
- * new thread is created, the DynamicVariable gets a copy
+ * new thread is created, the `DynamicVariable` gets a copy
* of the stack of bindings from the parent thread, and
* from then on the bindings for the new thread
* are independent of those for the original thread.
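
A minimal usage sketch of the withValue contract described above; the variable name and values are invented for illustration:

    import scala.util.DynamicVariable

    val greeting = new DynamicVariable[String]("hello")
    def emit() = println(greeting.value)

    emit()                                    // prints "hello"
    greeting.withValue("bonjour") { emit() }  // prints "bonjour" while the closure runs
    emit()                                    // reverts to "hello" afterwards
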
diff --git a/src/library/scala/util/Properties.scala b/src/library/scala/util/Properties.scala
index f86df0ee96..d2c9e6770b 100644
--- a/src/library/scala/util/Properties.scala
+++ b/src/library/scala/util/Properties.scala
@@ -11,7 +11,7 @@ package scala.util
import java.io.{ IOException, PrintWriter }
-/** Loads library.properties from the jar. */
+/** Loads `library.properties` from the jar. */
object Properties extends PropertiesTrait {
protected def propCategory = "library"
protected def pickJarBasedOn = classOf[ScalaObject]
@@ -59,7 +59,7 @@ private[scala] trait PropertiesTrait {
def scalaPropOrEmpty(name: String): String = scalaPropOrElse(name, "")
def scalaPropOrNone(name: String): Option[String] = Option(scalaProps.getProperty(name))
- /** The numeric portion of the runtime scala version, if this is a final
+ /** The numeric portion of the runtime Scala version, if this is a final
* release. If for instance the versionString says "version 2.9.0.final",
* this would return Some("2.9.0").
*
@@ -72,7 +72,7 @@ private[scala] trait PropertiesTrait {
if (segments.size == 4 && segments.last == "final") Some(segments take 3 mkString ".") else None
}
- /** The development scala version, if this is not a final release.
+ /** The development Scala version, if this is not a final release.
* The precise contents are not guaranteed, but it aims to provide a
* unique repository identifier (currently the svn revision) in the
* fourth dotted segment if the running version was built from source.
@@ -103,7 +103,7 @@ private[scala] trait PropertiesTrait {
def sourceReader = scalaPropOrElse("source.reader", "scala.tools.nsc.io.SourceReader")
/** This is the default text encoding, overridden (unreliably) with
- * JAVA_OPTS="-Dfile.encoding=Foo"
+ * `JAVA_OPTS="-Dfile.encoding=Foo"`
*/
def encodingString = propOrElse("file.encoding", "UTF-8")
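
A small sketch of how the documented properties are read; the values in the comments are illustrative, not guaranteed:

    import scala.util.Properties

    println(Properties.versionString)    // e.g. "version 2.9.0.final"
    println(Properties.encodingString)   // "UTF-8" unless overridden via -Dfile.encoding
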
diff --git a/src/library/scala/util/Random.scala b/src/library/scala/util/Random.scala
index cfd6154874..791582c9ec 100644
--- a/src/library/scala/util/Random.scala
+++ b/src/library/scala/util/Random.scala
@@ -100,8 +100,8 @@ class Random(val self: java.util.Random) {
/** Returns a new collection of the same type in a randomly chosen order.
*
- * @param coll the TraversableOnce to shuffle
- * @return the shuffled TraversableOnce
+ * @param coll the [[scala.collection.TraversableOnce]] to shuffle
+ * @return the shuffled [[scala.collection.TraversableOnce]]
*/
def shuffle[T, CC[X] <: TraversableOnce[X]](xs: CC[T])(implicit bf: CanBuildFrom[CC[T], T, CC[T]]): CC[T] = {
val buf = new ArrayBuffer[T] ++= xs
@@ -122,7 +122,7 @@ class Random(val self: java.util.Random) {
}
-/** The object <code>Random</code> offers a default implementation
+/** The object `Random` offers a default implementation
* of scala.util.Random and random-related convenience methods.
*
* @since 2.8
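
A short sketch of shuffle on a seeded generator; names and values are illustrative:

    import scala.util.Random

    val rng  = new Random(42)                         // fixed seed, so the run is reproducible
    val hand = rng.shuffle(List("A", "K", "Q", "J"))  // returns the same collection type, a List[String]
    val roll = rng.nextInt(6) + 1                     // uniform value in 1..6
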
diff --git a/src/library/scala/util/Sorting.scala b/src/library/scala/util/Sorting.scala
index f286670f25..bf460a118f 100644
--- a/src/library/scala/util/Sorting.scala
+++ b/src/library/scala/util/Sorting.scala
@@ -13,12 +13,12 @@ import scala.math.Ordering
/** The Sorting object provides functions that can sort various kinds of
* objects. You can provide a comparison function, or you can request a sort
- * of items that are viewable as <code>Ordered</code>. Some sorts that
+ * of items that are viewable as [[scala.math.Ordered]]. Some sorts that
* operate directly on a subset of value types are also provided. These
* implementations are derived from those in the Sun JDK.
*
- * Note that stability doesn't matter for value types, so use the quickSort
- * variants for those. <code>stableSort</code> is intended to be used with
+ * Note that stability doesn't matter for value types, so use the `quickSort`
+ * variants for those. `stableSort` is intended to be used with
* objects when the prior ordering should be preserved, where possible.
*
* @author Ross Judson
@@ -43,17 +43,15 @@ object Sorting {
stableSort(a, 0, a.length-1, new Array[K](a.length), Ordering[K].lt _)
}
- /** Sorts an array of <code>K</code> given an ordering function
- * <code>f</code>. <code>f</code> should return <code>true</code> iff
- * its first parameter is strictly less than its second parameter.
+ /** Sorts an array of `K` given an ordering function `f`.
+ * `f` should return `true` iff its first parameter is strictly less than its second parameter.
*/
def stableSort[K: ClassManifest](a: Array[K], f: (K, K) => Boolean) {
stableSort(a, 0, a.length-1, new Array[K](a.length), f)
}
/** Sorts an arbitrary sequence into an array, given a comparison function
- * that should return <code>true</code> iff parameter one is strictly less
- * than parameter two.
+ * that should return `true` iff parameter one is strictly less than parameter two.
*
* @param a the sequence to be sorted.
* @param f the comparison function.
@@ -125,7 +123,7 @@ object Sorting {
var l = off
var n = off + len - 1
if (len > 40) { // Big arrays, pseudomedian of 9
- var s = len / 8
+ val s = len / 8
l = med3(l, l+s, l+2*s)
m = med3(m-s, m, m+s)
n = med3(n-2*s, n-s, n)
@@ -226,7 +224,7 @@ object Sorting {
var l = off
var n = off + len - 1
if (len > 40) { // Big arrays, pseudomedian of 9
- var s = len / 8
+ val s = len / 8
l = med3(l, l+s, l+2*s)
m = med3(m-s, m, m+s)
n = med3(n-2*s, n-s, n)
@@ -330,7 +328,7 @@ object Sorting {
var l = off
var n = off + len - 1
if (len > 40) { // Big arrays, pseudomedian of 9
- var s = len / 8
+ val s = len / 8
l = med3(l, l+s, l+2*s)
m = med3(m-s, m, m+s)
n = med3(n-2*s, n-s, n)
@@ -438,7 +436,7 @@ object Sorting {
var l = off
var n = off + len - 1
if (len > 40) { // Big arrays, pseudomedian of 9
- var s = len / 8
+ val s = len / 8
l = med3(l, l+s, l+2*s)
m = med3(m-s, m, m+s)
n = med3(n-2*s, n-s, n)
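
A brief sketch of the quickSort/stableSort distinction drawn above; the Person type is invented for the example:

    import scala.util.Sorting

    val xs = Array(3, 1, 4, 1, 5)
    Sorting.quickSort(xs)   // in-place; stability is irrelevant for value types

    case class Person(name: String, age: Int)
    val people = Array(Person("ann", 30), Person("bob", 30), Person("cat", 20))
    // stableSort keeps the relative order of elements the ordering function treats as equal
    Sorting.stableSort(people, (p: Person, q: Person) => p.age < q.age)
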
diff --git a/src/library/scala/util/automata/BaseBerrySethi.scala b/src/library/scala/util/automata/BaseBerrySethi.scala
index 4b1003ba33..51e15f0e71 100644
--- a/src/library/scala/util/automata/BaseBerrySethi.scala
+++ b/src/library/scala/util/automata/BaseBerrySethi.scala
@@ -15,10 +15,9 @@ import scala.collection.{ mutable, immutable }
// todo: replace global variable pos with acc
-/** this turns a regexp over A into a NondetWorkAutom over A using the
- * celebrated position automata construction (also called Berry-Sethi or
- * Glushkov)
- */
+/** This class turns a regular expression over A into a [[scala.util.automata.NondetWordAutom]]
+ * over A using the celebrated position automata construction (also called ''Berry-Sethi'' or ''Glushkov'').
+ */
abstract class BaseBerrySethi {
val lang: Base
import lang.{ Alt, Eps, Meta, RegExp, Sequ, Star }
@@ -47,10 +46,10 @@ abstract class BaseBerrySethi {
case _ => throw new IllegalArgumentException("unexpected pattern " + r.getClass())
}
- /** computes first( r ) for the word regexp r */
+ /** Computes `first(r)` for the word regexp `r`. */
protected def compFirst(r: RegExp): Set[Int] = doComp(r, compFirst)
- /** computes last( r ) for the regexp r */
+ /** Computes `last(r)` for the regexp `r`. */
protected def compLast(r: RegExp): Set[Int] = doComp(r, compLast)
/** Starts from the right-to-left
@@ -73,8 +72,7 @@ abstract class BaseBerrySethi {
follow(0)
}
- /** returns the first set of an expression, setting the follow set along
- * the way.
+ /** Returns the first set of an expression, setting the follow set along the way.
*
* @param fol1 ...
* @param r ...
@@ -94,8 +92,7 @@ abstract class BaseBerrySethi {
case _ => throw new IllegalArgumentException("unexpected pattern: " + r.getClass())
}
- /** returns "Sethi-length" of a pattern, creating the set of position
- * along the way.
+ /** Returns the "Sethi-length" of a pattern, creating the set of position along the way.
*
* @param r ...
*/
diff --git a/src/library/scala/util/automata/Inclusion.scala b/src/library/scala/util/automata/Inclusion.scala
index c45fca5824..63133998b4 100644
--- a/src/library/scala/util/automata/Inclusion.scala
+++ b/src/library/scala/util/automata/Inclusion.scala
@@ -12,7 +12,7 @@ package scala.util.automata
/** A fast test of language inclusion between minimal automata.
- * inspired by the AMoRE automata library
+ * inspired by the ''AMoRE automata library''.
*
* @author Burak Emir
* @version 1.0
@@ -21,7 +21,7 @@ trait Inclusion[A <: AnyRef] {
val labels: Seq[A]
- /** Returns true if dfa1 is included in dfa2.
+ /** Returns true if `dfa1` is included in `dfa2`.
*
* @param dfa1 ...
* @param dfa2 ...
diff --git a/src/library/scala/util/automata/NondetWordAutom.scala b/src/library/scala/util/automata/NondetWordAutom.scala
index 05bb442e75..ece0a6c5e3 100644
--- a/src/library/scala/util/automata/NondetWordAutom.scala
+++ b/src/library/scala/util/automata/NondetWordAutom.scala
@@ -16,7 +16,7 @@ import scala.collection.{ immutable, mutable }
* in the delta function. Default transitions are transitions that
* are taken when no other transitions can be applied.
* All states are reachable. Accepting states are those for which
- * the partial function 'finals' is defined.
+ * the partial function `finals` is defined.
*/
abstract class NondetWordAutom[T <: AnyRef] {
import immutable.BitSet
@@ -27,22 +27,22 @@ abstract class NondetWordAutom[T <: AnyRef] {
val delta: Array[mutable.Map[T, BitSet]]
val default: Array[BitSet]
- /** returns true if the state is final */
+ /** @return true if the state is final */
final def isFinal(state: Int) = finals(state) > 0
- /** returns tag of final state */
+ /** @return tag of final state */
final def finalTag(state: Int) = finals(state)
- /** returns true if the set of states contains at least one final state */
+ /** @return true if the set of states contains at least one final state */
final def containsFinal(Q: BitSet): Boolean = Q exists isFinal
- /** returns true if there are no accepting states */
+ /** @return true if there are no accepting states */
final def isEmpty = (0 until nstates) forall (x => !isFinal(x))
- /** returns a BitSet with the next states for given state and label */
+ /** @return a BitSet with the next states for given state and label */
def next(q: Int, a: T): BitSet = delta(q).getOrElse(a, default(q))
- /** returns a BitSet with the next states for given state and label */
+ /** @return a BitSet with the next states for given state and label */
def next(Q: BitSet, a: T): BitSet = next(Q, next(_, a))
def nextDefault(Q: BitSet): BitSet = next(Q, default)
diff --git a/src/library/scala/util/automata/WordBerrySethi.scala b/src/library/scala/util/automata/WordBerrySethi.scala
index ad15dc9ff3..4763b6fb9c 100644
--- a/src/library/scala/util/automata/WordBerrySethi.scala
+++ b/src/library/scala/util/automata/WordBerrySethi.scala
@@ -14,13 +14,12 @@ import scala.collection.{ immutable, mutable }
import mutable.{ HashSet, HashMap }
import scala.util.regexp.WordExp
-/** This class turns a regexp into a NondetWordAutom using the
- * celebrated position automata construction (also called Berry-Sethi or
- * Glushkov)
- *
- * @author Burak Emir
- * @version 1.0
- */
+/** This class turns a regular expression into a [[scala.util.automata.NondetWordAutom]]
+ * using the celebrated position automata construction (also called ''Berry-Sethi'' or ''Glushkov'').
+ *
+ * @author Burak Emir
+ * @version 1.0
+ */
abstract class WordBerrySethi extends BaseBerrySethi {
override val lang: WordExp
@@ -33,20 +32,20 @@ abstract class WordBerrySethi extends BaseBerrySethi {
protected var defaultq: Array[List[Int]] = _ // default transitions
protected var initials: Set[Int] = _
- /** Computes <code>first(r)</code> where the word regexp <code>r</code>.
+ /** Computes `first(r)` for the word regexp `r`.
*
* @param r the regular expression
- * @return the computed set <code>first(r)</code>
+ * @return the computed set `first(r)`
*/
protected override def compFirst(r: RegExp): Set[Int] = r match {
case x: Letter => Set(x.pos)
case _ => super.compFirst(r)
}
- /** Computes <code>last(r)</code> where the word regexp <code>r</code>.
+ /** Computes `last(r)` for the word regexp `r`.
*
* @param r the regular expression
- * @return the computed set <code>last(r)</code>
+ * @return the computed set `last(r)`
*/
protected override def compLast(r: RegExp): Set[Int] = r match {
case x: Letter => Set(x.pos)
@@ -66,11 +65,11 @@ abstract class WordBerrySethi extends BaseBerrySethi {
case _ => super.compFollow1(fol1, r)
}
- /** returns "Sethi-length" of a pattern, creating the set of position
+ /** Returns "Sethi-length" of a pattern, creating the set of position
* along the way
*/
- /** called at the leaves of the regexp */
+ /** Called at the leaves of the regexp */
protected def seenLabel(r: RegExp, i: Int, label: _labelT) {
labelAt = labelAt.updated(i, label)
this.labels += label
diff --git a/src/library/scala/util/control/Breaks.scala b/src/library/scala/util/control/Breaks.scala
index 331d30e0bb..80df8cc0bd 100644
--- a/src/library/scala/util/control/Breaks.scala
+++ b/src/library/scala/util/control/Breaks.scala
@@ -11,8 +11,9 @@
package scala.util.control
/** A class that can be instantiated for the break control abstraction.
- * Example usage:<pre>
+ * Example usage:
*
+ * <pre>
* val mybreaks = new Breaks
* import</b> mybreaks.{break, breakable}
*
@@ -22,7 +23,7 @@ package scala.util.control
* }
* }</pre>
*
- * Calls to break from one instantiation of Breaks will never
+ * Calls to break from one instantiation of `Breaks` will never
* target breakable objects of some other instantiation.
*/
class Breaks {
@@ -53,9 +54,9 @@ class Breaks {
}
}
- /* Break from dynamically closest enclosing breakable block
- * @note this might be different than the statically closest enclosing
- * block!
+ /* Break from dynamically closest enclosing breakable block.
+ *
+ * @note This might be different than the statically closest enclosing block!
*/
def break() { throw breakException }
}
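
Spelled out, the break/breakable pattern from the class comment looks roughly like this:

    import scala.util.control.Breaks

    val mybreaks = new Breaks
    import mybreaks.{ break, breakable }

    breakable {
      for (x <- 1 to 10) {
        if (x > 3) break()   // leaves the enclosing breakable block
        println(x)
      }
    }
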
diff --git a/src/library/scala/util/control/ControlThrowable.scala b/src/library/scala/util/control/ControlThrowable.scala
index 122b0c937a..4d1116ba6f 100644
--- a/src/library/scala/util/control/ControlThrowable.scala
+++ b/src/library/scala/util/control/ControlThrowable.scala
@@ -10,18 +10,17 @@
package scala.util.control
/**
- * A marker trait indicating that the <code>Throwable</code> it is mixed
+ * A marker trait indicating that the `Throwable` it is mixed
* into is intended for flow control.
*
- * <p>Note that <code>Throwable</code> subclasses which extend this trait
- * may extend any other <code>Throwable</code> subclass (eg.
- * <code>RuntimeException</code>) and are not required to extend
- * <code>Throwable</code> directly.</p>
+ * Note that `Throwable` subclasses which extend this trait
+ * may extend any other `Throwable` subclass (eg.
+ * `RuntimeException`) and are not required to extend
+ * `Throwable` directly.
*
- * <p>Instances of <code>Throwable</code> subclasses marked in
- * this way should not normally be caught. Where catch-all behaviour is
- * required <code>ControlThrowable</code>s should be propagated, for
- * example,</p>
+ * Instances of `Throwable` subclasses marked in this way should
+ * not normally be caught. Where catch-all behaviour is required
+ * `ControlThrowable`s should be propagated, for example:
*
* <pre>
* import scala.util.control.ControlThrowable
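
Where catch-all behaviour is required, the propagation idiom described above looks roughly like this sketch; runUserCode is a hypothetical action:

    import scala.util.control.ControlThrowable

    def runUserCode() { println(1 / 0) }   // hypothetical code that may fail

    try runUserCode()
    catch {
      case c: ControlThrowable => throw c                     // always let flow-control throwables pass through
      case e: Exception        => println("recovered: " + e)  // handle ordinary failures here
    }
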
diff --git a/src/library/scala/util/grammar/HedgeRHS.scala b/src/library/scala/util/grammar/HedgeRHS.scala
index 46c474a6eb..8fb3d4c5dc 100644
--- a/src/library/scala/util/grammar/HedgeRHS.scala
+++ b/src/library/scala/util/grammar/HedgeRHS.scala
@@ -12,11 +12,11 @@ package scala.util.grammar
abstract class HedgeRHS
-/** right hand side of a hedge production, deriving a single tree */
+/** Right hand side of a hedge production, deriving a single tree. */
case class ConsRHS(tnt: Int, hnt: Int) extends HedgeRHS
-/** right hand side of a hedge production, deriving any hedge */
+/** Right hand side of a hedge production, deriving any hedge. */
case object AnyHedgeRHS extends HedgeRHS
-/** right hand side of a hedge production, deriving the empty hedge */
+/** Right hand side of a hedge production, deriving the empty hedge. */
case object EmptyHedgeRHS extends HedgeRHS
diff --git a/src/library/scala/util/grammar/TreeRHS.scala b/src/library/scala/util/grammar/TreeRHS.scala
index b6e2b1a29f..ebe16b25bd 100644
--- a/src/library/scala/util/grammar/TreeRHS.scala
+++ b/src/library/scala/util/grammar/TreeRHS.scala
@@ -10,10 +10,10 @@
package scala.util.grammar
-/** right hand side of a tree production */
+/** Right hand side of a tree production. */
abstract class TreeRHS
-/** right hand side of a tree production, labelled with a letter from an alphabet */
+/** Right hand side of a tree production, labelled with a letter from an alphabet. */
case class LabelledRHS[A](label: A, hnt: Int) extends TreeRHS
case object AnyTreeRHS extends TreeRHS
diff --git a/src/library/scala/util/logging/ConsoleLogger.scala b/src/library/scala/util/logging/ConsoleLogger.scala
index 362a8e9f75..58284797b4 100644
--- a/src/library/scala/util/logging/ConsoleLogger.scala
+++ b/src/library/scala/util/logging/ConsoleLogger.scala
@@ -11,15 +11,15 @@
package scala.util.logging
/**
- * The trait <code>ConsoleLogger</code> is mixed into a concrete class who
- * has class <code>Logged</code> among its base classes.
+ * The trait `ConsoleLogger` is mixed into a concrete class that
+ * has class `Logged` among its base classes.
*
* @author Burak Emir
* @version 1.0
*/
trait ConsoleLogger extends Logged {
- /** logs argument to Console using <code>Console.println</code>
+ /** Logs the argument to `Console` using [[scala.Console.println]].
*/
override def log(msg: String): Unit = Console.println(msg)
}
diff --git a/src/library/scala/util/logging/Logged.scala b/src/library/scala/util/logging/Logged.scala
index 01757e1bfa..d23b38c569 100644
--- a/src/library/scala/util/logging/Logged.scala
+++ b/src/library/scala/util/logging/Logged.scala
@@ -9,17 +9,19 @@
package scala.util.logging
/** Mixing in Logged indicates that a class provides support for logging.
- * For instance:
-{{{
- // The developer of the library writes:
- class MyClass extends Logged {
- // do stuff, call log
- }
- // The user of the library instantiates:
- val x = new MyClass() with ConsoleLogger
-}}}
- * and the logging is sent to the [[scala.util.logging.ConsoleLogger]] object.
- */
+ *
+ * For instance:
+ * {{{
+ * // The developer of the library writes:
+ * class MyClass extends Logged {
+ * // do stuff, call log
+ * }
+ *
+ * // The user of the library instantiates:
+ * val x = new MyClass() with ConsoleLogger
+ * }}}
+ * and the logging is sent to the [[scala.util.logging.ConsoleLogger]] object.
+ */
trait Logged {
/** This method should log the message given as argument somewhere
* as a side-effect.
diff --git a/src/library/scala/util/matching/Regex.scala b/src/library/scala/util/matching/Regex.scala
index 663900dd0a..9ee2b37bbf 100644
--- a/src/library/scala/util/matching/Regex.scala
+++ b/src/library/scala/util/matching/Regex.scala
@@ -15,11 +15,9 @@ import java.util.regex.{ Pattern, Matcher }
/** This class provides methods for creating and using regular expressions.
* It is based on the regular expressions of the JDK since 1.4.
*
- * <p>
- * You can use special pattern syntax construct <code>(?idmsux-idmsux)</code> to switch
- * various regex compilation options like <code>CASE_INSENSITIVE</code> or <code>UNICODE_CASE</code>.
- * See <code>java.util.regex.Pattern</code> javadoc for details.
- * </p>
+ * You can use special pattern syntax construct `(?idmsux-idmsux)` to switch
+ * various regex compilation options like `CASE_INSENSITIVE` or `UNICODE_CASE`.
+ * See [[java.util.regex.Pattern]] for details.
*
* @author Thibaud Hottelier
* @author Philipp Haller
@@ -37,8 +35,7 @@ class Regex(regex: String, groupNames: String*) extends Serializable {
/** The compiled pattern */
val pattern = Pattern.compile(regex)
- /** Tries to match target (whole match) and returns
- * the matches.
+ /** Tries to match target (whole match) and returns the matches.
*
* @param target The string to match
* @return The matches
@@ -135,8 +132,7 @@ class Regex(regex: String, groupNames: String*) extends Serializable {
m.replaceFirst(replacement)
}
- /** Splits the provided character sequence around matches of this
- * regexp.
+ /** Splits the provided character sequence around matches of this regexp.
*
* @param toSplit The character sequence to split
* @return The array of strings computed by splitting the
@@ -150,11 +146,13 @@ class Regex(regex: String, groupNames: String*) extends Serializable {
}
/** This object defines inner classes that describe
- * regex matches. The class hierarchy is as follows.
+ * regex matches. The class hierarchy is as follows:
*
+ * {{{
* MatchData
- * | \
- * MatchIterator Match
+ * / \
+ * MatchIterator Match
+ * }}}
*/
object Regex {
@@ -175,25 +173,24 @@ object Regex {
/** The index of the first matched character, or -1 if nothing was matched */
def start: Int
- /** The index of the first matched character in group <code>i</code>,
+ /** The index of the first matched character in group `i`,
* or -1 if nothing was matched for that group */
def start(i: Int): Int
/** The index of the last matched character, or -1 if nothing was matched */
def end: Int
- /** The index following the last matched character in group <code>i</code>,
+ /** The index following the last matched character in group `i`,
* or -1 if nothing was matched for that group */
def end(i: Int): Int
- /** The matched string,
- * of <code>null</code> if nothing was matched */
+ /** The matched string, or `null` if nothing was matched */
def matched: String =
if (start >= 0) source.subSequence(start, end).toString
else null
- /** The matched string in group <code>i</code>,
- * or <code>null</code> if nothing was matched */
+ /** The matched string in group `i`,
+ * or `null` if nothing was matched */
def group(i: Int): String =
if (start(i) >= 0) source.subSequence(start(i), end(i)).toString
else null
@@ -202,25 +199,25 @@ object Regex {
def subgroups: List[String] = (1 to groupCount).toList map group
/** The char sequence before first character of match,
- * or <code>null</code> if nothing was matched */
+ * or `null` if nothing was matched */
def before: java.lang.CharSequence =
if (start >= 0) source.subSequence(0, start)
else null
- /** The char sequence before first character of match in group <code>i</code>,
- * or <code>null</code> if nothing was matched for that group */
+ /** The char sequence before first character of match in group `i`,
+ * or `null` if nothing was matched for that group */
def before(i: Int): java.lang.CharSequence =
if (start(i) >= 0) source.subSequence(0, start(i))
else null
/** Returns char sequence after last character of match,
- * or <code>null</code> if nothing was matched */
+ * or `null` if nothing was matched */
def after: java.lang.CharSequence =
if (end >= 0) source.subSequence(end, source.length)
else null
- /** The char sequence after last character of match in group <code>i</code>,
- * or <code>null</code> if nothing was matched for that group */
+ /** The char sequence after last character of match in group `i`,
+ * or `null` if nothing was matched for that group */
def after(i: Int): java.lang.CharSequence =
if (end(i) >= 0) source.subSequence(end(i), source.length)
else null
@@ -231,15 +228,14 @@ object Regex {
*
* @param id The group name
* @return The requested group
- * @throws <code>NoSuchElementException</code> if the requested
- * group name is not defined
+ * @throws NoSuchElementException if the requested group name is not defined
*/
def group(id: String): String = nameToIndex.get(id) match {
case None => throw new NoSuchElementException("group name "+id+" not defined")
case Some(index) => group(index)
}
- /** The matched string; equivalent to <code>matched.toString</code> */
+ /** The matched string; equivalent to `matched.toString` */
override def toString = matched
}
@@ -264,10 +260,10 @@ object Regex {
private lazy val ends: Array[Int] =
((0 to groupCount) map matcher.end).toArray
- /** The index of the first matched character in group <code>i</code> */
+ /** The index of the first matched character in group `i` */
def start(i: Int) = starts(i)
- /** The index following the last matched character in group <code>i</code> */
+ /** The index following the last matched character in group `i` */
def end(i: Int) = ends(i)
/** The match itself with matcher-dependent lazy vals forced,
@@ -312,13 +308,13 @@ object Regex {
/** The index of the first matched character */
def start: Int = matcher.start
- /** The index of the first matched character in group <code>i</code> */
+ /** The index of the first matched character in group `i` */
def start(i: Int): Int = matcher.start(i)
/** The index of the last matched character */
def end: Int = matcher.end
- /** The index following the last matched character in group <code>i</code> */
+ /** The index following the last matched character in group `i` */
def end(i: Int): Int = matcher.end(i)
/** The number of subgroups */
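
A compact sketch of the matching and MatchData accessors documented above; the Date pattern is invented for the example:

    val Date = """(\d\d\d\d)-(\d\d)-(\d\d)""".r   // three numbered groups

    "2011-06-18" match {
      case Date(year, month, day) => println(year + "/" + month + "/" + day)
      case _                      => println("no match")
    }

    for (m <- Date.findFirstMatchIn("tagged on 2011-06-18"))
      println("year = " + m.group(1) + ", matched = " + m.matched)
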
diff --git a/src/library/scala/util/parsing/ast/AbstractSyntax.scala b/src/library/scala/util/parsing/ast/AbstractSyntax.scala
index f0f0ec425f..220643a0d7 100644
--- a/src/library/scala/util/parsing/ast/AbstractSyntax.scala
+++ b/src/library/scala/util/parsing/ast/AbstractSyntax.scala
@@ -19,7 +19,7 @@ trait AbstractSyntax {
*/
trait Element extends Positional
- /** The base class for elements in the AST that represent names {@see Binders}.
+ /** The base class for elements in the AST that represent names [[scala.util.parsing.ast.Binders]].
*/
trait NameElement extends Element {
def name: String
diff --git a/src/library/scala/util/parsing/ast/Binders.scala b/src/library/scala/util/parsing/ast/Binders.scala
index 2ecb702446..c7b527984d 100644
--- a/src/library/scala/util/parsing/ast/Binders.scala
+++ b/src/library/scala/util/parsing/ast/Binders.scala
@@ -15,14 +15,10 @@ import scala.collection.mutable.Map
// TODO: avoid clashes when substituting
// TODO: check binders in the same scope are distinct
-/** <p>
- * This trait provides the core Scrap-Your-Boilerplate abstractions as
- * well as implementations for common datatypes.
- * </p>
- * <p>
- * Based on Ralph Laemmel's <a target="_top"
- * href="http://homepages.cwi.nl/~ralf/publications.html">SYB papers</a>.
- * </p>
+/** This trait provides the core Scrap-Your-Boilerplate abstractions as
+ * well as implementations for common datatypes.
+ *
+ * Based on Ralph Laemmel's [[http://homepages.cwi.nl/~ralf/publications.html SYB papers]].
*
* @author Adriaan Moors
*/
@@ -58,57 +54,52 @@ trait Mappable {
}
}
-/** <p>
- * This component provides functionality for enforcing variable binding
- * during parse-time.
- * </p>
- * <p>
- * When parsing simple languages, like Featherweight Scala, these parser
- * combinators will fully enforce the binding discipline. When names are
- * allowed to be left unqualified, these mechanisms would have to be
- * complemented by an extra phase that resolves names that couldn't be
- * resolved using the naive binding rules. (Maybe some machinery to
- * model `implicit` binders (e.g., `this` and imported qualifiers)
- * and selection on a binder will suffice?)
- * </p>
+/** This component provides functionality for enforcing variable binding
+ * during parse-time.
+ *
+ * When parsing simple languages, like Featherweight Scala, these parser
+ * combinators will fully enforce the binding discipline. When names are
+ * allowed to be left unqualified, these mechanisms would have to be
+ * complemented by an extra phase that resolves names that couldn't be
+ * resolved using the naive binding rules. (Maybe some machinery to
+ * model `implicit` binders (e.g., `this` and imported qualifiers)
+ * and selection on a binder will suffice?)
*
* @author Adriaan Moors
*/
trait Binders extends AbstractSyntax with Mappable {
/** A `Scope` keeps track of one or more syntactic elements that represent bound names.
- * The elements it contains share the same scope and must all be distinct, as determined by `==`.
+ * The elements it contains share the same scope and must all be distinct, as determined by `==`.
*
- * A `NameElement` `n` in the AST that is conceptually bound by a `Scope` `s`, is replaced by a
- * `BoundElement(n, s)'. (For example, in `val x:Int=x+1', the first `x` is modelled by a
- * Scope `s` that contains `x` and the second `x` is represented by a `BoundElement(`x`, s)')
- * The term (`x+1`) in scope of the Scope becomes an `UnderBinder(s, `x+1`).
+ * A `NameElement` `n` in the AST that is conceptually bound by a `Scope` `s`, is replaced by a
+ * `BoundElement(n, s)`. (For example, in `val x:Int=x+1`, the first `x` is modelled by a
+ * Scope `s` that contains `x` and the second `x` is represented by a `BoundElement(x, s)`)
+ * The term (`x+1`) in scope of the Scope becomes an `UnderBinder(s, x+1)`.
*
- * A `NameElement` `n` is bound by a `Scope` `s` if it is wrapped as a `BoundElement(`n`, s)', and
- * `s` has a binder element that is semantically equal (`equals` or `==`) to `n`.
+ * A `NameElement` `n` is bound by a `Scope` `s` if it is wrapped as a `BoundElement(n, s)`, and
+ * `s` has a binder element that is semantically equal (`equals` or `==`) to `n`.
*
- * A `Scope` is represented textually by its list of binder elements, followed by the scope's `id`.
- * For example: `[x, y]!1` represents the scope with `id` `1` and binder elements `x` and `y`.
- * (`id` is solely used for this textual representation.)
+ * A `Scope` is represented textually by its list of binder elements, followed by the scope's `id`.
+ * For example: `[x, y]!1` represents the scope with `id` `1` and binder elements `x` and `y`.
+ * (`id` is solely used for this textual representation.)
*/
class Scope[binderType <: NameElement] extends Iterable[binderType]{
private val substitution: Map[binderType, Element] =
new scala.collection.mutable.LinkedHashMap[binderType, Element] // a LinkedHashMap is ordered by insertion order -- important!
- /** Returns a unique number identifying this Scope (only used for representation purposes).
- */
+ /** Returns a unique number identifying this Scope (only used for representation purposes). */
val id: Int = _Binder.genId
/** Returns the binders in this scope.
- * For a typical let-binding, this is just the variable name. For an argument list to a method body,
- * there is one binder per formal argument.
+ * For a typical let-binding, this is just the variable name. For an argument list to a method body,
+ * there is one binder per formal argument.
*/
def iterator = substitution.keysIterator
- /** Return the `i`th binder in this scope.*/
+ /** Return the `i`th binder in this scope. */
def apply(i: Int): binderType = this.iterator.toList(i)
- /** Returns true if this container has a binder equal (==) to `b`
- */
+ /** Returns true if this container has a binder equal (as determined by `==`) to `b`. */
def binds(b: binderType): Boolean = substitution.contains(b)
def indexFor(b: binderType): Option[Int] = {
@@ -123,53 +114,53 @@ trait Binders extends AbstractSyntax with Mappable {
None
}
- /** Adds a new binder.
- * (e.g. the variable name in a local variable declaration)
+ /** Adds a new binder, for example the variable name in a local variable declaration.
*
* @param b a new binder that is distinct from the existing binders in this scope,
- * and shares their conceptual scope. canAddBinder(b)` must hold.`
+ * and shares their conceptual scope. `canAddBinder(b)` must hold.
* @return `binds(b)` and `getElementFor(b) eq b` will hold.
*/
def addBinder(b: binderType) { substitution += Pair(b, b) }
+ // TODO: strengthen this condition so that no binders may be added after this scope has been
+ // linked to its `UnderBinder` (i.e., while parsing, BoundElements may be added to the Scope
+ // associated to the UnderBinder, but after that, no changes are allowed, except for substitution)?
/** `canAddBinder` indicates whether `b` may be added to this scope.
*
- * TODO: strengthen this condition so that no binders may be added after this scope has been
- * linked to its `UnderBinder` (i.e., while parsing, BoundElements may be added to the Scope
- * associated to the UnderBinder, but after that, no changes are allowed, except for substitution)?
*
* @return true if `b` had not been added yet
*/
def canAddBinder(b: binderType): Boolean = !binds(b)
- /** ``Replaces'' the bound occurrences of a contained binder by their new value.
- * The bound occurrences of `b` are not actually replaced; the scope keeps track
- * of a substitution that maps every binder to its current value. Since a `BoundElement` is
- * a proxy for the element it is bound to by its binder, `substitute` may thus be thought of
- * as replacing all the bound occurrences of the given binder `b` by their new value `value`.
+ /** ''Replaces'' the bound occurrences of a contained binder by their new value.
+ * The bound occurrences of `b` are not actually replaced; the scope keeps track
+ * of a substitution that maps every binder to its current value. Since a `BoundElement` is
+ * a proxy for the element it is bound to by its binder, `substitute` may thus be thought of
+ * as replacing all the bound occurrences of the given binder `b` by their new value `value`.
*
- * @param b the binder whose bound occurrences should be given a new value. `binds(b)` must hold.
- * @param value the new value for the bound occurrences of `b`
- * @return `getElementFor(b) eq value` will hold.
+ * @param b the binder whose bound occurrences should be given a new value. `binds(b)` must hold.
+ * @param value the new value for the bound occurrences of `b`
+ * @return `getElementFor(b) eq value` will hold.
*/
def substitute(b: binderType, value: Element): Unit = substitution(b) = value
/** Returns the current value for the bound occurrences of `b`.
*
- * @param b the contained binder whose current value should be returned `binds(b)` must hold.
+ * @param b the contained binder whose current value should be returned `binds(b)` must hold.
*/
def getElementFor(b: binderType): Element = substitution(b)
override def toString: String = this.iterator.toList.mkString("[",", ","]")+"!"+id // TODO show substitution?
- /** Returns a list of strings that represent the binder elements, each tagged with this scope's id.*/
+ /** Returns a list of strings that represent the binder elements, each tagged with this scope's id. */
def bindersToString: List[String] = (for(b <- this.iterator) yield b+"!"+id).toList
- /** Return a new inheriting scope that won't check whether binding is respected until the scope is left (so as to support forward references) **/
+ /** Return a new inheriting scope that won't check whether binding is respected until the scope is left (so as to support forward references). */
def allowForwardRef: Scope[binderType] = this // TODO
/** Return a nested scope -- binders entered into it won't be visible in this scope, but if this scope allows forward references,
- * the binding in the returned scope also does, and thus the check that all variables are bound is deferred until this scope is left **/
+ * the binding in the returned scope also does, and thus the check that all variables are bound is deferred until this scope is left.
+ */
def nested: Scope[binderType] = this // TODO
def onEnter() {}
@@ -184,17 +175,17 @@ trait Binders extends AbstractSyntax with Mappable {
}
/** A `BoundElement` is bound in a certain scope `scope`, which keeps track of the actual element that
- * `el` stands for.
+ * `el` stands for.
*
- * A `BoundElement` is represented textually by its bound element, followed by its scope's `id`.
- * For example: `x@1` represents the variable `x` that is bound in the scope with `id` `1`.
+ * A `BoundElement` is represented textually by its bound element, followed by its scope's `id`.
+ * For example: `x@1` represents the variable `x` that is bound in the scope with `id` `1`.
*
- * @note `scope.binds(el)` holds before and after.
+ * @note `scope.binds(el)` holds before and after.
*/
case class BoundElement[boundElement <: NameElement](el: boundElement, scope: Scope[boundElement]) extends NameElement with Proxy with BindingSensitive {
/** Returns the element this `BoundElement` stands for.
- * The `Proxy` trait ensures `equals`, `hashCode` and `toString` are forwarded to
- * the result of this method.
+ * The `Proxy` trait ensures `equals`, `hashCode` and `toString` are forwarded to
+ * the result of this method.
*/
def self: Element = scope.getElementFor(el)
@@ -206,8 +197,7 @@ trait Binders extends AbstractSyntax with Mappable {
def alpha_==[t <: NameElement](other: BoundElement[t]): Boolean = scope.indexFor(el) == other.scope.indexFor(other.el)
}
- /** A variable that escaped its scope (i.e., a free variable) -- we don't deal very well with these yet
- */
+ /** A variable that escaped its scope (i.e., a free variable) -- we don't deal very well with these yet. */
class UnboundElement[N <: NameElement](private val el: N) extends NameElement {
def name = el.name+"@??"
}
@@ -216,20 +206,22 @@ trait Binders extends AbstractSyntax with Mappable {
// if we knew a more specific type for the element that the bound element represents, this could make sense
// implicit def BoundElementProxy[t <: NameElement](e: BoundElement[t]): Element = e.self
- /** Represents an element with variables that are bound in a certain scope.
- */
+ /** Represents an element with variables that are bound in a certain scope. */
class UnderBinder[binderType <: NameElement, elementT <% Mappable[elementT]](val scope: Scope[binderType], private[Binders] val element: elementT) extends Element with BindingSensitive {
override def toString: String = "(" + scope.toString + ") in { "+element.toString+" }"
/** Alpha-equivalence -- TODO
- * Returns true if the `element` of the `other` `UnderBinder` is equal to this `element` up to alpha-conversion.
+ * Returns true if the `element` of the `other` `UnderBinder` is equal to this `element` up to alpha-conversion.
*
- * That is, regular equality is used for all elements but `BoundElement`s: such an element is
- * equal to a `BoundElement` in `other` if their binders are equal. Binders are equal if they
- * are at the same index in their respective scope.
+ * That is, regular equality is used for all elements but `BoundElement`s: such an element is
+ * equal to a `BoundElement` in `other` if their binders are equal. Binders are equal if they
+ * are at the same index in their respective scope.
*
- * Example: UnderBinder([x, y]!1, x@1) alpha_== UnderBinder([a, b]!2, a@2)
- * ! (UnderBinder([x, y]!1, y@1) alpha_== UnderBinder([a, b]!2, a@2))
+ * Example:
+ * {{{
+ * UnderBinder([x, y]!1, x@1) alpha_== UnderBinder([a, b]!2, a@2)
+ * ! (UnderBinder([x, y]!1, y@1) alpha_== UnderBinder([a, b]!2, a@2))
+ * }}}
*/
/*def alpha_==[bt <: binderType, st <: elementT](other: UnderBinder[bt, st]): Boolean = {
var result = true
@@ -259,7 +251,7 @@ trait Binders extends AbstractSyntax with Mappable {
def extract: elementT = cloneElementNoBoundElements
def extract(subst: scala.collection.immutable.Map[NameElement, NameElement]): elementT = cloneElementWithSubst(subst)
- /** Get a string representation of element, normally we don't allow direct access to element, but just getting a string representation is ok*/
+ /** Get a string representation of element, normally we don't allow direct access to element, but just getting a string representation is ok. */
def elementToString: String = element.toString
}
@@ -291,12 +283,12 @@ trait Binders extends AbstractSyntax with Mappable {
def unit[bt <: NameElement, elementT <% Mappable[elementT]](x: elementT) = UnderBinder(new Scope[bt](), x)
}
- /** If a list of `UnderBinder`s all have the same scope, they can be turned in to an UnderBinder
- * containing a list of the elements in the original `UnderBinder`.
+ /** If a list of `UnderBinder`s all have the same scope, they can be turned into an `UnderBinder`
+ * containing a list of the elements in the original `UnderBinder`.
*
- * The name `sequence` comes from the fact that this method's type is equal to the type of monadic sequence.
+ * The name `sequence` comes from the fact that this method's type is equal to the type of monadic sequence.
*
- * @note `!orig.isEmpty` implies `orig.forall(ub => ub.scope eq orig(0).scope)`
+ * @note `!orig.isEmpty` implies `orig.forall(ub => ub.scope eq orig(0).scope)`
*
*/
def sequence[bt <: NameElement, st <% Mappable[st]](orig: List[UnderBinder[bt, st]]): UnderBinder[bt, List[st]] =
@@ -307,13 +299,13 @@ trait Binders extends AbstractSyntax with Mappable {
def unsequence[bt <: NameElement, st <% Mappable[st]](orig: UnderBinder[bt, List[st]]): List[UnderBinder[bt, st]] =
orig.element.map(sc => UnderBinder(orig.scope, sc))
+ //TODO: more documentation
/** An environment that maps a `NameElement` to the scope in which it is bound.
- * This can be used to model scoping during parsing.
- *
- * (This class is similar to Burak's ECOOP paper on pattern matching, except that we use `==`
- * instead of `eq`, thus types can't be unified in general)
+ * This can be used to model scoping during parsing.
*
- * TODO: more documentation
+ * @note This class uses similar techniques as described by ''Burak Emir'' in
+ * [[http://library.epfl.ch/theses/?nr=3899 Object-oriented pattern matching]],
+ * but uses `==` instead of `eq`, thus types can't be unified in general.
*/
abstract class BinderEnv {
def apply[A <: NameElement](v: A): Option[Scope[A]]
@@ -328,17 +320,16 @@ trait Binders extends AbstractSyntax with Mappable {
def apply[A <: NameElement](v: A): Option[Scope[A]] = None
}
+ // TODO: move this to some utility object higher in the scala hierarchy?
/** Returns a given result, but executes the supplied closure before returning.
- * (The effect of this closure does not influence the returned value.)
+ * (The effect of this closure does not influence the returned value.)
*
- * TODO: move this to some utility object higher in the scala hierarchy?
- *
- * @param result the result to be returned
- * @param block code to be executed, purely for its side-effects
+ * @param result the result to be returned
+ * @param block code to be executed, purely for its side-effects
*/
trait ReturnAndDo[T]{
def andDo(block: => Unit): T
- } // gotta love Smalltalk syntax :-)
+ }
def return_[T](result: T): ReturnAndDo[T] =
new ReturnAndDo[T] {
diff --git a/src/library/scala/util/parsing/combinator/ImplicitConversions.scala b/src/library/scala/util/parsing/combinator/ImplicitConversions.scala
index 32261c102e..e993628e88 100644
--- a/src/library/scala/util/parsing/combinator/ImplicitConversions.scala
+++ b/src/library/scala/util/parsing/combinator/ImplicitConversions.scala
@@ -9,19 +9,22 @@
package scala.util.parsing.combinator
-/** This object contains implicit conversions that come in handy when using the `^^` combinator
- * {@see Parsers} to construct an AST from the concrete syntax.
- *<p>
+/** This object contains implicit conversions that come in handy when using the `^^` combinator.
+ *
+ * Refer to [[scala.util.parsing.combinator.Parsers]] to construct an AST from the concrete syntax.
+ *
* The reason for this is that the sequential composition combinator (`~`) combines its constituents
* into a ~. When several `~`s are combined, this results in nested `~`s (to the left).
- * The `flatten*` coercions makes it easy to apply an `n`-argument function to a nested ~ of
- * depth (`n-1`)</p>
- *<p>
- * The `headOptionTailToFunList` converts a function that takes a List[A] to a function that
- * accepts a ~[A, Option[List[A]]] (this happens when, e.g., parsing something of the following
- * shape: p ~ opt("." ~ repsep(p, ".")) -- where `p` is a parser that yields an A)</p>
+ * The `flatten*` coercions make it easy to apply an `n`-argument function to a nested `~` of
+ * depth `n-1`.
+ *
+ * The `headOptionTailToFunList` converts a function that takes a `List[A]` to a function that
+ * accepts a `~[A, Option[List[A]]]` (this happens when parsing something of the following
+ * shape: `p ~ opt("." ~ repsep(p, "."))` -- where `p` is a parser that yields an `A`).
*
- * @author Martin Odersky, Iulian Dragos, Adriaan Moors
+ * @author Martin Odersky
+ * @author Iulian Dragos
+ * @author Adriaan Moors
*/
trait ImplicitConversions { self: Parsers =>
implicit def flatten2[A, B, C] (f: (A, B) => C) =
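
A sketch of what the flatten* coercions buy in practice; KeyValueParser and Assign are invented for the example. With the trait mixed in, a two-argument function can be passed to ^^ directly instead of pattern matching on the nested ~:

    import scala.util.parsing.combinator.{ RegexParsers, ImplicitConversions }

    object KeyValueParser extends RegexParsers with ImplicitConversions {
      case class Assign(key: String, value: String)

      // without ImplicitConversions this would need: ^^ { case k ~ v => Assign(k, v) }
      def assign: Parser[Assign] = ("""\w+""".r <~ "=") ~ """\w+""".r ^^ Assign
    }
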
diff --git a/src/library/scala/util/parsing/combinator/PackratParsers.scala b/src/library/scala/util/parsing/combinator/PackratParsers.scala
index f53b998e07..3f09ae506d 100644
--- a/src/library/scala/util/parsing/combinator/PackratParsers.scala
+++ b/src/library/scala/util/parsing/combinator/PackratParsers.scala
@@ -14,44 +14,36 @@ import scala.util.parsing.input.{ Reader, Position }
import scala.collection.mutable
/**
- * <p>
- * <code>PackratParsers</code> is a component that extends the parser combinators
- * provided by <a href="Parsers.html"><code>Parsers</code></a> with a memoization facility
- * (``Packrat Parsing'').
- * </p>
- * <p>
- * Packrat Parsing is a technique for implementing backtracking, recursive-descent parsers, with the
- * advantage that it guarantees unlimited lookahead and a linear parse time. Using this technique,
- * left recursive grammars can also be accepted.
- * </p>
- * <p>
- * Using <code>PackratParsers</code> is very similar to using <code>Parsers</code>:
- * <ul>
- * <li> any class/trait that extends <code>Parsers</code> (directly or through a subclass) can
- * mix in <code>PackratParsers</code>. Example:
- * <code>object MyGrammar extends StandardTokenParsers with PackratParsers </code>
- * <li> each grammar production previously declared as a <code>def</code> without formal parameters
- * becomes a <code>lazy val</code>, and its type is changed from <code>Parser[Elem]</code>
- * to <code>PackratParser[Elem]</code>. So, for example, <code>def production: Parser[Int] = {...}</code>
- * becomes <code>lazy val production: PackratParser[Int] = {...}</code>
- * <li> Important: using <code>PackratParser</code>s is not an ``all or nothing'' decision. They
- * can be free mixed with regular <code>Parser</code>s in a single grammar.
- * </ul>
- * </p>
- * <p>
- * Cached parse results are attached to the <i>input</i>, not the grammar.
- * Therefore, <code>PackratsParser</code>s require a <code>PackratReader</code> as input, which
- * adds memoization to an underlying <code>Reader</code>. Programmers can create <code>PackratReader</code>
- * objects either manually, as in <code>production(new PackratReader(new lexical.Scanner("input")))</code>,
- * but the common way should be to rely on the combinator <code>phrase</code> to wrap a given
- * input with a <code>PackratReader</code> if the input is not one itself.
- * </p>
+ * `PackratParsers` is a component that extends the parser combinators provided by
+ * [[scala.util.parsing.combinator.Parsers]] with a memoization facility (``Packrat Parsing'').
+ *
+ * Packrat Parsing is a technique for implementing backtracking, recursive-descent parsers, with the
+ * advantage that it guarantees unlimited lookahead and a linear parse time. Using this technique,
+ * left recursive grammars can also be accepted.
+ *
+ * Using `PackratParsers` is very similar to using `Parsers`:
+ * - any class/trait that extends `Parsers` (directly or through a subclass) can mix in `PackratParsers`.
+ * Example: `object MyGrammar extends StandardTokenParsers with PackratParsers`
+ * - each grammar production previously declared as a `def` without formal parameters
+ * becomes a `lazy val`, and its type is changed from `Parser[Elem]` to `PackratParser[Elem]`.
+ * So, for example, `def production: Parser[Int] = {...}`
+ * becomes `lazy val production: PackratParser[Int] = {...}`
+ * - Important: using `PackratParser`s is not an ``all or nothing'' decision.
+ * They can be freely mixed with regular `Parser`s in a single grammar.
+ *
+ * Cached parse results are attached to the ''input'', not the grammar.
+ * Therefore, `PackratParser`s require a `PackratReader` as input, which
+ * adds memoization to an underlying `Reader`. Programmers can create `PackratReader`
+ * objects either manually, as in `production(new PackratReader(new lexical.Scanner("input")))`,
+ * but the common way should be to rely on the combinator `phrase` to wrap a given
+ * input with a `PackratReader` if the input is not one itself.
*
* @see Bryan Ford: "Packrat Parsing: Simple, Powerful, Lazy, Linear Time." ICFP'02
* @see Alessandro Warth, James R. Douglass, Todd Millstein: "Packrat Parsers Can Support Left Recursion." PEPM'08
*
* @since 2.8
- * @author Manohar Jonnalagedda, Tiark Rompf
+ * @author Manohar Jonnalagedda
+ * @author Tiark Rompf
*/
trait PackratParsers extends Parsers {
@@ -59,7 +51,7 @@ trait PackratParsers extends Parsers {
//type Input = PackratReader[Elem]
/**
- * A specialized <code>Reader</code> class that wraps an underlying <code>Reader</code>
+ * A specialized `Reader` class that wraps an underlying `Reader`
* and provides memoization of parse results.
*/
class PackratReader[+T](underlying: Reader[T]) extends Reader[T] { outer =>
@@ -67,7 +59,6 @@ trait PackratParsers extends Parsers {
/*
* caching of intermediate parse results and information about recursion
*/
-
private[PackratParsers] val cache = mutable.HashMap.empty[(Parser[_], Position), MemoEntry[_]]
private[PackratParsers] def getFromCache[T](p: Parser[T]): Option[MemoEntry[T]] = {
@@ -102,13 +93,10 @@ trait PackratParsers extends Parsers {
/**
- * <p>
- * A parser generator delimiting whole phrases (i.e. programs).
- * </p>
- * <p>
- * Overridden to make sure any input passed to the argument parser
- * is wrapped in a <code>PackratReader</code>.
- * </p>
+ * A parser generator delimiting whole phrases (i.e. programs).
+ *
+ * Overridden to make sure any input passed to the argument parser
+ * is wrapped in a `PackratReader`.
*/
override def phrase[T](p: Parser[T]) = {
val q = super.phrase(p)
@@ -162,7 +150,6 @@ trait PackratParsers extends Parsers {
* In the former case, it makes sure that rules involved in the recursion are evaluated.
* It also prevents non-involved rules from getting evaluated further
*/
-
private def recall(p: super.Parser[_], in: PackratReader[Elem]): Option[MemoEntry[_]] = {
val cached = in.getFromCache(p)
val head = in.recursionHeads.get(in.pos)
@@ -237,7 +224,7 @@ to update each parser involved in the recursion.
/**
* Explicitly convert a given parser to a memoizing packrat parser.
- * In most cases, client code should avoid calling <code>memo</code> directly
+ * In most cases, client code should avoid calling `memo` directly
* and rely on implicit conversion instead.
*/
def memo[T](p: super.Parser[T]): PackratParser[T] = {
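
A compact sketch of the usage pattern described above, including a left-recursive production; the Arith grammar is invented for the example:

    import scala.util.parsing.combinator.PackratParsers
    import scala.util.parsing.combinator.syntactical.StandardTokenParsers

    object Arith extends StandardTokenParsers with PackratParsers {
      lexical.delimiters ++= List("+", "*", "(", ")")

      // left-recursive productions, declared as lazy vals of type PackratParser
      lazy val expr: PackratParser[Int] =
        expr ~ ("+" ~> term) ^^ { case a ~ b => a + b } | term
      lazy val term: PackratParser[Int] =
        term ~ ("*" ~> factor) ^^ { case a ~ b => a * b } | factor
      lazy val factor: PackratParser[Int] =
        numericLit ^^ (_.toInt) | "(" ~> expr <~ ")"

      // phrase wraps the scanner in a PackratReader when needed
      def parse(s: String) = phrase(expr)(new lexical.Scanner(s))
    }
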
diff --git a/src/library/scala/util/parsing/combinator/Parsers.scala b/src/library/scala/util/parsing/combinator/Parsers.scala
index b0760f42ae..e9155afc55 100644
--- a/src/library/scala/util/parsing/combinator/Parsers.scala
+++ b/src/library/scala/util/parsing/combinator/Parsers.scala
@@ -16,69 +16,61 @@ import annotation.migration
// TODO: better error handling (labelling like parsec's <?>)
-/** <p>
- * <code>Parsers</code> is a component that <i>provides</i> generic
- * parser combinators.
- * </p>
- * <p>
- * It <i>requires</i> the type of the elements these parsers should parse
- * (each parser is polymorphic in the type of result it produces).
- * </p>
- * <p>
- * There are two aspects to the result of a parser: (1) success or failure,
- * and (2) the result. A <code>Parser[T]</code> provides both kinds of
- * information.
- * </p>
- * <p>
- * The term ``parser combinator'' refers to the fact that these parsers
- * are constructed from primitive parsers and composition operators, such
- * as sequencing, alternation, optionality, repetition, lifting, and so on.
- * </p>
- * <p>
- * A ``primitive parser'' is a parser that accepts or rejects a single
- * piece of input, based on a certain criterion, such as whether the
- * input...
- * </p><ul>
- * <li> is equal to some given object, </li>
- * <li> satisfies a certain predicate, </li>
- * <li> is in the domain of a given partial function,.... </li>
- * </ul>
- * <p>
- * Even more primitive parsers always produce the same result, irrespective
- * of the input.
- * </p>
+/** `Parsers` is a component that ''provides'' generic parser combinators.
*
- * @author Martin Odersky, Iulian Dragos, Adriaan Moors
+ * It ''requires'' the type of the elements these parsers should parse
+ * (each parser is polymorphic in the type of result it produces).
+ *
+ * There are two aspects to the result of a parser:
+ * 1. success or failure
+ * 2. the result.
+ * A `Parser[T]` provides both kinds of information.
+ *
+ * The term ''parser combinator'' refers to the fact that these parsers
+ * are constructed from primitive parsers and composition operators, such
+ * as sequencing, alternation, optionality, repetition, lifting, and so on.
+ *
+ * A ''primitive parser'' is a parser that accepts or rejects a single
+ * piece of input, based on a certain criterion, such as whether the
+ * input...
+ * - is equal to some given object,
+ * - satisfies a certain predicate,
+ * - is in the domain of a given partial function, ...
+ *
+ * Even more primitive parsers always produce the same result, irrespective of the input.
+ *
+ * @author Martin Odersky
+ * @author Iulian Dragos
+ * @author Adriaan Moors
*/
trait Parsers {
/** the type of input elements the provided parsers consume (When consuming individual characters, a parser is typically
- * called a ``scanner'', which produces ``tokens'' that are consumed by what is normally called a ``parser''.
- * Nonetheless, the same principles apply, regardless of the input type.) */
+ * called a ''scanner'', which produces ''tokens'' that are consumed by what is normally called a ''parser''.
+ * Nonetheless, the same principles apply, regardless of the input type.) */
type Elem
/** The parser input is an abstract reader of input elements, i.e. the type of input the parsers in this component
- * expect. */
+ * expect. */
type Input = Reader[Elem]
/** A base class for parser results. A result is either successful or not (failure may be fatal, i.e., an Error, or
- * not, i.e., a Failure). On success, provides a result of type `T` which consists of some result (and the rest of
- * the input). */
+ * not, i.e., a Failure). On success, provides a result of type `T` which consists of some result (and the rest of
+ * the input). */
sealed abstract class ParseResult[+T] {
- /** Functional composition of ParseResults
+ /** Functional composition of ParseResults.
*
* @param `f` the function to be lifted over this result
* @return `f` applied to the result of this `ParseResult`, packaged up as a new `ParseResult`
*/
def map[U](f: T => U): ParseResult[U]
- /** Partial functional composition of ParseResults
+ /** Partial functional composition of ParseResults.
*
* @param `f` the partial function to be lifted over this result
* @param error a function that takes the same argument as `f` and produces an error message
* to explain why `f` wasn't applicable (it is called when this is the case)
- * @return <i>if `f` f is defined at the result in this `ParseResult`,</i>
- * `f` applied to the result of this `ParseResult`, packaged up as a new `ParseResult`.
- * If `f` is not defined, `Failure`.
+ * @return if `f` is defined at the result in this `ParseResult`, `f` applied to the result
+ * of this `ParseResult`, packaged up as a new `ParseResult`. If `f` is not defined, `Failure`.
*/
def mapPartial[U](f: PartialFunction[T, U], error: T => String): ParseResult[U]
@@ -88,7 +80,7 @@ trait Parsers {
def isEmpty = !successful
- /** Returns the embedded result */
+ /** Returns the embedded result. */
def get: T
def getOrElse[B >: T](default: => B): B =
@@ -117,7 +109,7 @@ trait Parsers {
def get: T = result
- /** The toString method of a Success */
+ /** The toString method of a Success. */
override def toString = "["+next.pos+"] parsed: "+result
val successful = true
@@ -125,8 +117,7 @@ trait Parsers {
var lastNoSuccess: NoSuccess = null
- /** A common super-class for unsuccessful parse results
- */
+ /** A common super-class for unsuccessful parse results. */
sealed abstract class NoSuccess(val msg: String, override val next: Input) extends ParseResult[Nothing] { // when we don't care about the difference between Failure and Error
val successful = false
if (!(lastNoSuccess != null && next.pos < lastNoSuccess.next.pos))
@@ -140,8 +131,7 @@ trait Parsers {
def get: Nothing = sys.error("No result when parsing failed")
}
- /** An extractor so NoSuccess(msg, next) can be used in matches.
- */
+ /** An extractor so NoSuccess(msg, next) can be used in matches. */
object NoSuccess {
def unapply[T](x: ParseResult[T]) = x match {
case Failure(msg, next) => Some(msg, next)
@@ -151,13 +141,13 @@ trait Parsers {
}
/** The failure case of ParseResult: contains an error-message and the remaining input.
- * Parsing will back-track when a failure occurs.
+ * Parsing will back-track when a failure occurs.
*
* @param msg An error message string describing the failure.
* @param next The parser's unconsumed input at the point where the failure occurred.
*/
case class Failure(override val msg: String, override val next: Input) extends NoSuccess(msg, next) {
- /** The toString method of a Failure yields an error message */
+ /** The toString method of a Failure yields an error message. */
override def toString = "["+next.pos+"] failure: "+msg+"\n\n"+next.pos.longString
def append[U >: Nothing](a: => ParseResult[U]): ParseResult[U] = { val alt = a; alt match {
@@ -167,13 +157,13 @@ trait Parsers {
}
/** The fatal failure case of ParseResult: contains an error-message and the remaining input.
- * No back-tracking is done when a parser returns an `Error`
+ * No back-tracking is done when a parser returns an `Error`
*
* @param msg An error message string describing the error.
* @param next The parser's unconsumed input at the point where the error occurred.
*/
case class Error(override val msg: String, override val next: Input) extends NoSuccess(msg, next) {
- /** The toString method of an Error yields an error message */
+ /** The toString method of an Error yields an error message. */
override def toString = "["+next.pos+"] error: "+msg+"\n\n"+next.pos.longString
def append[U >: Nothing](a: => ParseResult[U]): ParseResult[U] = this
}
@@ -186,15 +176,14 @@ trait Parsers {
= new Parser[T] with OnceParser[T] { def apply(in: Input) = f(in) }
/** The root class of parsers.
- * Parsers are functions from the Input type to ParseResult
+ * Parsers are functions from the Input type to ParseResult.
*/
abstract class Parser[+T] extends (Input => ParseResult[T]) {
private var name: String = ""
def named(n: String): this.type = {name=n; this}
override def toString() = "Parser ("+ name +")"
- /** An unspecified method that defines the behaviour of this parser.
- */
+ /** An unspecified method that defines the behaviour of this parser. */
def apply(in: Input): ParseResult[T]
def flatMap[U](f: T => Parser[U]): Parser[U]
@@ -246,7 +235,7 @@ trait Parsers {
* <p> `p <~ q` succeeds if `p` succeeds and `q` succeeds on the input
* left over by `p`.</p>
*
- * '''Note:''' <~ has lower operator precedence than ~ or ~>.
+ * @note <~ has lower operator precedence than ~ or ~>.
*
* @param q a parser that will be executed after `p` (this parser) succeeds -- evaluated at most once, and only when necessary
* @return a `Parser` that -- on success -- returns the result of `p`.
@@ -372,7 +361,7 @@ trait Parsers {
* Use this combinator when a parser depends on the result of a previous parser. `p` should be
* a function that takes the result from the first parser and returns the second parser.
*
- * `p into fq` (with `fq` typically `{x => q}') first applies `p`, and then, if `p` successfully
+ * `p into fq` (with `fq` typically `{x => q}`) first applies `p`, and then, if `p` successfully
* returned result `r`, applies `fq(r)` to the rest of the input.
*
* ''From: G. Hutton. Higher-order functions for parsing. J. Funct. Program., 2(3):323--343, 1992.''
@@ -384,20 +373,20 @@ trait Parsers {
// shortcuts for combinators:
- /** Returns into(fq) */
+ /** Returns `into(fq)`. */
def >>[U](fq: T => Parser[U])=into(fq)
- /** Returns a parser that repeatedly parses what this parser parses
+ /** Returns a parser that repeatedly parses what this parser parses.
*
- * @return rep(this)
+ * @return rep(this)
*/
def * = rep(this)
/** Returns a parser that repeatedly parses what this parser parses, interleaved with the `sep` parser.
- * The `sep` parser specifies how the results parsed by this parser should be combined.
+ * The `sep` parser specifies how the results parsed by this parser should be combined.
*
- * @return chainl1(this, sep)
+ * @return chainl1(this, sep)
*/
def *[U >: T](sep: => Parser[(U, U) => U]) = chainl1(this, sep)
@@ -405,13 +394,13 @@ trait Parsers {
/** Returns a parser that repeatedly (at least once) parses what this parser parses.
*
- * @return rep1(this)
+ * @return rep1(this)
*/
def + = rep1(this)
/** Returns a parser that optionally parses what this parser parses.
*
- * @return opt(this)
+ * @return opt(this)
*/
def ? = opt(this)
}
@@ -434,31 +423,31 @@ trait Parsers {
/** A parser matching input elements that satisfy a given predicate.
*
- * `elem(kind, p)` succeeds if the input starts with an element `e` for which p(e) is true.
+ * `elem(kind, p)` succeeds if the input starts with an element `e` for which `p(e)` is true.
*
- * @param kind The element kind, used for error messages
- * @param p A predicate that determines which elements match.
- * @return
+ * @param kind The element kind, used for error messages
+ * @param p A predicate that determines which elements match.
+ * @return        a `Parser` that succeeds if the predicate `p` holds for the first input element (and returns it).
*/
def elem(kind: String, p: Elem => Boolean) = acceptIf(p)(inEl => kind+" expected")
/** A parser that matches only the given element `e`.
*
- * `elem(e)` succeeds if the input starts with an element `e`.
+ * `elem(e)` succeeds if the input starts with an element `e`.
*
- * @param e the `Elem` that must be the next piece of input for the returned parser to succeed
- * @return a `Parser` that succeeds if `e` is the next available input (and returns it).
+ * @param e the `Elem` that must be the next piece of input for the returned parser to succeed
+ * @return a `Parser` that succeeds if `e` is the next available input (and returns it).
*/
def elem(e: Elem): Parser[Elem] = accept(e)
/** A parser that matches only the given element `e`.
*
- * The method is implicit so that elements can automatically be lifted to their parsers.
- * For example, when parsing `Token`s, `Identifier("new")` (which is a `Token`) can be used directly,
- * instead of first creating a `Parser` using `accept(Identifier("new"))`.
+ * The method is implicit so that elements can automatically be lifted to their parsers.
+ * For example, when parsing `Token`s, `Identifier("new")` (which is a `Token`) can be used directly,
+ * instead of first creating a `Parser` using `accept(Identifier("new"))`.
*
- * @param e the `Elem` that must be the next piece of input for the returned parser to succeed
- * @return a `tParser` that succeeds if `e` is the next available input.
+ * @param e the `Elem` that must be the next piece of input for the returned parser to succeed
+ * @return a `Parser` that succeeds if `e` is the next available input.
*/
implicit def accept(e: Elem): Parser[Elem] = acceptIf(_ == e)("`"+e+"' expected but " + _ + " found")
@@ -467,8 +456,8 @@ trait Parsers {
*
* `accept(es)` succeeds if the input subsequently provides the elements in the list `es`.
*
- * @param es the list of expected elements
- * @return a Parser that recognizes a specified list of elements
+ * @param es the list of expected elements
+ * @return a Parser that recognizes a specified list of elements
*/
def accept[ES <% List[Elem]](es: ES): Parser[List[Elem]] = acceptSeq(es)
@@ -480,10 +469,10 @@ trait Parsers {
* Example: The parser `accept("name", {case Identifier(n) => Name(n)})`
* accepts an `Identifier(n)` and returns a `Name(n)`
*
- * @param expected a description of the kind of element this parser expects (for error messages)
- * @param f a partial function that determines when this parser is successful and what its output is
- * @return A parser that succeeds if `f` is applicable to the first element of the input,
- * applying `f` to it to produce the result.
+ * @param expected a description of the kind of element this parser expects (for error messages)
+ * @param f a partial function that determines when this parser is successful and what its output is
+ * @return A parser that succeeds if `f` is applicable to the first element of the input,
+ * applying `f` to it to produce the result.
*/
def accept[U](expected: String, f: PartialFunction[Elem, U]): Parser[U] = acceptMatch(expected, f)
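
An illustrative sketch (not from the patched sources; `CharParsers` is a made-up name) of the primitives documented above, instantiated for `Elem = Char` and combined with `rep1`, `~`, `~>` and `^^`:

{{{
import scala.util.parsing.combinator.Parsers
import scala.util.parsing.input.CharSequenceReader

object CharParsers extends Parsers {
  type Elem = Char   // these parsers consume plain characters

  val digit: Parser[Char] = elem("digit", _.isDigit)
  val number: Parser[Int] = rep1(digit) ^^ (_.mkString.toInt)
  val sum: Parser[Int]    = number ~ (elem('+') ~> number) ^^ { case a ~ b => a + b }

  def main(args: Array[String]) {
    // A Parser is a function from Input (= Reader[Elem]) to ParseResult.
    println(sum(new CharSequenceReader("12+34")))   // [1.6] parsed: 46
  }
}
}}}
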
diff --git a/src/library/scala/util/parsing/combinator/lexical/Lexical.scala b/src/library/scala/util/parsing/combinator/lexical/Lexical.scala
index 917a4d3a44..9979a420d6 100644
--- a/src/library/scala/util/parsing/combinator/lexical/Lexical.scala
+++ b/src/library/scala/util/parsing/combinator/lexical/Lexical.scala
@@ -15,28 +15,25 @@ package lexical
import token._
import input.CharArrayReader.EofCh
-/** <p>
- * This component complements the <code>Scanners</code> component with
- * common operations for lexical parsers.
- * </p>
- * <p>
- * {@see StdLexical} for a concrete implementation for a simple, Scala-like
- * language.
- * </p>
+/** This component complements the `Scanners` component with
+ * common operations for lexical parsers.
+ *
+ * Refer to [[scala.util.parsing.combinator.lexical.StdLexical]]
+ * for a concrete implementation for a simple, Scala-like language.
*
* @author Martin Odersky, Adriaan Moors
*/
abstract class Lexical extends Scanners with Tokens {
- /** A character-parser that matches a letter (and returns it)*/
+ /** A character-parser that matches a letter (and returns it).*/
def letter = elem("letter", _.isLetter)
- /** A character-parser that matches a digit (and returns it)*/
+ /** A character-parser that matches a digit (and returns it).*/
def digit = elem("digit", _.isDigit)
- /** A character-parser that matches any character except the ones given in `cs` (and returns it)*/
+ /** A character-parser that matches any character except the ones given in `cs` (and returns it).*/
def chrExcept(cs: Char*) = elem("", ch => (cs forall (ch !=)))
- /** A character-parser that matches a white-space character (and returns it)*/
+ /** A character-parser that matches a white-space character (and returns it).*/
def whitespaceChar = elem("space char", ch => ch <= ' ' && ch != EofCh)
}
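
A hypothetical sketch (not from the patched sources; `WordLexer` and `Word` are made-up names) of the smallest concrete `Lexical`: only `token` and `whitespace` have to be supplied, and both can be built from the `letter` and `whitespaceChar` parsers above.

{{{
import scala.util.parsing.combinator.lexical.Lexical

object WordLexer extends Lexical {
  // One token kind: a word made of letters.
  case class Word(chars: String) extends Token

  def token: Parser[Token]    = rep1(letter) ^^ (cs => Word(cs.mkString))
  def whitespace: Parser[Any] = rep(whitespaceChar)
}

// new WordLexer.Scanner("hello world") yields Word(hello) and Word(world).
}}}
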
diff --git a/src/library/scala/util/parsing/combinator/lexical/Scanners.scala b/src/library/scala/util/parsing/combinator/lexical/Scanners.scala
index 80331e4e7c..d961f98f59 100644
--- a/src/library/scala/util/parsing/combinator/lexical/Scanners.scala
+++ b/src/library/scala/util/parsing/combinator/lexical/Scanners.scala
@@ -15,13 +15,10 @@ package lexical
import token._
import input._
-/** <p>
- * This component provides core functionality for lexical parsers.
- * </p>
- * <p>
- * See its subclasses {@see Lexical} and -- most interestingly
- * {@see StdLexical}, for more functionality.
- * </p>
+/** This component provides core functionality for lexical parsers.
+ *
+ * See its subclasses [[scala.util.parsing.combinator.lexical.Lexical]] and -- most interestingly
+ * [[scala.util.parsing.combinator.lexical.StdLexical]], for more functionality.
*
* @author Martin Odersky, Adriaan Moors
*/
@@ -29,23 +26,20 @@ trait Scanners extends Parsers {
type Elem = Char
type Token
- /** This token is produced by a scanner {@see Scanner} when scanning failed. */
+ /** This token is produced by a scanner (see `Scanner`) when scanning fails. */
def errorToken(msg: String): Token
- /** a parser that produces a token (from a stream of characters) */
+ /** A parser that produces a token (from a stream of characters). */
def token: Parser[Token]
- /** a parser for white-space -- its result will be discarded */
+ /** A parser for white-space -- its result will be discarded. */
def whitespace: Parser[Any]
- /** <p>
- * <code>Scanner</code> is essentially(*) a parser that produces `Token`s
- * from a stream of characters. The tokens it produces are typically
- * passed to parsers in <code>TokenParsers</code>.
- * </p>
- * <p>
- * Note: (*) <code>Scanner</code> is really a `Reader` of `Token`s
- * </p>
+ /** `Scanner` is essentially¹ a parser that produces `Token`s
+ * from a stream of characters. The tokens it produces are typically
+ * passed to parsers in `TokenParsers`.
+ *
+ * @note ¹ `Scanner` is really a `Reader` of `Token`s
*/
class Scanner(in: Reader[Char]) extends Reader[Token] {
/** Convenience constructor (makes a character reader out of the given string) */
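
A sketch (not from the patched sources; `ScannerDemo` is a made-up name) of the note above: a `Scanner` really is a `Reader` of `Token`s and can be walked with `first`/`rest`/`atEnd`. It uses `StdLexical`, the concrete lexer linked above.

{{{
import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.parsing.input.Reader

object ScannerDemo {
  def main(args: Array[String]) {
    val lexical = new StdLexical
    lexical.delimiters += "="          // "=" must be registered to be tokenized

    var in: Reader[lexical.Token] = new lexical.Scanner("x = 42")
    while (!in.atEnd) {
      println(in.first)                // identifier x, then `=', then 42
      in = in.rest
    }
  }
}
}}}
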
diff --git a/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala b/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala
index 550589ea66..4571416e4e 100644
--- a/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala
+++ b/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala
@@ -13,13 +13,14 @@ package syntactical
/** This is the core component for token-based parsers.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
trait TokenParsers extends Parsers {
- /** Tokens is the abstract type of the `Token`s consumed by the parsers in this component. */
+ /** `Tokens` is the abstract type of the `Token`s consumed by the parsers in this component. */
type Tokens <: token.Tokens
- /** lexical is the component responsible for consuming some basic kind of
+ /** `lexical` is the component responsible for consuming some basic kind of
* input (usually character-based) and turning it into the tokens
* understood by these parsers.
*/
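
A sketch (not from the patched sources; `Assignments` is a made-up name) of the two cooperating components: `StandardTokenParsers` plugs a `StdLexical` instance into the `lexical` member, so the parsers below consume `Token`s rather than raw characters.

{{{
import scala.util.parsing.combinator.syntactical.StandardTokenParsers

object Assignments extends StandardTokenParsers {
  lexical.delimiters += "="

  // ident and numericLit are token-level parsers provided by StdTokenParsers.
  def assignment = ident ~ ("=" ~> numericLit) ^^ { case n ~ v => (n, v.toInt) }

  def main(args: Array[String]) {
    val tokens = new lexical.Scanner("answer = 42")
    println(phrase(assignment)(tokens))   // prints the parsed (name, value) pair
  }
}
}}}
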
diff --git a/src/library/scala/util/parsing/combinator/testing/Tester.scala b/src/library/scala/util/parsing/combinator/testing/Tester.scala
index 490442482a..ca1df2f6d4 100644
--- a/src/library/scala/util/parsing/combinator/testing/Tester.scala
+++ b/src/library/scala/util/parsing/combinator/testing/Tester.scala
@@ -13,22 +13,23 @@ import scala.util.parsing.combinator._
import scala.util.parsing.combinator.lexical.Lexical
import scala.util.parsing.combinator.syntactical.TokenParsers
-/** <p>
- * Facilitates testing a given parser on various input strings.
- * </p>
- * <p>
- * Example use:
- * </p><pre>
- * <b>val</b> syntactic = <b>new</b> MyParsers</pre>
- * <p>
- * and
- * </p><pre>
- * <b>val</b> parser = syntactic.term</pre>
- * <p>
- * (if MyParsers extends TokenParsers with a parser called `term`)
- * </p>
+/** Facilitates testing a given parser on various input strings.
*
- * @author Martin Odersky, Adriaan Moors
+ * Example use:
+ *
+ * {{{
+ * val syntactic = new MyParsers
+ * }}}
+ * and
+ * {{{
+ * val parser = syntactic.term
+ * }}}
+ *
+ * (If `MyParsers` extends [[scala.util.parsing.combinator.syntactical.TokenParsers]]
+ * with a parser called `term`.)
+ *
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
abstract class Tester {
@@ -37,7 +38,7 @@ abstract class Tester {
/** Scans a String (using a `syntactic.lexical.Scanner`), parses it
- * using <code>phrase(parser)</code>, and prints the input and the
+ * using `phrase(parser)`, and prints the input and the
* parsed result to the console.
*/
def test(in: String) {
diff --git a/src/library/scala/util/parsing/combinator/token/StdTokens.scala b/src/library/scala/util/parsing/combinator/token/StdTokens.scala
index 4ce62659f6..ce04da1fa4 100644
--- a/src/library/scala/util/parsing/combinator/token/StdTokens.scala
+++ b/src/library/scala/util/parsing/combinator/token/StdTokens.scala
@@ -12,7 +12,8 @@ package token
/** This component provides the standard `Token`s for a simple, Scala-like language.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
trait StdTokens extends Tokens {
/** The class of keyword tokens */
diff --git a/src/library/scala/util/parsing/combinator/token/Tokens.scala b/src/library/scala/util/parsing/combinator/token/Tokens.scala
index ad05427fa1..6b515fe6d9 100644
--- a/src/library/scala/util/parsing/combinator/token/Tokens.scala
+++ b/src/library/scala/util/parsing/combinator/token/Tokens.scala
@@ -13,11 +13,13 @@ package token
/** This component provides the notion of `Token`, the unit of information that is passed from lexical
* parsers in the `Lexical` component to the parsers in the `TokenParsers` component.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
trait Tokens {
- /** Objects of this type are produced by a lexical parser or ``scanner'', and consumed by a parser
- * {@see scala.util.parsing.combinator.syntactical.TokenParsers}.
+ /** Objects of this type are produced by a lexical parser or ``scanner'', and consumed by a parser.
+ *
+ * @see [[scala.util.parsing.combinator.syntactical.TokenParsers]]
*/
abstract class Token {
def chars: String
@@ -35,6 +37,6 @@ trait Tokens {
def chars = "<eof>"
}
- /** This token is produced by a scanner {@see Scanner} when scanning failed. */
+ /** This token is produced by a scanner (see `Scanner`) when scanning fails. */
def errorToken(msg: String): Token = new ErrorToken(msg)
}
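
A sketch (not from the patched sources; `ArithTokens` and `NumberTok` are made-up names): a user-defined `Tokens` component only needs concrete `Token` subclasses that provide `chars`.

{{{
import scala.util.parsing.combinator.token.Tokens

trait ArithTokens extends Tokens {
  // A concrete token kind; chars is the raw text the scanner consumed.
  case class NumberTok(chars: String) extends Token {
    override def toString = "number " + chars
  }
}
}}}
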
diff --git a/src/library/scala/util/parsing/input/CharArrayReader.scala b/src/library/scala/util/parsing/input/CharArrayReader.scala
index 67629eacd4..e798c9883a 100644
--- a/src/library/scala/util/parsing/input/CharArrayReader.scala
+++ b/src/library/scala/util/parsing/input/CharArrayReader.scala
@@ -9,9 +9,10 @@
package scala.util.parsing.input
-/** An object encapsulating basic character constants
+/** An object encapsulating basic character constants.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
object CharArrayReader {
final val EofCh = '\032'
@@ -25,10 +26,10 @@ object CharArrayReader {
* @param line the line number of the first element (counting from index `0` of `source`)
* @param column the column number of the first element (counting from index `0` of `source`)
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
-class CharArrayReader(chars: Array[Char], index: Int)
-extends CharSequenceReader(chars, index) {
+class CharArrayReader(chars: Array[Char], index: Int) extends CharSequenceReader(chars, index) {
def this(chars: Array[Char]) = this(chars, 0)
diff --git a/src/library/scala/util/parsing/input/CharSequenceReader.scala b/src/library/scala/util/parsing/input/CharSequenceReader.scala
index 8c3e1b24a8..ec1dce7be9 100644
--- a/src/library/scala/util/parsing/input/CharSequenceReader.scala
+++ b/src/library/scala/util/parsing/input/CharSequenceReader.scala
@@ -9,7 +9,7 @@
package scala.util.parsing.input
-/** An object encapsulating basic character constants
+/** An object encapsulating basic character constants.
*
* @author Martin Odersky, Adriaan Moors
*/
@@ -29,27 +29,27 @@ class CharSequenceReader(override val source: java.lang.CharSequence,
override val offset: Int) extends Reader[Char] {
import CharSequenceReader._
- /** Construct a <code>CharSequenceReader</code> with its first element at
- * <code>source(0)</code> and position <code>(1,1)</code>.
+ /** Construct a `CharSequenceReader` with its first element at
+ * `source(0)` and position `(1,1)`.
*/
def this(source: java.lang.CharSequence) = this(source, 0)
- /** Returns the first element of the reader, or EofCh if reader is at its end
+ /** Returns the first element of the reader, or EofCh if reader is at its end.
*/
def first =
if (offset < source.length) source.charAt(offset) else EofCh
- /** Returns a CharSequenceReader consisting of all elements except the first
+ /** Returns a CharSequenceReader consisting of all elements except the first.
*
- * @return If <code>atEnd</code> is <code>true</code>, the result will be
- * <code>this'; otherwise, it's a <code>CharSequenceReader</code> containing
+ * @return If `atEnd` is `true`, the result will be
+ * `this`; otherwise, it's a `CharSequenceReader` containing
* the rest of input.
*/
def rest: CharSequenceReader =
if (offset < source.length) new CharSequenceReader(source, offset + 1)
else this
- /** The position of the first element in the reader
+ /** The position of the first element in the reader.
*/
def pos: Position = new OffsetPosition(source, offset)
@@ -59,7 +59,7 @@ class CharSequenceReader(override val source: java.lang.CharSequence,
def atEnd = offset >= source.length
/** Returns an abstract reader consisting of all elements except the first
- * <code>n</code> elements.
+ * `n` elements.
*/
override def drop(n: Int): CharSequenceReader =
new CharSequenceReader(source, offset + n)
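
A short sketch (not from the patched sources; `CharSequenceReaderDemo` is a made-up name) of the operations documented above:

{{{
import scala.util.parsing.input.CharSequenceReader

object CharSequenceReaderDemo {
  def main(args: Array[String]) {
    val r = new CharSequenceReader("abc")
    println(r.first)          // a
    println(r.rest.first)     // b
    println(r.drop(2).pos)    // 1.3  (an OffsetPosition)
    println(r.drop(3).atEnd)  // true
  }
}
}}}
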
diff --git a/src/library/scala/util/parsing/input/NoPosition.scala b/src/library/scala/util/parsing/input/NoPosition.scala
index 174850165d..b34ce957d8 100644
--- a/src/library/scala/util/parsing/input/NoPosition.scala
+++ b/src/library/scala/util/parsing/input/NoPosition.scala
@@ -10,9 +10,10 @@
package scala.util.parsing.input
-/** Undefined position
+/** Undefined position.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
object NoPosition extends Position {
def line = 0
diff --git a/src/library/scala/util/parsing/input/OffsetPosition.scala b/src/library/scala/util/parsing/input/OffsetPosition.scala
index 1df1fa050e..c2483c44e3 100644
--- a/src/library/scala/util/parsing/input/OffsetPosition.scala
+++ b/src/library/scala/util/parsing/input/OffsetPosition.scala
@@ -10,17 +10,17 @@ package scala.util.parsing.input
import collection.mutable.ArrayBuffer
-/** <p>
- * <code>OffsetPosition</code> is a standard class for positions
- * represented as offsets into a source ``document''.
- * @param source The source document
- * @param offset The offset indicating the position
+/** `OffsetPosition` is a standard class for positions
+ * represented as offsets into a source ``document''.
+ *
+ * @param source The source document
+ * @param offset The offset indicating the position
*
* @author Martin Odersky
*/
case class OffsetPosition(source: java.lang.CharSequence, offset: Int) extends Position {
- /** An index that contains all line starts, including first line, and eof */
+ /** An index that contains all line starts, including first line, and eof. */
private lazy val index: Array[Int] = {
var lineStarts = new ArrayBuffer[Int]
lineStarts += 0
@@ -30,7 +30,7 @@ case class OffsetPosition(source: java.lang.CharSequence, offset: Int) extends P
lineStarts.toArray
}
- /** The line number referred to by the position; line numbers start at 1 */
+ /** The line number referred to by the position; line numbers start at 1. */
def line: Int = {
var lo = 0
var hi = index.length - 1
@@ -42,7 +42,7 @@ case class OffsetPosition(source: java.lang.CharSequence, offset: Int) extends P
lo + 1
}
- /** The column number referred to by the position; column numbers start at 1 */
+ /** The column number referred to by the position; column numbers start at 1. */
def column: Int = offset - index(line - 1) + 1
/** The contents of the line numbered `lnum` (must not contain a new-line character).
@@ -53,7 +53,7 @@ case class OffsetPosition(source: java.lang.CharSequence, offset: Int) extends P
def lineContents: String =
source.subSequence(index(line - 1), index(line)).toString
- /** Returns a string representation of the `Position`, of the form `line.column` */
+ /** Returns a string representation of the `Position`, of the form `line.column`. */
override def toString = line+"."+column
/** Compare this position to another, by first comparing their line numbers,
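
A sketch (not from the patched sources; `OffsetPositionDemo` is a made-up name) of how `line`, `column` and `longString` behave for a position in a two-line document:

{{{
import scala.util.parsing.input.OffsetPosition

object OffsetPositionDemo {
  def main(args: Array[String]) {
    val doc = "first line\nsecond line\n"
    val pos = OffsetPosition(doc, 18)   // offset 18 is the 'l' of "line" on line 2
    println(pos.line)                   // 2
    println(pos.column)                 // 8
    println(pos.longString)             // the second line, then a caret under column 8
  }
}
}}}
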
diff --git a/src/library/scala/util/parsing/input/PagedSeqReader.scala b/src/library/scala/util/parsing/input/PagedSeqReader.scala
index 725287a9b8..8b69aa074b 100644
--- a/src/library/scala/util/parsing/input/PagedSeqReader.scala
+++ b/src/library/scala/util/parsing/input/PagedSeqReader.scala
@@ -11,9 +11,10 @@ package scala.util.parsing.input
import scala.collection.immutable.PagedSeq
-/** An object encapsulating basic character constants
+/** An object encapsulating basic character constants.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
object PagedSeqReader {
final val EofCh = '\032'
@@ -33,8 +34,8 @@ class PagedSeqReader(seq: PagedSeq[Char],
override lazy val source: java.lang.CharSequence = seq
- /** Construct a <code>PagedSeqReader</code> with its first element at
- * <code>source(0)</code> and position <code>(1,1)</code>.
+ /** Construct a `PagedSeqReader` with its first element at
+ * `source(0)` and position `(1,1)`.
*/
def this(seq: PagedSeq[Char]) = this(seq, 0)
@@ -45,25 +46,24 @@ class PagedSeqReader(seq: PagedSeq[Char],
/** Returns a PagedSeqReader consisting of all elements except the first
*
- * @return If <code>atEnd</code> is <code>true</code>, the result will be
- * <code>this'; otherwise, it's a <code>PagedSeqReader</code> containing
- * the rest of input.
+ * @return If `atEnd` is `true`, the result will be `this`;
+ * otherwise, it's a `PagedSeqReader` containing the rest of input.
*/
def rest: PagedSeqReader =
if (seq.isDefinedAt(offset)) new PagedSeqReader(seq, offset + 1)
else this
- /** The position of the first element in the reader
+ /** The position of the first element in the reader.
*/
def pos: Position = new OffsetPosition(source, offset)
/** true iff there are no more elements in this reader (except for trailing
- * EofCh's)
+ * EofCh's).
*/
def atEnd = !seq.isDefinedAt(offset)
/** Returns an abstract reader consisting of all elements except the first
- * <code>n</code> elements.
+ * `n` elements.
*/
override def drop(n: Int): PagedSeqReader =
new PagedSeqReader(seq, offset + n)
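
A sketch (not from the patched sources; `PagedSeqReaderDemo` is a made-up name): a `PagedSeqReader` reads from a lazily filled `PagedSeq`, here one built from an iterator of strings, so the whole input need not be materialized up front.

{{{
import scala.collection.immutable.PagedSeq
import scala.util.parsing.input.PagedSeqReader

object PagedSeqReaderDemo {
  def main(args: Array[String]) {
    val seq    = PagedSeq.fromStrings(List("ab", "cd").iterator)
    val reader = new PagedSeqReader(seq)
    println(reader.first)            // a
    println(reader.drop(3).first)    // d
    println(reader.drop(4).atEnd)    // true
  }
}
}}}
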
diff --git a/src/library/scala/util/parsing/input/Position.scala b/src/library/scala/util/parsing/input/Position.scala
index b25f03667d..be817013a0 100644
--- a/src/library/scala/util/parsing/input/Position.scala
+++ b/src/library/scala/util/parsing/input/Position.scala
@@ -8,29 +8,23 @@
package scala.util.parsing.input
-/** <p>
- * <code>Position</code> is the base trait for objects describing a
- * position in a ``document''.
- * </p>
- * <p>
- * It provides functionality for:
- * </p><ul>
- * <li> generating a visual representation of this position (`longString`);
- * <li> comparing two positions (`<`).
- * </ul>
- * <p>
- * To use this class for a concrete kind of ``document'', implement the
- * <code>lineContents</code> method.
- * </p>
+/** `Position` is the base trait for objects describing a position in a ``document''.
*
- * @author Martin Odersky, Adriaan Moors
+ * It provides functionality for:
+ * - generating a visual representation of this position (`longString`);
+ * - comparing two positions (`<`).
+ *
+ * To use this class for a concrete kind of ``document'', implement the `lineContents` method.
+ *
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
trait Position {
- /** The line number referred to by the position; line numbers start at 1 */
+ /** The line number referred to by the position; line numbers start at 1. */
def line: Int
- /** The column number referred to by the position; column numbers start at 1 */
+ /** The column number referred to by the position; column numbers start at 1. */
def column: Int
/** The contents of the line numbered `lnum` (must not contain a new-line character).
@@ -40,18 +34,19 @@ trait Position {
*/
protected def lineContents: String
- /** Returns a string representation of the `Position`, of the form `line.column` */
+ /** Returns a string representation of the `Position`, of the form `line.column`. */
override def toString = ""+line+"."+column
/** Returns a more ``visual'' representation of this position.
- * More precisely, the resulting string consists of two lines: <ol>
- * <li> the line in the document referred to by this position </li>
- * <li>a caret indicating the column</li></ol>
+ * More precisely, the resulting string consists of two lines:
+ * 1. the line in the document referred to by this position
+ * 2. a caret indicating the column
*
* Example:
- *
- *<pre> List(this, is, a, line, from, the, document)
- * ^</pre>
+ * {{{
+ * List(this, is, a, line, from, the, document)
+ * ^
+ * }}}
*/
def longString = lineContents+"\n"+lineContents.take(column-1).map{x => if (x == '\t') x else ' ' } + "^"
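
A hypothetical sketch (not from the patched sources; `ArrayPosition` is a made-up name): to use `Position` for a concrete kind of document, only `lineContents` has to be implemented; `toString`, `longString` and `<` are inherited.

{{{
import scala.util.parsing.input.Position

class ArrayPosition(doc: Array[String], val line: Int, val column: Int) extends Position {
  // The document is an in-memory array of lines, numbered from 1.
  protected def lineContents = doc(line - 1)
}

// new ArrayPosition(Array("List(this, is, a, line)"), 1, 6).longString
// yields the line plus a caret under column 6.
}}}
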
diff --git a/src/library/scala/util/parsing/input/Reader.scala b/src/library/scala/util/parsing/input/Reader.scala
index d0fddd2c6d..c44ed5ed00 100644
--- a/src/library/scala/util/parsing/input/Reader.scala
+++ b/src/library/scala/util/parsing/input/Reader.scala
@@ -13,12 +13,15 @@ package scala.util.parsing.input
/** An interface for streams of values that have positions.
*
- * @author Martin Odersky, Adriaan Moors
+ * @author Martin Odersky
+ * @author Adriaan Moors
*/
abstract class Reader[+T] {
- /** If this is a reader over character sequences, the underlying char sequence
- * If not, throws a <code>NoSuchMethodError</code> exception.
+ /** If this is a reader over character sequences, the underlying char sequence.
+ * If not, throws a `NoSuchMethodError` exception.
+ *
+ * @throws [[java.lang.NoSuchMethodError]] if this is not a char sequence reader.
*/
def source: java.lang.CharSequence =
throw new NoSuchMethodError("not a char sequence reader")
@@ -32,14 +35,12 @@ abstract class Reader[+T] {
/** Returns an abstract reader consisting of all elements except the first
*
- * @return If <code>atEnd</code> is <code>true</code>, the result will be
- * <code>this'; otherwise, it's a <code>Reader</code> containing
- * more elements.
+ * @return If `atEnd` is `true`, the result will be `this`;
+ * otherwise, it's a `Reader` containing more elements.
*/
def rest: Reader[T]
- /** Returns an abstract reader consisting of all elements except the first
- * <code>n</code> elements.
+ /** Returns an abstract reader consisting of all elements except the first `n` elements.
*/
def drop(n: Int): Reader[T] = {
var r: Reader[T] = this
@@ -50,11 +51,11 @@ abstract class Reader[+T] {
r
}
- /** The position of the first element in the reader
+ /** The position of the first element in the reader.
*/
def pos: Position
- /** true iff there are no more elements in this reader
+ /** `true` iff there are no more elements in this reader.
*/
def atEnd: Boolean
}
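
A hypothetical sketch (not from the patched sources; `ListReader` is a made-up name) of a user-defined `Reader`: only `first`, `rest`, `pos` and `atEnd` are abstract, while `drop` is inherited.

{{{
import scala.util.parsing.input.{NoPosition, Position, Reader}

class ListReader[T](elems: List[T]) extends Reader[T] {
  def first = elems.head                        // undefined when atEnd
  def rest  = if (atEnd) this else new ListReader(elems.tail)
  def pos: Position = NoPosition                // list input carries no positions
  def atEnd = elems.isEmpty
}
}}}
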
diff --git a/src/library/scala/util/parsing/syntax/package.scala b/src/library/scala/util/parsing/syntax/package.scala
index 547136c21d..59053e2f54 100644
--- a/src/library/scala/util/parsing/syntax/package.scala
+++ b/src/library/scala/util/parsing/syntax/package.scala
@@ -10,9 +10,8 @@ package scala.util.parsing
import scala.util.parsing.combinator.token
-/** If deprecating the whole package worked, that's what would best
- * be done, but it doesn't (yet) so it isn't.
- */
+// If deprecating the whole package worked, that's what would best
+// be done, but it doesn't (yet) so it isn't.
package object syntax {
@deprecated("Moved to scala.util.parsing.combinator.token", "2.8.0")
type Tokens = token.Tokens
diff --git a/src/library/scala/util/regexp/Base.scala b/src/library/scala/util/regexp/Base.scala
index 1404e9fa1b..8e23d46eb9 100644
--- a/src/library/scala/util/regexp/Base.scala
+++ b/src/library/scala/util/regexp/Base.scala
@@ -24,7 +24,7 @@ abstract class Base
}
object Alt {
- /** Alt( R,R,R* ) */
+ /** `Alt( R,R,R* )`. */
def apply(rs: _regexpT*) =
if (rs.size < 2) throw new SyntaxError("need at least 2 branches in Alt")
else new Alt(rs: _*)
@@ -57,7 +57,7 @@ abstract class Base
override def toString() = "Eps"
}
- /** this class can be used to add meta information to regexps */
+ /** This class can be used to add meta information to regexps. */
class Meta(r1: _regexpT) extends RegExp {
final val isNullable = r1.isNullable
def r = r1
diff --git a/src/library/scala/util/regexp/PointedHedgeExp.scala b/src/library/scala/util/regexp/PointedHedgeExp.scala
index ef68e600c8..23aa46448c 100644
--- a/src/library/scala/util/regexp/PointedHedgeExp.scala
+++ b/src/library/scala/util/regexp/PointedHedgeExp.scala
@@ -10,8 +10,7 @@
package scala.util.regexp
-/** pointed regular hedge expressions, a useful subclass of
- * regular hedge expressions.
+/** Pointed regular hedge expressions, a useful subclass of regular hedge expressions.
*
* @author Burak Emir
* @version 1.0
diff --git a/src/library/scala/util/regexp/WordExp.scala b/src/library/scala/util/regexp/WordExp.scala
index 1168b1e423..6a83794ef4 100644
--- a/src/library/scala/util/regexp/WordExp.scala
+++ b/src/library/scala/util/regexp/WordExp.scala
@@ -10,12 +10,14 @@
package scala.util.regexp
-/** <p>
- * The class <code>WordExp</code> provides regular word expressions.
- * Users have to instantiate type member <code>_regexpT &lt;: RegExp</code>
- * (from class <code>Base</code>) and a type member
- * <code>_labelT &lt;: Label</code>. Here is a short example:
- * </p><pre>
+/**
+ * The class `WordExp` provides regular word expressions.
+ *
+ * Users have to instantiate type member `_regexpT <: RegExp` (from class `Base`)
+ * and a type member `_labelT <: Label`.
+ *
+ * Here is a short example:
+ * <pre>
* <b>import</b> scala.util.regexp._
* <b>import</b> scala.util.automata._
* <b>object</b> MyLang <b>extends</b> WordExp {