diff options
author | Adriaan Moors <adriaan.moors@epfl.ch> | 2008-02-07 12:39:51 +0000 |
---|---|---|
committer | Adriaan Moors <adriaan.moors@epfl.ch> | 2008-02-07 12:39:51 +0000 |
commit | 0eae9599357eb76fb38d2a95d1d4d6f089b814b3 (patch) | |
tree | 0cb296642e3d3f8b4d9d63bfab76d374750d3c13 | |
parent | f3e42a50ab77e51040b3698c710b1ca86bfc0611 (diff) | |
download | scala-0eae9599357eb76fb38d2a95d1d4d6f089b814b3.tar.gz scala-0eae9599357eb76fb38d2a95d1d4d6f089b814b3.tar.bz2 scala-0eae9599357eb76fb38d2a95d1d4d6f089b814b3.zip |
updated examples and json parser to use combinator1-style parsing
updated examples and json parser to use combinator1-style parsing,
have not yet renamed combinator -> combinatorold, and combinator1 ->
combinator --> doing that using SVN rename, so history is preserved
(thus, the build for this revision will break, but the next one should
be okay. sorry)
25 files changed, 77 insertions, 66 deletions
diff --git a/docs/examples/parsing/ArithmeticParser.scala b/docs/examples/parsing/ArithmeticParser.scala index 2aac4b6f6c..57ead13fef 100644 --- a/docs/examples/parsing/ArithmeticParser.scala +++ b/docs/examples/parsing/ArithmeticParser.scala @@ -21,9 +21,9 @@ object arithmeticParser extends StdTokenParsers { type Tokens = StdLexical ; val lexical = new StdLexical lexical.delimiters ++= List("(", ")", "+", "-", "*", "/") - lazy val expr = term*("+" ^^ {(x: int, y: int) => x + y} | "-" ^^ {(x: int, y: int) => x - y}) - lazy val term = factor*("*" ^^ {(x: int, y: int) => x * y} | "/" ^^ {(x: int, y: int) => x / y}) - lazy val factor: Parser[int] = "(" ~ expr ~ ")" | numericLit ^^ (_.toInt) + lazy val expr = term*("+" ^^^ {(x: int, y: int) => x + y} | "-" ^^^ {(x: int, y: int) => x - y}) + lazy val term = factor*("*" ^^^ {(x: int, y: int) => x * y} | "/" ^^^ {(x: int, y: int) => x / y}) + lazy val factor: Parser[int] = "(" ~> expr <~ ")" | numericLit ^^ (_.toInt) def main(args: Array[String]) { println( @@ -41,9 +41,9 @@ object arithmeticParserDesugared extends StdTokenParsers { type Tokens = StdLexical ; val lexical = new StdLexical lexical.delimiters ++= List("(", ")", "+", "-", "*", "/") - lazy val expr = chainl1(term, (keyword("+").^^{(x: int, y: int) => x + y}).|(keyword("-").^^{(x: int, y: int) => x - y})) - lazy val term = chainl1(factor, (keyword("*").^^{(x: int, y: int) => x * y}).|(keyword("/").^^{(x: int, y: int) => x / y})) - lazy val factor: Parser[int] = keyword("(").~(expr.~(keyword(")"))).|(numericLit.^^(x => x.toInt)) + lazy val expr = chainl1(term, (keyword("+").^^^{(x: int, y: int) => x + y}).|(keyword("-").^^^{(x: int, y: int) => x - y})) + lazy val term = chainl1(factor, (keyword("*").^^^{(x: int, y: int) => x * y}).|(keyword("/").^^^{(x: int, y: int) => x / y})) + lazy val factor: Parser[int] = keyword("(").~>(expr.<~(keyword(")"))).|(numericLit.^^(x => x.toInt)) def main(args: Array[String]) { println( diff --git 
a/docs/examples/parsing/ListParser.scala b/docs/examples/parsing/ListParser.scala index 65354482f3..12805e5e50 100644 --- a/docs/examples/parsing/ListParser.scala +++ b/docs/examples/parsing/ListParser.scala @@ -16,8 +16,8 @@ object listParser { type Elem = Char lazy val ident = rep1(elem("letter", isLetter), elem("letter or digit", isLetterOrDigit)) ^^ {cs => Id(mkString(cs))} - lazy val number = chainl1(elem("digit", isDigit) ^^ (_ - '0'), success ^^ {(accum: Int, d: Int) => accum * 10 + d}) ^^ Num - lazy val list = '(' ~ repsep(expr, ',') ~ ')' ^^ Lst + lazy val number = chainl1(elem("digit", isDigit) ^^ (_ - '0'), success{(accum: Int, d: Int) => accum * 10 + d}) ^^ Num + lazy val list = '(' ~> repsep(expr, ',') <~ ')' ^^ Lst lazy val expr: Parser[Tree] = list | ident | number } diff --git a/docs/examples/parsing/lambda/Main.scala b/docs/examples/parsing/lambda/Main.scala index c0453e75a8..81a175de77 100755 --- a/docs/examples/parsing/lambda/Main.scala +++ b/docs/examples/parsing/lambda/Main.scala @@ -12,7 +12,7 @@ import java.io.InputStreamReader * * Usage: scala examples.parsing.lambda.Main <file> * - * (example files: see test/*.kwi) + * (example files: see test/ *.kwi) * * @author Miles Sabin (adapted slightly by Adriaan Moors) */ diff --git a/docs/examples/parsing/lambda/TestParser.scala b/docs/examples/parsing/lambda/TestParser.scala index 92d370c29b..22257c1731 100755 --- a/docs/examples/parsing/lambda/TestParser.scala +++ b/docs/examples/parsing/lambda/TestParser.scala @@ -34,33 +34,33 @@ trait TestParser extends StdTokenParsers with ImplicitConversions with TestSynt chainl1(expr4, expr3, op3 ^^ {o => (a: Term, b: Term) => App(App(o, a), b)}) def expr4 : Parser[Term] = - ( "\\" ~ lambdas - | "let" ~ name ~ "=" ~ expr1 ~ "in" ~ expr1 ^^ flatten3(Let) - | "if" ~ expr1 ~ "then" ~ expr1 ~ "else" ~ expr1 ^^ flatten3(If) + ( "\\" ~> lambdas + | ("let" ~> name) ~ ("=" ~> expr1) ~ ("in" ~> expr1) ^^ flatten3(Let) + | ("if" ~> expr1) ~ ("then" ~> expr1) ~ ("else" 
~> expr1) ^^ flatten3(If) | chainl1(aexpr, success(App(_: Term, _: Term))) ) def lambdas : Parser[Term] = - name ~ ("->" ~ expr1 | lambdas) ^^ flatten2(Lam) + name ~ ("->" ~> expr1 | lambdas) ^^ flatten2(Lam) def aexpr : Parser[Term] = ( numericLit ^^ (_.toInt) ^^ Lit | name ^^ Ref - | "unit" ^^ Unit - | "(" ~ expr1 ~ ")" + | "unit" ^^^ Unit() + | "(" ~> expr1 <~ ")" ) def op1 : Parser[Term] = - "==" ^^ Ref(Name("==")) + "==" ^^^ Ref(Name("==")) def op2 : Parser[Term] = - ( "+" ^^ Ref(Name("+")) - | "-" ^^ Ref(Name("-")) + ( "+" ^^^ Ref(Name("+")) + | "-" ^^^ Ref(Name("-")) ) def op3 : Parser[Term] = - ( "*" ^^ Ref(Name("*")) - | "/" ^^ Ref(Name("/")) + ( "*" ^^^ Ref(Name("*")) + | "/" ^^^ Ref(Name("/")) ) def parse(r: Reader[char]) : ParseResult[Term] = diff --git a/src/library/scala/util/parsing/combinator/$tilde.scala b/src/library/scala/util/parsing/combinator/$tilde.scala index 02b8349929..6473d3b40e 100644 --- a/src/library/scala/util/parsing/combinator/$tilde.scala +++ b/src/library/scala/util/parsing/combinator/$tilde.scala @@ -1,10 +1,10 @@ -package scala.util.parsing.combinator; +package scala.util.parsing.combinatorold; // p ~ q ~ r ^^ {case a ~ b ~ c => } case class ~[+a, +b](_1: a, _2: b) // extends Pair[a, b] -// shortcut for scala.util.parsing.combinator.~(_, _) -- just ~(_, _) resolves to unary_~ -object mkTilde { def apply[a, b](_1: a, _2: b) = scala.util.parsing.combinator.~(_1, _2) } +// shortcut for scala.util.parsing.combinatorold.~(_, _) -- just ~(_, _) resolves to unary_~ +object mkTilde { def apply[a, b](_1: a, _2: b) = scala.util.parsing.combinatorold.~(_1, _2) } //def flatten[t, s <: (~[t, s] \/ t)](p: ~[t, s]): List[t] = p match { diff --git a/src/library/scala/util/parsing/combinator/ImplicitConversions.scala b/src/library/scala/util/parsing/combinator/ImplicitConversions.scala index fc6e74254e..046f3691ea 100644 --- a/src/library/scala/util/parsing/combinator/ImplicitConversions.scala +++ 
b/src/library/scala/util/parsing/combinator/ImplicitConversions.scala @@ -6,7 +6,7 @@ ** |/ ** \* */ -package scala.util.parsing.combinator +package scala.util.parsing.combinatorold /** This object contains implicit conversions that come in handy when using the `^^' combinator * {@see Parsers} to construct an AST from the concrete syntax. diff --git a/src/library/scala/util/parsing/combinator/Parsers.scala b/src/library/scala/util/parsing/combinator/Parsers.scala index 84415c0fc9..e7bc626b00 100644 --- a/src/library/scala/util/parsing/combinator/Parsers.scala +++ b/src/library/scala/util/parsing/combinator/Parsers.scala @@ -8,7 +8,7 @@ // $Id$ -package scala.util.parsing.combinator +package scala.util.parsing.combinatorold import scala.util.parsing.input._ import scala.collection.mutable.{Map=>MutableMap} diff --git a/src/library/scala/util/parsing/combinator/lexical/Lexical.scala b/src/library/scala/util/parsing/combinator/lexical/Lexical.scala index d703b89ae8..4b4fa0d2af 100644 --- a/src/library/scala/util/parsing/combinator/lexical/Lexical.scala +++ b/src/library/scala/util/parsing/combinator/lexical/Lexical.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.lexical +package scala.util.parsing.combinatorold.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input.CharArrayReader.EofCh diff --git a/src/library/scala/util/parsing/combinator/lexical/Scanners.scala b/src/library/scala/util/parsing/combinator/lexical/Scanners.scala index 4b35c849fa..2256c76d62 100644 --- a/src/library/scala/util/parsing/combinator/lexical/Scanners.scala +++ b/src/library/scala/util/parsing/combinator/lexical/Scanners.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.lexical +package scala.util.parsing.combinatorold.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input._ diff --git a/src/library/scala/util/parsing/combinator/lexical/StdLexical.scala 
b/src/library/scala/util/parsing/combinator/lexical/StdLexical.scala index fd406e6b37..84a9ed5f57 100644 --- a/src/library/scala/util/parsing/combinator/lexical/StdLexical.scala +++ b/src/library/scala/util/parsing/combinator/lexical/StdLexical.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.lexical +package scala.util.parsing.combinatorold.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input.CharArrayReader.EofCh diff --git a/src/library/scala/util/parsing/combinator/syntactical/BindingParsers.scala b/src/library/scala/util/parsing/combinator/syntactical/BindingParsers.scala index 892f566565..72c9516fbc 100644 --- a/src/library/scala/util/parsing/combinator/syntactical/BindingParsers.scala +++ b/src/library/scala/util/parsing/combinator/syntactical/BindingParsers.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.syntactical +package scala.util.parsing.combinatorold.syntactical import scala.util.parsing.ast._ diff --git a/src/library/scala/util/parsing/combinator/syntactical/StdTokenParsers.scala b/src/library/scala/util/parsing/combinator/syntactical/StdTokenParsers.scala index 968c363f7a..ca2a53a54e 100644 --- a/src/library/scala/util/parsing/combinator/syntactical/StdTokenParsers.scala +++ b/src/library/scala/util/parsing/combinator/syntactical/StdTokenParsers.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.syntactical +package scala.util.parsing.combinatorold.syntactical import scala.util.parsing.syntax._ diff --git a/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala b/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala index 013ac400c4..59a873d5e8 100644 --- a/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala +++ b/src/library/scala/util/parsing/combinator/syntactical/TokenParsers.scala @@ -9,7 +9,7 @@ // $Id$ -package scala.util.parsing.combinator.syntactical +package scala.util.parsing.combinatorold.syntactical 
/** <p> * This is the core component for token-based parsers. diff --git a/src/library/scala/util/parsing/combinator/testing/Tester.scala b/src/library/scala/util/parsing/combinator/testing/Tester.scala index 0f58057a47..8d030bd875 100644 --- a/src/library/scala/util/parsing/combinator/testing/Tester.scala +++ b/src/library/scala/util/parsing/combinator/testing/Tester.scala @@ -6,10 +6,10 @@ ** |/ ** \* */ -package scala.util.parsing.combinator.testing +package scala.util.parsing.combinatorold.testing -import scala.util.parsing.combinator.lexical.Lexical -import scala.util.parsing.combinator.syntactical.TokenParsers +import scala.util.parsing.combinatorold.lexical.Lexical +import scala.util.parsing.combinatorold.syntactical.TokenParsers /** <p> * Facilitates testing a given parser on various input strings. diff --git a/src/library/scala/util/parsing/combinator1/ImplicitConversions.scala b/src/library/scala/util/parsing/combinator1/ImplicitConversions.scala index dc21fa0b87..fc6e74254e 100644 --- a/src/library/scala/util/parsing/combinator1/ImplicitConversions.scala +++ b/src/library/scala/util/parsing/combinator1/ImplicitConversions.scala @@ -6,7 +6,7 @@ ** |/ ** \* */ -package scala.util.parsing.combinator1 +package scala.util.parsing.combinator /** This object contains implicit conversions that come in handy when using the `^^' combinator * {@see Parsers} to construct an AST from the concrete syntax. 
diff --git a/src/library/scala/util/parsing/combinator1/Parsers.scala b/src/library/scala/util/parsing/combinator1/Parsers.scala index ca4a8462f5..15845c8e84 100644 --- a/src/library/scala/util/parsing/combinator1/Parsers.scala +++ b/src/library/scala/util/parsing/combinator1/Parsers.scala @@ -8,7 +8,7 @@ // $Id: Parsers.scala 12357 2007-07-18 21:55:08Z moors $ -package scala.util.parsing.combinator1 +package scala.util.parsing.combinator import scala.util.parsing.input._ import scala.collection.mutable.{Map=>MutableMap} @@ -668,6 +668,17 @@ trait Parsers { def opt[T](p: => Parser[T]): Parser[Option[T]] = p ^^ (x => Some(x)) | success(None) + /** Wrap a parser so that its failures&errors become success and vice versa -- it never consumes any input + */ + def not[T](p: => Parser[T]): Parser[Unit] = Parser { in => + p(in) match { + case s @ Success(_, _) => Failure("Expected failure", in) + case e @ Error(_, _) => Success((), in) + case f @ Failure(msg, next) => Success((), in) + } + } + + /** `positioned' decorates a parser's result with the start position of the input it consumed. * * @param p a `Parser' whose result conforms to `Positional'. 
diff --git a/src/library/scala/util/parsing/combinator1/lexical/Lexical.scala b/src/library/scala/util/parsing/combinator1/lexical/Lexical.scala index 31ecdc629d..7c67f1390d 100644 --- a/src/library/scala/util/parsing/combinator1/lexical/Lexical.scala +++ b/src/library/scala/util/parsing/combinator1/lexical/Lexical.scala @@ -9,7 +9,7 @@ // $Id: Lexical.scala 12268 2007-07-11 13:45:53Z michelou $ -package scala.util.parsing.combinator1.lexical +package scala.util.parsing.combinator.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input.CharArrayReader.EofCh diff --git a/src/library/scala/util/parsing/combinator1/lexical/Scanners.scala b/src/library/scala/util/parsing/combinator1/lexical/Scanners.scala index af5c141267..8b9e7724b7 100644 --- a/src/library/scala/util/parsing/combinator1/lexical/Scanners.scala +++ b/src/library/scala/util/parsing/combinator1/lexical/Scanners.scala @@ -9,7 +9,7 @@ // $Id: Scanners.scala 12407 2007-07-25 02:33:18Z emir $ -package scala.util.parsing.combinator1.lexical +package scala.util.parsing.combinator.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input._ diff --git a/src/library/scala/util/parsing/combinator1/lexical/StdLexical.scala b/src/library/scala/util/parsing/combinator1/lexical/StdLexical.scala index e278869f7a..60bca9fedd 100644 --- a/src/library/scala/util/parsing/combinator1/lexical/StdLexical.scala +++ b/src/library/scala/util/parsing/combinator1/lexical/StdLexical.scala @@ -9,7 +9,7 @@ // $Id: StdLexical.scala 12242 2007-07-09 09:43:09Z michelou $ -package scala.util.parsing.combinator1.lexical +package scala.util.parsing.combinator.lexical import scala.util.parsing.syntax._ import scala.util.parsing.input.CharArrayReader.EofCh diff --git a/src/library/scala/util/parsing/combinator1/syntactical/StandardTokenParsers.scala b/src/library/scala/util/parsing/combinator1/syntactical/StandardTokenParsers.scala index 68d0d07c39..2bc07dac37 100644 --- 
a/src/library/scala/util/parsing/combinator1/syntactical/StandardTokenParsers.scala +++ b/src/library/scala/util/parsing/combinator1/syntactical/StandardTokenParsers.scala @@ -9,10 +9,10 @@ // $Id: StdTokenParsers.scala 12242 2007-07-09 09:43:09Z michelou $ -package scala.util.parsing.combinator1.syntactical +package scala.util.parsing.combinator.syntactical import scala.util.parsing.syntax._ -import scala.util.parsing.combinator1.lexical.StdLexical +import scala.util.parsing.combinator.lexical.StdLexical /** This component provides primitive parsers for the standard tokens defined in `StdTokens'. * diff --git a/src/library/scala/util/parsing/combinator1/syntactical/StdTokenParsers.scala b/src/library/scala/util/parsing/combinator1/syntactical/StdTokenParsers.scala index e344f988f8..102b29c4ac 100644 --- a/src/library/scala/util/parsing/combinator1/syntactical/StdTokenParsers.scala +++ b/src/library/scala/util/parsing/combinator1/syntactical/StdTokenParsers.scala @@ -9,7 +9,7 @@ // $Id: StdTokenParsers.scala 12242 2007-07-09 09:43:09Z michelou $ -package scala.util.parsing.combinator1.syntactical +package scala.util.parsing.combinator.syntactical import scala.util.parsing.syntax._ diff --git a/src/library/scala/util/parsing/combinator1/syntactical/TokenParsers.scala b/src/library/scala/util/parsing/combinator1/syntactical/TokenParsers.scala index 6668d8a37e..9cbbdb0936 100644 --- a/src/library/scala/util/parsing/combinator1/syntactical/TokenParsers.scala +++ b/src/library/scala/util/parsing/combinator1/syntactical/TokenParsers.scala @@ -9,7 +9,7 @@ // $Id: TokenParsers.scala 12242 2007-07-09 09:43:09Z michelou $ -package scala.util.parsing.combinator1.syntactical +package scala.util.parsing.combinator.syntactical /** <p> * This is the core component for token-based parsers. 
diff --git a/src/library/scala/util/parsing/combinator1/testing/Tester.scala b/src/library/scala/util/parsing/combinator1/testing/Tester.scala index e2f5d61e53..0f58057a47 100644 --- a/src/library/scala/util/parsing/combinator1/testing/Tester.scala +++ b/src/library/scala/util/parsing/combinator1/testing/Tester.scala @@ -6,10 +6,10 @@ ** |/ ** \* */ -package scala.util.parsing.combinator1.testing +package scala.util.parsing.combinator.testing -import scala.util.parsing.combinator1.lexical.Lexical -import scala.util.parsing.combinator1.syntactical.TokenParsers +import scala.util.parsing.combinator.lexical.Lexical +import scala.util.parsing.combinator.syntactical.TokenParsers /** <p> * Facilitates testing a given parser on various input strings. diff --git a/src/library/scala/util/parsing/json/Lexer.scala b/src/library/scala/util/parsing/json/Lexer.scala index 143871c8da..0eedd8389b 100644 --- a/src/library/scala/util/parsing/json/Lexer.scala +++ b/src/library/scala/util/parsing/json/Lexer.scala @@ -25,12 +25,12 @@ class Lexer extends StdLexical with ImplicitConversions { //( '\"' ~ rep(charSeq | letter) ~ '\"' ^^ lift(StringLit) ( string ^^ StringLit | number ~ letter ^^ { case n ~ l => ErrorToken("Invalid number format : " + n + l) } - | '-' ~ whitespace ~ number ~ letter ^^ { case ws ~ num ~ l => ErrorToken("Invalid number format : -" + num + l) } - | '-' ~ whitespace ~ number ^^ { case ws ~ num => NumericLit("-" + num) } + | '-' ~> whitespace ~ number ~ letter ^^ { case ws ~ num ~ l => ErrorToken("Invalid number format : -" + num + l) } + | '-' ~> whitespace ~ number ^^ { case ws ~ num => NumericLit("-" + num) } | number ^^ NumericLit - | EofCh ^^ EOF + | EofCh ^^^ EOF | delim - | '\"' ~ failure("Unterminated string") + | '\"' ~> failure("Unterminated string") | rep(letter) ^^ checkKeyword | failure("Illegal character") ) @@ -43,7 +43,7 @@ class Lexer extends StdLexical with ImplicitConversions { /** A string is a collection of zero or more Unicode characters, 
wrapped in * double quotes, using backslash escapes (cf. http://www.json.org/). */ - def string = '\"' ~ rep(charSeq | chrExcept('\"', '\n', EofCh)) ~ '\"' ^^ { _ mkString "" } + def string = '\"' ~> rep(charSeq | chrExcept('\"', '\n', EofCh)) <~ '\"' ^^ { _ mkString "" } override def whitespace = rep(whitespaceChar) @@ -52,7 +52,7 @@ class Lexer extends StdLexical with ImplicitConversions { } def intPart = zero | intList def intList = nonzero ~ rep(digit) ^^ {case x ~ y => (x :: y) mkString ""} - def fracPart = '.' ~ rep(digit) ^^ { _ mkString "" } + def fracPart = '.' ~> rep(digit) ^^ { _ mkString "" } def expPart = exponent ~ opt(sign) ~ rep1(digit) ^^ { case e ~ s ~ d => e + optString("", s) + d.mkString("") } @@ -62,21 +62,21 @@ class Lexer extends StdLexical with ImplicitConversions { case None => "" } - def zero: Parser[String] = '0' ^^ "0" + def zero: Parser[String] = '0' ^^^ "0" def nonzero = elem("nonzero digit", d => d.isDigit && d != '0') def exponent = elem("exponent character", d => d == 'e' || d == 'E') def sign = elem("sign character", d => d == '-' || d == '+') def charSeq: Parser[String] = - ('\\' ~ '\"' ^^ "\"" - |'\\' ~ '\\' ^^ "\\" - |'\\' ~ '/' ^^ "/" - |'\\' ~ 'b' ^^ "\b" - |'\\' ~ 'f' ^^ "\f" - |'\\' ~ 'n' ^^ "\n" - |'\\' ~ 'r' ^^ "\r" - |'\\' ~ 't' ^^ "\t" - |'\\' ~ 'u' ~ unicodeBlock) + ('\\' ~ '\"' ^^^ "\"" + |'\\' ~ '\\' ^^^ "\\" + |'\\' ~ '/' ^^^ "/" + |'\\' ~ 'b' ^^^ "\b" + |'\\' ~ 'f' ^^^ "\f" + |'\\' ~ 'n' ^^^ "\n" + |'\\' ~ 'r' ^^^ "\r" + |'\\' ~ 't' ^^^ "\t" + |'\\' ~> 'u' ~> unicodeBlock) val hexDigits = Set[Char]() ++ "0123456789abcdefABCDEF".toArray def hexDigit = elem("hex digit", hexDigits.contains(_)) diff --git a/src/library/scala/util/parsing/json/Parser.scala b/src/library/scala/util/parsing/json/Parser.scala index 50101c85eb..70b48fb394 100644 --- a/src/library/scala/util/parsing/json/Parser.scala +++ b/src/library/scala/util/parsing/json/Parser.scala @@ -29,10 +29,10 @@ class Parser extends StdTokenParsers with 
ImplicitConversions { // Define the grammar def root = jsonObj | jsonArray - def jsonObj = "{" ~ repsep(objEntry, ",") ~ "}" - def jsonArray = "[" ~ repsep(value, ",") ~ "]" - def objEntry = stringVal ~ ":" ~ value ^^ { case x ~ y => (x, y) } - def value: Parser[Any] = (jsonObj | jsonArray | number | "true" ^^ true | "false" ^^ false | "null" ^^ null | stringVal) + def jsonObj = "{" ~> repsep(objEntry, ",") <~ "}" + def jsonArray = "[" ~> repsep(value, ",") <~ "]" + def objEntry = stringVal ~ (":" ~> value) ^^ { case x ~ y => (x, y) } + def value: Parser[Any] = (jsonObj | jsonArray | number | "true" ^^^ true | "false" ^^^ false | "null" ^^^ null | stringVal) def stringVal = accept("string", { case lexical.StringLit(n) => n} ) def number = accept("number", { case lexical.NumericLit(n) => n.toDouble} ) } |