about | summary | refs | log | tree | commit | diff
path: root/sql/catalyst
diff options
context:
space:
mode:
authorMichael Armbrust <michael@databricks.com>2014-04-24 18:21:00 -0700
committerReynold Xin <rxin@apache.org>2014-04-24 18:21:10 -0700
commit2a35fba34f72dfd3a192a65e4ad77954a23bec40 (patch)
tree62015714c03efe48b5b952bb9af0c1a08cabd017 /sql/catalyst
parenta3b6d852337c5c4c9afd4942699f35baaa6f691d (diff)
downloadspark-2a35fba34f72dfd3a192a65e4ad77954a23bec40.tar.gz
spark-2a35fba34f72dfd3a192a65e4ad77954a23bec40.tar.bz2
spark-2a35fba34f72dfd3a192a65e4ad77954a23bec40.zip
[SQL] Add support for parsing indexing into arrays in SQL.
Author: Michael Armbrust <michael@databricks.com> Closes #518 from marmbrus/parseArrayIndex and squashes the following commits: afd2d6b [Michael Armbrust] 100 chars c3d6026 [Michael Armbrust] Add support for parsing indexing into arrays in SQL. (cherry picked from commit 4660991e679eda158a3ae8039b686eae197a71d1) Signed-off-by: Reynold Xin <rxin@apache.org>
Diffstat (limited to 'sql/catalyst')
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index 13a19d0adf..8c76a3aa96 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.catalyst
import scala.language.implicitConversions
import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.parsing.combinator.syntactical.StandardTokenParsers
+import scala.util.parsing.combinator.PackratParsers
import scala.util.parsing.input.CharArrayReader.EofCh
import org.apache.spark.sql.catalyst.analysis._
@@ -39,7 +40,7 @@ import org.apache.spark.sql.catalyst.types._
* This is currently included mostly for illustrative purposes. Users wanting more complete support
* for a SQL like language should checkout the HiveQL support in the sql/hive sub-project.
*/
-class SqlParser extends StandardTokenParsers {
+class SqlParser extends StandardTokenParsers with PackratParsers {
def apply(input: String): LogicalPlan = {
phrase(query)(new lexical.Scanner(input)) match {
case Success(r, x) => r
@@ -152,7 +153,7 @@ class SqlParser extends StandardTokenParsers {
lexical.delimiters += (
"@", "*", "+", "-", "<", "=", "<>", "!=", "<=", ">=", ">", "/", "(", ")",
- ",", ";", "%", "{", "}", ":"
+ ",", ";", "%", "{", "}", ":", "[", "]"
)
protected def assignAliases(exprs: Seq[Expression]): Seq[NamedExpression] = {
@@ -339,7 +340,10 @@ class SqlParser extends StandardTokenParsers {
protected lazy val floatLit: Parser[String] =
elem("decimal", _.isInstanceOf[lexical.FloatLit]) ^^ (_.chars)
- protected lazy val baseExpression: Parser[Expression] =
+ protected lazy val baseExpression: PackratParser[Expression] =
+ expression ~ "[" ~ expression <~ "]" ^^ {
+ case base ~ _ ~ ordinal => GetItem(base, ordinal)
+ } |
TRUE ^^^ Literal(true, BooleanType) |
FALSE ^^^ Literal(false, BooleanType) |
cast |