author     Daoyuan Wang <daoyuan.wang@intel.com>    2015-11-03 22:30:23 +0800
committer  Cheng Lian <lian@databricks.com>         2015-11-03 22:30:23 +0800
commit     d188a67762dfc09929e30931509be5851e29dfa5 (patch)
tree       de256b7a8f66226784d566614ac7665c83d43746
parent     233e534ac43ea25ac1b0e6a985f6928d46c5d03a (diff)
[SPARK-10533][SQL] handle scientific notation in sqlParser
https://issues.apache.org/jira/browse/SPARK-10533

    val df = sqlContext.createDataFrame(Seq(("a", 1.0), ("b", 2.0), ("c", 3.0)))
    df.filter("_2 < 2.0e1").show

Scientific notation didn't work: the SQL parser did not recognize literals such as 2.0e1.

Author: Daoyuan Wang <daoyuan.wang@intel.com>

Closes #9085 from adrian-wang/scinotation.
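For context, a hedged sketch of what the lexer produced before and after this change (token names follow SqlLexical; the exact error surfaced to the user may have differed):

    // Before: the longest numeric prefix is matched, then lexing restarts,
    // so the literal falls apart and the filter expression cannot parse.
    "2.0e1"  =>  FloatLit("2.0"), Identifier("e1")
    // After: one token carrying the whole literal.
    "2.0e1"  =>  DecimalLit("2.0e1")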
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala  15
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala               11
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala                       11
3 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
index 2bac08eac4..04ac4f20c6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
@@ -82,6 +82,10 @@ class SqlLexical extends StdLexical {
     override def toString: String = chars
   }
 
+  case class DecimalLit(chars: String) extends Token {
+    override def toString: String = chars
+  }
+
   /* This is a work around to support the lazy setting */
   def initialize(keywords: Seq[String]): Unit = {
     reserved.clear()
@@ -102,8 +106,12 @@ class SqlLexical extends StdLexical {
   }
 
   override lazy val token: Parser[Token] =
-    ( identChar ~ (identChar | digit).* ^^
-      { case first ~ rest => processIdent((first :: rest).mkString) }
+    ( rep1(digit) ~ ('.' ~> digit.*).? ~ (exp ~> sign.? ~ rep1(digit)) ^^ {
+        case i ~ None ~ (sig ~ rest) =>
+          DecimalLit(i.mkString + "e" + sig.mkString + rest.mkString)
+        case i ~ Some(d) ~ (sig ~ rest) =>
+          DecimalLit(i.mkString + "." + d.mkString + "e" + sig.mkString + rest.mkString)
+      }
     | digit.* ~ identChar ~ (identChar | digit).* ^^
       { case first ~ middle ~ rest => processIdent((first ++ (middle :: rest)).mkString) }
     | rep1(digit) ~ ('.' ~> digit.*).? ^^ {
@@ -125,6 +133,9 @@ class SqlLexical extends StdLexical {
 
   override def identChar: Parser[Elem] = letter | elem('_')
 
+  private lazy val sign: Parser[Elem] = elem("s", c => c == '+' || c == '-')
+  private lazy val exp: Parser[Elem] = elem("e", c => c == 'E' || c == 'e')
+
   override def whitespace: Parser[Any] =
     ( whitespaceChar
     | '/' ~ '*' ~ comment
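To see the lexer rule in isolation, here is a minimal, self-contained sketch that layers the same grammar on top of scala-parser-combinators' StdLexical, the base class SqlLexical extends. The class SciLexical and the demo object are invented for illustration; only the token rule mirrors the patch:

    import scala.util.parsing.combinator.lexical.StdLexical

    // Minimal sketch (not Spark code): the patch's scientific-notation token
    // rule on top of StdLexical.
    class SciLexical extends StdLexical {
      // New token kind carrying the normalized literal text, as in the patch.
      case class DecimalLit(chars: String) extends Token {
        override def toString: String = chars
      }

      private lazy val sign: Parser[Elem] = elem("s", c => c == '+' || c == '-')
      private lazy val exp: Parser[Elem] = elem("e", c => c == 'E' || c == 'e')

      override lazy val token: Parser[Token] =
        // Digits, an optional fraction, then a mandatory exponent; the
        // mandatory exponent keeps this rule from swallowing plain integers.
        ( rep1(digit) ~ ('.' ~> digit.*).? ~ (exp ~> sign.? ~ rep1(digit)) ^^ {
            case i ~ None ~ (sig ~ rest) =>
              DecimalLit(i.mkString + "e" + sig.mkString + rest.mkString)
            case i ~ Some(d) ~ (sig ~ rest) =>
              DecimalLit(i.mkString + "." + d.mkString + "e" + sig.mkString + rest.mkString)
          }
        | super.token  // fall back to StdLexical's identifier/number/string rules
        )
    }

    object SciLexicalDemo extends App {
      val lexer = new SciLexical
      val scanner = new lexer.Scanner("2.0e1")
      // Prints "2.0e1": one DecimalLit token for the whole literal.
      println(scanner.first)
    }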
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index d7567e8613..1ba559d9e3 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -337,6 +337,9 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
     | sign.? ~ unsignedFloat ^^ {
       case s ~ f => Literal(toDecimalOrDouble(s.getOrElse("") + f))
     }
+    | sign.? ~ unsignedDecimal ^^ {
+      case s ~ d => Literal(toDecimalOrDouble(s.getOrElse("") + d))
+    }
     )
 
   protected lazy val unsignedFloat: Parser[String] =
@@ -344,6 +347,14 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
     | elem("decimal", _.isInstanceOf[lexical.FloatLit]) ^^ (_.chars)
     )
 
+  protected lazy val unsignedDecimal: Parser[String] =
+    ( "." ~> decimalLit ^^ { u => "0." + u }
+    | elem("scientific_notation", _.isInstanceOf[lexical.DecimalLit]) ^^ (_.chars)
+    )
+
+  def decimalLit: Parser[String] =
+    elem("scientific_notation", _.isInstanceOf[lexical.DecimalLit]) ^^ (_.chars)
+
   protected lazy val sign: Parser[String] = ("+" | "-")
 
   protected lazy val integral: Parser[String] =
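The new alternative hands the reassembled string to toDecimalOrDouble, a private SqlParser helper. As a hedged approximation of its observable behavior (the actual implementation may differ in detail): a literal carrying an exponent becomes a Double, while a plain decimal literal keeps exact precision:

    // Approximation of SqlParser's private toDecimalOrDouble helper
    // (assumption: not the verbatim Spark code).
    def toDecimalOrDouble(value: String): Any = {
      val decimal = BigDecimal(value)
      // An exponent marks the literal as a double, matching common SQL engines.
      if (value.contains('E') || value.contains('e')) decimal.toDouble
      else decimal
    }

    toDecimalOrDouble("9.0e1")   // 90.0 (Double)
    toDecimalOrDouble("90.0")    // 90.0 (BigDecimal, exact)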
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 6b86c5951b..a883bcb7b1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -177,9 +177,14 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
   }
 
   test("filterExpr") {
-    checkAnswer(
-      testData.filter("key > 90"),
-      testData.collect().filter(_.getInt(0) > 90).toSeq)
+    val res = testData.collect().filter(_.getInt(0) > 90).toSeq
+    checkAnswer(testData.filter("key > 90"), res)
+    checkAnswer(testData.filter("key > 9.0e1"), res)
+    checkAnswer(testData.filter("key > .9e+2"), res)
+    checkAnswer(testData.filter("key > 0.9e+2"), res)
+    checkAnswer(testData.filter("key > 900e-1"), res)
+    checkAnswer(testData.filter("key > 900.0E-1"), res)
+    checkAnswer(testData.filter("key > 9.e+1"), res)
   }
 
   test("filterExpr using where") {