about summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
authorJoseph Batchik <joseph.batchik@cloudera.com>2015-08-03 11:17:38 -0700
committerMichael Armbrust <michael@databricks.com>2015-08-03 11:17:38 -0700
commitdfe7bd168d9bcf8c53f993f459ab473d893457b0 (patch)
treed293acf9e1c35f45ce22b91acdeeb5c9c6f43609 /sql
parentb41a32718d615b304efba146bf97be0229779b01 (diff)
downloadspark-dfe7bd168d9bcf8c53f993f459ab473d893457b0.tar.gz
spark-dfe7bd168d9bcf8c53f993f459ab473d893457b0.tar.bz2
spark-dfe7bd168d9bcf8c53f993f459ab473d893457b0.zip
[SPARK-9511] [SQL] Fixed Table Name Parsing
The issue was that the tokenizer was parsing "1one" into the numeric 1 using the code on line 110. I added another case to accept strings that start with a number and then have a letter somewhere else in them as well.

Author: Joseph Batchik <joseph.batchik@cloudera.com>

Closes #7844 from JDrit/parse_error and squashes the following commits:

b8ca12f [Joseph Batchik] fixed parsing issue by adding another case
Diffstat (limited to 'sql')
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala | 2
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 10
2 files changed, 12 insertions, 0 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
index d494ae7b71..5898a5f93f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
@@ -104,6 +104,8 @@ class SqlLexical extends StdLexical {
override lazy val token: Parser[Token] =
( identChar ~ (identChar | digit).* ^^
{ case first ~ rest => processIdent((first :: rest).mkString) }
+ | digit.* ~ identChar ~ (identChar | digit).* ^^
+ { case first ~ middle ~ rest => processIdent((first ++ (middle :: rest)).mkString) }
| rep1(digit) ~ ('.' ~> digit.*).? ^^ {
case i ~ None => NumericLit(i.mkString)
case i ~ Some(d) => FloatLit(i.mkString + "." + d.mkString)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index bbadc202a4..f1abae0720 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -1604,4 +1604,14 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll with SQLTestUtils {
checkAnswer(df.select(-df("i")),
Row(new CalendarInterval(-(12 * 3 - 3), -(7L * MICROS_PER_WEEK + 123))))
}
+
+ test("SPARK-9511: error with table starting with number") {
+ val df = sqlContext.sparkContext.parallelize(1 to 10).map(i => (i, i.toString))
+ .toDF("num", "str")
+ df.registerTempTable("1one")
+
+ checkAnswer(sqlContext.sql("select count(num) from 1one"), Row(10))
+
+ sqlContext.dropTempTable("1one")
+ }
}