author     Herman van Hovell <hvanhovell@questtec.nl>   2016-01-15 15:19:10 -0800
committer  Reynold Xin <rxin@databricks.com>            2016-01-15 15:19:10 -0800
commit     7cd7f2202547224593517b392f56e49e4c94cabc (patch)
tree       3deb6f260ce94c59d2e25bc29095582dfd637173 /sql/core
parent     3f1c58d60b85625ab3abf16456ce27c820453ecf (diff)
[SPARK-12575][SQL] Grammar parity with existing SQL parser
In this PR the new CatalystQl parser stack reaches grammar parity with the old parser-combinator based SQL parser. This PR also replaces all uses of the old parser and removes it from the code base.

Although the existing Hive and SQL parser dialects were mostly the same, a few differences had to be worked out:

- The SQL parser allowed syntax like ```APPROXIMATE(0.01) COUNT(DISTINCT a)```. Supporting this would require either hardcoding approximate operators in the parser or introducing a dedicated approximate expression. ```APPROXIMATE_COUNT_DISTINCT(a, 0.01)``` does the same job and is much easier to maintain, so this PR **removes** the ```APPROXIMATE``` keyword.
- The old SQL parser supported ```LIMIT``` clauses in nested queries. This is **not supported** anymore; see https://github.com/apache/spark/pull/10689 for the rationale.
- Hive supports a charset-name/hex-literal combination: for instance, the expression ```_ISO-8859-1 0x4341464562616265``` yields the string ```CAFEbabe```. Hive only allows charset names that start with an underscore, which is quite annoying in Spark because tuple field names (```_1```, ```_2```, ...) also start with an underscore. This PR **removes** the feature from the parser; it would be easy to implement later as an Expression.
- Hive and the SQL parser treat decimal literals differently. Hive turns any decimal into a ```Double```, whereas the SQL parser converted a non-scientific decimal into a ```BigDecimal``` and a scientific decimal into a ```Double```. We follow Hive's behavior here. The new parser also supports a big decimal literal, for instance ```81923801.42BD```, which can be used when a ```BigDecimal``` is needed.

Example queries illustrating these behavior changes follow below.

cc rxin viirya marmbrus yhuai cloud-fan

Author: Herman van Hovell <hvanhovell@questtec.nl>

Closes #10745 from hvanhovell/SPARK-12575-2.
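For readers skimming the diff, here is a short sketch of queries that exercise the behavior changes described above. The sqlContext parameter and the testData/testData2 temp tables are assumptions taken from the test suites modified below:

```scala
import org.apache.spark.sql.SQLContext

object ParityExamples {
  // `sqlContext` is an assumed active SQLContext; `testData` and `testData2`
  // are the temp tables used by the test suites touched in this diff.
  def run(sqlContext: SQLContext): Unit = {
    // The APPROXIMATE keyword is gone; the function form from SQLQuerySuite works instead.
    sqlContext.sql("SELECT APPROX_COUNT_DISTINCT(a, 0.04) FROM testData2").show()

    // Plain decimal literals now follow Hive and come back as Doubles...
    sqlContext.sql("SELECT 0.3").show()            // 0.3 as a Double

    // ...while the new BD suffix yields a BigDecimal when exact precision matters.
    sqlContext.sql("SELECT 81923801.42BD").show()  // BigDecimal("81923801.42")

    // LIMIT inside nested queries is no longer accepted; it moves to the outer
    // query, as in the updated "from follow multiple brackets" test.
    sqlContext.sql(
      "SELECT key FROM (SELECT * FROM testData UNION ALL SELECT * FROM testData) x " +
        "LIMIT 1").show()
  }
}
```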
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Column.scala                                |  1
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala                             |  8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala                       |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala                       |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala                            | 17
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala              | 19
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala       | 17
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala                             |  7
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala                  |  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala                         | 73
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala  | 12
11 files changed, 92 insertions, 74 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index 6a020f9f28..97bf7a0cc4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -21,7 +21,6 @@ import scala.language.implicitConversions
import org.apache.spark.Logging
import org.apache.spark.annotation.Experimental
-import org.apache.spark.sql.catalyst.SqlParser._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions._
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 91bf2f8ce4..3422d0ead4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -30,7 +30,7 @@ import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.python.PythonRDD
import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, ScalaReflection, SqlParser}
+import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
@@ -737,7 +737,7 @@ class DataFrame private[sql](
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
select(exprs.map { expr =>
- Column(SqlParser.parseExpression(expr))
+ Column(sqlContext.sqlParser.parseExpression(expr))
}: _*)
}
@@ -764,7 +764,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
def filter(conditionExpr: String): DataFrame = {
- filter(Column(SqlParser.parseExpression(conditionExpr)))
+ filter(Column(sqlContext.sqlParser.parseExpression(conditionExpr)))
}
/**
@@ -788,7 +788,7 @@ class DataFrame private[sql](
* @since 1.5.0
*/
def where(conditionExpr: String): DataFrame = {
- filter(Column(SqlParser.parseExpression(conditionExpr)))
+ filter(Column(sqlContext.sqlParser.parseExpression(conditionExpr)))
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index d948e48942..8f852e5216 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -29,7 +29,7 @@ import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.catalyst.SqlParser
+import org.apache.spark.sql.catalyst.{CatalystQl}
import org.apache.spark.sql.execution.datasources.{LogicalRelation, ResolvedDataSource}
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
import org.apache.spark.sql.execution.datasources.json.JSONRelation
@@ -337,7 +337,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
*/
def table(tableName: String): DataFrame = {
DataFrame(sqlContext,
- sqlContext.catalog.lookupRelation(SqlParser.parseTableIdentifier(tableName)))
+ sqlContext.catalog.lookupRelation(sqlContext.sqlParser.parseTableIdentifier(tableName)))
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 00f9817b53..ab63fe4aa8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -22,7 +22,7 @@ import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.spark.annotation.Experimental
-import org.apache.spark.sql.catalyst.{SqlParser, TableIdentifier}
+import org.apache.spark.sql.catalyst.{CatalystQl, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, Project}
import org.apache.spark.sql.execution.datasources.{BucketSpec, CreateTableUsingAsSelect, ResolvedDataSource}
@@ -192,7 +192,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* @since 1.4.0
*/
def insertInto(tableName: String): Unit = {
- insertInto(SqlParser.parseTableIdentifier(tableName))
+ insertInto(df.sqlContext.sqlParser.parseTableIdentifier(tableName))
}
private def insertInto(tableIdent: TableIdentifier): Unit = {
@@ -282,7 +282,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* @since 1.4.0
*/
def saveAsTable(tableName: String): Unit = {
- saveAsTable(SqlParser.parseTableIdentifier(tableName))
+ saveAsTable(df.sqlContext.sqlParser.parseTableIdentifier(tableName))
}
private def saveAsTable(tableIdent: TableIdentifier): Unit = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index b909765a7c..a0939adb6d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -39,6 +39,7 @@ import org.apache.spark.sql.catalyst.encoders.encoderFor
import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer.Optimizer
+import org.apache.spark.sql.catalyst.parser.ParserConf
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.execution._
@@ -205,15 +206,17 @@ class SQLContext private[sql](
protected[sql] lazy val optimizer: Optimizer = new SparkOptimizer(this)
@transient
- protected[sql] val ddlParser = new DDLParser(sqlParser.parse(_))
+ protected[sql] val ddlParser = new DDLParser(sqlParser)
@transient
- protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_))
+ protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect())
protected[sql] def getSQLDialect(): ParserDialect = {
try {
val clazz = Utils.classForName(dialectClassName)
- clazz.newInstance().asInstanceOf[ParserDialect]
+ clazz.getConstructor(classOf[ParserConf])
+ .newInstance(conf)
+ .asInstanceOf[ParserDialect]
} catch {
case NonFatal(e) =>
// Since we didn't find the available SQL Dialect, it will fail even for SET command:
@@ -237,7 +240,7 @@ class SQLContext private[sql](
new sparkexecution.QueryExecution(this, plan)
protected[sql] def dialectClassName = if (conf.dialect == "sql") {
- classOf[DefaultParserDialect].getCanonicalName
+ classOf[SparkQl].getCanonicalName
} else {
conf.dialect
}
@@ -682,7 +685,7 @@ class SQLContext private[sql](
tableName: String,
source: String,
options: Map[String, String]): DataFrame = {
- val tableIdent = SqlParser.parseTableIdentifier(tableName)
+ val tableIdent = sqlParser.parseTableIdentifier(tableName)
val cmd =
CreateTableUsing(
tableIdent,
@@ -728,7 +731,7 @@ class SQLContext private[sql](
source: String,
schema: StructType,
options: Map[String, String]): DataFrame = {
- val tableIdent = SqlParser.parseTableIdentifier(tableName)
+ val tableIdent = sqlParser.parseTableIdentifier(tableName)
val cmd =
CreateTableUsing(
tableIdent,
@@ -833,7 +836,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
def table(tableName: String): DataFrame = {
- table(SqlParser.parseTableIdentifier(tableName))
+ table(sqlParser.parseTableIdentifier(tableName))
}
private def table(tableIdent: TableIdentifier): DataFrame = {
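A side note on the getSQLDialect() change above: dialects are now instantiated through a constructor that takes a ParserConf instead of a no-arg constructor, so custom dialects must expose one. A minimal sketch of the new contract; the class name and the commented-out config wiring are illustrative only, and the pattern mirrors the MyDialect test class later in this diff:

```scala
import org.apache.spark.sql.catalyst.CatalystQl
import org.apache.spark.sql.catalyst.parser.ParserConf

// A custom dialect must now accept a ParserConf, because SQLContext
// reflectively calls clazz.getConstructor(classOf[ParserConf]).newInstance(conf).
class MyCustomDialect(conf: ParserConf) extends CatalystQl(conf)

// Hypothetical wiring: point the dialect setting at the class so that
// getSQLDialect() can pick it up via conf.dialect.
// sqlContext.setConf("spark.sql.dialect", classOf[MyCustomDialect].getCanonicalName)
```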
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
index b3e8d0d849..1af2c756cd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSQLParser.scala
@@ -19,8 +19,8 @@ package org.apache.spark.sql.execution
import scala.util.parsing.combinator.RegexParsers
-import org.apache.spark.sql.catalyst.AbstractSparkSQLParser
-import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.catalyst.{AbstractSparkSQLParser, ParserDialect, TableIdentifier}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression}
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.types.StringType
@@ -29,9 +29,16 @@ import org.apache.spark.sql.types.StringType
* The top level Spark SQL parser. This parser recognizes syntaxes that are available for all SQL
* dialects supported by Spark SQL, and delegates all the other syntaxes to the `fallback` parser.
*
- * @param fallback A function that parses an input string to a logical plan
+ * @param fallback A function that returns the next parser in the chain. This is a call-by-name
+ * parameter because this allows us to return a different dialect if we
+ * have to.
*/
-class SparkSQLParser(fallback: String => LogicalPlan) extends AbstractSparkSQLParser {
+class SparkSQLParser(fallback: => ParserDialect) extends AbstractSparkSQLParser {
+
+ override def parseExpression(sql: String): Expression = fallback.parseExpression(sql)
+
+ override def parseTableIdentifier(sql: String): TableIdentifier =
+ fallback.parseTableIdentifier(sql)
// A parser for the key-value part of the "SET [key = [value ]]" syntax
private object SetCommandParser extends RegexParsers {
@@ -74,7 +81,7 @@ class SparkSQLParser(fallback: String => LogicalPlan) extends AbstractSparkSQLPa
private lazy val cache: Parser[LogicalPlan] =
CACHE ~> LAZY.? ~ (TABLE ~> ident) ~ (AS ~> restInput).? ^^ {
case isLazy ~ tableName ~ plan =>
- CacheTableCommand(tableName, plan.map(fallback), isLazy.isDefined)
+ CacheTableCommand(tableName, plan.map(fallback.parsePlan), isLazy.isDefined)
}
private lazy val uncache: Parser[LogicalPlan] =
@@ -111,7 +118,7 @@ class SparkSQLParser(fallback: String => LogicalPlan) extends AbstractSparkSQLPa
private lazy val others: Parser[LogicalPlan] =
wholeInput ^^ {
- case input => fallback(input)
+ case input => fallback.parsePlan(input)
}
}
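The call-by-name fallback parameter above is worth a brief illustration: because the argument expression is re-evaluated on every use, the front-end parser always delegates to whatever dialect is currently active. A self-contained sketch of the pattern with simplified stand-in types (not the actual Spark classes):

```scala
// Simplified stand-ins for ParserDialect / LogicalPlan, just to show the shape.
trait Dialect {
  def parsePlan(sql: String): String
  def parseExpression(sql: String): String
}

// `fallback` is call-by-name, so every use re-evaluates the argument expression;
// if the active dialect changes, later calls delegate to the new one without
// rebuilding this parser.
class FrontEndParser(fallback: => Dialect) {
  def parseExpression(sql: String): String = fallback.parseExpression(sql)
  def parsePlan(sql: String): String = fallback.parsePlan(sql)
}

object Demo extends App {
  var active: Dialect = new Dialect {
    def parsePlan(sql: String) = s"plan[$sql]"
    def parseExpression(sql: String) = s"expr[$sql]"
  }
  val parser = new FrontEndParser(active)  // captures the expression, not the value
  println(parser.parsePlan("SELECT 1"))    // plan[SELECT 1]

  active = new Dialect {
    def parsePlan(sql: String) = s"other-plan[$sql]"
    def parseExpression(sql: String) = s"other-expr[$sql]"
  }
  println(parser.parsePlan("SELECT 1"))    // other-plan[SELECT 1]
}
```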
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
index d8d21b06b8..10655a85cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DDLParser.scala
@@ -22,25 +22,30 @@ import scala.util.matching.Regex
import org.apache.spark.Logging
import org.apache.spark.sql.SaveMode
-import org.apache.spark.sql.catalyst.{AbstractSparkSQLParser, TableIdentifier}
+import org.apache.spark.sql.catalyst.{AbstractSparkSQLParser, ParserDialect, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
+import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.DataTypeParser
import org.apache.spark.sql.types._
-
/**
* A parser for foreign DDL commands.
*/
-class DDLParser(parseQuery: String => LogicalPlan)
+class DDLParser(fallback: => ParserDialect)
extends AbstractSparkSQLParser with DataTypeParser with Logging {
+ override def parseExpression(sql: String): Expression = fallback.parseExpression(sql)
+
+ override def parseTableIdentifier(sql: String): TableIdentifier =
+ fallback.parseTableIdentifier(sql)
+
def parse(input: String, exceptionOnError: Boolean): LogicalPlan = {
try {
- parse(input)
+ parsePlan(input)
} catch {
case ddlException: DDLException => throw ddlException
- case _ if !exceptionOnError => parseQuery(input)
+ case _ if !exceptionOnError => fallback.parsePlan(input)
case x: Throwable => throw x
}
}
@@ -104,7 +109,7 @@ class DDLParser(parseQuery: String => LogicalPlan)
SaveMode.ErrorIfExists
}
- val queryPlan = parseQuery(query.get)
+ val queryPlan = fallback.parsePlan(query.get)
CreateTableUsingAsSelect(tableIdent,
provider,
temp.isDefined,
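The reworked parse(input, exceptionOnError) above keeps the same control flow as before, but now falls back to the dialect's parsePlan instead of a raw parse function. A simplified sketch of that try-DDL-then-fall-back pattern, using stand-in types rather than the real DDLException and LogicalPlan:

```scala
object DdlFallbackSketch {
  import scala.util.control.NonFatal

  // Stand-in for DDLException: DDL-specific errors are always rethrown.
  class DdlError(msg: String) extends RuntimeException(msg)

  def parse(
      input: String,
      parseDdl: String => String,       // stand-in for the DDL grammar
      fallbackPlan: String => String,   // stand-in for fallback.parsePlan
      exceptionOnError: Boolean): String = {
    try parseDdl(input)
    catch {
      case e: DdlError => throw e
      case NonFatal(_) if !exceptionOnError => fallbackPlan(input)
    }
  }
}
```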
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index b8ea2261e9..8c2530fd68 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -22,7 +22,7 @@ import scala.reflect.runtime.universe.{typeTag, TypeTag}
import scala.util.Try
import org.apache.spark.annotation.Experimental
-import org.apache.spark.sql.catalyst.{ScalaReflection, SqlParser}
+import org.apache.spark.sql.catalyst.{CatalystQl, ScalaReflection}
import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedFunction}
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions._
@@ -1063,7 +1063,10 @@ object functions extends LegacyFunctions {
*
* @group normal_funcs
*/
- def expr(expr: String): Column = Column(SqlParser.parseExpression(expr))
+ def expr(expr: String): Column = {
+ val parser = SQLContext.getActive().map(_.getSQLDialect()).getOrElse(new CatalystQl())
+ Column(parser.parseExpression(expr))
+ }
//////////////////////////////////////////////////////////////////////////////////////////////
// Math Functions
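As a usage note on the expr change above: the string is now parsed through the active SQLContext's dialect, with a plain CatalystQl as the fallback when no context is active. A minimal sketch; the addOne helper and the column a are illustrative assumptions:

```scala
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._

object ExprUsageSketch {
  // expr() now delegates to SQLContext.getActive().map(_.getSQLDialect()),
  // falling back to a plain CatalystQl when no SQLContext is active.
  // The input DataFrame is assumed to have a numeric column `a`.
  def addOne(df: DataFrame): DataFrame =
    df.select(expr("a + 1").as("a_plus_one"))
}
```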
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala
index 58f982c2bc..aec450e0a6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala
@@ -212,7 +212,7 @@ class MathExpressionsSuite extends QueryTest with SharedSQLContext {
Seq(Row(5, 10, 0), Row(55, 60, 100), Row(555, 560, 600))
)
- val pi = 3.1415
+ val pi = "3.1415BD"
checkAnswer(
sql(s"SELECT round($pi, -3), round($pi, -2), round($pi, -1), " +
s"round($pi, 0), round($pi, 1), round($pi, 2), round($pi, 3)"),
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 03d67c4e91..75e81b9c91 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -21,10 +21,11 @@ import java.math.MathContext
import java.sql.Timestamp
import org.apache.spark.AccumulatorSuite
-import org.apache.spark.sql.catalyst.DefaultParserDialect
+import org.apache.spark.sql.catalyst.CatalystQl
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.errors.DialectException
-import org.apache.spark.sql.execution.aggregate
+import org.apache.spark.sql.catalyst.parser.ParserConf
+import org.apache.spark.sql.execution.{aggregate, SparkQl}
import org.apache.spark.sql.execution.joins.{CartesianProduct, SortMergeJoin}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.{SharedSQLContext, TestSQLContext}
@@ -32,7 +33,7 @@ import org.apache.spark.sql.test.SQLTestData._
import org.apache.spark.sql.types._
/** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultParserDialect
+class MyDialect(conf: ParserConf) extends CatalystQl(conf)
class SQLQuerySuite extends QueryTest with SharedSQLContext {
import testImplicits._
@@ -161,7 +162,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
newContext.sql("SELECT 1")
}
// test if the dialect set back to DefaultSQLDialect
- assert(newContext.getSQLDialect().getClass === classOf[DefaultParserDialect])
+ assert(newContext.getSQLDialect().getClass === classOf[SparkQl])
}
test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
@@ -586,7 +587,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("Allow only a single WITH clause per query") {
- intercept[RuntimeException] {
+ intercept[AnalysisException] {
sql(
"with q1 as (select * from testData) with q2 as (select * from q1) select * from q2")
}
@@ -602,8 +603,8 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("from follow multiple brackets") {
checkAnswer(sql(
"""
- |select key from ((select * from testData limit 1)
- | union all (select * from testData limit 1)) x limit 1
+ |select key from ((select * from testData)
+ | union all (select * from testData)) x limit 1
""".stripMargin),
Row(1)
)
@@ -616,7 +617,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
checkAnswer(sql(
"""
|select key from
- | (select * from testData limit 1 union all select * from testData limit 1) x
+ | (select * from testData union all select * from testData) x
| limit 1
""".stripMargin),
Row(1)
@@ -649,13 +650,13 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("approximate count distinct") {
checkAnswer(
- sql("SELECT APPROXIMATE COUNT(DISTINCT a) FROM testData2"),
+ sql("SELECT APPROX_COUNT_DISTINCT(a) FROM testData2"),
Row(3))
}
test("approximate count distinct with user provided standard deviation") {
checkAnswer(
- sql("SELECT APPROXIMATE(0.04) COUNT(DISTINCT a) FROM testData2"),
+ sql("SELECT APPROX_COUNT_DISTINCT(a, 0.04) FROM testData2"),
Row(3))
}
@@ -1192,19 +1193,19 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
test("Floating point number format") {
checkAnswer(
- sql("SELECT 0.3"), Row(BigDecimal(0.3).underlying())
+ sql("SELECT 0.3"), Row(0.3)
)
checkAnswer(
- sql("SELECT -0.8"), Row(BigDecimal(-0.8).underlying())
+ sql("SELECT -0.8"), Row(-0.8)
)
checkAnswer(
- sql("SELECT .5"), Row(BigDecimal(0.5))
+ sql("SELECT .5"), Row(0.5)
)
checkAnswer(
- sql("SELECT -.18"), Row(BigDecimal(-0.18))
+ sql("SELECT -.18"), Row(-0.18)
)
}
@@ -1218,11 +1219,11 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
)
checkAnswer(
- sql("SELECT 9223372036854775808"), Row(new java.math.BigDecimal("9223372036854775808"))
+ sql("SELECT 9223372036854775808BD"), Row(new java.math.BigDecimal("9223372036854775808"))
)
checkAnswer(
- sql("SELECT -9223372036854775809"), Row(new java.math.BigDecimal("-9223372036854775809"))
+ sql("SELECT -9223372036854775809BD"), Row(new java.math.BigDecimal("-9223372036854775809"))
)
}
@@ -1237,11 +1238,11 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
)
checkAnswer(
- sql("SELECT -5.2"), Row(BigDecimal(-5.2))
+ sql("SELECT -5.2BD"), Row(BigDecimal(-5.2))
)
checkAnswer(
- sql("SELECT +6.8"), Row(BigDecimal(6.8))
+ sql("SELECT +6.8"), Row(6.8d)
)
checkAnswer(
@@ -1616,20 +1617,20 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("decimal precision with multiply/division") {
- checkAnswer(sql("select 10.3 * 3.0"), Row(BigDecimal("30.90")))
- checkAnswer(sql("select 10.3000 * 3.0"), Row(BigDecimal("30.90000")))
- checkAnswer(sql("select 10.30000 * 30.0"), Row(BigDecimal("309.000000")))
- checkAnswer(sql("select 10.300000000000000000 * 3.000000000000000000"),
+ checkAnswer(sql("select 10.3BD * 3.0BD"), Row(BigDecimal("30.90")))
+ checkAnswer(sql("select 10.3000BD * 3.0BD"), Row(BigDecimal("30.90000")))
+ checkAnswer(sql("select 10.30000BD * 30.0BD"), Row(BigDecimal("309.000000")))
+ checkAnswer(sql("select 10.300000000000000000BD * 3.000000000000000000BD"),
Row(BigDecimal("30.900000000000000000000000000000000000", new MathContext(38))))
- checkAnswer(sql("select 10.300000000000000000 * 3.0000000000000000000"),
+ checkAnswer(sql("select 10.300000000000000000BD * 3.0000000000000000000BD"),
Row(null))
- checkAnswer(sql("select 10.3 / 3.0"), Row(BigDecimal("3.433333")))
- checkAnswer(sql("select 10.3000 / 3.0"), Row(BigDecimal("3.4333333")))
- checkAnswer(sql("select 10.30000 / 30.0"), Row(BigDecimal("0.343333333")))
- checkAnswer(sql("select 10.300000000000000000 / 3.00000000000000000"),
+ checkAnswer(sql("select 10.3BD / 3.0BD"), Row(BigDecimal("3.433333")))
+ checkAnswer(sql("select 10.3000BD / 3.0BD"), Row(BigDecimal("3.4333333")))
+ checkAnswer(sql("select 10.30000BD / 30.0BD"), Row(BigDecimal("0.343333333")))
+ checkAnswer(sql("select 10.300000000000000000BD / 3.00000000000000000BD"),
Row(BigDecimal("3.433333333333333333333333333", new MathContext(38))))
- checkAnswer(sql("select 10.3000000000000000000 / 3.00000000000000000"),
+ checkAnswer(sql("select 10.3000000000000000000BD / 3.00000000000000000BD"),
Row(BigDecimal("3.4333333333333333333333333333", new MathContext(38))))
}
@@ -1655,13 +1656,13 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
}
test("precision smaller than scale") {
- checkAnswer(sql("select 10.00"), Row(BigDecimal("10.00")))
- checkAnswer(sql("select 1.00"), Row(BigDecimal("1.00")))
- checkAnswer(sql("select 0.10"), Row(BigDecimal("0.10")))
- checkAnswer(sql("select 0.01"), Row(BigDecimal("0.01")))
- checkAnswer(sql("select 0.001"), Row(BigDecimal("0.001")))
- checkAnswer(sql("select -0.01"), Row(BigDecimal("-0.01")))
- checkAnswer(sql("select -0.001"), Row(BigDecimal("-0.001")))
+ checkAnswer(sql("select 10.00BD"), Row(BigDecimal("10.00")))
+ checkAnswer(sql("select 1.00BD"), Row(BigDecimal("1.00")))
+ checkAnswer(sql("select 0.10BD"), Row(BigDecimal("0.10")))
+ checkAnswer(sql("select 0.01BD"), Row(BigDecimal("0.01")))
+ checkAnswer(sql("select 0.001BD"), Row(BigDecimal("0.001")))
+ checkAnswer(sql("select -0.01BD"), Row(BigDecimal("-0.01")))
+ checkAnswer(sql("select -0.001BD"), Row(BigDecimal("-0.001")))
}
test("external sorting updates peak execution memory") {
@@ -1750,7 +1751,7 @@ class SQLQuerySuite extends QueryTest with SharedSQLContext {
assert(e1.message.contains("Table not found"))
val e2 = intercept[AnalysisException] {
- sql("select * from no_db.no_table")
+ sql("select * from no_db.no_table").show()
}
assert(e2.message.contains("Table not found"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 860e07c68c..e70eb2a060 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -442,13 +442,13 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
- sql("select num_str + 1.2 from jsonTable where num_str > 14"),
+ sql("select num_str + 1.2BD from jsonTable where num_str > 14"),
Row(BigDecimal("92233720368547758071.2"))
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
- sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
+ sql("select num_str + 1.2BD from jsonTable where num_str >= 92233720368547758060BD"),
Row(new java.math.BigDecimal("92233720368547758071.2"))
)
@@ -856,7 +856,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
jsonWithSimpleMap.registerTempTable("jsonWithSimpleMap")
checkAnswer(
- sql("select map from jsonWithSimpleMap"),
+ sql("select `map` from jsonWithSimpleMap"),
Row(Map("a" -> 1)) ::
Row(Map("b" -> 2)) ::
Row(Map("c" -> 3)) ::
@@ -865,7 +865,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
)
checkAnswer(
- sql("select map['c'] from jsonWithSimpleMap"),
+ sql("select `map`['c'] from jsonWithSimpleMap"),
Row(null) ::
Row(null) ::
Row(3) ::
@@ -884,7 +884,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
jsonWithComplexMap.registerTempTable("jsonWithComplexMap")
checkAnswer(
- sql("select map from jsonWithComplexMap"),
+ sql("select `map` from jsonWithComplexMap"),
Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
Row(Map("b" -> Row(null, 2))) ::
Row(Map("c" -> Row(Seq(), 4))) ::
@@ -894,7 +894,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
)
checkAnswer(
- sql("select map['a'].field1, map['c'].field2 from jsonWithComplexMap"),
+ sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
Row(Seq(1, 2, 3, null), null) ::
Row(null, null) ::
Row(null, 4) ::