From 16696759e9a292378cbfdf695a63d6d0cff0d79a Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Mon, 11 May 2015 22:06:56 -0700
Subject: [SQL] Rename Dialect -> ParserDialect.

Author: Reynold Xin

Closes #6071 from rxin/parserdialect and squashes the following commits:

ca2eb31 [Reynold Xin] Rename Dialect -> ParserDialect.
---
 .../src/main/scala/org/apache/spark/sql/SQLContext.scala    | 12 ++++++------
 .../src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 28fc9d0443..648021806f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.catalyst.errors.DialectException
 import org.apache.spark.sql.catalyst.optimizer.{DefaultOptimizer, Optimizer}
 import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
 import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection, expressions}
 import org.apache.spark.sql.execution.{Filter, _}
 import org.apache.spark.sql.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
@@ -49,7 +49,7 @@ import org.apache.spark.{Partition, SparkContext}
 
 /**
  * Currently we support the default dialect named "sql", associated with the class
- * [[DefaultDialect]]
+ * [[DefaultParserDialect]]
  *
  * And we can also provide custom SQL Dialect, for example in Spark SQL CLI:
  * {{{
@@ -74,7 +74,7 @@ import org.apache.spark.{Partition, SparkContext}
 *-- "hiveql" (for HiveContext)
  * }}}
  */
-private[spark] class DefaultDialect extends Dialect {
+private[spark] class DefaultParserDialect extends ParserDialect {
   @transient
   protected val sqlParser = new catalyst.SqlParser
 
@@ -176,10 +176,10 @@ class SQLContext(@transient val sparkContext: SparkContext)
   @transient
   protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_))
 
-  protected[sql] def getSQLDialect(): Dialect = {
+  protected[sql] def getSQLDialect(): ParserDialect = {
     try {
       val clazz = Utils.classForName(dialectClassName)
-      clazz.newInstance().asInstanceOf[Dialect]
+      clazz.newInstance().asInstanceOf[ParserDialect]
     } catch {
       case NonFatal(e) =>
         // Since we didn't find the available SQL Dialect, it will fail even for SET command:
@@ -209,7 +209,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
   protected[sql] val defaultSession = createSession()
 
   protected[sql] def dialectClassName = if (conf.dialect == "sql") {
-    classOf[DefaultDialect].getCanonicalName
+    classOf[DefaultParserDialect].getCanonicalName
   } else {
     conf.dialect
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index b44eb223c8..ec0e76cde6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.test.TestSQLContext.{udf => _, _}
 import org.apache.spark.sql.types._
 
 /** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect
 
 class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
   // Make sure the tables are loaded.
@@ -94,7 +94,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
       newContext.sql("SELECT 1")
     }
     // test if the dialect set back to DefaultSQLDialect
-    assert(newContext.getSQLDialect().getClass === classOf[DefaultDialect])
+    assert(newContext.getSQLDialect().getClass === classOf[DefaultParserDialect])
   }
 
   test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
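
For context, a minimal sketch of what a user-defined dialect could look like after this rename. This is not part of the patch: MyCompanyDialect is a hypothetical name, and the parse(sqlText: String): LogicalPlan signature is inferred from the SparkSQLParser(getSQLDialect().parse(_)) call and the DefaultParserDialect body above. Since getSQLDialect() creates the dialect via clazz.newInstance(), a public no-argument constructor is required.

import org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.ParserDialect
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

// Hypothetical custom dialect under the renamed API (sketch only, not part of
// this patch). Must have a no-arg constructor because SQLContext instantiates
// the class named by spark.sql.dialect reflectively with newInstance().
class MyCompanyDialect extends ParserDialect {
  @transient
  protected val sqlParser = new catalyst.SqlParser

  // Delegate to the built-in parser, mirroring DefaultParserDialect; a real
  // dialect would substitute its own grammar here.
  override def parse(sqlText: String): LogicalPlan = sqlParser.parse(sqlText)
}

It would then be selected the same way the SQLContext scaladoc above describes, e.g. SET spark.sql.dialect=com.mycompany.MyCompanyDialect (class name hypothetical); an unloadable class falls back to "sql" or "hiveql" per the NonFatal handler in getSQLDialect().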