author    Reynold Xin <rxin@databricks.com>    2015-05-11 22:06:56 -0700
committer Reynold Xin <rxin@databricks.com>    2015-05-11 22:06:56 -0700
commit    16696759e9a292378cbfdf695a63d6d0cff0d79a
tree      0a2a442c7fd2a58e899208ccf168354a0e48b0e4 /sql
parent    b94a93371cf219a88edee7677d22f1eaefc1ea5b
[SQL] Rename Dialect -> ParserDialect.
Author: Reynold Xin <rxin@databricks.com>

Closes #6071 from rxin/parserdialect and squashes the following commits:

ca2eb31 [Reynold Xin] Rename Dialect -> ParserDialect.
Diffstat (limited to 'sql')
 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala (renamed from sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala) |  2 +-
 sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 12 ++++++------
 sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala |  4 ++--
 sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala |  4 ++--
 sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala |  6 +++---
 5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
index 977003493d..05a92b06f9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/Dialect.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
*
*/
@DeveloperApi
-abstract class Dialect {
+abstract class ParserDialect {
// this is the main function that will be implemented by sql parser.
def parse(sqlText: String): LogicalPlan
}
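For context, a third-party dialect under the renamed API only has to implement parse(). The sketch below is not part of this commit: the package and class name MyParserDialect are hypothetical, and it simply delegates to Catalyst's SqlParser, mirroring what DefaultParserDialect does in SQLContext.scala further down. Note that these internals carry private[sql]/private[spark] modifiers in this era of Spark, so real custom dialects typically lived inside Spark's own namespaces, as the MyDialect test classes below do.

    package org.example.sql  // hypothetical package

    import org.apache.spark.sql.catalyst
    import org.apache.spark.sql.catalyst.ParserDialect
    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

    // A minimal custom dialect: parse() is the single abstract method that
    // ParserDialect (formerly Dialect) requires.
    class MyParserDialect extends ParserDialect {
      @transient
      protected val sqlParser = new catalyst.SqlParser  // assumes SqlParser is visible here

      override def parse(sqlText: String): LogicalPlan = sqlParser.parse(sqlText)
    }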
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 28fc9d0443..648021806f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.catalyst.optimizer.{DefaultOptimizer, Optimizer}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection, expressions}
import org.apache.spark.sql.execution.{Filter, _}
import org.apache.spark.sql.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
@@ -49,7 +49,7 @@ import org.apache.spark.{Partition, SparkContext}
/**
* Currently we support the default dialect named "sql", associated with the class
- * [[DefaultDialect]]
+ * [[DefaultParserDialect]]
*
* And we can also provide custom SQL Dialect, for example in Spark SQL CLI:
* {{{
@@ -74,7 +74,7 @@ import org.apache.spark.{Partition, SparkContext}
* -- "hiveql" (for HiveContext)
* }}}
*/
-private[spark] class DefaultDialect extends Dialect {
+private[spark] class DefaultParserDialect extends ParserDialect {
@transient
protected val sqlParser = new catalyst.SqlParser
@@ -176,10 +176,10 @@ class SQLContext(@transient val sparkContext: SparkContext)
@transient
protected[sql] val sqlParser = new SparkSQLParser(getSQLDialect().parse(_))
- protected[sql] def getSQLDialect(): Dialect = {
+ protected[sql] def getSQLDialect(): ParserDialect = {
try {
val clazz = Utils.classForName(dialectClassName)
- clazz.newInstance().asInstanceOf[Dialect]
+ clazz.newInstance().asInstanceOf[ParserDialect]
} catch {
case NonFatal(e) =>
// Since we didn't find the available SQL Dialect, it will fail even for SET command:
@@ -209,7 +209,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
protected[sql] val defaultSession = createSession()
protected[sql] def dialectClassName = if (conf.dialect == "sql") {
- classOf[DefaultDialect].getCanonicalName
+ classOf[DefaultParserDialect].getCanonicalName
} else {
conf.dialect
}
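The renamed plumbing above is driven by the spark.sql.dialect setting, as the scaladoc in this file describes. A hedged usage sketch (sqlContext and the custom class name are assumptions, not part of the commit):

    // "sql" is the built-in name that dialectClassName maps to DefaultParserDialect.
    sqlContext.sql("SET spark.sql.dialect=sql")

    // Any other value is treated as a fully qualified class name, loaded
    // reflectively and cast to ParserDialect by getSQLDialect().
    sqlContext.sql("SET spark.sql.dialect=org.example.sql.MyParserDialect")
    sqlContext.sql("SELECT 1")  // parsed by the hypothetical MyParserDialect

    // If instantiation fails, getSQLDialect() falls back to the default dialect
    // before rethrowing -- the behavior the test suites below assert.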
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index b44eb223c8..ec0e76cde6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.test.TestSQLContext.{udf => _, _}
import org.apache.spark.sql.types._
/** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect
class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
// Make sure the tables are loaded.
@@ -94,7 +94,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
newContext.sql("SELECT 1")
}
// test if the dialect set back to DefaultSQLDialect
- assert(newContext.getSQLDialect().getClass === classOf[DefaultDialect])
+ assert(newContext.getSQLDialect().getClass === classOf[DefaultParserDialect])
}
test("SPARK-4625 support SORT BY in SimpleSQLParser & DSL") {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 3bab648e31..61e8c154e8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -22,7 +22,7 @@ import java.sql.Timestamp
import java.util.{ArrayList => JArrayList}
import org.apache.hadoop.hive.ql.parse.VariableSubstitution
-import org.apache.spark.sql.catalyst.Dialect
+import org.apache.spark.sql.catalyst.ParserDialect
import scala.collection.JavaConversions._
import scala.language.implicitConversions
@@ -54,7 +54,7 @@ import org.apache.spark.util.Utils
/**
* This is the HiveQL Dialect, this dialect is strongly bind with HiveContext
*/
-private[hive] class HiveQLDialect extends Dialect {
+private[hive] class HiveQLDialect extends ParserDialect {
override def parse(sqlText: String): LogicalPlan = {
HiveQl.parseSql(sqlText)
}
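For context (the override sits outside the hunks shown here): HiveContext maps the built-in "hiveql" dialect name to this class, following the dialectClassName pattern from SQLContext above. A sketch of that mapping, stated as an assumption about the surrounding file rather than as part of this diff:

    // In HiveContext: route "hiveql" to HiveQLDialect, and defer anything else
    // (including "sql") to SQLContext's resolution.
    protected[sql] override def dialectClassName = if (conf.dialect == "hiveql") {
      classOf[HiveQLDialect].getCanonicalName
    } else {
      super.dialectClassName
    }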
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 0d739dead4..a5744ccc68 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.execution
import org.apache.spark.sql.catalyst.analysis.EliminateSubQueries
import org.apache.spark.sql.catalyst.errors.DialectException
-import org.apache.spark.sql.DefaultDialect
+import org.apache.spark.sql.DefaultParserDialect
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SQLConf}
import org.apache.spark.sql.hive.MetastoreRelation
import org.apache.spark.sql.hive.test.TestHive
@@ -53,7 +53,7 @@ case class WindowData(
area: String,
product: Int)
/** A SQL Dialect for testing purpose, and it can not be nested type */
-class MyDialect extends DefaultDialect
+class MyDialect extends DefaultParserDialect
/**
* A collection of hive query tests where we generate the answers ourselves instead of depending on
@@ -247,7 +247,7 @@ class SQLQuerySuite extends QueryTest {
// set the dialect back to the DefaultSQLDialect
sql("SET spark.sql.dialect=sql")
- assert(getSQLDialect().getClass === classOf[DefaultDialect])
+ assert(getSQLDialect().getClass === classOf[DefaultParserDialect])
sql("SET spark.sql.dialect=hiveql")
assert(getSQLDialect().getClass === classOf[HiveQLDialect])