aboutsummaryrefslogtreecommitdiff
path: root/sql/catalyst
diff options
context:
space:
mode:
authorAndrew Or <andrew@databricks.com>2016-03-11 15:13:48 -0800
committerYin Huai <yhuai@databricks.com>2016-03-11 15:13:48 -0800
commit66d9d0edfef986895490bcdeacbc0ca38e091702 (patch)
tree84037a13e7040fa88e0ace5aab18087d3e206e95 /sql/catalyst
parent42afd72c654318e9fb1f2a204198221e797c2485 (diff)
downloadspark-66d9d0edfef986895490bcdeacbc0ca38e091702.tar.gz
spark-66d9d0edfef986895490bcdeacbc0ca38e091702.tar.bz2
spark-66d9d0edfef986895490bcdeacbc0ca38e091702.zip
[SPARK-13139][SQL] Parse Hive DDL commands ourselves
## What changes were proposed in this pull request? This patch is ported over from viirya's changes in #11048. Currently for most DDLs we just pass the query text directly to Hive. Instead, we should parse these commands ourselves and in the future (not part of this patch) use the `HiveCatalog` to process these DDLs. This is a precursor to merging `SQLContext` and `HiveContext`. Note: As of this patch we still pass the query text to Hive. The difference is that we now parse the commands ourselves so in the future we can just use our own catalog. ## How was this patch tested? Jenkins, new `DDLCommandSuite`, which comprises about 40% of the changes here. Author: Andrew Or <andrew@databricks.com> Closes #11573 from andrewor14/parser-plus-plus.
Diffstat (limited to 'sql/catalyst')
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala11
1 file changed, 10 insertions, 1 deletion
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
index c105b53f1f..0c2e481954 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
@@ -17,6 +17,7 @@
package org.apache.spark.sql.catalyst.parser
+import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.trees.CurrentOrigin
import org.apache.spark.sql.types._
@@ -29,6 +30,7 @@ import org.apache.spark.sql.types._
object ParserUtils {
object Token {
+ // Match on (text, children)
def unapply(node: ASTNode): Some[(String, List[ASTNode])] = {
CurrentOrigin.setPosition(node.line, node.positionInLine)
node.pattern
@@ -160,7 +162,14 @@ object ParserUtils {
}
/**
- * Throw an exception because we cannot parse the given node.
+ * Throw an exception because we cannot parse the given node for some unexpected reason.
+ */
+ def parseFailed(msg: String, node: ASTNode): Nothing = {
+ // Quote the offending source text on both sides so the message reads: msg: 'source'
+ throw new AnalysisException(s"$msg: '${node.source}'")
+ }
+
+ /**
+ * Throw an exception because there are no rules to parse the node.
*/
def noParseRule(msg: String, node: ASTNode): Nothing = {
throw new NotImplementedError(