about summary refs log tree commit diff
path: root/sql/core/src
diff options
context:
space:
mode:
authorYin Huai <yhuai@databricks.com>2016-04-15 17:48:41 -0700
committerReynold Xin <rxin@databricks.com>2016-04-15 17:48:41 -0700
commitb2dfa849599843269a43e6e0f2ab8c539dfc32b6 (patch)
tree7ab85d25e84b91529b5ef7aa310154a645027c56 /sql/core/src
parent4df65184b6b865a26e4d5c99bbfd3c24ab7179dc (diff)
downloadspark-b2dfa849599843269a43e6e0f2ab8c539dfc32b6.tar.gz
spark-b2dfa849599843269a43e6e0f2ab8c539dfc32b6.tar.bz2
spark-b2dfa849599843269a43e6e0f2ab8c539dfc32b6.zip
[SPARK-14668][SQL] Move CurrentDatabase to Catalyst
## What changes were proposed in this pull request?

This PR moves `CurrentDatabase` from sql/hive package to sql/catalyst. It also adds the function description, which looks like the following.

```
scala> sqlContext.sql("describe function extended current_database").collect.foreach(println)
[Function: current_database]
[Class: org.apache.spark.sql.execution.command.CurrentDatabase]
[Usage: current_database() - Returns the current database.]
[Extended Usage:
> SELECT current_database()]
```

## How was this patch tested?

Existing tests

Author: Yin Huai <yhuai@databricks.com>

Closes #12424 from yhuai/SPARK-14668.
Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala | 7
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala | 2
2 files changed, 7 insertions, 2 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala
index cbde777d98..8dfbba779d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala
@@ -18,9 +18,14 @@
package org.apache.spark.sql.execution
import org.apache.spark.sql.ExperimentalMethods
+import org.apache.spark.sql.catalyst.CatalystConf
+import org.apache.spark.sql.catalyst.catalog.SessionCatalog
import org.apache.spark.sql.catalyst.optimizer.Optimizer
-class SparkOptimizer(experimentalMethods: ExperimentalMethods) extends Optimizer {
+class SparkOptimizer(
+ conf: CatalystConf,
+ sessionCatalog: SessionCatalog,
+ experimentalMethods: ExperimentalMethods) extends Optimizer(conf, sessionCatalog) {
override def batches: Seq[Batch] = super.batches :+ Batch(
"User Provided Optimizers", FixedPoint(100), experimentalMethods.extraOptimizations: _*)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
index 69e3358d4e..10497e4fdf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SessionState.scala
@@ -80,7 +80,7 @@ private[sql] class SessionState(ctx: SQLContext) {
/**
* Logical query plan optimizer.
*/
- lazy val optimizer: Optimizer = new SparkOptimizer(experimentalMethods)
+ lazy val optimizer: Optimizer = new SparkOptimizer(conf, catalog, experimentalMethods)
/**
* Parser that extracts expressions, plans, table identifiers etc. from SQL texts.