author    Reynold Xin <rxin@databricks.com>    2016-03-14 23:58:57 -0700
committer Reynold Xin <rxin@databricks.com>    2016-03-14 23:58:57 -0700
commit 276c2d51a3bbe2531763a11580adfec7e39fdd58 (patch)
tree   e2e6d063986795847167109e6d58464d1b376a39 /sql/hive
parent a51f877b5dc56b7bb9ef95044a50024c6b64718e (diff)
[SPARK-13890][SQL] Remove some internal classes' dependency on SQLContext
## What changes were proposed in this pull request?

In general it is better for internal classes not to depend on the external class (in this case SQLContext), to reduce coupling between user-facing APIs and the internal implementations. This patch removes the SQLContext dependency from some internal classes such as SparkPlanner and SparkOptimizer. As part of this patch, I also removed the following internal methods from SQLContext:

```
protected[sql] def functionRegistry: FunctionRegistry
protected[sql] def optimizer: Optimizer
protected[sql] def sqlParser: ParserInterface
protected[sql] def planner: SparkPlanner
protected[sql] def continuousQueryManager
protected[sql] def prepareForExecution: RuleExecutor[SparkPlan]
```

## How was this patch tested?

Existing unit/integration tests.

Author: Reynold Xin <rxin@databricks.com>

Closes #11712 from rxin/sqlContext-planner.
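The core idea of the patch is constructor injection of only the narrow dependencies an internal class actually uses, instead of handing it the whole user-facing context. Below is a minimal, self-contained sketch of that pattern; the types are simplified stand-ins for illustration, not the actual Spark classes.

```scala
// Simplified stand-in types; not the real Spark classes.
trait Strategy
class SQLConf
class ExperimentalMethods {
  var extraStrategies: Seq[Strategy] = Nil
}

// Before: internals reached through the user-facing context, e.g.
//   class SparkPlanner(ctx: SQLContext) {
//     def strategies = ctx.experimental.extraStrategies ++ ...
//   }

// After: the planner declares exactly the dependencies it uses.
class SparkPlanner(
    val conf: SQLConf,
    val experimentalMethods: ExperimentalMethods) {
  protected def defaultStrategies: Seq[Strategy] = Nil
  def strategies: Seq[Strategy] =
    experimentalMethods.extraStrategies ++ defaultStrategies
}
```

In the actual patch the concrete dependencies passed to SparkPlanner are the SparkContext, the conf, and the experimental methods, as the HiveSessionState hunk below shows.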
Diffstat (limited to 'sql/hive')
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala        | 6
-rw-r--r-- sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala   | 4
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala | 2
3 files changed, 6 insertions, 6 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 8244dd4230..a78b7b0cc4 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -348,12 +348,12 @@ class HiveContext private[hive](
* @since 1.3.0
*/
def refreshTable(tableName: String): Unit = {
- val tableIdent = sqlParser.parseTableIdentifier(tableName)
+ val tableIdent = sessionState.sqlParser.parseTableIdentifier(tableName)
catalog.refreshTable(tableIdent)
}
protected[hive] def invalidateTable(tableName: String): Unit = {
- val tableIdent = sqlParser.parseTableIdentifier(tableName)
+ val tableIdent = sessionState.sqlParser.parseTableIdentifier(tableName)
catalog.invalidateTable(tableIdent)
}
@@ -367,7 +367,7 @@ class HiveContext private[hive](
* @since 1.2.0
*/
def analyze(tableName: String) {
- val tableIdent = sqlParser.parseTableIdentifier(tableName)
+ val tableIdent = sessionState.sqlParser.parseTableIdentifier(tableName)
val relation = EliminateSubqueryAliases(catalog.lookupRelation(tableIdent))
relation match {
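All three call sites above make the same substitution: the parser is no longer a protected member of the context but is reached through the per-session state. A hedged sketch of the access pattern, again with stand-in types rather than the real Spark API:

```scala
// Stand-in types illustrating the accessor move; not the real Spark API.
case class TableIdentifier(table: String)

class ParserInterface {
  def parseTableIdentifier(name: String): TableIdentifier =
    TableIdentifier(name)
}

class SessionState {
  val sqlParser: ParserInterface = new ParserInterface
}

// Context methods delegate through the per-session state instead of
// holding a protected sqlParser member themselves.
class Context {
  val sessionState = new SessionState
  def refreshTable(tableName: String): Unit = {
    val ident = sessionState.sqlParser.parseTableIdentifier(tableName)
    println(s"refreshing cached metadata for ${ident.table}") // stand-in for catalog.refreshTable
  }
}
```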
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index cbb6333336..d9cd96d66f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -74,11 +74,11 @@ private[hive] class HiveSessionState(ctx: HiveContext) extends SessionState(ctx)
* Planner that takes into account Hive-specific strategies.
*/
override lazy val planner: SparkPlanner = {
- new SparkPlanner(ctx) with HiveStrategies {
+ new SparkPlanner(ctx.sparkContext, conf, experimentalMethods) with HiveStrategies {
override val hiveContext = ctx
override def strategies: Seq[Strategy] = {
- ctx.experimental.extraStrategies ++ Seq(
+ experimentalMethods.extraStrategies ++ Seq(
FileSourceStrategy,
DataSourceStrategy,
HiveCommandStrategy(ctx),
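With SparkPlanner taking its configuration and experimental methods explicitly, Hive-specific behavior stays a mixin while the planner no longer needs the full context. A rough sketch of the wiring, using simplified stand-ins rather than Spark's actual traits:

```scala
// Simplified stand-ins; the real SparkPlanner/HiveStrategies differ.
trait Strategy
class ExperimentalMethods {
  var extraStrategies: Seq[Strategy] = Nil
}

class SparkPlanner(val experimentalMethods: ExperimentalMethods) {
  def strategies: Seq[Strategy] = Nil
}

trait HiveStrategies { self: SparkPlanner =>
  // Hive-specific strategies would be defined here.
  def hiveSpecific: Seq[Strategy] = Nil
}

object PlannerWiring {
  // The session state builds the planner from narrow dependencies and
  // prepends user-registered extensions, mirroring the diff above in shape.
  val experimental = new ExperimentalMethods
  val planner: SparkPlanner =
    new SparkPlanner(experimental) with HiveStrategies {
      override def strategies: Seq[Strategy] =
        experimentalMethods.extraStrategies ++ hiveSpecific
    }
}
```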
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
index d77c88fa4b..33c1bb059e 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala
@@ -69,7 +69,7 @@ class BucketedWriteSuite extends QueryTest with SQLTestUtils with TestHiveSingle
private val df = (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k")
def tableDir: File = {
- val identifier = hiveContext.sqlParser.parseTableIdentifier("bucketed_table")
+ val identifier = hiveContext.sessionState.sqlParser.parseTableIdentifier("bucketed_table")
new File(URI.create(hiveContext.catalog.hiveDefaultTableFilePath(identifier)))
}