author    Takuya UESHIN <ueshin@happy-camper.st>    2014-10-20 17:09:12 -0700
committer Michael Armbrust <michael@databricks.com> 2014-10-20 17:09:12 -0700
commit    7586e2e67ad45007f78803179b04d199c174bd69 (patch)
tree      051549bc84e26b149dff02b9260f822f5dbff631 /sql/core
parent    fce1d41611fdb27956c3394a706ed14960182a83 (diff)
[SPARK-3969][SQL] Optimizer should have a super class as an interface.
Some developers want to replace `Optimizer` with one that fits their own projects, but currently cannot because `Optimizer` is an `object`.

Author: Takuya UESHIN <ueshin@happy-camper.st>

Closes #2825 from ueshin/issues/SPARK-3969 and squashes the following commits:

abbc53c [Takuya UESHIN] Re-rename Optimizer object.
4d2e1bc [Takuya UESHIN] Rename Optimizer object.
9547a23 [Takuya UESHIN] Extract abstract class from Optimizer for developers to be able to replace Optimizer.
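For illustration, here is a minimal sketch of what this change enables: with `Optimizer` as an abstract class, a project can define its own rule batches instead of being bound to the single `Optimizer` object (now `DefaultOptimizer`). The rule and object names below are hypothetical placeholders, not part of this patch:

    import org.apache.spark.sql.catalyst.optimizer.Optimizer
    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
    import org.apache.spark.sql.catalyst.rules.Rule

    // Hypothetical project-specific rule; the body is a no-op placeholder.
    object MyCustomRule extends Rule[LogicalPlan] {
      def apply(plan: LogicalPlan): LogicalPlan = plan
    }

    // Optimizer extends RuleExecutor[LogicalPlan], so Batch and Once are
    // inherited members; a subclass only has to supply its rule batches.
    object MyOptimizer extends Optimizer {
      val batches =
        Batch("Custom rules", Once, MyCustomRule) :: Nil
    }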
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 23e7b2d270..0e4a9ca60b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.catalyst.ScalaReflection
 import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.dsl.ExpressionConversions
 import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.catalyst.optimizer.Optimizer
+import org.apache.spark.sql.catalyst.optimizer.{Optimizer, DefaultOptimizer}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.catalyst.rules.RuleExecutor
 import org.apache.spark.sql.catalyst.types.DataType
@@ -68,7 +68,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
     new Analyzer(catalog, functionRegistry, caseSensitive = true)
 
   @transient
-  protected[sql] val optimizer = Optimizer
+  protected[sql] lazy val optimizer: Optimizer = DefaultOptimizer
 
   @transient
   protected[sql] val sqlParser = {
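Since `optimizer` is now a `lazy val` typed as `Optimizer`, a subclass of `SQLContext` can swap in its own implementation. A hedged sketch using the hypothetical `MyOptimizer` above; note that because the member is `protected[sql]`, the subclass must live in the `org.apache.spark.sql` package:

    package org.apache.spark.sql

    import org.apache.spark.SparkContext
    import org.apache.spark.sql.catalyst.optimizer.Optimizer

    // Hypothetical SQLContext subclass that plugs in a custom optimizer
    // in place of DefaultOptimizer.
    class CustomSQLContext(sc: SparkContext) extends SQLContext(sc) {
      @transient
      override protected[sql] lazy val optimizer: Optimizer = MyOptimizer
    }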