aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2015-07-17 15:02:13 -0700
committerMichael Armbrust <michael@databricks.com>2015-07-17 15:02:13 -0700
commitb2aa490bb60176631c94ecadf87c14564960f12c (patch)
tree2af0af6bc45ae590475aababb9f489d849bff924
parent42d8a012f6652df1fa3f560f87c53731ea070640 (diff)
downloadspark-b2aa490bb60176631c94ecadf87c14564960f12c.tar.gz
spark-b2aa490bb60176631c94ecadf87c14564960f12c.tar.bz2
spark-b2aa490bb60176631c94ecadf87c14564960f12c.zip
[SPARK-9142] [SQL] Removing unnecessary self types in Catalyst.
Just a small change to add Product type to the base expression/plan abstract classes, based on suggestions on #7434 and offline discussions. Author: Reynold Xin <rxin@databricks.com> Closes #7479 from rxin/remove-self-types and squashes the following commits: e407ffd [Reynold Xin] [SPARK-9142][SQL] Removing unnecessary self types in Catalyst.
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala1
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala7
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregates.scala3
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala1
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionals.scala1
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala2
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/math.scala5
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala4
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala3
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/random.scala1
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala2
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala9
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala2
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/partitioning.scala2
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala9
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala2
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala2
-rw-r--r--sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala2
18 files changed, 9 insertions, 49 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
index 7089f079b6..4a1a1ed61e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
@@ -96,7 +96,6 @@ case class UnresolvedFunction(name: String, children: Seq[Expression]) extends E
* "SELECT * FROM ...". A [[Star]] gets automatically expanded during analysis.
*/
abstract class Star extends LeafExpression with NamedExpression {
- self: Product =>
override def name: String = throw new UnresolvedException(this, "name")
override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index f396bd08a8..c70b5af4aa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -43,8 +43,7 @@ import org.apache.spark.sql.types._
*
* See [[Substring]] for an example.
*/
-abstract class Expression extends TreeNode[Expression] {
- self: Product =>
+abstract class Expression extends TreeNode[Expression] with Product {
/**
* Returns true when an expression is a candidate for static evaluation before the query is
@@ -187,7 +186,6 @@ abstract class Expression extends TreeNode[Expression] {
* A leaf expression, i.e. one without any child expressions.
*/
abstract class LeafExpression extends Expression {
- self: Product =>
def children: Seq[Expression] = Nil
}
@@ -198,7 +196,6 @@ abstract class LeafExpression extends Expression {
* if the input is evaluated to null.
*/
abstract class UnaryExpression extends Expression {
- self: Product =>
def child: Expression
@@ -277,7 +274,6 @@ abstract class UnaryExpression extends Expression {
* if any input is evaluated to null.
*/
abstract class BinaryExpression extends Expression {
- self: Product =>
def left: Expression
def right: Expression
@@ -370,7 +366,6 @@ abstract class BinaryExpression extends Expression {
* the analyzer will find the tightest common type and do the proper type casting.
*/
abstract class BinaryOperator extends BinaryExpression with ExpectsInputTypes {
- self: Product =>
/**
* Expected input type from both left/right child expressions, similar to the
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregates.scala
index 71c943dc79..af9a674ab4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregates.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregates.scala
@@ -27,7 +27,6 @@ import org.apache.spark.sql.types._
import org.apache.spark.util.collection.OpenHashSet
trait AggregateExpression extends Expression {
- self: Product =>
/**
* Aggregate expressions should not be foldable.
@@ -65,7 +64,6 @@ case class SplitEvaluation(
* These partial evaluations can then be combined to compute the actual answer.
*/
trait PartialAggregate extends AggregateExpression {
- self: Product =>
/**
* Returns a [[SplitEvaluation]] that computes this aggregation using partial aggregation.
@@ -79,7 +77,6 @@ trait PartialAggregate extends AggregateExpression {
*/
abstract class AggregateFunction
extends LeafExpression with AggregateExpression with Serializable {
- self: Product =>
/** Base should return the generic aggregate expression that this function is computing */
val base: AggregateExpression
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
index 1616d1bc0a..c5960eb390 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
@@ -77,7 +77,6 @@ case class Abs(child: Expression) extends UnaryExpression with ExpectsInputTypes
}
abstract class BinaryArithmetic extends BinaryOperator {
- self: Product =>
override def dataType: DataType = left.dataType
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionals.scala
index 9162b73fe5..15b33da884 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionals.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionals.scala
@@ -77,7 +77,6 @@ case class If(predicate: Expression, trueValue: Expression, falseValue: Expressi
}
trait CaseWhenLike extends Expression {
- self: Product =>
// Note that `branches` are considered in consecutive pairs (cond, val), and the optional last
// element is the value for the default catch-all case (if provided).
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala
index 51dc77ee3f..c58a6d3614 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/generators.scala
@@ -40,7 +40,7 @@ import org.apache.spark.sql.types._
* requested. The attributes produced by this function will be automatically copied anytime rules
* result in changes to the Generator or its children.
*/
-trait Generator extends Expression { self: Product =>
+trait Generator extends Expression {
// TODO ideally we should return the type of ArrayType(StructType),
// however, we don't keep the output field names in the Generator.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/math.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/math.scala
index 7a543ff36a..b05a7b3ed0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/math.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/math.scala
@@ -34,7 +34,6 @@ import org.apache.spark.unsafe.types.UTF8String
*/
abstract class LeafMathExpression(c: Double, name: String)
extends LeafExpression with Serializable {
- self: Product =>
override def dataType: DataType = DoubleType
override def foldable: Boolean = true
@@ -58,7 +57,7 @@ abstract class LeafMathExpression(c: Double, name: String)
* @param name The short name of the function
*/
abstract class UnaryMathExpression(f: Double => Double, name: String)
- extends UnaryExpression with Serializable with ImplicitCastInputTypes { self: Product =>
+ extends UnaryExpression with Serializable with ImplicitCastInputTypes {
override def inputTypes: Seq[DataType] = Seq(DoubleType)
override def dataType: DataType = DoubleType
@@ -92,7 +91,7 @@ abstract class UnaryMathExpression(f: Double => Double, name: String)
* @param name The short name of the function
*/
abstract class BinaryMathExpression(f: (Double, Double) => Double, name: String)
- extends BinaryExpression with Serializable with ImplicitCastInputTypes { self: Product =>
+ extends BinaryExpression with Serializable with ImplicitCastInputTypes {
override def inputTypes: Seq[DataType] = Seq(DoubleType, DoubleType)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala
index 8bf7a7ce4e..c083ac08de 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala
@@ -40,7 +40,7 @@ case class ExprId(id: Long)
/**
* An [[Expression]] that is named.
*/
-trait NamedExpression extends Expression { self: Product =>
+trait NamedExpression extends Expression {
/** We should never fold named expressions in order to not remove the alias. */
override def foldable: Boolean = false
@@ -83,7 +83,7 @@ trait NamedExpression extends Expression { self: Product =>
}
}
-abstract class Attribute extends LeafExpression with NamedExpression { self: Product =>
+abstract class Attribute extends LeafExpression with NamedExpression {
override def references: AttributeSet = AttributeSet(this)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
index aa6c30e2f7..7a6fb2b378 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
@@ -38,8 +38,6 @@ object InterpretedPredicate {
* An [[Expression]] that returns a boolean value.
*/
trait Predicate extends Expression {
- self: Product =>
-
override def dataType: DataType = BooleanType
}
@@ -222,7 +220,6 @@ case class Or(left: Expression, right: Expression) extends BinaryOperator with P
abstract class BinaryComparison extends BinaryOperator with Predicate {
- self: Product =>
override def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
if (ctx.isPrimitiveType(left.dataType)) {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/random.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/random.scala
index e10ba55396..65093dc722 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/random.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/random.scala
@@ -33,7 +33,6 @@ import org.apache.spark.util.random.XORShiftRandom
* Since this expression is stateful, it cannot be a case object.
*/
abstract class RDG(seed: Long) extends LeafExpression with Serializable {
- self: Product =>
/**
* Record ID within each partition. By being transient, the Random Number Generator is
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
index 3443616858..c8aa571df6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
@@ -254,8 +254,6 @@ object SpecifiedWindowFrame {
* to retrieve value corresponding with these n rows.
*/
trait WindowFunction extends Expression {
- self: Product =>
-
def init(): Unit
def reset(): Unit
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
index adac37231c..dd6c5d43f5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
@@ -25,8 +25,7 @@ import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.trees.TreeNode
-abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging {
- self: Product =>
+abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging with Product {
/**
* Computes [[Statistics]] for this plan. The default implementation assumes the output
@@ -277,8 +276,6 @@ abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging {
* A logical plan node with no children.
*/
abstract class LeafNode extends LogicalPlan {
- self: Product =>
-
override def children: Seq[LogicalPlan] = Nil
}
@@ -286,8 +283,6 @@ abstract class LeafNode extends LogicalPlan {
* A logical plan node with single child.
*/
abstract class UnaryNode extends LogicalPlan {
- self: Product =>
-
def child: LogicalPlan
override def children: Seq[LogicalPlan] = child :: Nil
@@ -297,8 +292,6 @@ abstract class UnaryNode extends LogicalPlan {
* A logical plan node with a left and right child.
*/
abstract class BinaryNode extends LogicalPlan {
- self: Product =>
-
def left: LogicalPlan
def right: LogicalPlan
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
index fae339808c..fbe104db01 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
@@ -298,7 +298,7 @@ case class Expand(
}
trait GroupingAnalytics extends UnaryNode {
- self: Product =>
+
def groupByExprs: Seq[Expression]
def aggregations: Seq[NamedExpression]
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/partitioning.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/partitioning.scala
index 63df2c1ee7..1f76b03bcb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/partitioning.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/partitioning.scala
@@ -24,8 +24,6 @@ import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, SortOrd
* result have expectations about the distribution and ordering of partitioned input data.
*/
abstract class RedistributeData extends UnaryNode {
- self: Product =>
-
override def output: Seq[Attribute] = child.output
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index 632f633d82..ba12056ee7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -39,8 +39,7 @@ object SparkPlan {
* :: DeveloperApi ::
*/
@DeveloperApi
-abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializable {
- self: Product =>
+abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Product with Serializable {
/**
* A handle to the SQL Context that was used to create this plan. Since many operators need
@@ -239,14 +238,10 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
}
private[sql] trait LeafNode extends SparkPlan {
- self: Product =>
-
override def children: Seq[SparkPlan] = Nil
}
private[sql] trait UnaryNode extends SparkPlan {
- self: Product =>
-
def child: SparkPlan
override def children: Seq[SparkPlan] = child :: Nil
@@ -255,8 +250,6 @@ private[sql] trait UnaryNode extends SparkPlan {
}
private[sql] trait BinaryNode extends SparkPlan {
- self: Product =>
-
def left: SparkPlan
def right: SparkPlan
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
index 5e9951f248..bace3f8a9c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
@@ -35,8 +35,6 @@ import org.apache.spark.sql.{DataFrame, Row, SQLConf, SQLContext}
* wrapped in `ExecutedCommand` during execution.
*/
private[sql] trait RunnableCommand extends LogicalPlan with logical.Command {
- self: Product =>
-
override def output: Seq[Attribute] = Seq.empty
override def children: Seq[LogicalPlan] = Seq.empty
def run(sqlContext: SQLContext): Seq[Row]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala
index e0bea65a15..086559e9f7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetRelation.scala
@@ -54,8 +54,6 @@ private[sql] case class ParquetRelation(
partitioningAttributes: Seq[Attribute] = Nil)
extends LeafNode with MultiInstanceRelation {
- self: Product =>
-
/** Schema derived from ParquetFile */
def parquetSchema: MessageType =
ParquetTypesConverter
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 4b7a782c80..6589bc6ea2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -596,8 +596,6 @@ private[hive] case class MetastoreRelation
(@transient sqlContext: SQLContext)
extends LeafNode with MultiInstanceRelation {
- self: Product =>
-
override def equals(other: Any): Boolean = other match {
case relation: MetastoreRelation =>
databaseName == relation.databaseName &&