path: root/sql/catalyst
author     Dongjoon Hyun <dongjoon@apache.org>    2016-04-02 17:50:40 -0700
committer  Reynold Xin <rxin@databricks.com>      2016-04-02 17:50:40 -0700
commit     4a6e78abd9d5edc4a5092738dff0006bbe202a89 (patch)
tree       5ecbee86bb057139128b65b0f99405c51e637e38 /sql/catalyst
parent     f705037617d55bb479ec60bcb1e55c736224be94 (diff)
[MINOR][DOCS] Use multi-line JavaDoc comments in Scala code.
## What changes were proposed in this pull request?

This PR aims to fix all Scala-style multi-line comments into Java-style multi-line comments in the Scala code. (All comment-only changes over 77 files: +786 lines, −747 lines.)

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12130 from dongjoon-hyun/use_multiine_javadoc_comments.
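For context, the sketch below illustrates the two comment conventions this commit normalizes. It is a minimal, hypothetical example (the object and method names are illustrative, not taken from the diff that follows):

```scala
// A minimal sketch of the convention change (hypothetical names).

object CommentStyleExample {

  /** Scala-style multi-line comment: continuation lines are indented with
    * two spaces so the asterisks align under the second '*' of "/**".
    */
  def before(x: Int): Int = x + 1

  /**
   * Java-style (JavaDoc) multi-line comment, as adopted by this commit:
   * the opening "/**" sits on its own line and continuation lines are
   * indented with one space so the asterisks align under the first '*'.
   */
  def after(x: Int): Int = x + 1
}
```

The diff below applies exactly this kind of change: each touched comment keeps its wording, and only the leading indentation of the continuation lines changes.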
Diffstat (limited to 'sql/catalyst')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala                                      | 24
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala                   | 20
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala              |  6
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala   | 26
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala                | 18
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala                    |  4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala                 | 40
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala                   |  4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala                   | 28
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala                     |  4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala         |  6
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizerExtendableSuite.scala  | 14
12 files changed, 97 insertions(+), 97 deletions(-)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
index 1f20e26354..e0bfe3c32f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
@@ -140,27 +140,27 @@ object Encoders {
def STRING: Encoder[java.lang.String] = ExpressionEncoder()
/**
- * An encoder for nullable decimal type.
- * @since 1.6.0
- */
+ * An encoder for nullable decimal type.
+ * @since 1.6.0
+ */
def DECIMAL: Encoder[java.math.BigDecimal] = ExpressionEncoder()
/**
- * An encoder for nullable date type.
- * @since 1.6.0
- */
+ * An encoder for nullable date type.
+ * @since 1.6.0
+ */
def DATE: Encoder[java.sql.Date] = ExpressionEncoder()
/**
- * An encoder for nullable timestamp type.
- * @since 1.6.0
- */
+ * An encoder for nullable timestamp type.
+ * @since 1.6.0
+ */
def TIMESTAMP: Encoder[java.sql.Timestamp] = ExpressionEncoder()
/**
- * An encoder for arrays of bytes.
- * @since 1.6.1
- */
+ * An encoder for arrays of bytes.
+ * @since 1.6.1
+ */
def BINARY: Encoder[Array[Byte]] = ExpressionEncoder()
/**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 05e2b9a447..a6e317ebf0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -733,9 +733,9 @@ class Analyzer(
}
/**
- * Add the missing attributes into projectList of Project/Window or aggregateExpressions of
- * Aggregate.
- */
+ * Add the missing attributes into projectList of Project/Window or aggregateExpressions of
+ * Aggregate.
+ */
private def addMissingAttr(plan: LogicalPlan, missingAttrs: AttributeSet): LogicalPlan = {
if (missingAttrs.isEmpty) {
return plan
@@ -767,9 +767,9 @@ class Analyzer(
}
/**
- * Resolve the expression on a specified logical plan and it's child (recursively), until
- * the expression is resolved or meet a non-unary node or Subquery.
- */
+ * Resolve the expression on a specified logical plan and it's child (recursively), until
+ * the expression is resolved or meet a non-unary node or Subquery.
+ */
@tailrec
private def resolveExpressionRecursively(expr: Expression, plan: LogicalPlan): Expression = {
val resolved = resolveExpression(expr, plan)
@@ -1398,8 +1398,8 @@ class Analyzer(
}
/**
- * Check and add order to [[AggregateWindowFunction]]s.
- */
+ * Check and add order to [[AggregateWindowFunction]]s.
+ */
object ResolveWindowOrder extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case logical: LogicalPlan => logical transformExpressions {
@@ -1489,8 +1489,8 @@ object EliminateSubqueryAliases extends Rule[LogicalPlan] {
}
/**
- * Removes [[Union]] operators from the plan if it just has one child.
- */
+ * Removes [[Union]] operators from the plan if it just has one child.
+ */
object EliminateUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Union(children) if children.size == 1 => children.head
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala
index 053e612f3e..354311c5e7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala
@@ -136,9 +136,9 @@ object UnsafeProjection {
}
/**
- * Same as other create()'s but allowing enabling/disabling subexpression elimination.
- * TODO: refactor the plumbing and clean this up.
- */
+ * Same as other create()'s but allowing enabling/disabling subexpression elimination.
+ * TODO: refactor the plumbing and clean this up.
+ */
def create(
exprs: Seq[Expression],
inputSchema: Seq[Attribute],
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index cd490dd676..b64d3eea49 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -58,10 +58,10 @@ class CodegenContext {
val references: mutable.ArrayBuffer[Any] = new mutable.ArrayBuffer[Any]()
/**
- * Add an object to `references`, create a class member to access it.
- *
- * Returns the name of class member.
- */
+ * Add an object to `references`, create a class member to access it.
+ *
+ * Returns the name of class member.
+ */
def addReferenceObj(name: String, obj: Any, className: String = null): String = {
val term = freshName(name)
val idx = references.length
@@ -72,9 +72,9 @@ class CodegenContext {
}
/**
- * Holding a list of generated columns as input of current operator, will be used by
- * BoundReference to generate code.
- */
+ * Holding a list of generated columns as input of current operator, will be used by
+ * BoundReference to generate code.
+ */
var currentVars: Seq[ExprCode] = null
/**
@@ -169,14 +169,14 @@ class CodegenContext {
final var INPUT_ROW = "i"
/**
- * The map from a variable name to it's next ID.
- */
+ * The map from a variable name to it's next ID.
+ */
private val freshNameIds = new mutable.HashMap[String, Int]
freshNameIds += INPUT_ROW -> 1
/**
- * A prefix used to generate fresh name.
- */
+ * A prefix used to generate fresh name.
+ */
var freshNamePrefix = ""
/**
@@ -234,8 +234,8 @@ class CodegenContext {
}
/**
- * Update a column in MutableRow from ExprCode.
- */
+ * Update a column in MutableRow from ExprCode.
+ */
def updateColumn(
row: String,
dataType: DataType,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
index 437e417266..3be761c867 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
@@ -22,8 +22,8 @@ import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.types._
/**
- * A placeholder expression for cube/rollup, which will be replaced by analyzer
- */
+ * A placeholder expression for cube/rollup, which will be replaced by analyzer
+ */
trait GroupingSet extends Expression with CodegenFallback {
def groupByExprs: Seq[Expression]
@@ -43,9 +43,9 @@ case class Cube(groupByExprs: Seq[Expression]) extends GroupingSet {}
case class Rollup(groupByExprs: Seq[Expression]) extends GroupingSet {}
/**
- * Indicates whether a specified column expression in a GROUP BY list is aggregated or not.
- * GROUPING returns 1 for aggregated or 0 for not aggregated in the result set.
- */
+ * Indicates whether a specified column expression in a GROUP BY list is aggregated or not.
+ * GROUPING returns 1 for aggregated or 0 for not aggregated in the result set.
+ */
case class Grouping(child: Expression) extends Expression with Unevaluable {
override def references: AttributeSet = AttributeSet(VirtualColumn.groupingIdAttribute :: Nil)
override def children: Seq[Expression] = child :: Nil
@@ -54,10 +54,10 @@ case class Grouping(child: Expression) extends Expression with Unevaluable {
}
/**
- * GroupingID is a function that computes the level of grouping.
- *
- * If groupByExprs is empty, it means all grouping expressions in GroupingSets.
- */
+ * GroupingID is a function that computes the level of grouping.
+ *
+ * If groupByExprs is empty, it means all grouping expressions in GroupingSets.
+ */
case class GroupingID(groupByExprs: Seq[Expression]) extends Expression with Unevaluable {
override def references: AttributeSet = AttributeSet(VirtualColumn.groupingIdAttribute :: Nil)
override def children: Seq[Expression] = groupByExprs
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
index e8a3e129b4..eb8dc1423a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
@@ -467,8 +467,8 @@ object Murmur3HashFunction extends InterpretedHashFunction {
}
/**
- * Print the result of an expression to stderr (used for debugging codegen).
- */
+ * Print the result of an expression to stderr (used for debugging codegen).
+ */
case class PrintToStderr(child: Expression) extends UnaryExpression {
override def dataType: DataType = child.dataType
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index a5ab390c76..69b09bcb35 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -31,9 +31,9 @@ import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types._
/**
- * Abstract class all optimizers should inherit of, contains the standard batches (extending
- * Optimizers can override this.
- */
+ * Abstract class all optimizers should inherit of, contains the standard batches (extending
+ * Optimizers can override this.
+ */
abstract class Optimizer extends RuleExecutor[LogicalPlan] {
def batches: Seq[Batch] = {
// Technically some of the rules in Finish Analysis are not optimizer rules and belong more
@@ -111,11 +111,11 @@ abstract class Optimizer extends RuleExecutor[LogicalPlan] {
}
/**
- * Non-abstract representation of the standard Spark optimizing strategies
- *
- * To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
- * specific rules go to the subclasses
- */
+ * Non-abstract representation of the standard Spark optimizing strategies
+ *
+ * To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
+ * specific rules go to the subclasses
+ */
object DefaultOptimizer extends Optimizer
/**
@@ -962,21 +962,21 @@ object PushPredicateThroughAggregate extends Rule[LogicalPlan] with PredicateHel
}
/**
- * Reorder the joins and push all the conditions into join, so that the bottom ones have at least
- * one condition.
- *
- * The order of joins will not be changed if all of them already have at least one condition.
- */
+ * Reorder the joins and push all the conditions into join, so that the bottom ones have at least
+ * one condition.
+ *
+ * The order of joins will not be changed if all of them already have at least one condition.
+ */
object ReorderJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
- * Join a list of plans together and push down the conditions into them.
- *
- * The joined plan are picked from left to right, prefer those has at least one join condition.
- *
- * @param input a list of LogicalPlans to join.
- * @param conditions a list of condition for join.
- */
+ * Join a list of plans together and push down the conditions into them.
+ *
+ * The joined plan are picked from left to right, prefer those has at least one join condition.
+ *
+ * @param input a list of LogicalPlans to join.
+ * @param conditions a list of condition for join.
+ */
@tailrec
def createOrderedJoin(input: Seq[LogicalPlan], conditions: Seq[Expression]): LogicalPlan = {
assert(input.size >= 2)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index c350f3049f..8541b1f7c6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -1430,8 +1430,8 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {
}
/**
- * Create a [[StructType]] from a sequence of [[StructField]]s.
- */
+ * Create a [[StructType]] from a sequence of [[StructField]]s.
+ */
protected def createStructType(ctx: ColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitColTypeList))
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
index 28d2c445b1..6f35d87ebb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
@@ -140,20 +140,20 @@ object ExtractEquiJoinKeys extends Logging with PredicateHelper {
}
/**
- * A pattern that collects the filter and inner joins.
- *
- * Filter
- * |
- * inner Join
- * / \ ----> (Seq(plan0, plan1, plan2), conditions)
- * Filter plan2
- * |
- * inner join
- * / \
- * plan0 plan1
- *
- * Note: This pattern currently only works for left-deep trees.
- */
+ * A pattern that collects the filter and inner joins.
+ *
+ * Filter
+ * |
+ * inner Join
+ * / \ ----> (Seq(plan0, plan1, plan2), conditions)
+ * Filter plan2
+ * |
+ * inner join
+ * / \
+ * plan0 plan1
+ *
+ * Note: This pattern currently only works for left-deep trees.
+ */
object ExtractFiltersAndInnerJoins extends PredicateHelper {
// flatten all inner joins, which are next to each other
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 22a4461e66..609a33e2f1 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -122,8 +122,8 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT
AttributeSet(children.flatMap(_.asInstanceOf[QueryPlan[PlanType]].output))
/**
- * The set of all attributes that are produced by this node.
- */
+ * The set of all attributes that are produced by this node.
+ */
def producedAttributes: AttributeSet = AttributeSet.empty
/**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
index be9f1ffa22..d449088498 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/physical/partitioning.scala
@@ -76,9 +76,9 @@ case class OrderedDistribution(ordering: Seq[SortOrder]) extends Distribution {
}
/**
- * Represents data where tuples are broadcasted to every node. It is quite common that the
- * entire set of tuples is transformed into different data structure.
- */
+ * Represents data where tuples are broadcasted to every node. It is quite common that the
+ * entire set of tuples is transformed into different data structure.
+ */
case class BroadcastDistribution(mode: BroadcastMode) extends Distribution
/**
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizerExtendableSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizerExtendableSuite.scala
index 7e3da6bea7..6e5672ddc3 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizerExtendableSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizerExtendableSuite.scala
@@ -23,21 +23,21 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
/**
- * This is a test for SPARK-7727 if the Optimizer is kept being extendable
- */
+ * This is a test for SPARK-7727 if the Optimizer is kept being extendable
+ */
class OptimizerExtendableSuite extends SparkFunSuite {
/**
- * Dummy rule for test batches
- */
+ * Dummy rule for test batches
+ */
object DummyRule extends Rule[LogicalPlan] {
def apply(p: LogicalPlan): LogicalPlan = p
}
/**
- * This class represents a dummy extended optimizer that takes the batches of the
- * Optimizer and adds custom ones.
- */
+ * This class represents a dummy extended optimizer that takes the batches of the
+ * Optimizer and adds custom ones.
+ */
class ExtendedOptimizer extends Optimizer {
// rules set to DummyRule, would not be executed anyways