author    Dongjoon Hyun <dongjoon@apache.org>    2016-04-06 16:02:55 -0700
committer Andrew Or <andrew@databricks.com>      2016-04-06 16:02:55 -0700
commit    d717ae1fd74d125a9df21350a70e7c2b2d2b4786
tree      7fea2e27627c52cd6ffa04421df66068e5b28881 /sql
parent    457e58befe8cb7c346e54b344a45fa357b68cfc0
[SPARK-14444][BUILD] Add a new scalastyle `NoScalaDoc` to prevent ScalaDoc-style multiline comments
## What changes were proposed in this pull request?

According to the [Spark Code Style Guide](https://cwiki.apache.org/confluence/display/SPARK/Spark+Code+Style+Guide#SparkCodeStyleGuide-Indentation), this PR adds a new scalastyle rule to prevent comments like the following:

```
/** In Spark, we don't use the ScalaDoc style so this
  * is not correct.
  */
```

## How was this patch tested?

Pass the Jenkins tests (including `lint-scala`).

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12221 from dongjoon-hyun/SPARK-14444.
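For reference, the style the new rule enforces is the Javadoc one, as seen in the corrected comments throughout the diff below. A minimal illustration (the method itself is hypothetical):

```
/**
 * In Spark, multiline comments put nothing after the opener and align each
 * continuation asterisk under the first `*`, one space from the margin.
 */
def nameLength(name: String): Int = name.length
```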
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala                    | 18
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala  |  8
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala                    |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala                        |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala                     |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala                     |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala                               | 10
7 files changed, 28 insertions, 26 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index d241b8a79b..4795fc2557 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -762,15 +762,15 @@ trait ScalaReflection {
}
/**
-  * Returns the full class name for a type. The returned name is the canonical
-  * Scala name, where each component is separated by a period. It is NOT the
-  * Java-equivalent runtime name (no dollar signs).
-  *
-  * In simple cases, both the Scala and Java names are the same, however when Scala
-  * generates constructs that do not map to a Java equivalent, such as singleton objects
-  * or nested classes in package objects, it uses the dollar sign ($) to create
-  * synthetic classes, emulating behaviour in Java bytecode.
-  */
+ * Returns the full class name for a type. The returned name is the canonical
+ * Scala name, where each component is separated by a period. It is NOT the
+ * Java-equivalent runtime name (no dollar signs).
+ *
+ * In simple cases, both the Scala and Java names are the same, however when Scala
+ * generates constructs that do not map to a Java equivalent, such as singleton objects
+ * or nested classes in package objects, it uses the dollar sign ($) to create
+ * synthetic classes, emulating behaviour in Java bytecode.
+ */
def getClassNameFromType(tpe: `Type`): String = {
tpe.erasure.typeSymbol.asClass.fullName
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index 1bebd4e904..ee7f4fadca 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -626,15 +626,15 @@ abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Loggin
object CodeGenerator extends Logging {
/**
-  * Compile the Java source code into a Java class, using Janino.
-  */
+ * Compile the Java source code into a Java class, using Janino.
+ */
def compile(code: String): GeneratedClass = {
cache.get(code)
}
/**
-  * Compile the Java source code into a Java class, using Janino.
-  */
+ * Compile the Java source code into a Java class, using Janino.
+ */
private[this] def doCompile(code: String): GeneratedClass = {
val evaluator = new ClassBodyEvaluator()
evaluator.setParentClassLoader(Utils.getContextOrSparkClassLoader)
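For context, both methods above bottom out in Janino's ClassBodyEvaluator, visible in the trailing context lines. A minimal standalone sketch of that pattern (the compiled `addOne` class body is a made-up example):

```
import org.codehaus.janino.ClassBodyEvaluator

object JaninoSketch {
  def main(args: Array[String]): Unit = {
    // Compile a Java class body at runtime and load the resulting class:
    // the same Janino entry point the generator's doCompile uses.
    val evaluator = new ClassBodyEvaluator()
    evaluator.setParentClassLoader(Thread.currentThread().getContextClassLoader)
    evaluator.cook("public int addOne(int x) { return x + 1; }")
    val generated: Class[_] = evaluator.getClazz
    println(generated.getName)
  }
}
```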
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 609a33e2f1..0a11574f44 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -211,8 +211,10 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT
if (changed) makeCopy(newArgs).asInstanceOf[this.type] else this
}
- /** Returns the result of running [[transformExpressions]] on this node
-   * and all its children. */
+ /**
+ * Returns the result of running [[transformExpressions]] on this node
+ * and all its children.
+ */
def transformAllExpressions(rule: PartialFunction[Expression, Expression]): this.type = {
transform {
case q: QueryPlan[_] => q.transformExpressions(rule).asInstanceOf[PlanType]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
index 91c02053ae..7dbf2e6c7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
@@ -408,7 +408,7 @@ private[sql] object RelationalGroupedDataset {
private[sql] object RollupType extends GroupType
/**
-  * To indicate it's the PIVOT
-  */
+ * To indicate it's the PIVOT
+ */
private[sql] case class PivotType(pivotCol: Expression, values: Seq[Literal]) extends GroupType
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
index 98129d6c52..c4594f0480 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
@@ -312,8 +312,8 @@ case class WholeStageCodegen(child: SparkPlan) extends UnaryNode with CodegenSup
}
/** Codegened pipeline for:
-  * ${toCommentSafeString(child.treeString.trim)}
-  */
+ * ${toCommentSafeString(child.treeString.trim)}
+ */
final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
private Object[] references;
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
index 012b125d6b..c6fcb6956c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
@@ -167,8 +167,8 @@ private[ui] class SparkPlanGraphNode(
}
/**
-  * Represent a tree of SparkPlan for WholeStageCodegen.
-  */
+ * Represent a tree of SparkPlan for WholeStageCodegen.
+ */
private[ui] class SparkPlanGraphCluster(
id: Long,
name: String,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index cfe4911cb7..948106fd06 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -100,11 +100,11 @@ abstract class JdbcDialect extends Serializable {
}
/**
-  * Override connection specific properties to run before a select is made. This is in place to
-  * allow dialects that need special treatment to optimize behavior.
-  * @param connection The connection object
-  * @param properties The connection properties. This is passed through from the relation.
-  */
+ * Override connection specific properties to run before a select is made. This is in place to
+ * allow dialects that need special treatment to optimize behavior.
+ * @param connection The connection object
+ * @param properties The connection properties. This is passed through from the relation.
+ */
def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
}
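To illustrate the hook documented above, a dialect can adjust the connection before the select runs. A hedged sketch (the ExamplePostgresDialect name and the "fetchsize" property key are assumptions for illustration, not part of this diff):

```
import java.sql.Connection

import org.apache.spark.sql.jdbc.JdbcDialect

// Hypothetical dialect: PostgreSQL streams results through a cursor only
// when autocommit is off, so disable it when a fetch size is requested.
case object ExamplePostgresDialect extends JdbcDialect {
  override def canHandle(url: String): Boolean =
    url.startsWith("jdbc:postgresql")

  override def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
    if (properties.getOrElse("fetchsize", "0").toInt > 0) {
      connection.setAutoCommit(false)
    }
  }
}
```

Such a dialect would then be registered with JdbcDialects.registerDialect so it is picked up for matching JDBC URLs.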