 core/src/main/scala/org/apache/spark/SparkConf.scala                                              |  6 ++++--
 core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala                                 | 12 ++++++------
 core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala                                  |  4 ++--
 examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala                      |  6 ++++--
 external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala            |  6 ++++--
 mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala          | 10 ++++++----
 scalastyle-config.xml                                                                             |  5 +++++
 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala                   | 18 +++++++++---------
 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala |  8 ++++----
 sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala                   |  6 ++++--
 sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala                       |  4 ++--
 sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala                    |  4 ++--
 sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala                    |  4 ++--
 sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala                              | 10 +++++-----
 14 files changed, 59 insertions(+), 44 deletions(-)
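
The change itself is mechanical: multiline doc comments written in Scaladoc
style (text on the opening line, continuation stars aligned under the second
asterisk of /**) are rewritten in Javadoc style (bare opening line,
continuation stars aligned under the first asterisk). A minimal before/after
sketch, with placeholder comment text:

    /** Scaladoc-style indentation: continuation stars sit two
      * columns in from the opening slash. (now rejected)
      */

    /**
     * Javadoc-style indentation: continuation stars sit one
     * column in from the opening slash. (now required)
     */
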
diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 5da2e98f1f..e0fd248c43 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -419,8 +419,10 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
*/
private[spark] def getenv(name: String): String = System.getenv(name)
- /** Checks for illegal or deprecated config settings. Throws an exception for the former. Not
- * idempotent - may mutate this conf object to convert deprecated settings to supported ones. */
+ /**
+ * Checks for illegal or deprecated config settings. Throws an exception for the former. Not
+ * idempotent - may mutate this conf object to convert deprecated settings to supported ones.
+ */
private[spark] def validateSettings() {
if (contains("spark.local.dir")) {
val msg = "In Spark 1.0 and later spark.local.dir will be overridden by the value set by " +
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 4e8e363635..41ac308808 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -76,9 +76,9 @@ class SparkHadoopUtil extends Logging {
/**
- * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
- * configuration.
- */
+ * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
+ * configuration.
+ */
def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
// Note: this null check is around more than just access to the "conf" object to maintain
// the behavior of the old implementation of this code, for backwards compatibility.
@@ -108,9 +108,9 @@ class SparkHadoopUtil extends Logging {
}
/**
- * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
- * subsystems.
- */
+ * Return an appropriate (subclass) of Configuration. Creating config can initializes some Hadoop
+ * subsystems.
+ */
def newConfiguration(conf: SparkConf): Configuration = {
val hadoopConf = new Configuration()
appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
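
The spark.hadoop.* and spark.buffer.size copying these comments describe can
be checked directly; a minimal sketch (the property names and values are
illustrative):

    import org.apache.spark.SparkConf
    import org.apache.spark.deploy.SparkHadoopUtil

    val conf = new SparkConf()
      .set("spark.hadoop.fs.s3a.connection.maximum", "100") // copied without the spark.hadoop. prefix
      .set("spark.buffer.size", "131072")                   // becomes io.file.buffer.size

    val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
    assert(hadoopConf.get("fs.s3a.connection.maximum") == "100")
    assert(hadoopConf.get("io.file.buffer.size") == "131072")
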
diff --git a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
index d06b2c67d2..c562c70aba 100644
--- a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
+++ b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
@@ -28,8 +28,8 @@ class BoundedDouble(val mean: Double, val confidence: Double, val low: Double, v
this.mean.hashCode ^ this.confidence.hashCode ^ this.low.hashCode ^ this.high.hashCode
/**
- * Note that consistent with Double, any NaN value will make equality false
- */
+ * Note that consistent with Double, any NaN value will make equality false
+ */
override def equals(that: Any): Boolean =
that match {
case that: BoundedDouble => {
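
The NaN caveat simply mirrors Double semantics, where Double.NaN == Double.NaN
is false; a small sketch of the consequence (values are illustrative):

    import org.apache.spark.partial.BoundedDouble

    val a = new BoundedDouble(Double.NaN, 0.95, 0.0, 1.0)
    val b = new BoundedDouble(Double.NaN, 0.95, 0.0, 1.0)
    assert(a != b) // a NaN in any field makes equality false, as with raw Doubles
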
diff --git a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
index a2d59a1c95..d12ef642bd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala
@@ -22,8 +22,10 @@ import scala.collection.JavaConverters._
import org.apache.spark.util.Utils
-/** Prints out environmental information, sleeps, and then exits. Made to
- * test driver submission in the standalone scheduler. */
+/**
+ * Prints out environmental information, sleeps, and then exits. Made to
+ * test driver submission in the standalone scheduler.
+ */
object DriverSubmissionTest {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
index 6e7c3f358e..13aa817492 100644
--- a/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
+++ b/external/flume/src/main/scala/org/apache/spark/streaming/flume/FlumeInputDStream.scala
@@ -130,8 +130,10 @@ class FlumeEventServer(receiver: FlumeReceiver) extends AvroSourceProtocol {
}
}
-/** A NetworkReceiver which listens for events using the
- * Flume Avro interface. */
+/**
+ * A NetworkReceiver which listens for events using the
+ * Flume Avro interface.
+ */
private[streaming]
class FlumeReceiver(
host: String,
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
index 052b5b1d65..6c6e9fb7c6 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/distribution/MultivariateGaussian.scala
@@ -61,15 +61,17 @@ class MultivariateGaussian @Since("1.3.0") (
*/
private val (rootSigmaInv: DBM[Double], u: Double) = calculateCovarianceConstants
- /** Returns density of this multivariate Gaussian at given point, x
- */
+ /**
+ * Returns density of this multivariate Gaussian at given point, x
+ */
@Since("1.3.0")
def pdf(x: Vector): Double = {
pdf(x.toBreeze)
}
- /** Returns the log-density of this multivariate Gaussian at given point, x
- */
+ /**
+ * Returns the log-density of this multivariate Gaussian at given point, x
+ */
@Since("1.3.0")
def logpdf(x: Vector): Double = {
logpdf(x.toBreeze)
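
For reference, pdf and logpdf evaluate the standard multivariate Gaussian
density; reading the private val above, rootSigmaInv plays the role of a
square root of the inverse covariance and u the log of the normalizing
constant, so (in LaTeX, with k the dimension):

    f(\mathbf{x}) = (2\pi)^{-k/2}\,\lvert\Sigma\rvert^{-1/2}
                    \exp\!\Big(-\tfrac{1}{2}(\mathbf{x}-\mu)^{\top}\Sigma^{-1}(\mathbf{x}-\mu)\Big)

    \log f(\mathbf{x}) = u - \tfrac{1}{2}\,\lVert\,\mathrm{rootSigmaInv}\,(\mathbf{x}-\mu)\,\rVert^{2}
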
diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index 33c2cbd293..472a8f4084 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -223,6 +223,11 @@ This file is divided into 3 sections:
]]></customMessage>
</check>
+ <check customId="NoScalaDoc" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
+ <parameters><parameter name="regex">(?m)^(\s*)/[*][*].*$(\r|)\n^\1  [*]</parameter></parameters>
+ <customMessage>Use Javadoc style indentation for multiline comments</customMessage>
+ </check>
+
<!-- ================================================================================ -->
<!-- rules we'd like to enforce, but haven't cleaned up the codebase yet -->
<!-- ================================================================================ -->
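
A quick way to see what the new NoScalaDoc rule flags is to run its regex
directly (note the two spaces before the continuation star, i.e. Scaladoc-style
alignment). A self-contained sketch; the object name and sample strings are
illustrative:

    import java.util.regex.Pattern

    object NoScalaDocDemo {
      // The pattern added to scalastyle-config.xml above.
      val p = Pattern.compile("(?m)^(\\s*)/[*][*].*$(\\r|)\\n^\\1  [*]")

      def main(args: Array[String]): Unit = {
        val scaladocStyle = "  /** Bad.\n    * Continuation.\n    */"
        val javadocStyle  = "  /**\n   * Good.\n   */"
        println(p.matcher(scaladocStyle).find()) // true  -> reported by the check
        println(p.matcher(javadocStyle).find())  // false -> passes the check
      }
    }
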
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index d241b8a79b..4795fc2557 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -762,15 +762,15 @@ trait ScalaReflection {
}
/**
- * Returns the full class name for a type. The returned name is the canonical
- * Scala name, where each component is separated by a period. It is NOT the
- * Java-equivalent runtime name (no dollar signs).
- *
- * In simple cases, both the Scala and Java names are the same, however when Scala
- * generates constructs that do not map to a Java equivalent, such as singleton objects
- * or nested classes in package objects, it uses the dollar sign ($) to create
- * synthetic classes, emulating behaviour in Java bytecode.
- */
+ * Returns the full class name for a type. The returned name is the canonical
+ * Scala name, where each component is separated by a period. It is NOT the
+ * Java-equivalent runtime name (no dollar signs).
+ *
+ * In simple cases, both the Scala and Java names are the same, however when Scala
+ * generates constructs that do not map to a Java equivalent, such as singleton objects
+ * or nested classes in package objects, it uses the dollar sign ($) to create
+ * synthetic classes, emulating behaviour in Java bytecode.
+ */
def getClassNameFromType(tpe: `Type`): String = {
tpe.erasure.typeSymbol.asClass.fullName
}
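
The Scala-versus-JVM naming difference this comment describes is easy to see
with plain reflection; a small sketch (package and class names are
hypothetical):

    package demo

    import scala.reflect.runtime.universe._

    object Outer { case class Inner(x: Int) }

    object NameDemo extends App {
      // Canonical Scala name, period-separated, as getClassNameFromType computes it:
      println(typeOf[Outer.Inner].erasure.typeSymbol.asClass.fullName) // demo.Outer.Inner
      // JVM runtime name, where the nesting becomes a synthetic dollar sign:
      println(classOf[Outer.Inner].getName)                            // demo.Outer$Inner
    }
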
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index 1bebd4e904..ee7f4fadca 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -626,15 +626,15 @@ abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Loggin
object CodeGenerator extends Logging {
/**
- * Compile the Java source code into a Java class, using Janino.
- */
+ * Compile the Java source code into a Java class, using Janino.
+ */
def compile(code: String): GeneratedClass = {
cache.get(code)
}
/**
- * Compile the Java source code into a Java class, using Janino.
- */
+ * Compile the Java source code into a Java class, using Janino.
+ */
private[this] def doCompile(code: String): GeneratedClass = {
val evaluator = new ClassBodyEvaluator()
evaluator.setParentClassLoader(Utils.getContextOrSparkClassLoader)
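
Outside of Spark's cache and codegen plumbing, the Janino flow that compile
and doCompile wrap is small; a minimal sketch (the class body and method name
are illustrative):

    import org.codehaus.janino.ClassBodyEvaluator

    val evaluator = new ClassBodyEvaluator()
    evaluator.setParentClassLoader(getClass.getClassLoader)
    evaluator.cook("public int answer() { return 42; }") // compile a Java class body
    val clazz = evaluator.getClazz
    println(clazz.getMethod("answer").invoke(clazz.newInstance())) // 42
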
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
index 609a33e2f1..0a11574f44 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala
@@ -211,8 +211,10 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanT
if (changed) makeCopy(newArgs).asInstanceOf[this.type] else this
}
- /** Returns the result of running [[transformExpressions]] on this node
- * and all its children. */
+ /**
+ * Returns the result of running [[transformExpressions]] on this node
+ * and all its children.
+ */
def transformAllExpressions(rule: PartialFunction[Expression, Expression]): this.type = {
transform {
case q: QueryPlan[_] => q.transformExpressions(rule).asInstanceOf[PlanType]
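
A usage sketch for transformAllExpressions, assuming some plan: QueryPlan[_]
is in scope; the literal rewrite is purely illustrative:

    import org.apache.spark.sql.catalyst.expressions.Literal

    // Applied to this node and, unlike transformExpressions, to every child:
    val rewritten = plan.transformAllExpressions {
      case Literal(v: Int, dt) => Literal(v + 1, dt)
    }
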
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
index 91c02053ae..7dbf2e6c7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala
@@ -408,7 +408,7 @@ private[sql] object RelationalGroupedDataset {
private[sql] object RollupType extends GroupType
/**
- * To indicate it's the PIVOT
- */
+ * To indicate it's the PIVOT
+ */
private[sql] case class PivotType(pivotCol: Expression, values: Seq[Literal]) extends GroupType
}
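
For context, PivotType is what a pivot over a grouped Dataset resolves to; a
usage sketch with illustrative column names and values:

    // Pivot the course column into one output column per listed value:
    df.groupBy("year").pivot("course", Seq("dotNET", "Java")).sum("earnings")
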
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
index 98129d6c52..c4594f0480 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
@@ -312,8 +312,8 @@ case class WholeStageCodegen(child: SparkPlan) extends UnaryNode with CodegenSup
}
/** Codegened pipeline for:
- * ${toCommentSafeString(child.treeString.trim)}
- */
+ * ${toCommentSafeString(child.treeString.trim)}
+ */
final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
private Object[] references;
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
index 012b125d6b..c6fcb6956c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
@@ -167,8 +167,8 @@ private[ui] class SparkPlanGraphNode(
}
/**
- * Represent a tree of SparkPlan for WholeStageCodegen.
- */
+ * Represent a tree of SparkPlan for WholeStageCodegen.
+ */
private[ui] class SparkPlanGraphCluster(
id: Long,
name: String,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index cfe4911cb7..948106fd06 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -100,11 +100,11 @@ abstract class JdbcDialect extends Serializable {
}
/**
- * Override connection specific properties to run before a select is made. This is in place to
- * allow dialects that need special treatment to optimize behavior.
- * @param connection The connection object
- * @param properties The connection properties. This is passed through from the relation.
- */
+ * Override connection specific properties to run before a select is made. This is in place to
+ * allow dialects that need special treatment to optimize behavior.
+ * @param connection The connection object
+ * @param properties The connection properties. This is passed through from the relation.
+ */
def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
}
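
A hypothetical dialect showing the kind of beforeFetch override the comment
anticipates: turning off autocommit when a fetch size is set, so the driver
can stream rows instead of buffering the whole result (PostgreSQL drivers
behave this way):

    import java.sql.Connection
    import org.apache.spark.sql.jdbc.JdbcDialect

    case object StreamingDialect extends JdbcDialect {
      override def canHandle(url: String): Boolean = url.startsWith("jdbc:example")

      override def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
        // Illustrative property name; disable autocommit so the fetch size is honored.
        if (properties.getOrElse("fetchsize", "0").toInt > 0) {
          connection.setAutoCommit(false)
        }
      }
    }
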