Diffstat (limited to 'core/src')
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkConf.scala               |  6
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala  | 12
-rw-r--r--  core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala   |  4
3 files changed, 12 insertions, 10 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 5da2e98f1f..e0fd248c43 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -419,8 +419,10 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging {
    */
   private[spark] def getenv(name: String): String = System.getenv(name)
 
-  /** Checks for illegal or deprecated config settings. Throws an exception for the former. Not
-   * idempotent - may mutate this conf object to convert deprecated settings to supported ones. */
+  /**
+   * Checks for illegal or deprecated config settings. Throws an exception for the former. Not
+   * idempotent - may mutate this conf object to convert deprecated settings to supported ones.
+   */
   private[spark] def validateSettings() {
     if (contains("spark.local.dir")) {
       val msg = "In Spark 1.0 and later spark.local.dir will be overridden by the value set by " +
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index 4e8e363635..41ac308808 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -76,9 +76,9 @@ class SparkHadoopUtil extends Logging {
   }
 
   /**
-    * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
-    * configuration.
-    */
+   * Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
+   * configuration.
+   */
   def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
     // Note: this null check is around more than just access to the "conf" object to maintain
     // the behavior of the old implementation of this code, for backwards compatibility.
@@ -108,9 +108,9 @@ class SparkHadoopUtil extends Logging {
   }
 
   /**
-    * Return an appropriate (subclass of) Configuration. Creating a config can initialize some Hadoop
-    * subsystems.
-    */
+   * Return an appropriate (subclass of) Configuration. Creating a config can initialize some Hadoop
+   * subsystems.
+   */
   def newConfiguration(conf: SparkConf): Configuration = {
     val hadoopConf = new Configuration()
     appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
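(A short, hedged sketch of the two methods documented above, not part of the commit: newConfiguration() builds a Hadoop Configuration and delegates to appendS3AndSparkHadoopConfigurations(), which strips the spark.hadoop.* prefix and maps spark.buffer.size onto io.file.buffer.size. The specific keys and values are illustrative.)

import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil

// Sketch only: spark.hadoop.foo is copied into the Hadoop conf as foo, and
// spark.buffer.size feeds Hadoop's io.file.buffer.size.
val sparkConf = new SparkConf(loadDefaults = false)
  .set("spark.hadoop.fs.s3a.connection.maximum", "64") // illustrative key
  .set("spark.buffer.size", "65536")

val hadoopConf: Configuration = SparkHadoopUtil.get.newConfiguration(sparkConf)
assert(hadoopConf.get("fs.s3a.connection.maximum") == "64")
assert(hadoopConf.get("io.file.buffer.size") == "65536")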
diff --git a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
index d06b2c67d2..c562c70aba 100644
--- a/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
+++ b/core/src/main/scala/org/apache/spark/partial/BoundedDouble.scala
@@ -28,8 +28,8 @@ class BoundedDouble(val mean: Double, val confidence: Double, val low: Double, v
     this.mean.hashCode ^ this.confidence.hashCode ^ this.low.hashCode ^ this.high.hashCode
 
   /**
-    * Note that consistent with Double, any NaN value will make equality false
-    */
+   * Note that consistent with Double, any NaN value will make equality false
+   */
   override def equals(that: Any): Boolean =
     that match {
       case that: BoundedDouble => {
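(A brief sketch of the NaN semantics documented above, not part of the commit; the field values are arbitrary.)

import org.apache.spark.partial.BoundedDouble

// Sketch only: as with Double.NaN == Double.NaN, equality fails whenever any
// field is NaN, even between identically constructed instances, while the
// XOR-based hashCode shown above remains stable for the same bit patterns.
val a = new BoundedDouble(1.0, 0.95, Double.NaN, 2.0)
val b = new BoundedDouble(1.0, 0.95, Double.NaN, 2.0)
assert(!(a == b)) // NaN != NaN, so equals returns false
assert(a.hashCode == b.hashCode)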