author     hyukjinkwon <gurwls223@gmail.com>    2016-04-16 14:56:23 +0100
committer  Sean Owen <sowen@cloudera.com>       2016-04-16 14:56:23 +0100
commit     9f678e97549b19d6d979b22fa4079094ce9fb2c0 (patch)
tree       978e18e46d294ba336f4657c6d627d146680f915 /core
parent     527c780bb0d6cb074128448da00cb330e9049385 (diff)
download   spark-9f678e97549b19d6d979b22fa4079094ce9fb2c0.tar.gz
           spark-9f678e97549b19d6d979b22fa4079094ce9fb2c0.tar.bz2
           spark-9f678e97549b19d6d979b22fa4079094ce9fb2c0.zip
[MINOR] Remove inappropriate type notation and extra anonymous closure within functional transformations
## What changes were proposed in this pull request?

This PR removes

- Inappropriate type notations. For example, from

  ```scala
  words.foreachRDD { (rdd: RDD[String], time: Time) =>
  ...
  ```

  to

  ```scala
  words.foreachRDD { (rdd, time) =>
  ...
  ```

- Extra anonymous closures within functional transformations. For example,

  ```scala
  .map(item => {
    ...
  })
  ```

  which can simply be written as below:

  ```scala
  .map { item =>
    ...
  }
  ```

and corrects some obvious style nits.

## How was this patch tested?

This was tested after adding rules in `scalastyle-config.xml`, although they did not catch every case perfectly. The rules applied were as below:

- For the first correction,

  ```xml
  <check customId="NoExtraClosure" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
    <parameters><parameter name="regex">(?m)\.[a-zA-Z_][a-zA-Z0-9]*\(\s*[^,]+s*=>\s*\{[^\}]+\}\s*\)</parameter></parameters>
  </check>
  ```

  ```xml
  <check customId="NoExtraClosure" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
    <parameters><parameter name="regex">\.[a-zA-Z_][a-zA-Z0-9]*\s*[\{|\(]([^\n>,]+=>)?\s*\{([^()]|(?R))*\}^[,]</parameter></parameters>
  </check>
  ```

- For the second correction,

  ```xml
  <check customId="TypeNotation" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
    <parameters><parameter name="regex">\.[a-zA-Z_][a-zA-Z0-9]*\s*[\{|\(]\s*\([^):]*:R))*\}^[,]</parameter></parameters>
  </check>
  ```

**Those rules were not added.**

Author: hyukjinkwon <gurwls223@gmail.com>

Closes #12413 from HyukjinKwon/SPARK-style.
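The snippets in the commit message show the two patterns in isolation; as a minimal, self-contained Scala sketch (the object name and sample data here are illustrative only, not part of this commit), the before/after styles look like this:

```scala
object ClosureStyleExample {
  def main(args: Array[String]): Unit = {
    val items = Seq("a", "bb", "ccc")

    // Before: redundant parameter type annotation plus an extra anonymous
    // closure wrapped in parentheses inside the transformation.
    val before = items.map((item: String) => {
      item.length
    })

    // After: braces form the closure body directly and the type is inferred.
    val after = items.map { item =>
      item.length
    }

    // Both forms produce Seq(1, 2, 3); only the style differs.
    assert(before == after)
  }
}
```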
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/TaskEndReason.scala                    |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala             | 10
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala |  5
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala       |  7
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala                 |  8
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala                      |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala        |  9
7 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/TaskEndReason.scala b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
index 83af226bfd..7487cfe9c5 100644
--- a/core/src/main/scala/org/apache/spark/TaskEndReason.scala
+++ b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
@@ -149,9 +149,7 @@ case class ExceptionFailure(
this(e, accumUpdates, preserveCause = true)
}
- def exception: Option[Throwable] = exceptionWrapper.flatMap {
- (w: ThrowableSerializationWrapper) => Option(w.exception)
- }
+ def exception: Option[Throwable] = exceptionWrapper.flatMap(w => Option(w.exception))
override def toErrorString: String =
if (fullStackTrace == null) {
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index 4212027122..6f3b8faf03 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -105,7 +105,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
* Return a new RDD by applying a function to all elements of this RDD.
*/
def mapToDouble[R](f: DoubleFunction[T]): JavaDoubleRDD = {
- new JavaDoubleRDD(rdd.map(x => f.call(x).doubleValue()))
+ new JavaDoubleRDD(rdd.map(f.call(_).doubleValue()))
}
/**
@@ -131,7 +131,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
*/
def flatMapToDouble(f: DoubleFlatMapFunction[T]): JavaDoubleRDD = {
def fn: (T) => Iterator[jl.Double] = (x: T) => f.call(x).asScala
- new JavaDoubleRDD(rdd.flatMap(fn).map((x: jl.Double) => x.doubleValue()))
+ new JavaDoubleRDD(rdd.flatMap(fn).map(_.doubleValue()))
}
/**
@@ -173,7 +173,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
def fn: (Iterator[T]) => Iterator[jl.Double] = {
(x: Iterator[T]) => f.call(x.asJava).asScala
}
- new JavaDoubleRDD(rdd.mapPartitions(fn).map((x: jl.Double) => x.doubleValue()))
+ new JavaDoubleRDD(rdd.mapPartitions(fn).map(_.doubleValue()))
}
/**
@@ -196,7 +196,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
(x: Iterator[T]) => f.call(x.asJava).asScala
}
new JavaDoubleRDD(rdd.mapPartitions(fn, preservesPartitioning)
- .map(x => x.doubleValue()))
+ .map(_.doubleValue()))
}
/**
@@ -215,7 +215,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: VoidFunction[java.util.Iterator[T]]) {
- rdd.foreachPartition((x => f.call(x.asJava)))
+ rdd.foreachPartition(x => f.call(x.asJava))
}
/**
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala
index 1b18cf0ded..96274958d1 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/ApplicationPage.scala
@@ -35,9 +35,8 @@ private[ui] class ApplicationPage(parent: MasterWebUI) extends WebUIPage("app")
def render(request: HttpServletRequest): Seq[Node] = {
val appId = request.getParameter("appId")
val state = master.askWithRetry[MasterStateResponse](RequestMasterState)
- val app = state.activeApps.find(_.id == appId).getOrElse({
- state.completedApps.find(_.id == appId).getOrElse(null)
- })
+ val app = state.activeApps.find(_.id == appId)
+ .getOrElse(state.completedApps.find(_.id == appId).orNull)
if (app == null) {
val msg = <div class="row-fluid">No running application with ID {appId}</div>
return UIUtils.basicSparkPage(msg, "Not Found")
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala
index aad2e91b25..f4376dedea 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala
@@ -68,7 +68,10 @@ private[deploy] class DriverRunner(
private var clock: Clock = new SystemClock()
private var sleeper = new Sleeper {
- def sleep(seconds: Int): Unit = (0 until seconds).takeWhile(f => {Thread.sleep(1000); !killed})
+ def sleep(seconds: Int): Unit = (0 until seconds).takeWhile { _ =>
+ Thread.sleep(1000)
+ !killed
+ }
}
/** Starts a thread to run and manage the driver. */
@@ -116,7 +119,7 @@ private[deploy] class DriverRunner(
/** Terminate this driver (or prevent it from ever starting if not yet started) */
private[worker] def kill() {
synchronized {
- process.foreach(p => p.destroy())
+ process.foreach(_.destroy())
killed = true
}
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
index 90d9735cb3..35665ab7c0 100644
--- a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
@@ -190,11 +190,11 @@ private class PartitionCoalescer(maxPartitions: Int, prev: RDD[_], balanceSlack:
// initializes/resets to start iterating from the beginning
def resetIterator(): Iterator[(String, Partition)] = {
- val iterators = (0 to 2).map( x =>
- prev.partitions.iterator.flatMap(p => {
+ val iterators = (0 to 2).map { x =>
+ prev.partitions.iterator.flatMap { p =>
if (currPrefLocs(p).size > x) Some((currPrefLocs(p)(x), p)) else None
- } )
- )
+ }
+ }
iterators.reduceLeft((x, y) => x ++ y)
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala b/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
index 526138093d..5426bf80ba 100644
--- a/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
@@ -65,11 +65,11 @@ class JdbcRDD[T: ClassTag](
override def getPartitions: Array[Partition] = {
// bounds are inclusive, hence the + 1 here and - 1 on end
val length = BigInt(1) + upperBound - lowerBound
- (0 until numPartitions).map(i => {
+ (0 until numPartitions).map { i =>
val start = lowerBound + ((i * length) / numPartitions)
val end = lowerBound + (((i + 1) * length) / numPartitions) - 1
new JdbcPartition(i, start.toLong, end.toLong)
- }).toArray
+ }.toArray
}
override def compute(thePart: Partition, context: TaskContext): Iterator[T] = new NextIterator[T]
diff --git a/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala
index bb84e4af15..34a1c112cb 100644
--- a/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala
@@ -129,7 +129,7 @@ private object ParallelCollectionRDD {
}
seq match {
case r: Range =>
- positions(r.length, numSlices).zipWithIndex.map({ case ((start, end), index) =>
+ positions(r.length, numSlices).zipWithIndex.map { case ((start, end), index) =>
// If the range is inclusive, use inclusive range for the last slice
if (r.isInclusive && index == numSlices - 1) {
new Range.Inclusive(r.start + start * r.step, r.end, r.step)
@@ -137,7 +137,7 @@ private object ParallelCollectionRDD {
else {
new Range(r.start + start * r.step, r.start + end * r.step, r.step)
}
- }).toSeq.asInstanceOf[Seq[Seq[T]]]
+ }.toSeq.asInstanceOf[Seq[Seq[T]]]
case nr: NumericRange[_] =>
// For ranges of Long, Double, BigInteger, etc
val slices = new ArrayBuffer[Seq[T]](numSlices)
@@ -150,10 +150,9 @@ private object ParallelCollectionRDD {
slices
case _ =>
val array = seq.toArray // To prevent O(n^2) operations for List etc
- positions(array.length, numSlices).map({
- case (start, end) =>
+ positions(array.length, numSlices).map { case (start, end) =>
array.slice(start, end).toSeq
- }).toSeq
+ }.toSeq
}
}
}