author     Reynold Xin <rxin@databricks.com>  2015-05-29 13:38:37 -0700
committer  Reynold Xin <rxin@databricks.com>  2015-05-29 13:38:37 -0700
commit     94f62a4979e4bc5f7bf4f5852d76977e097209e6 (patch)
tree       8bdc73d6326fab1519a192fa1b8d07c583608059 /core
parent     6181937f315480543d28e542d43269cfa591e9d0 (diff)
[SPARK-7940] Enforce whitespace checking for DO, TRY, CATCH, FINALLY, MATCH, LARROW, RARROW in style checker.
Author: Reynold Xin <rxin@databricks.com>

Closes #6491 from rxin/more-whitespace and squashes the following commits:

f6e63dc [Reynold Xin] [SPARK-7940] Enforce whitespace checking for DO, TRY, CATCH, FINALLY, MATCH, LARROW, RARROW in style checker.
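For reference, the names in the commit title are scalastyle token identifiers: LARROW is <- and RARROW is =>. The sketch below is illustrative only (the object and helper names are hypothetical, not part of this commit); it shows the whitespace style the new checks enforce, plus the redundant-brace cleanup around single-expression case bodies that the diffs below also apply:

    object WhitespaceStyleExample {
      // Hypothetical helpers, defined only so the example compiles and runs.
      def riskyOperation(): Unit = throw new RuntimeException("boom")
      def handle(e: Exception): Unit = println("caught: " + e.getMessage)

      def main(args: Array[String]): Unit = {
        // The checks flag `try{` (no space after the keyword); the enforced
        // style is `try {`, and likewise for do/catch/finally/match.
        try {
          riskyOperation()
        } catch {
          case e: Exception =>
            handle(e)  // no braces needed around a single-expression case body
        }

        // LARROW (<-) and RARROW (=>) likewise require surrounding spaces:
        for (x <- 1 to 3) println(x)
        val double = (n: Int) => n * 2
        println(double(21))
      }
    }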
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala      | 2
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/Connection.scala        | 5
-rw-r--r--  core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala | 5
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala  | 2
4 files changed, 6 insertions, 8 deletions
diff --git a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
index 1a92a799d0..67a3761029 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/BlockMessage.scala
@@ -155,7 +155,7 @@ private[nio] class BlockMessage() {
   override def toString: String = {
     "BlockMessage [type = " + typ + ", id = " + id + ", level = " + level +
-      ", data = " + (if (data != null) data.remaining.toString else "null") + "]"
+      ", data = " + (if (data != null) data.remaining.toString else "null") + "]"
   }
 }
diff --git a/core/src/main/scala/org/apache/spark/network/nio/Connection.scala b/core/src/main/scala/org/apache/spark/network/nio/Connection.scala
index 6b898bd4bf..1499da07bb 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/Connection.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/Connection.scala
@@ -326,15 +326,14 @@ class SendingConnection(val address: InetSocketAddress, selector_ : Selector,
   // MUST be called within the selector loop
   def connect() {
-    try{
+    try {
       channel.register(selector, SelectionKey.OP_CONNECT)
       channel.connect(address)
       logInfo("Initiating connection to [" + address + "]")
     } catch {
-      case e: Exception => {
+      case e: Exception =>
         logError("Error connecting to " + address, e)
         callOnExceptionCallbacks(e)
-      }
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
index 497871ed6d..c0bca2c4bc 100644
--- a/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
+++ b/core/src/main/scala/org/apache/spark/network/nio/ConnectionManager.scala
@@ -635,12 +635,11 @@ private[nio] class ConnectionManager(
           val message = securityMsgResp.toBufferMessage
           if (message == null) throw new IOException("Error creating security message")
           sendSecurityMessage(waitingConn.getRemoteConnectionManagerId(), message)
-        } catch {
-          case e: Exception => {
+        } catch {
+          case e: Exception =>
             logError("Error handling sasl client authentication", e)
             waitingConn.close()
             throw new IOException("Error evaluating sasl response: ", e)
-          }
         }
       }
     }
diff --git a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
index 7598ff617b..9e3880714a 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PartitionerAwareUnionRDD.scala
@@ -86,7 +86,7 @@ class PartitionerAwareUnionRDD[T: ClassTag](
     }
     val location = if (locations.isEmpty) {
       None
-    } else {
+    } else {
       // Find the location that maximum number of parent partitions prefer
       Some(locations.groupBy(x => x).maxBy(_._2.length)._1)
     }