author    Reynold Xin <rxin@databricks.com>  2015-04-03 01:25:02 -0700
committer Reynold Xin <rxin@databricks.com>  2015-04-03 01:25:02 -0700
commit    82701ee25fda64f03899713bc56f82ca6f278151
tree      07fba36d66228f7561bd65dd502fd668d50a9be5 /yarn
parent    c42c3fc7f7b79a1f6ce990d39b5d9d14ab19fcf0
[SPARK-6428] Turn on explicit type checking for public methods.

This builds on my earlier pull requests and turns on the explicit type
checking in scalastyle.

Author: Reynold Xin <rxin@databricks.com>

Closes #5342 from rxin/SPARK-6428 and squashes the following commits:

7b531ab [Reynold Xin] import ordering
2d9a8a5 [Reynold Xin] jl
e668b1c [Reynold Xin] override
9b9e119 [Reynold Xin] Parenthesis.
82e0cf5 [Reynold Xin] [SPARK-6428] Turn on explicit type checking for public methods.
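
A note on the rule being enabled: when a public method omits its result type,
Scala infers it from the body, so an innocuous body edit can silently change
the method's public signature and break callers. A minimal sketch of the
hazard, with illustrative names that are not from the Spark codebase:

object InferredTypeHazard {
  // No annotation: the result type is whatever the body infers today
  // (Map[String, Int]). If a String-valued entry were added later, the
  // inferred type would silently widen to Map[String, Any], breaking
  // callers at their call sites rather than here.
  def defaults = Map("retries" -> 3)

  // With an explicit annotation, the same body edit fails to compile
  // right here, where the contract is declared.
  def defaultsExplicit: Map[String, Int] = Map("retries" -> 3)

  def main(args: Array[String]): Unit = {
    val retries: Int = defaults("retries") // fine only while inference cooperates
    println(retries + defaultsExplicit("retries"))
  }
}

scalastyle's standard rule set includes a checker for this
(PublicMethodsHaveTypeChecker); the configuration change itself lives in the
pull request, not in the yarn portion of the diff shown below.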
Diffstat (limited to 'yarn')
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala  62
-rw-r--r--  yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala    6
2 files changed, 36 insertions(+), 32 deletions(-)
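
Nearly every hunk below applies the same mechanical rewrite: state the public
method's result type explicitly, and where the old code used the
def f(...) = synchronized { ... } shape, move the synchronized block inside a
braced body so the annotation sits on the signature. A compressed sketch of
the two shapes, with simplified signatures rather than the actual Spark
declarations:

class Registry {
  private var closed = false

  // Old shape: def close() = synchronized { ... } -- the result type
  // (Unit) was inferred from the synchronized block rather than declared.
  // New shape: annotate the signature, then synchronize inside the body.
  def close(): Unit = {
    synchronized {
      if (!closed) {
        closed = true
      }
    }
  }

  // An annotated one-liner also compiles; the commit prefers the nested
  // form for the multi-line methods below.
  def isClosed: Boolean = synchronized { closed }
}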
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index 3d18690cd9..455554eea0 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -162,7 +162,7 @@ private[spark] class ApplicationMaster(
    * status to SUCCEEDED in cluster mode to handle if the user calls System.exit
    * from the application code.
    */
-  final def getDefaultFinalStatus() = {
+  final def getDefaultFinalStatus(): FinalApplicationStatus = {
     if (isClusterMode) {
       FinalApplicationStatus.SUCCEEDED
     } else {
@@ -175,31 +175,35 @@ private[spark] class ApplicationMaster(
    * This means the ResourceManager will not retry the application attempt on your behalf if
    * a failure occurred.
    */
-  final def unregister(status: FinalApplicationStatus, diagnostics: String = null) = synchronized {
-    if (!unregistered) {
-      logInfo(s"Unregistering ApplicationMaster with $status" +
-        Option(diagnostics).map(msg => s" (diag message: $msg)").getOrElse(""))
-      unregistered = true
-      client.unregister(status, Option(diagnostics).getOrElse(""))
+  final def unregister(status: FinalApplicationStatus, diagnostics: String = null): Unit = {
+    synchronized {
+      if (!unregistered) {
+        logInfo(s"Unregistering ApplicationMaster with $status" +
+          Option(diagnostics).map(msg => s" (diag message: $msg)").getOrElse(""))
+        unregistered = true
+        client.unregister(status, Option(diagnostics).getOrElse(""))
+      }
     }
   }
 
-  final def finish(status: FinalApplicationStatus, code: Int, msg: String = null) = synchronized {
-    if (!finished) {
-      val inShutdown = Utils.inShutdown()
-      logInfo(s"Final app status: ${status}, exitCode: ${code}" +
-        Option(msg).map(msg => s", (reason: $msg)").getOrElse(""))
-      exitCode = code
-      finalStatus = status
-      finalMsg = msg
-      finished = true
-      if (!inShutdown && Thread.currentThread() != reporterThread && reporterThread != null) {
-        logDebug("shutting down reporter thread")
-        reporterThread.interrupt()
-      }
-      if (!inShutdown && Thread.currentThread() != userClassThread && userClassThread != null) {
-        logDebug("shutting down user thread")
-        userClassThread.interrupt()
+  final def finish(status: FinalApplicationStatus, code: Int, msg: String = null): Unit = {
+    synchronized {
+      if (!finished) {
+        val inShutdown = Utils.inShutdown()
+        logInfo(s"Final app status: $status, exitCode: $code" +
+          Option(msg).map(msg => s", (reason: $msg)").getOrElse(""))
+        exitCode = code
+        finalStatus = status
+        finalMsg = msg
+        finished = true
+        if (!inShutdown && Thread.currentThread() != reporterThread && reporterThread != null) {
+          logDebug("shutting down reporter thread")
+          reporterThread.interrupt()
+        }
+        if (!inShutdown && Thread.currentThread() != userClassThread && userClassThread != null) {
+          logDebug("shutting down user thread")
+          userClassThread.interrupt()
+        }
       }
     }
   }
@@ -506,7 +510,7 @@ private[spark] class ApplicationMaster(
   private class AMActor(driverUrl: String, isClusterMode: Boolean) extends Actor {
     var driver: ActorSelection = _
 
-    override def preStart() = {
+    override def preStart(): Unit = {
       logInfo("Listen to driver: " + driverUrl)
       driver = context.actorSelection(driverUrl)
       // Send a hello message to establish the connection, after which
@@ -520,7 +524,7 @@
       }
     }
 
-    override def receive = {
+    override def receive: PartialFunction[Any, Unit] = {
       case x: DisassociatedEvent =>
         logInfo(s"Driver terminated or disconnected! Shutting down. $x")
         // In cluster mode, do not rely on the disassociated event to exit
@@ -567,7 +571,7 @@ object ApplicationMaster extends Logging {
 
   private var master: ApplicationMaster = _
 
-  def main(args: Array[String]) = {
+  def main(args: Array[String]): Unit = {
     SignalLogger.register(log)
     val amArgs = new ApplicationMasterArguments(args)
     SparkHadoopUtil.get.runAsSparkUser { () =>
@@ -576,11 +580,11 @@ object ApplicationMaster extends Logging {
     }
   }
 
-  private[spark] def sparkContextInitialized(sc: SparkContext) = {
+  private[spark] def sparkContextInitialized(sc: SparkContext): Unit = {
     master.sparkContextInitialized(sc)
   }
 
-  private[spark] def sparkContextStopped(sc: SparkContext) = {
+  private[spark] def sparkContextStopped(sc: SparkContext): Boolean = {
     master.sparkContextStopped(sc)
   }
 
@@ -592,7 +596,7 @@
  */
 object ExecutorLauncher {
 
-  def main(args: Array[String]) = {
+  def main(args: Array[String]): Unit = {
     ApplicationMaster.main(args)
   }
 
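
The ExecutorRunnable hunk below adds the same annotations plus two related
conventions: run gains override and the exact (): Unit signature of
java.lang.Runnable.run (the added keyword implies the class mixes in
Runnable), and the side-effecting startContainer is declared and invoked with
empty parentheses. A small sketch of both conventions, again with
illustrative names rather than the Spark ones:

class ContainerTask extends Runnable {
  // Matching the Java interface exactly: override plus an explicit Unit.
  override def run(): Unit = {
    launch()
  }

  // Empty parentheses mark a method as side-effecting; Scala convention
  // then keeps them at the call site too (launch(), never bare launch).
  def launch(): Unit = println("container launched")
}

Exercised with, say, new Thread(new ContainerTask).start(), run() prints the
message once.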
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
index c1d3f7320f..1ce10d906a 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/ExecutorRunnable.scala
@@ -59,15 +59,15 @@ class ExecutorRunnable(
   val yarnConf: YarnConfiguration = new YarnConfiguration(conf)
   lazy val env = prepareEnvironment(container)
 
-  def run = {
+  override def run(): Unit = {
     logInfo("Starting Executor Container")
     nmClient = NMClient.createNMClient()
     nmClient.init(yarnConf)
     nmClient.start()
-    startContainer
+    startContainer()
   }
 
-  def startContainer = {
+  def startContainer(): java.util.Map[String, ByteBuffer] = {
     logInfo("Setting up ContainerLaunchContext")
     val ctx = Records.newRecord(classOf[ContainerLaunchContext])