about summary refs log tree commit diff
path: root/yarn
diff options
context:
space:
mode:
authorHari Shreedharan <hshreedharan@apache.org>2015-06-06 21:09:56 -0700
committerAndrew Or <andrew@databricks.com>2015-06-06 21:09:56 -0700
commited2cc3ee890694ca0c1fa0bbc7186c8b80da3fab (patch)
tree7b4aaddc68b896d3cab76f90c9db059aa70fe9a1 /yarn
parent18c4fcebbeecc3b26476a728bc9db62f5c0a6f87 (diff)
downloadspark-ed2cc3ee890694ca0c1fa0bbc7186c8b80da3fab.tar.gz
spark-ed2cc3ee890694ca0c1fa0bbc7186c8b80da3fab.tar.bz2
spark-ed2cc3ee890694ca0c1fa0bbc7186c8b80da3fab.zip
[SPARK-8136] [YARN] Fix flakiness in YarnClusterSuite.
Instead of actually downloading the logs, just verify that the logs link is actually a URL and is in the expected format. Author: Hari Shreedharan <hshreedharan@apache.org> Closes #6680 from harishreedharan/simplify-am-log-tests and squashes the following commits: 3183aeb [Hari Shreedharan] Remove check for hostname which can fail on machines with several hostnames. Removed some unused imports. 50d69a7 [Hari Shreedharan] [SPARK-8136][YARN] Fix flakiness in YarnClusterSuite.
Diffstat (limited to 'yarn')
-rw-r--r-- yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala | 16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
index bc42e12dfa..93d587d0cb 100644
--- a/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
+++ b/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala
@@ -18,12 +18,12 @@
package org.apache.spark.deploy.yarn
import java.io.{File, FileOutputStream, OutputStreamWriter}
+import java.net.URL
import java.util.Properties
import java.util.concurrent.TimeUnit
import scala.collection.JavaConversions._
import scala.collection.mutable
-import scala.io.Source
import com.google.common.base.Charsets.UTF_8
import com.google.common.io.ByteStreams
@@ -344,18 +344,20 @@ private object YarnClusterDriver extends Logging with Matchers {
assert(info.logUrlMap.nonEmpty)
}
- // If we are running in yarn-cluster mode, verify that driver logs are downloadable.
+ // If we are running in yarn-cluster mode, verify that driver logs links and present and are
+ // in the expected format.
if (conf.get("spark.master") == "yarn-cluster") {
assert(listener.driverLogs.nonEmpty)
val driverLogs = listener.driverLogs.get
assert(driverLogs.size === 2)
assert(driverLogs.containsKey("stderr"))
assert(driverLogs.containsKey("stdout"))
- val stderr = driverLogs("stderr") // YARN puts everything in stderr.
- val lines = Source.fromURL(stderr).getLines()
- // Look for a line that contains YarnClusterSchedulerBackend, since that is guaranteed in
- // cluster mode.
- assert(lines.exists(_.contains("YarnClusterSchedulerBackend")))
+ val urlStr = driverLogs("stderr")
+ // Ensure that this is a valid URL, else this will throw an exception
+ new URL(urlStr)
+ val containerId = YarnSparkHadoopUtil.get.getContainerId
+ val user = Utils.getCurrentUserName()
+ assert(urlStr.endsWith(s"/node/containerlogs/$containerId/$user/stderr?start=0"))
}
}