author     Adam Roberts <aroberts@uk.ibm.com>  2016-09-16 10:20:50 +0100
committer  Sean Owen <sowen@cloudera.com>  2016-09-16 10:20:50 +0100
commit     fc1efb720c9c0033077c3c20ee144d0f757e6bcd (patch)
tree       fbe114d166c568f2eb392b11fe3eeae7388bb118 /external
parent     b2e27262440015f57bcfa888921c9cc017800910 (diff)
[SPARK-17534][TESTS] Increase timeouts for DirectKafkaStreamSuite tests
## What changes were proposed in this pull request?

There are two tests in this suite that are particularly flaky on the following hardware: 2x Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz, 16 GB of RAM, 1 TB HDD. This simple PR increases the timeouts and the batch duration so the tests can reliably pass.

## How was this patch tested?

Existing unit tests on the two-core box where I was often seeing the failures.

Author: Adam Roberts <aroberts@uk.ibm.com>

Closes #15094 from a-roberts/patch-6.
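For context on what these knobs do: ScalaTest's `Eventually` re-runs a block at the given `interval` until it either passes or the `timeout` expires. Below is a minimal self-contained sketch using the new values, assuming only ScalaTest on the classpath; the time-based assertion is a hypothetical stand-in for the suite's real record-count check:

```scala
import org.scalatest.Assertions._
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._

object EventuallySketch extends App {
  val start = System.currentTimeMillis()

  // Retry the block every 1000 ms, giving up after 100 s
  // (the old values were a 200 ms interval and a 20 s timeout).
  eventually(timeout(100000.milliseconds), interval(1000.milliseconds)) {
    // Stand-in condition: false for the first ~3 s, then true,
    // mimicking records that trickle in slowly on a loaded box.
    assert(System.currentTimeMillis() - start > 3000)
  }

  println("condition eventually held")
}
```

Note that the larger timeout only costs time in the failing case: a passing test still returns as soon as the assertion first holds.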
Diffstat (limited to 'external')
-rw-r--r--  external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala  |  8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
index b1d90b8a82..e04f35eceb 100644
--- a/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
+++ b/external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/DirectKafkaStreamSuite.scala
@@ -108,7 +108,7 @@ class DirectKafkaStreamSuite
     val expectedTotal = (data.values.sum * topics.size) - 2
     val kafkaParams = getKafkaParams("auto.offset.reset" -> "earliest")
 
-    ssc = new StreamingContext(sparkConf, Milliseconds(200))
+    ssc = new StreamingContext(sparkConf, Milliseconds(1000))
     val stream = withClue("Error creating direct stream") {
       KafkaUtils.createDirectStream[String, String](
         ssc,
@@ -150,7 +150,7 @@ class DirectKafkaStreamSuite
       allReceived.addAll(Arrays.asList(rdd.map(r => (r.key, r.value)).collect(): _*))
     }
     ssc.start()
-    eventually(timeout(20000.milliseconds), interval(200.milliseconds)) {
+    eventually(timeout(100000.milliseconds), interval(1000.milliseconds)) {
       assert(allReceived.size === expectedTotal,
         "didn't get expected number of messages, messages:\n" +
           allReceived.asScala.mkString("\n"))
@@ -172,7 +172,7 @@ class DirectKafkaStreamSuite
     val expectedTotal = (data.values.sum * 2) - 3
     val kafkaParams = getKafkaParams("auto.offset.reset" -> "earliest")
 
-    ssc = new StreamingContext(sparkConf, Milliseconds(200))
+    ssc = new StreamingContext(sparkConf, Milliseconds(1000))
     val stream = withClue("Error creating direct stream") {
       KafkaUtils.createDirectStream[String, String](
         ssc,
@@ -214,7 +214,7 @@ class DirectKafkaStreamSuite
       allReceived.addAll(Arrays.asList(rdd.map(r => (r.key, r.value)).collect(): _*))
     }
     ssc.start()
-    eventually(timeout(20000.milliseconds), interval(200.milliseconds)) {
+    eventually(timeout(100000.milliseconds), interval(1000.milliseconds)) {
      assert(allReceived.size === expectedTotal,
        "didn't get expected number of messages, messages:\n" +
          allReceived.asScala.mkString("\n"))
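For readers who want to exercise the two tuned pieces together without standing up Kafka, here is a self-contained sketch of my own (not the suite's code) that swaps the Kafka direct stream for a queue-backed DStream but keeps the suite's collect-then-assert pattern along with the new batch duration, timeout, and interval:

```scala
import java.util.Arrays
import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Milliseconds, StreamingContext}
import org.scalatest.Assertions._
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._

object BatchDurationSketch extends App {
  val sparkConf = new SparkConf().setMaster("local[2]").setAppName("sketch")
  // 1000 ms batches give each micro-batch five times longer to complete
  // than the previous 200 ms duration.
  val ssc = new StreamingContext(sparkConf, Milliseconds(1000))

  // Queue-backed stand-in for the Kafka direct stream.
  val input = mutable.Queue[RDD[String]](
    ssc.sparkContext.makeRDD((1 to 10).map(_.toString)))

  // As in the suite: drain every batch into a thread-safe queue on the driver.
  val allReceived = new ConcurrentLinkedQueue[String]()
  ssc.queueStream(input).foreachRDD { rdd =>
    allReceived.addAll(Arrays.asList(rdd.collect(): _*))
  }
  ssc.start()

  // The suite's pattern: poll roughly once per batch, for up to 100 s.
  eventually(timeout(100000.milliseconds), interval(1000.milliseconds)) {
    assert(allReceived.size === 10, "didn't get expected number of messages")
  }
  ssc.stop()
}
```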