author	jerryshao <saisai.shao@intel.com>	2015-01-12 13:14:44 -0800
committer	Tathagata Das <tathagata.das1565@gmail.com>	2015-01-12 13:14:44 -0800
commit	3aed3051c0b6cd5f38d7db7d20fa7a1680bfde6f (patch)
tree	b54cb311df9de6daea35ba6f20bc29aaf0da8612 /streaming
parent	aff49a3ee1a91d730590f4a0b9ac485fd52dc8bf (diff)
[SPARK-4999][Streaming] Change storeInBlockManager to false by default
Currently a WAL-backed block is read out of HDFS and put into the BlockManager with storage level MEMORY_ONLY_SER by default. Since the WAL-backed block is already materialized in HDFS with fault tolerance, there is no need to put it into the BlockManager again by default.

Author: jerryshao <saisai.shao@intel.com>

Closes #3906 from jerryshao/SPARK-4999 and squashes the following commits:

b95f95e [jerryshao] Change storeInBlockManager to false by default
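For context on why the re-caching was redundant, below is a minimal, self-contained Scala sketch of the read path this flag controls. It is not Spark's actual WriteAheadLogBackedBlockRDD implementation; the Block type, the cache map, and readFromWriteAheadLog are stand-ins for the BlockManager and the WAL reader, under the assumption that a block is served from the local cache when present, is otherwise read back from the write-ahead log, and is re-inserted into the cache only when storeInBlockManager is true.

// Illustrative sketch only; types and helpers are stand-ins, not Spark's API.
object WalReadPathSketch {
  final case class Block(id: String, data: Seq[String])

  // Stand-in for the local BlockManager cache.
  val blockManagerCache = scala.collection.mutable.Map.empty[String, Block]

  // Stand-in for reading a block back from its WAL segment in HDFS,
  // which is always possible because the WAL copy is fault-tolerant.
  def readFromWriteAheadLog(id: String): Block =
    Block(id, Seq(s"record-for-$id"))

  // storeInBlockManager decides whether a block recovered from the WAL is also
  // re-inserted into the local cache. This commit flips the default to false,
  // since re-caching a block that is already durable in HDFS is redundant.
  def getBlock(id: String, storeInBlockManager: Boolean): Block =
    blockManagerCache.getOrElse(id, {
      val block = readFromWriteAheadLog(id)
      if (storeInBlockManager) blockManagerCache(id) = block
      block
    })

  def main(args: Array[String]): Unit = {
    val b = getBlock("input-0", storeInBlockManager = false)
    println(b)                          // block recovered from the WAL
    println(blockManagerCache.isEmpty)  // true: nothing was re-cached
  }
}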
Diffstat (limited to 'streaming')
-rw-r--r--	streaming/src/main/scala/org/apache/spark/streaming/dstream/ReceiverInputDStream.scala | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/ReceiverInputDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/ReceiverInputDStream.scala
index c834744631..afd3c4bc4c 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/ReceiverInputDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/ReceiverInputDStream.scala
@@ -86,7 +86,7 @@ abstract class ReceiverInputDStream[T: ClassTag](@transient ssc_ : StreamingCont
}.toArray
// Since storeInBlockManager = false, the storage level does not matter.
new WriteAheadLogBackedBlockRDD[T](ssc.sparkContext,
- blockIds, logSegments, storeInBlockManager = true, StorageLevel.MEMORY_ONLY_SER)
+ blockIds, logSegments, storeInBlockManager = false, StorageLevel.MEMORY_ONLY_SER)
} else {
new BlockRDD[T](ssc.sc, blockIds)
}