diff options
-rw-r--r-- | core/src/main/scala/org/apache/spark/SparkEnv.scala | 2 |
-rw-r--r-- | docs/configuration.md | 10 |
2 files changed, 11 insertions, 1 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala b/core/src/main/scala/org/apache/spark/SparkEnv.scala
index 557d2f5128..16c5d6648d 100644
--- a/core/src/main/scala/org/apache/spark/SparkEnv.scala
+++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -274,7 +274,7 @@ object SparkEnv extends Logging {
     val shuffleMemoryManager = new ShuffleMemoryManager(conf)
 
     val blockTransferService =
-      conf.get("spark.shuffle.blockTransferService", "nio").toLowerCase match {
+      conf.get("spark.shuffle.blockTransferService", "netty").toLowerCase match {
         case "netty" =>
           new NettyBlockTransferService(conf)
         case "nio" =>
diff --git a/docs/configuration.md b/docs/configuration.md
index 3007706a25..78c4bf332c 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -359,6 +359,16 @@ Apart from these, the following properties are also available, and may be useful
     map-side aggregation and there are at most this many reduce partitions.
   </td>
 </tr>
+<tr>
+  <td><code>spark.shuffle.blockTransferService</code></td>
+  <td>netty</td>
+  <td>
+    Implementation to use for transferring shuffle and cached blocks between executors. There
+    are two implementations available: <code>netty</code> and <code>nio</code>. Netty-based
+    block transfer is intended to be simpler but equally efficient and is the default option
+    starting in 1.2.
+  </td>
+</tr>
 </table>
 
 #### Spark UI