From 09f3b677cb7cce08882ea030e9af5798a63046ba Mon Sep 17 00:00:00 2001
From: Stephen Haberman
Date: Wed, 30 Oct 2013 12:29:39 -0500
Subject: Avoid match errors when filtering for spark.hadoop settings.

---
 core/src/main/scala/org/apache/spark/SparkContext.scala | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index d694dfe4d9..28ac49a24a 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -248,8 +248,10 @@ class SparkContext(
       conf.set("fs.s3n.awsSecretAccessKey", System.getenv("AWS_SECRET_ACCESS_KEY"))
     }
     // Copy any "spark.hadoop.foo=bar" system properties into conf as "foo=bar"
-    Utils.getSystemProperties.foreach { case (key, value) if key.startsWith("spark.hadoop.") =>
-      conf.set(key.substring("spark.hadoop.".length), value)
+    Utils.getSystemProperties.foreach { case (key, value) =>
+      if (key.startsWith("spark.hadoop.")) {
+        conf.set(key.substring("spark.hadoop.".length), value)
+      }
     }
     val bufferSize = System.getProperty("spark.buffer.size", "65536")
     conf.set("io.file.buffer.size", bufferSize)
--
cgit v1.2.3
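
Note on the failure mode (editorial addition, not part of the patch): a pattern-match
block passed to foreach compiles to an ordinary total Function1, and the guard on its
single case leaves every non-matching pair unhandled, so any system property whose key
does not start with "spark.hadoop." raises scala.MatchError. Below is a minimal
standalone Scala sketch reproducing both forms; the sample property names are made up
and a plain Map stands in for Utils.getSystemProperties:

    object MatchErrorDemo {
      def main(args: Array[String]): Unit = {
        val props = Map(
          "spark.hadoop.fs.defaultFS" -> "hdfs://example:8020", // hypothetical key
          "java.version"              -> "1.7")                 // any non-matching key

        try {
          // Old form: the guard leaves "java.version" unmatched, so the
          // synthesized Function1 throws scala.MatchError at runtime.
          props.foreach { case (key, value) if key.startsWith("spark.hadoop.") =>
            println(s"${key.substring("spark.hadoop.".length)}=$value")
          }
        } catch {
          case e: MatchError => println(s"MatchError on: ${e.getMessage}")
        }

        // New form: match every pair unconditionally, then filter in the body.
        props.foreach { case (key, value) =>
          if (key.startsWith("spark.hadoop.")) {
            println(s"${key.substring("spark.hadoop.".length)}=$value")
          }
        }
      }
    }

A guarded case is only safe where the compiler expects a PartialFunction (e.g. with
collect); foreach expects a total function, which is why the patch moves the
startsWith check into the case body.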