author    Sean Owen <sowen@cloudera.com>  2016-07-30 04:42:38 -0700
committer Sean Owen <sowen@cloudera.com>  2016-07-30 04:42:38 -0700
commit    0dc4310b470c7e4355c0da67ca3373c3013cc9dd (patch)
tree      9a8ac5aefbb25188958e9ae028c7ffdc117b705a
parent    bbc247548ac6faeca15afc05c266cee37ef13416 (diff)
[SPARK-16694][CORE] Use for/foreach rather than map for Unit expressions whose side effects are required
## What changes were proposed in this pull request?

Use foreach/for instead of map where the operation requires execution of the body for its side effects, rather than defining a transformation.

## How was this patch tested?

Jenkins

Author: Sean Owen <sowen@cloudera.com>

Closes #14332 from srowen/SPARK-16694.
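A minimal sketch of the pattern being fixed (not taken from the patch itself): `map` builds and returns a transformed collection, so calling it purely for side effects discards the result and signals the wrong intent, while `foreach` returns `Unit` and states that intent directly.

```scala
val xs = Seq(1, 2, 3)

// Builds and immediately discards a Seq[Unit]; the side effect still
// runs on a strict collection, but the call reads as a transformation
// that never happens.
xs.map(x => println(x))

// Returns Unit: the body is executed for its side effect only.
xs.foreach(x => println(x))
```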
Diffstat (limited to 'sql')
-rw-r--r-- sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala                  | 4
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala | 4
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala         | 2
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala             | 4
4 files changed, 7 insertions, 7 deletions
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
index b3f20692b2..2a445b8cdb 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
@@ -141,7 +141,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
- primitiveTypes.map { t =>
+ primitiveTypes.foreach { t =>
val dataGen = RandomDataGenerator.forType(t, nullable = true).get
val inputData = Seq.fill(10) {
val value = dataGen.apply()
@@ -182,7 +182,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
- primitiveTypes.map { t =>
+ primitiveTypes.foreach { t =>
val dataGen = RandomDataGenerator.forType(t, nullable = true).get
val inputData = Seq.fill(10) {
val value = dataGen.apply()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index 1aadd700d7..babf944e6a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -79,7 +79,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
input: ByteBuffer): Unit = {
val benchmark = new Benchmark(name, iters * count)
- schemes.filter(_.supports(tpe)).map { scheme =>
+ schemes.filter(_.supports(tpe)).foreach { scheme =>
val (compressFunc, compressionRatio, buf) = prepareEncodeInternal(count, tpe, scheme, input)
val label = s"${getFormattedClassName(scheme)}(${compressionRatio.formatted("%.3f")})"
@@ -103,7 +103,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
input: ByteBuffer): Unit = {
val benchmark = new Benchmark(name, iters * count)
- schemes.filter(_.supports(tpe)).map { scheme =>
+ schemes.filter(_.supports(tpe)).foreach { scheme =>
val (compressFunc, _, buf) = prepareEncodeInternal(count, tpe, scheme, input)
val compressedBuf = compressFunc(input, buf)
val label = s"${getFormattedClassName(scheme)}"
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
index 988a577a7b..a530e27074 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
@@ -47,7 +47,7 @@ class IntegralDeltaSuite extends SparkFunSuite {
}
}
- input.map { value =>
+ input.foreach { value =>
val row = new GenericMutableRow(1)
columnType.setField(row, 0, value)
builder.appendFrom(row, 0)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index ddcc24a7f5..2f551b1a01 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -343,7 +343,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi
test("SPARK-15654 do not split non-splittable files") {
// Check if a non-splittable file is not assigned into partitions
- Seq("gz", "snappy", "lz4").map { suffix =>
+ Seq("gz", "snappy", "lz4").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)
@@ -359,7 +359,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi
}
// Check if a splittable compressed file is assigned into multiple partitions
- Seq("bz2").map { suffix =>
+ Seq("bz2").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)